aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorjeanPerier <jperier@nvidia.com>2024-02-15 00:12:46 -0800
committerFangrui Song <i@maskray.me>2024-02-15 00:12:46 -0800
commit819ebcf2bbc3dfc80f949d4bfebcd1cb797e3a01 (patch)
tree5291de0236d0d8f1ebf7a071d79bb7dcdafaa768
parentdd123907114fbeeba2178f6b2d6f349f24c491f8 (diff)
parent5f6e0f35f936495563b5758a7ff9d4417a9f651b (diff)
downloadllvm-819ebcf2bbc3dfc80f949d4bfebcd1cb797e3a01.zip
llvm-819ebcf2bbc3dfc80f949d4bfebcd1cb797e3a01.tar.gz
llvm-819ebcf2bbc3dfc80f949d4bfebcd1cb797e3a01.tar.bz2
[𝘀𝗽𝗿] changes introduced through rebase
Created using spr 1.3.4 [skip ci]
-rwxr-xr-x.ci/generate-buildkite-pipeline-premerge5
-rw-r--r--.github/workflows/llvm-project-tests.yml10
-rw-r--r--bolt/lib/Core/DebugData.cpp45
-rw-r--r--bolt/lib/Rewrite/DWARFRewriter.cpp9
-rw-r--r--bolt/test/X86/dwarf4-subprogram-single-gc-ranges.test6
-rw-r--r--bolt/test/X86/dwarf5-empty-function-ranges.s538
-rw-r--r--bolt/test/X86/dwarf5-loclist-out-of-order.s485
-rw-r--r--bolt/test/X86/dwarf5-subprogram-single-gc-ranges.test6
-rw-r--r--clang-tools-extra/clang-tidy/bugprone/UnusedLocalNonTrivialVariableCheck.cpp1
-rw-r--r--clang-tools-extra/clang-tidy/modernize/UseOverrideCheck.cpp12
-rw-r--r--clang-tools-extra/clang-tidy/readability/RedundantInlineSpecifierCheck.cpp10
-rw-r--r--clang-tools-extra/clangd/unittests/DiagnosticsTests.cpp42
-rw-r--r--clang-tools-extra/docs/ReleaseNotes.rst12
-rw-r--r--clang-tools-extra/docs/clang-tidy/checks/bugprone/unused-local-non-trivial-variable.rst1
-rw-r--r--clang-tools-extra/test/clang-tidy/checkers/bugprone/unused-local-non-trivial-variable.cpp1
-rw-r--r--clang-tools-extra/test/clang-tidy/checkers/modernize/use-override.cpp5
-rw-r--r--clang-tools-extra/test/clang-tidy/checkers/readability/redundant-inline-specifier.cpp14
-rw-r--r--clang/docs/ClangFormatStyleOptions.rst221
-rw-r--r--clang/docs/LanguageExtensions.rst33
-rw-r--r--clang/docs/ReleaseNotes.rst25
-rw-r--r--clang/include/clang-c/Index.h6
-rw-r--r--clang/include/clang/AST/RecursiveASTVisitor.h22
-rw-r--r--clang/include/clang/AST/StmtOpenACC.h142
-rw-r--r--clang/include/clang/AST/StmtVisitor.h3
-rw-r--r--clang/include/clang/AST/TextNodeDumper.h1
-rw-r--r--clang/include/clang/Analysis/Analyses/UnsafeBufferUsageGadgets.def3
-rw-r--r--clang/include/clang/Analysis/FlowSensitive/DataflowAnalysisContext.h15
-rw-r--r--clang/include/clang/Analysis/FlowSensitive/DataflowEnvironment.h8
-rw-r--r--clang/include/clang/Basic/Attr.td7
-rw-r--r--clang/include/clang/Basic/AttrDocs.td23
-rw-r--r--clang/include/clang/Basic/Builtins.td6
-rw-r--r--clang/include/clang/Basic/DiagnosticDriverKinds.td3
-rw-r--r--clang/include/clang/Basic/DiagnosticGroups.td11
-rw-r--r--clang/include/clang/Basic/DiagnosticLexKinds.td5
-rw-r--r--clang/include/clang/Basic/DiagnosticSemaKinds.td8
-rw-r--r--clang/include/clang/Basic/FileManager.h4
-rw-r--r--clang/include/clang/Basic/IdentifierTable.h104
-rw-r--r--clang/include/clang/Basic/OpenACCKinds.h31
-rw-r--r--clang/include/clang/Basic/StmtNodes.td6
-rw-r--r--clang/include/clang/Basic/TokenKinds.def22
-rw-r--r--clang/include/clang/Basic/TokenKinds.h8
-rw-r--r--clang/include/clang/Driver/Action.h12
-rw-r--r--clang/include/clang/Driver/Options.td24
-rw-r--r--clang/include/clang/Driver/Types.def1
-rw-r--r--clang/include/clang/Format/Format.h11
-rw-r--r--clang/include/clang/Frontend/CompilerInstance.h7
-rw-r--r--clang/include/clang/Frontend/CompilerInvocation.h9
-rw-r--r--clang/include/clang/Frontend/FrontendActions.h10
-rw-r--r--clang/include/clang/Frontend/FrontendOptions.h3
-rw-r--r--clang/include/clang/Frontend/InstallAPIOptions.h28
-rw-r--r--clang/include/clang/InstallAPI/Context.h65
-rw-r--r--clang/include/clang/Sema/DeclSpec.h13
-rw-r--r--clang/include/clang/Sema/Sema.h3
-rw-r--r--clang/include/clang/Serialization/ASTBitCodes.h3
-rw-r--r--clang/lib/AST/ASTStructuralEquivalence.cpp1
-rw-r--r--clang/lib/AST/CMakeLists.txt1
-rw-r--r--clang/lib/AST/Interp/ByteCodeExprGen.cpp122
-rw-r--r--clang/lib/AST/Interp/ByteCodeExprGen.h3
-rw-r--r--clang/lib/AST/Interp/ByteCodeStmtGen.cpp2
-rw-r--r--clang/lib/AST/Interp/Context.cpp3
-rw-r--r--clang/lib/AST/Interp/EvalEmitter.cpp2
-rw-r--r--clang/lib/AST/Interp/Function.h10
-rw-r--r--clang/lib/AST/Interp/Interp.cpp25
-rw-r--r--clang/lib/AST/Interp/Interp.h79
-rw-r--r--clang/lib/AST/Interp/InterpFrame.cpp17
-rw-r--r--clang/lib/AST/Interp/InterpFrame.h5
-rw-r--r--clang/lib/AST/Interp/Opcodes.td11
-rw-r--r--clang/lib/AST/Interp/Pointer.cpp6
-rw-r--r--clang/lib/AST/Interp/Program.cpp2
-rw-r--r--clang/lib/AST/Interp/Program.h2
-rw-r--r--clang/lib/AST/Interp/Source.cpp2
-rw-r--r--clang/lib/AST/Stmt.cpp1
-rw-r--r--clang/lib/AST/StmtOpenACC.cpp33
-rw-r--r--clang/lib/AST/StmtPrinter.cpp9
-rw-r--r--clang/lib/AST/StmtProfile.cpp7
-rw-r--r--clang/lib/AST/TextNodeDumper.cpp5
-rw-r--r--clang/lib/Analysis/FlowSensitive/DataflowEnvironment.cpp74
-rw-r--r--clang/lib/Analysis/FlowSensitive/Transfer.cpp14
-rw-r--r--clang/lib/Analysis/UnsafeBufferUsage.cpp191
-rw-r--r--clang/lib/Basic/FileManager.cpp2
-rw-r--r--clang/lib/Basic/IdentifierTable.cpp18
-rw-r--r--clang/lib/Basic/Targets/AArch64.cpp16
-rw-r--r--clang/lib/Basic/Targets/AMDGPU.cpp3
-rw-r--r--clang/lib/Basic/Targets/ARM.cpp14
-rw-r--r--clang/lib/Basic/Targets/ARM.h1
-rw-r--r--clang/lib/CMakeLists.txt1
-rw-r--r--clang/lib/CodeGen/BackendConsumer.h16
-rw-r--r--clang/lib/CodeGen/CGBuiltin.cpp8
-rw-r--r--clang/lib/CodeGen/CGCall.cpp50
-rw-r--r--clang/lib/CodeGen/CGExprScalar.cpp51
-rw-r--r--clang/lib/CodeGen/CGStmt.cpp3
-rw-r--r--clang/lib/CodeGen/CodeGenAction.cpp162
-rw-r--r--clang/lib/CodeGen/CodeGenFunction.h10
-rw-r--r--clang/lib/CodeGen/CodeGenModule.cpp14
-rw-r--r--clang/lib/CodeGen/CodeGenPGO.cpp53
-rw-r--r--clang/lib/CodeGen/CodeGenPGO.h8
-rw-r--r--clang/lib/CodeGen/CoverageMappingGen.cpp167
-rw-r--r--clang/lib/CodeGen/CoverageMappingGen.h14
-rw-r--r--clang/lib/CodeGen/LinkInModulesPass.cpp14
-rw-r--r--clang/lib/CodeGen/MCDCState.h36
-rw-r--r--clang/lib/Driver/Action.cpp7
-rw-r--r--clang/lib/Driver/Driver.cpp16
-rw-r--r--clang/lib/Driver/ToolChain.cpp1
-rw-r--r--clang/lib/Driver/ToolChains/Clang.cpp11
-rw-r--r--clang/lib/Driver/ToolChains/Flang.cpp15
-rw-r--r--clang/lib/Driver/ToolChains/Flang.h7
-rw-r--r--clang/lib/Format/ContinuationIndenter.cpp6
-rw-r--r--clang/lib/Format/Format.cpp77
-rw-r--r--clang/lib/Format/FormatTokenLexer.cpp12
-rw-r--r--clang/lib/Format/FormatTokenLexer.h6
-rw-r--r--clang/lib/Format/IntegerLiteralSeparatorFixer.cpp2
-rw-r--r--clang/lib/Format/TokenAnalyzer.cpp6
-rw-r--r--clang/lib/Format/TokenAnalyzer.h2
-rw-r--r--clang/lib/Format/TokenAnnotator.cpp9
-rw-r--r--clang/lib/Format/UnwrappedLineParser.cpp2
-rw-r--r--clang/lib/Frontend/CMakeLists.txt3
-rw-r--r--clang/lib/Frontend/CompilerInvocation.cpp41
-rw-r--r--clang/lib/Frontend/InitPreprocessor.cpp95
-rw-r--r--clang/lib/Frontend/InstallAPIConsumer.cpp43
-rw-r--r--clang/lib/FrontendTool/ExecuteCompilerInvocation.cpp2
-rw-r--r--clang/lib/Headers/hlsl/hlsl_basic_types.h7
-rw-r--r--clang/lib/Headers/hlsl/hlsl_intrinsics.h172
-rw-r--r--clang/lib/InstallAPI/CMakeLists.txt12
-rw-r--r--clang/lib/InstallAPI/Context.cpp27
-rw-r--r--clang/lib/Lex/LiteralSupport.cpp16
-rw-r--r--clang/lib/Parse/ParseTentative.cpp6
-rw-r--r--clang/lib/Sema/DeclSpec.cpp9
-rw-r--r--clang/lib/Sema/SemaDecl.cpp37
-rw-r--r--clang/lib/Sema/SemaDeclAttr.cpp5
-rw-r--r--clang/lib/Sema/SemaDeclCXX.cpp137
-rw-r--r--clang/lib/Sema/SemaExceptionSpec.cpp1
-rw-r--r--clang/lib/Sema/SemaTemplateDeduction.cpp7
-rw-r--r--clang/lib/Sema/SemaTemplateInstantiateDecl.cpp7
-rw-r--r--clang/lib/Sema/TreeTransform.h23
-rw-r--r--clang/lib/Serialization/ASTReader.cpp3
-rw-r--r--clang/lib/Serialization/ASTReaderStmt.cpp23
-rw-r--r--clang/lib/Serialization/ASTWriter.cpp4
-rw-r--r--clang/lib/Serialization/ASTWriterStmt.cpp21
-rw-r--r--clang/lib/StaticAnalyzer/Checkers/BoolAssignmentChecker.cpp55
-rw-r--r--clang/lib/StaticAnalyzer/Checkers/ObjCUnusedIVarsChecker.cpp4
-rw-r--r--clang/lib/StaticAnalyzer/Checkers/WebKit/ASTUtils.cpp10
-rw-r--r--clang/lib/StaticAnalyzer/Checkers/WebKit/PtrTypesSemantics.cpp238
-rw-r--r--clang/lib/StaticAnalyzer/Checkers/WebKit/PtrTypesSemantics.h24
-rw-r--r--clang/lib/StaticAnalyzer/Checkers/WebKit/UncountedCallArgsChecker.cpp83
-rw-r--r--clang/lib/StaticAnalyzer/Core/BugSuppression.cpp18
-rw-r--r--clang/lib/StaticAnalyzer/Core/ExprEngine.cpp1
-rw-r--r--clang/test/AST/Interp/c.c1
-rw-r--r--clang/test/AST/Interp/functions.cpp269
-rw-r--r--clang/test/AST/Interp/literals.cpp19
-rw-r--r--clang/test/Analysis/Checkers/WebKit/call-args-protected-return-value.cpp23
-rw-r--r--clang/test/Analysis/Checkers/WebKit/call-args-safe-functions.cpp (renamed from clang/test/Analysis/Checkers/WebKit/call-args-dynamic-downcast.cpp)21
-rw-r--r--clang/test/Analysis/Checkers/WebKit/call-args.cpp33
-rw-r--r--clang/test/Analysis/Checkers/WebKit/ref-cntbl-base-virtual-dtor.cpp10
-rw-r--r--clang/test/Analysis/Checkers/WebKit/uncounted-lambda-captures.cpp5
-rw-r--r--clang/test/Analysis/Checkers/WebKit/uncounted-local-vars.cpp1
-rw-r--r--clang/test/Analysis/Checkers/WebKit/uncounted-members.cpp9
-rw-r--r--clang/test/Analysis/Checkers/WebKit/uncounted-obj-arg.cpp251
-rw-r--r--clang/test/Analysis/ObjCRetSigs.m10
-rw-r--r--clang/test/Analysis/objc_invalidation.m17
-rw-r--r--clang/test/Analysis/scan-build/html_output.test8
-rw-r--r--clang/test/Analysis/suppression-attr-doc.cpp14
-rw-r--r--clang/test/Analysis/suppression-attr.cpp68
-rw-r--r--clang/test/Analysis/suppression-attr.m60
-rw-r--r--clang/test/Analysis/unused-ivars.m11
-rw-r--r--clang/test/C/C2x/n2549.c14
-rw-r--r--clang/test/CMakeLists.txt1
-rw-r--r--clang/test/CXX/dcl.dcl/dcl.spec/dcl.type/dcl.type.elab/p3.cpp8
-rw-r--r--clang/test/CXX/dcl.dcl/dcl.spec/dcl.type/dcl.type.elab/p4.cpp40
-rw-r--r--clang/test/CXX/drs/dr16xx.cpp8
-rw-r--r--clang/test/CXX/drs/dr201.cpp42
-rw-r--r--clang/test/CXX/drs/dr210.cpp41
-rw-r--r--clang/test/CXX/drs/dr23xx.cpp38
-rw-r--r--clang/test/CXX/drs/dr292.cpp30
-rw-r--r--clang/test/CXX/drs/dr2xx.cpp6
-rw-r--r--clang/test/CXX/over/over.match/over.match.best/p1-2a.cpp10
-rw-r--r--clang/test/CXX/temp/temp.decls/temp.class/temp.mem.enum/p1.cpp8
-rw-r--r--clang/test/CodeGen/RISCV/ntlh-intrinsics/riscv32-zihintntl.c350
-rw-r--r--clang/test/CodeGen/RISCV/riscv-metadata-arch.c20
-rw-r--r--clang/test/CodeGen/attr-riscv-rvv-vector-bits-bitcast.c36
-rw-r--r--clang/test/CodeGen/attr-riscv-rvv-vector-bits-call.c26
-rw-r--r--clang/test/CodeGen/attr-riscv-rvv-vector-bits-cast.c14
-rw-r--r--clang/test/CodeGen/attr-riscv-rvv-vector-bits-codegen.c21
-rw-r--r--clang/test/CodeGen/attr-riscv-rvv-vector-bits-globals.c26
-rw-r--r--clang/test/CodeGen/builtins.c6
-rw-r--r--clang/test/Driver/amdgpu-macros.cl4
-rw-r--r--clang/test/Driver/amdgpu-mcpu.cl8
-rw-r--r--clang/test/Driver/autocomplete.c1
-rw-r--r--clang/test/Driver/installapi.h13
-rw-r--r--clang/test/FixIt/fixit-c++11.cpp6
-rw-r--r--clang/test/Headers/__clang_hip_math.hip56
-rw-r--r--clang/test/InstallAPI/installapi-basic.test34
-rw-r--r--clang/test/Lexer/gnu-flags.c14
-rw-r--r--clang/test/Misc/target-invalid-cpu-note.c2
-rw-r--r--clang/test/Parser/cxx-decl.cpp3
-rw-r--r--clang/test/Parser/cxx0x-decl.cpp2
-rw-r--r--clang/test/Preprocessor/arm-target-features.c34
-rw-r--r--clang/test/Preprocessor/fixed-point.c67
-rw-r--r--clang/test/Preprocessor/no-fixed-point.c7
-rw-r--r--clang/test/SemaCXX/PR40395.cpp1
-rw-r--r--clang/test/SemaCXX/attr-suppress.cpp10
-rw-r--r--clang/test/SemaCXX/concept-crash-on-diagnostic.cpp1
-rw-r--r--clang/test/SemaCXX/cxx98-compat.cpp3
-rw-r--r--clang/test/SemaCXX/enum-scoped.cpp10
-rw-r--r--clang/test/SemaCXX/ms-uuid.cpp2
-rw-r--r--clang/test/SemaCXX/warn-unsafe-buffer-usage-array.cpp18
-rw-r--r--clang/test/SemaCXX/warn-unsafe-buffer-usage-debug.cpp2
-rw-r--r--clang/test/SemaCXX/warn-unsafe-buffer-usage-fixits-array-assign-to-ptr.cpp45
-rw-r--r--clang/test/SemaCXX/warn-unsafe-buffer-usage-fixits-pointer-access.cpp50
-rw-r--r--clang/test/SemaCXX/warn-unsafe-buffer-usage-fixits-pointer-arg-to-func-ptr-call.cpp49
-rw-r--r--clang/test/SemaCXX/warn-unsafe-buffer-usage.cpp56
-rw-r--r--clang/test/SemaObjC/attr-suppress.m19
-rw-r--r--clang/test/lit.cfg.py1
-rw-r--r--clang/tools/clang-offload-packager/ClangOffloadPackager.cpp53
-rw-r--r--clang/tools/libclang/CIndex.cpp2
-rw-r--r--clang/tools/libclang/CXCursor.cpp3
-rwxr-xr-xclang/tools/scan-build/bin/scan-build11
-rw-r--r--clang/tools/scan-build/share/scan-build/sorttable.js20
-rw-r--r--clang/unittests/Analysis/FlowSensitive/TransferTest.cpp36
-rw-r--r--clang/unittests/Format/ConfigParseTest.cpp36
-rw-r--r--clang/unittests/Format/DefinitionBlockSeparatorTest.cpp2
-rw-r--r--clang/unittests/Format/FormatTest.cpp22
-rw-r--r--clang/unittests/Format/FormatTestCSharp.cpp2
-rwxr-xr-xclang/www/cxx_dr_status.html6
-rw-r--r--compiler-rt/lib/asan_abi/asan_abi_shim.cpp2
-rw-r--r--compiler-rt/lib/ubsan/ubsan_signals_standalone.cpp5
-rw-r--r--compiler-rt/test/CMakeLists.txt6
-rw-r--r--compiler-rt/test/ubsan/TestCases/Misc/Linux/static-link.cpp3
-rw-r--r--flang/docs/FortranLLVMTestSuite.md2
-rw-r--r--flang/docs/index.md1
-rw-r--r--flang/include/flang/Optimizer/Dialect/FIRAttr.td24
-rw-r--r--flang/include/flang/Optimizer/Dialect/FIRDialect.h3
-rw-r--r--flang/include/flang/Optimizer/Dialect/FIROpsSupport.h10
-rw-r--r--flang/include/flang/Optimizer/Support/InitFIR.h1
-rw-r--r--flang/include/flang/Optimizer/Support/Utils.h139
-rw-r--r--flang/include/flang/Optimizer/Transforms/Utils.h38
-rw-r--r--flang/lib/Frontend/CompilerInvocation.cpp15
-rw-r--r--flang/lib/Lower/CallInterface.cpp59
-rw-r--r--flang/lib/Lower/OpenMP.cpp73
-rw-r--r--flang/lib/Optimizer/Dialect/CMakeLists.txt1
-rw-r--r--flang/lib/Optimizer/Dialect/FIRAttr.cpp3
-rw-r--r--flang/lib/Optimizer/Dialect/FIRDialect.cpp17
-rw-r--r--flang/lib/Optimizer/HLFIR/Transforms/BufferizeHLFIR.cpp4
-rw-r--r--flang/lib/Optimizer/HLFIR/Transforms/CMakeLists.txt1
-rw-r--r--flang/lib/Optimizer/HLFIR/Transforms/OptimizedBufferization.cpp2
-rw-r--r--flang/lib/Optimizer/Transforms/SimplifyIntrinsics.cpp131
-rw-r--r--flang/lib/Semantics/canonicalize-omp.cpp2
-rw-r--r--flang/module/iso_c_binding.f904
-rw-r--r--flang/runtime/assign.cpp4
-rw-r--r--flang/runtime/derived.cpp59
-rw-r--r--flang/test/Driver/masm.f9010
-rw-r--r--flang/test/Driver/predefined-macros-powerpc.f9011
-rw-r--r--flang/test/Fir/convert-to-llvm-openmp-and-fir.fir20
-rw-r--r--flang/test/Lower/CUDA/cuda-proc-attribute.cuf9
-rw-r--r--flang/test/Lower/OpenMP/FIR/wsloop-reduction-add.f90432
-rw-r--r--flang/test/Lower/OpenMP/FIR/wsloop-reduction-iand.f908
-rw-r--r--flang/test/Lower/OpenMP/FIR/wsloop-reduction-ieor.f908
-rw-r--r--flang/test/Lower/OpenMP/FIR/wsloop-reduction-ior.f906
-rw-r--r--flang/test/Lower/OpenMP/FIR/wsloop-reduction-logical-and.f90137
-rw-r--r--flang/test/Lower/OpenMP/FIR/wsloop-reduction-logical-eqv.f90231
-rw-r--r--flang/test/Lower/OpenMP/FIR/wsloop-reduction-logical-neqv.f90233
-rw-r--r--flang/test/Lower/OpenMP/FIR/wsloop-reduction-logical-or.f90137
-rw-r--r--flang/test/Lower/OpenMP/FIR/wsloop-reduction-max.f9013
-rw-r--r--flang/test/Lower/OpenMP/FIR/wsloop-reduction-min.f9012
-rw-r--r--flang/test/Lower/OpenMP/FIR/wsloop-reduction-mul.f90274
-rw-r--r--flang/test/Lower/OpenMP/default-clause.f902
-rw-r--r--flang/test/Lower/OpenMP/wsloop-reduction-add-hlfir.f9065
-rw-r--r--flang/test/Lower/OpenMP/wsloop-reduction-add.f90499
-rw-r--r--flang/test/Lower/OpenMP/wsloop-reduction-iand.f9064
-rw-r--r--flang/test/Lower/OpenMP/wsloop-reduction-ieor.f909
-rw-r--r--flang/test/Lower/OpenMP/wsloop-reduction-ior.f9065
-rw-r--r--flang/test/Lower/OpenMP/wsloop-reduction-logical-and.f90255
-rw-r--r--flang/test/Lower/OpenMP/wsloop-reduction-logical-eqv.f90249
-rw-r--r--flang/test/Lower/OpenMP/wsloop-reduction-logical-neqv.f90254
-rw-r--r--flang/test/Lower/OpenMP/wsloop-reduction-logical-or.f90251
-rw-r--r--flang/test/Lower/OpenMP/wsloop-reduction-max-2.f903
-rw-r--r--flang/test/Lower/OpenMP/wsloop-reduction-max-hlfir.f9060
-rw-r--r--flang/test/Lower/OpenMP/wsloop-reduction-max.f90153
-rw-r--r--flang/test/Lower/OpenMP/wsloop-reduction-min.f90155
-rw-r--r--flang/test/Lower/OpenMP/wsloop-reduction-mul.f90468
-rw-r--r--libc/cmake/modules/CheckCompilerFeatures.cmake12
-rw-r--r--libc/cmake/modules/LLVMLibCObjectRules.cmake4
-rw-r--r--libc/cmake/modules/compiler_features/check_fixed_point.cpp5
-rw-r--r--libc/config/linux/aarch64/entrypoints.txt1
-rw-r--r--libc/config/linux/api.td1
-rw-r--r--libc/config/linux/riscv/entrypoints.txt1
-rw-r--r--libc/config/linux/x86_64/entrypoints.txt16
-rw-r--r--libc/config/linux/x86_64/headers.txt1
-rw-r--r--libc/docs/math/index.rst8
-rw-r--r--libc/docs/math/stdfix.rst136
-rw-r--r--libc/docs/stdbit.rst72
-rw-r--r--libc/include/CMakeLists.txt8
-rw-r--r--libc/include/llvm-libc-macros/CMakeLists.txt6
-rw-r--r--libc/include/llvm-libc-macros/stdbit-macros.h66
-rw-r--r--libc/include/llvm-libc-macros/stdfix-macros.h330
-rw-r--r--libc/include/stdfix.h.def21
-rw-r--r--libc/spec/stdc.td18
-rw-r--r--libc/spec/stdc_ext.td16
-rw-r--r--libc/src/__support/CMakeLists.txt11
-rw-r--r--libc/src/__support/CPP/CMakeLists.txt2
-rw-r--r--libc/src/__support/CPP/bit.h50
-rw-r--r--libc/src/__support/CPP/type_traits.h1
-rw-r--r--libc/src/__support/CPP/type_traits/is_fixed_point.h46
-rw-r--r--libc/src/__support/FPUtil/BasicOperations.h4
-rw-r--r--libc/src/__support/FPUtil/CMakeLists.txt37
-rw-r--r--libc/src/__support/FPUtil/FPBits.h9
-rw-r--r--libc/src/__support/FPUtil/ManipulationFunctions.h42
-rw-r--r--libc/src/__support/FPUtil/dyadic_float.h59
-rw-r--r--libc/src/__support/GPU/amdgpu/utils.h19
-rw-r--r--libc/src/__support/GPU/generic/utils.h4
-rw-r--r--libc/src/__support/GPU/nvptx/utils.h21
-rw-r--r--libc/src/__support/RPC/rpc.h152
-rw-r--r--libc/src/__support/RPC/rpc_util.h5
-rw-r--r--libc/src/__support/UInt.h761
-rw-r--r--libc/src/__support/fixed_point/CMakeLists.txt8
-rw-r--r--libc/src/__support/fixed_point/fx_rep.h175
-rw-r--r--libc/src/__support/float_to_string.h22
-rw-r--r--libc/src/__support/integer_literals.h173
-rw-r--r--libc/src/__support/integer_utils.h46
-rw-r--r--libc/src/math/CMakeLists.txt1
-rw-r--r--libc/src/math/generic/CMakeLists.txt23
-rw-r--r--libc/src/math/generic/acoshf.cpp6
-rw-r--r--libc/src/math/generic/asinhf.cpp4
-rw-r--r--libc/src/math/generic/ldexpf128.cpp19
-rw-r--r--libc/src/math/ldexpf128.h20
-rw-r--r--libc/src/stdbit/CMakeLists.txt3
-rw-r--r--libc/src/stdbit/stdc_first_leading_one_uc.cpp20
-rw-r--r--libc/src/stdbit/stdc_first_leading_one_uc.h18
-rw-r--r--libc/src/stdbit/stdc_first_leading_one_ui.cpp20
-rw-r--r--libc/src/stdbit/stdc_first_leading_one_ui.h18
-rw-r--r--libc/src/stdbit/stdc_first_leading_one_ul.cpp20
-rw-r--r--libc/src/stdbit/stdc_first_leading_one_ul.h18
-rw-r--r--libc/src/stdbit/stdc_first_leading_one_ull.cpp21
-rw-r--r--libc/src/stdbit/stdc_first_leading_one_ull.h18
-rw-r--r--libc/src/stdbit/stdc_first_leading_one_us.cpp21
-rw-r--r--libc/src/stdbit/stdc_first_leading_one_us.h18
-rw-r--r--libc/src/stdbit/stdc_first_trailing_one_uc.cpp21
-rw-r--r--libc/src/stdbit/stdc_first_trailing_one_uc.h18
-rw-r--r--libc/src/stdbit/stdc_first_trailing_one_ui.cpp20
-rw-r--r--libc/src/stdbit/stdc_first_trailing_one_ui.h18
-rw-r--r--libc/src/stdbit/stdc_first_trailing_one_ul.cpp21
-rw-r--r--libc/src/stdbit/stdc_first_trailing_one_ul.h18
-rw-r--r--libc/src/stdbit/stdc_first_trailing_one_ull.cpp21
-rw-r--r--libc/src/stdbit/stdc_first_trailing_one_ull.h18
-rw-r--r--libc/src/stdbit/stdc_first_trailing_one_us.cpp21
-rw-r--r--libc/src/stdbit/stdc_first_trailing_one_us.h18
-rw-r--r--libc/src/stdbit/stdc_first_trailing_zero_uc.cpp21
-rw-r--r--libc/src/stdbit/stdc_first_trailing_zero_uc.h18
-rw-r--r--libc/src/stdbit/stdc_first_trailing_zero_ui.cpp20
-rw-r--r--libc/src/stdbit/stdc_first_trailing_zero_ui.h18
-rw-r--r--libc/src/stdbit/stdc_first_trailing_zero_ul.cpp21
-rw-r--r--libc/src/stdbit/stdc_first_trailing_zero_ul.h18
-rw-r--r--libc/src/stdbit/stdc_first_trailing_zero_ull.cpp21
-rw-r--r--libc/src/stdbit/stdc_first_trailing_zero_ull.h18
-rw-r--r--libc/src/stdbit/stdc_first_trailing_zero_us.cpp21
-rw-r--r--libc/src/stdbit/stdc_first_trailing_zero_us.h18
-rw-r--r--libc/src/time/gpu/nanosleep.cpp20
-rw-r--r--libc/src/time/gpu/time_utils.h21
-rw-r--r--libc/test/UnitTest/CMakeLists.txt3
-rw-r--r--libc/test/include/stdbit_test.cpp45
-rw-r--r--libc/test/src/__support/CMakeLists.txt20
-rw-r--r--libc/test/src/__support/CPP/bit_test.cpp19
-rw-r--r--libc/test/src/__support/FPUtil/CMakeLists.txt1
-rw-r--r--libc/test/src/__support/FPUtil/dyadic_float_test.cpp34
-rw-r--r--libc/test/src/__support/FPUtil/fpbits_test.cpp390
-rw-r--r--libc/test/src/__support/RPC/rpc_smoke_test.cpp10
-rw-r--r--libc/test/src/__support/integer_literals_test.cpp134
-rw-r--r--libc/test/src/__support/uint_test.cpp48
-rw-r--r--libc/test/src/math/smoke/CMakeLists.txt18
-rw-r--r--libc/test/src/math/smoke/LdExpTest.h4
-rw-r--r--libc/test/src/math/smoke/ldexp_test.cpp2
-rw-r--r--libc/test/src/math/smoke/ldexpf128_test.cpp13
-rw-r--r--libc/test/src/math/smoke/ldexpf_test.cpp2
-rw-r--r--libc/test/src/math/smoke/ldexpl_test.cpp2
-rw-r--r--libc/test/src/stdbit/CMakeLists.txt3
-rw-r--r--libc/test/src/stdbit/stdc_first_leading_one_uc_test.cpp21
-rw-r--r--libc/test/src/stdbit/stdc_first_leading_one_ui_test.cpp21
-rw-r--r--libc/test/src/stdbit/stdc_first_leading_one_ul_test.cpp21
-rw-r--r--libc/test/src/stdbit/stdc_first_leading_one_ull_test.cpp21
-rw-r--r--libc/test/src/stdbit/stdc_first_leading_one_us_test.cpp21
-rw-r--r--libc/test/src/stdbit/stdc_first_trailing_one_uc_test.cpp20
-rw-r--r--libc/test/src/stdbit/stdc_first_trailing_one_ui_test.cpp20
-rw-r--r--libc/test/src/stdbit/stdc_first_trailing_one_ul_test.cpp20
-rw-r--r--libc/test/src/stdbit/stdc_first_trailing_one_ull_test.cpp20
-rw-r--r--libc/test/src/stdbit/stdc_first_trailing_one_us_test.cpp20
-rw-r--r--libc/test/src/stdbit/stdc_first_trailing_zero_uc_test.cpp20
-rw-r--r--libc/test/src/stdbit/stdc_first_trailing_zero_ui_test.cpp20
-rw-r--r--libc/test/src/stdbit/stdc_first_trailing_zero_ul_test.cpp20
-rw-r--r--libc/test/src/stdbit/stdc_first_trailing_zero_ull_test.cpp21
-rw-r--r--libc/test/src/stdbit/stdc_first_trailing_zero_us_test.cpp20
-rw-r--r--libc/utils/gpu/server/rpc_server.cpp56
-rw-r--r--libcxx/docs/Modules.rst4
-rw-r--r--libcxx/docs/Status/RangesAlgorithms.csv6
-rw-r--r--libcxx/include/CMakeLists.txt1
-rw-r--r--libcxx/include/__algorithm/ranges_contains_subrange.h99
-rw-r--r--libcxx/include/algorithm14
-rw-r--r--libcxx/include/libcxx.imp1
-rw-r--r--libcxx/include/module.modulemap.in1
-rw-r--r--libcxx/include/sstream16
-rw-r--r--libcxx/modules/CMakeLists.txt20
-rw-r--r--libcxx/modules/CMakeLists.txt.in88
-rw-r--r--libcxx/modules/std/algorithm.inc2
-rw-r--r--libcxx/test/libcxx/algorithms/ranges_robust_against_copying_projections.pass.cpp4
-rw-r--r--libcxx/test/libcxx/diagnostics/ranges.nodiscard_extensions.compile.pass.cpp2
-rw-r--r--libcxx/test/libcxx/diagnostics/ranges.nodiscard_extensions.verify.cpp4
-rw-r--r--libcxx/test/std/algorithms/alg.nonmodifying/alg.contains/ranges.contains_subrange.pass.cpp320
-rw-r--r--libcxx/test/std/input.output/string.streams/istringstream/types.compile.pass.cpp75
-rw-r--r--libcxx/test/std/input.output/string.streams/istringstream/types.pass.cpp39
-rw-r--r--libcxx/test/std/input.output/string.streams/ostringstream/types.compile.pass.cpp75
-rw-r--r--libcxx/test/std/input.output/string.streams/ostringstream/types.pass.cpp39
-rw-r--r--libcxx/test/std/input.output/string.streams/stringbuf/types.compile.pass.cpp70
-rw-r--r--libcxx/test/std/input.output/string.streams/stringbuf/types.pass.cpp39
-rw-r--r--libcxx/test/std/input.output/string.streams/stringstream/types.compile.pass.cpp72
-rw-r--r--libcxx/test/std/input.output/string.streams/stringstream/types.pass.cpp39
-rw-r--r--libcxx/test/std/language.support/support.dynamic/new.delete/new.delete.array/new.size_align.except.pass.cpp4
-rw-r--r--libcxx/test/std/language.support/support.dynamic/new.delete/new.delete.array/new.size_align_nothrow.except.pass.cpp4
-rw-r--r--libcxx/test/std/language.support/support.dynamic/new.delete/new.delete.single/new.size_align.except.pass.cpp4
-rw-r--r--libcxx/test/std/language.support/support.dynamic/new.delete/new.delete.single/new.size_align_nothrow.except.pass.cpp4
-rw-r--r--libcxx/test/std/library/description/conventions/customization.point.object/niebloid.compile.pass.cpp1
-rw-r--r--lld/COFF/Driver.cpp12
-rw-r--r--lld/ELF/Arch/LoongArch.cpp6
-rw-r--r--lld/ELF/Arch/SystemZ.cpp607
-rw-r--r--lld/ELF/CMakeLists.txt1
-rw-r--r--lld/ELF/Driver.cpp3
-rw-r--r--lld/ELF/InputFiles.cpp8
-rw-r--r--lld/ELF/InputSection.cpp7
-rw-r--r--lld/ELF/Relocations.cpp27
-rw-r--r--lld/ELF/Relocations.h3
-rw-r--r--lld/ELF/ScriptParser.cpp1
-rw-r--r--lld/ELF/SyntheticSections.cpp3
-rw-r--r--lld/ELF/Target.cpp2
-rw-r--r--lld/ELF/Target.h1
-rw-r--r--lld/ELF/Thunks.cpp2
-rw-r--r--lld/MinGW/Driver.cpp4
-rw-r--r--lld/MinGW/Options.td5
-rw-r--r--lld/test/ELF/Inputs/systemz-init.s5
-rw-r--r--lld/test/ELF/arm-thumb-thunk-v6m-xo.s2
-rw-r--r--lld/test/ELF/basic-systemz.s63
-rw-r--r--lld/test/ELF/emulation-systemz.s29
-rw-r--r--lld/test/ELF/lto/systemz.ll18
-rw-r--r--lld/test/ELF/systemz-got.s16
-rw-r--r--lld/test/ELF/systemz-gotent-relax-align.s48
-rw-r--r--lld/test/ELF/systemz-gotent-relax-und-dso.s68
-rw-r--r--lld/test/ELF/systemz-gotent-relax.s91
-rw-r--r--lld/test/ELF/systemz-ifunc-nonpreemptible.s75
-rw-r--r--lld/test/ELF/systemz-init-padding.s27
-rw-r--r--lld/test/ELF/systemz-pie.s38
-rw-r--r--lld/test/ELF/systemz-plt.s83
-rw-r--r--lld/test/ELF/systemz-reloc-abs.s32
-rw-r--r--lld/test/ELF/systemz-reloc-disp12.s21
-rw-r--r--lld/test/ELF/systemz-reloc-disp20.s21
-rw-r--r--lld/test/ELF/systemz-reloc-got.s92
-rw-r--r--lld/test/ELF/systemz-reloc-gotrel.s36
-rw-r--r--lld/test/ELF/systemz-reloc-pc16.s39
-rw-r--r--lld/test/ELF/systemz-reloc-pc32.s39
-rw-r--r--lld/test/ELF/systemz-reloc-pcdbl.s68
-rw-r--r--lld/test/ELF/systemz-tls-gd.s142
-rw-r--r--lld/test/ELF/systemz-tls-ie.s121
-rw-r--r--lld/test/ELF/systemz-tls-ld.s114
-rw-r--r--lld/test/ELF/systemz-tls-le.s61
-rw-r--r--lld/test/MinGW/driver.test7
-rw-r--r--lld/test/lit.cfg.py1
-rw-r--r--lldb/bindings/python/CMakeLists.txt4
-rw-r--r--lldb/bindings/python/python-wrapper.swig31
-rw-r--r--lldb/examples/python/cmdtemplate.py129
-rw-r--r--lldb/examples/python/templates/parsed_cmd.py357
-rw-r--r--lldb/include/lldb/Interpreter/CommandObject.h5
-rw-r--r--lldb/include/lldb/Interpreter/ScriptInterpreter.h29
-rw-r--r--lldb/include/lldb/Target/StopInfo.h5
-rw-r--r--lldb/include/lldb/Target/Thread.h21
-rw-r--r--lldb/packages/Python/lldbsuite/test/concurrent_base.py34
-rw-r--r--lldb/packages/Python/lldbsuite/test/configuration.py4
-rw-r--r--lldb/packages/Python/lldbsuite/test/decorators.py69
-rw-r--r--lldb/packages/Python/lldbsuite/test/dotest.py14
-rw-r--r--lldb/packages/Python/lldbsuite/test/lldbtest.py28
-rw-r--r--lldb/packages/Python/lldbsuite/test/test_result.py18
-rw-r--r--lldb/source/Commands/CommandObjectCommands.cpp729
-rw-r--r--lldb/source/Commands/CommandObjectProcess.cpp126
-rw-r--r--lldb/source/Commands/Options.td22
-rw-r--r--lldb/source/Core/ValueObject.cpp2
-rw-r--r--lldb/source/Expression/DWARFExpression.cpp7
-rw-r--r--lldb/source/Interpreter/CommandObject.cpp17
-rw-r--r--lldb/source/Plugins/Platform/MacOSX/PlatformRemoteAppleXR.h5
-rw-r--r--lldb/source/Plugins/Process/Utility/StopInfoMachException.cpp83
-rw-r--r--lldb/source/Plugins/Process/Utility/StopInfoMachException.h11
-rw-r--r--lldb/source/Plugins/ScriptInterpreter/Python/PythonDataObjects.h2
-rw-r--r--lldb/source/Plugins/ScriptInterpreter/Python/SWIGPythonBridge.h7
-rw-r--r--lldb/source/Plugins/ScriptInterpreter/Python/ScriptInterpreterPython.cpp253
-rw-r--r--lldb/source/Plugins/ScriptInterpreter/Python/ScriptInterpreterPythonImpl.h21
-rw-r--r--lldb/source/Plugins/SymbolFile/DWARF/DWARFDeclContext.h4
-rw-r--r--lldb/source/Plugins/SymbolFile/DWARF/DebugNamesDWARFIndex.cpp103
-rw-r--r--lldb/source/Plugins/SymbolFile/DWARF/DebugNamesDWARFIndex.h9
-rw-r--r--lldb/source/Plugins/SymbolFile/DWARF/SymbolFileDWARF.h3
-rw-r--r--lldb/source/Target/Thread.cpp19
-rw-r--r--lldb/test/API/commands/command/script/add/TestAddParsedCommand.py146
-rw-r--r--lldb/test/API/commands/command/script/add/test_commands.py174
-rw-r--r--lldb/test/API/commands/expression/test/TestExprs.py4
-rw-r--r--lldb/test/API/commands/platform/sdk/TestPlatformSDK.py4
-rw-r--r--lldb/test/API/functionalities/breakpoint/hardware_breakpoints/require_hw_breakpoints/TestRequireHWBreakpoints.py8
-rw-r--r--lldb/test/API/functionalities/breakpoint/thread_plan_user_breakpoint/TestThreadPlanUserBreakpoint.py2
-rw-r--r--lldb/test/API/functionalities/jitloader_gdb/TestJITLoaderGDB.py4
-rw-r--r--lldb/test/API/functionalities/launch_stop_at_entry/TestStopAtEntry.py8
-rw-r--r--lldb/test/API/functionalities/thread/state/TestThreadStates.py6
-rw-r--r--lldb/test/API/functionalities/tty/TestTerminal.py6
-rw-r--r--lldb/test/API/lang/c/shared_lib/TestSharedLib.py4
-rw-r--r--lldb/test/API/lang/c/shared_lib_stripped_symbols/TestSharedLibStrippedSymbols.py4
-rw-r--r--lldb/test/API/lang/cpp/namespace/TestNamespaceLookup.py10
-rw-r--r--lldb/test/API/lang/cpp/reference-to-outer-type/TestCppReferenceToOuterClass.py4
-rw-r--r--lldb/test/API/lang/cpp/thread_local/TestThreadLocal.py5
-rw-r--r--lldb/test/API/lang/objc/hidden-ivars/TestHiddenIvars.py4
-rw-r--r--lldb/test/API/lldbtest.py45
-rw-r--r--lldb/test/API/macosx/universal/TestUniversal.py8
-rw-r--r--lldb/test/API/tools/lldb-server/libraries-svr4/TestGdbRemoteLibrariesSvr4Support.py2
-rw-r--r--lldb/test/API/tools/lldb-server/test/test_lldbgdbserverutils.py4
-rw-r--r--lldb/tools/lldb-dap/Breakpoint.cpp76
-rw-r--r--lldb/tools/lldb-dap/Breakpoint.h33
-rw-r--r--lldb/tools/lldb-dap/BreakpointBase.cpp299
-rw-r--r--lldb/tools/lldb-dap/BreakpointBase.h33
-rw-r--r--lldb/tools/lldb-dap/CMakeLists.txt1
-rw-r--r--lldb/tools/lldb-dap/FunctionBreakpoint.cpp12
-rw-r--r--lldb/tools/lldb-dap/FunctionBreakpoint.h4
-rw-r--r--lldb/tools/lldb-dap/JSONUtils.cpp46
-rw-r--r--lldb/tools/lldb-dap/JSONUtils.h5
-rw-r--r--lldb/tools/lldb-dap/SourceBreakpoint.cpp304
-rw-r--r--lldb/tools/lldb-dap/SourceBreakpoint.h30
-rw-r--r--lldb/tools/lldb-dap/lldb-dap.cpp17
-rw-r--r--lldb/unittests/ScriptInterpreter/Python/PythonTestSuite.cpp8
-rw-r--r--lldb/unittests/SymbolFile/DWARF/CMakeLists.txt1
-rw-r--r--lldb/unittests/SymbolFile/DWARF/DWARFDebugNamesIndexTest.cpp208
-rw-r--r--llvm/CMakeLists.txt6
-rw-r--r--llvm/docs/AMDGPUUsage.rst45
-rw-r--r--llvm/docs/CodingStandards.rst5
-rw-r--r--llvm/include/llvm/ADT/APFixedPoint.h1
-rw-r--r--llvm/include/llvm/ADT/BitVector.h2
-rw-r--r--llvm/include/llvm/Analysis/TargetLibraryInfo.h3
-rw-r--r--llvm/include/llvm/Analysis/ValueTracking.h6
-rw-r--r--llvm/include/llvm/Analysis/VecFuncs.def194
-rw-r--r--llvm/include/llvm/CodeGen/AccelTable.h49
-rw-r--r--llvm/include/llvm/CodeGen/GlobalISel/LegalizerInfo.h2
-rw-r--r--llvm/include/llvm/CodeGen/ISDOpcodes.h6
-rw-r--r--llvm/include/llvm/CodeGen/SelectionDAG.h2
-rw-r--r--llvm/include/llvm/CodeGenTypes/LowLevelType.h14
-rw-r--r--llvm/include/llvm/Frontend/Driver/CodeGenOptions.h3
-rw-r--r--llvm/include/llvm/IR/Intrinsics.td2
-rw-r--r--llvm/include/llvm/Object/COFFImportFile.h18
-rw-r--r--llvm/include/llvm/Passes/CodeGenPassBuilder.h67
-rw-r--r--llvm/include/llvm/ProfileData/Coverage/CoverageMapping.h97
-rw-r--r--llvm/include/llvm/ProfileData/Coverage/MCDCTypes.h54
-rw-r--r--llvm/include/llvm/ProfileData/InstrProf.h11
-rw-r--r--llvm/include/llvm/Support/ExponentialBackoff.h65
-rw-r--r--llvm/include/llvm/Support/TargetOpcodes.def3
-rw-r--r--llvm/include/llvm/Target/GenericOpcodes.td6
-rw-r--r--llvm/include/llvm/Target/GlobalISel/SelectionDAGCompat.td1
-rw-r--r--llvm/include/llvm/Target/TargetSelectionDAG.td3
-rw-r--r--llvm/include/llvm/TargetParser/ARMTargetParser.h1
-rw-r--r--llvm/include/llvm/Transforms/InstCombine/InstCombiner.h39
-rw-r--r--llvm/lib/Analysis/DomConditionCache.cpp12
-rw-r--r--llvm/lib/Analysis/IVDescriptors.cpp2
-rw-r--r--llvm/lib/Analysis/InstructionSimplify.cpp41
-rw-r--r--llvm/lib/Analysis/TargetLibraryInfo.cpp14
-rw-r--r--llvm/lib/Analysis/ValueTracking.cpp142
-rw-r--r--llvm/lib/CodeGen/AsmPrinter/AccelTable.cpp130
-rw-r--r--llvm/lib/CodeGen/BasicBlockSections.cpp11
-rw-r--r--llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp2
-rw-r--r--llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp5
-rw-r--r--llvm/lib/CodeGen/GlobalISel/MachineIRBuilder.cpp2
-rw-r--r--llvm/lib/CodeGen/IntrinsicLowering.cpp6
-rw-r--r--llvm/lib/CodeGen/MachineVerifier.cpp14
-rw-r--r--llvm/lib/CodeGen/SelectOptimize.cpp6
-rw-r--r--llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp20
-rw-r--r--llvm/lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp3
-rw-r--r--llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp6
-rw-r--r--llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp66
-rw-r--r--llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h2
-rw-r--r--llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp6
-rw-r--r--llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp8
-rw-r--r--llvm/lib/CodeGen/SelectionDAG/SelectionDAGDumper.cpp1
-rw-r--r--llvm/lib/CodeGen/SelectionDAG/StatepointLowering.cpp2
-rw-r--r--llvm/lib/CodeGen/TargetLoweringBase.cpp5
-rw-r--r--llvm/lib/CodeGen/TargetPassConfig.cpp2
-rw-r--r--llvm/lib/CodeGen/TypePromotion.cpp17
-rw-r--r--llvm/lib/Frontend/Driver/CodeGenOptions.cpp4
-rw-r--r--llvm/lib/IR/AsmWriter.cpp182
-rw-r--r--llvm/lib/IR/BasicBlock.cpp2
-rw-r--r--llvm/lib/IR/DebugInfo.cpp4
-rw-r--r--llvm/lib/IR/IntrinsicInst.cpp2
-rw-r--r--llvm/lib/Object/COFFImportFile.cpp4
-rw-r--r--llvm/lib/Object/ELFObjectFile.cpp4
-rw-r--r--llvm/lib/ProfileData/Coverage/CoverageMapping.cpp113
-rw-r--r--llvm/lib/ProfileData/Coverage/CoverageMappingReader.cpp32
-rw-r--r--llvm/lib/ProfileData/Coverage/CoverageMappingWriter.cpp27
-rw-r--r--llvm/lib/ProfileData/InstrProf.cpp49
-rw-r--r--llvm/lib/Support/APFixedPoint.cpp6
-rw-r--r--llvm/lib/Support/CMakeLists.txt1
-rw-r--r--llvm/lib/Support/DeltaAlgorithm.cpp6
-rw-r--r--llvm/lib/Support/ExponentialBackoff.cpp29
-rw-r--r--llvm/lib/Support/LockFileManager.cpp38
-rw-r--r--llvm/lib/Target/AArch64/AArch64.td5
-rw-r--r--llvm/lib/Target/AArch64/AArch64ISelLowering.cpp101
-rw-r--r--llvm/lib/Target/AArch64/AArch64InstrInfo.td16
-rw-r--r--llvm/lib/Target/AArch64/AArch64SchedA53.td2
-rw-r--r--llvm/lib/Target/AArch64/AArch64SchedA57.td2
-rw-r--r--llvm/lib/Target/AArch64/AArch64SchedA64FX.td3
-rw-r--r--llvm/lib/Target/AArch64/AArch64SchedAmpere1B.td1149
-rw-r--r--llvm/lib/Target/AArch64/AArch64SchedCyclone.td2
-rw-r--r--llvm/lib/Target/AArch64/AArch64SchedExynosM3.td2
-rw-r--r--llvm/lib/Target/AArch64/AArch64SchedExynosM4.td2
-rw-r--r--llvm/lib/Target/AArch64/AArch64SchedExynosM5.td2
-rw-r--r--llvm/lib/Target/AArch64/AArch64SchedFalkor.td2
-rw-r--r--llvm/lib/Target/AArch64/AArch64SchedKryo.td2
-rw-r--r--llvm/lib/Target/AArch64/AArch64SchedNeoverseN1.td2
-rw-r--r--llvm/lib/Target/AArch64/AArch64SchedNeoverseN2.td2
-rw-r--r--llvm/lib/Target/AArch64/AArch64SchedNeoverseV1.td3
-rw-r--r--llvm/lib/Target/AArch64/AArch64SchedNeoverseV2.td3
-rw-r--r--llvm/lib/Target/AArch64/AArch64SchedTSV110.td2
-rw-r--r--llvm/lib/Target/AArch64/AArch64SchedThunderX.td2
-rw-r--r--llvm/lib/Target/AArch64/AArch64SchedThunderX2T99.td2
-rw-r--r--llvm/lib/Target/AArch64/AArch64SchedThunderX3T110.td2
-rw-r--r--llvm/lib/Target/AArch64/GISel/AArch64InstructionSelector.cpp2
-rw-r--r--llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp16
-rw-r--r--llvm/lib/Target/AMDGPU/AMDGPUAsmPrinter.cpp3
-rw-r--r--llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp139
-rw-r--r--llvm/lib/Target/AMDGPU/AMDGPULowerKernelArguments.cpp1
-rw-r--r--llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp1
-rw-r--r--llvm/lib/Target/AMDGPU/DSDIRInstructions.td8
-rw-r--r--llvm/lib/Target/AMDGPU/Disassembler/AMDGPUDisassembler.cpp84
-rw-r--r--llvm/lib/Target/AMDGPU/EXPInstructions.td126
-rw-r--r--llvm/lib/Target/AMDGPU/GCNProcessors.td4
-rw-r--r--llvm/lib/Target/AMDGPU/GCNSubtarget.h6
-rw-r--r--llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUTargetStreamer.cpp16
-rw-r--r--llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUTargetStreamer.h9
-rw-r--r--llvm/lib/Target/AMDGPU/SIISelLowering.cpp7
-rw-r--r--llvm/lib/Target/AMDGPU/SIMemoryLegalizer.cpp5
-rw-r--r--llvm/lib/Target/AMDGPU/SIRegisterInfo.td165
-rw-r--r--llvm/lib/Target/AMDGPU/SMInstructions.td14
-rw-r--r--llvm/lib/Target/BPF/AsmParser/BPFAsmParser.cpp1
-rw-r--r--llvm/lib/Target/BPF/BPFInstrInfo.td4
-rw-r--r--llvm/lib/Target/DirectX/DXIL.td44
-rw-r--r--llvm/lib/Target/Hexagon/HexagonPatterns.td4
-rw-r--r--llvm/lib/Target/Mips/AsmParser/MipsAsmParser.cpp7
-rw-r--r--llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp2
-rw-r--r--llvm/lib/Target/NVPTX/NVPTXInstrInfo.td1
-rw-r--r--llvm/lib/Target/NVPTX/NVPTXIntrinsics.td1
-rw-r--r--llvm/lib/Target/RISCV/RISCVFoldMasks.cpp13
-rw-r--r--llvm/lib/Target/RISCV/RISCVFrameLowering.cpp163
-rw-r--r--llvm/lib/Target/RISCV/RISCVFrameLowering.h10
-rw-r--r--llvm/lib/Target/RISCV/RISCVISelLowering.cpp112
-rw-r--r--llvm/lib/Target/RISCV/RISCVRegisterInfo.cpp34
-rw-r--r--llvm/lib/Target/RISCV/RISCVRegisterInfo.h3
-rw-r--r--llvm/lib/Target/RISCV/RISCVTargetMachine.cpp7
-rw-r--r--llvm/lib/Target/SPIRV/Analysis/CMakeLists.txt2
-rw-r--r--llvm/lib/Target/Sparc/SparcISelLowering.cpp11
-rw-r--r--llvm/lib/Target/X86/X86ISelLowering.cpp6
-rw-r--r--llvm/lib/Target/X86/X86InstrSystem.td4
-rw-r--r--llvm/lib/Target/X86/X86SchedIceLake.td8
-rw-r--r--llvm/lib/TargetParser/ARMTargetParser.cpp58
-rw-r--r--llvm/lib/TargetParser/TargetParser.cpp4
-rw-r--r--llvm/lib/ToolDrivers/llvm-dlltool/DlltoolDriver.cpp91
-rw-r--r--llvm/lib/ToolDrivers/llvm-lib/LibDriver.cpp5
-rw-r--r--llvm/lib/Transforms/IPO/ThinLTOBitcodeWriter.cpp11
-rw-r--r--llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp2
-rw-r--r--llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp3
-rw-r--r--llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp2
-rw-r--r--llvm/lib/Transforms/InstCombine/InstCombineNegator.cpp6
-rw-r--r--llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp7
-rw-r--r--llvm/lib/Transforms/InstCombine/InstCombineShifts.cpp12
-rw-r--r--llvm/lib/Transforms/InstCombine/InstructionCombining.cpp9
-rw-r--r--llvm/lib/Transforms/Instrumentation/HWAddressSanitizer.cpp13
-rw-r--r--llvm/lib/Transforms/Scalar/CorrelatedValuePropagation.cpp34
-rw-r--r--llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp54
-rw-r--r--llvm/lib/Transforms/Scalar/LICM.cpp69
-rw-r--r--llvm/lib/Transforms/Scalar/SeparateConstOffsetFromGEP.cpp73
-rw-r--r--llvm/lib/Transforms/Utils/MemoryTaggingSupport.cpp21
-rw-r--r--llvm/lib/Transforms/Utils/ValueMapper.cpp1
-rw-r--r--llvm/lib/Transforms/Vectorize/LoopVectorize.cpp112
-rw-r--r--llvm/lib/Transforms/Vectorize/VPlan.h7
-rw-r--r--llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp83
-rw-r--r--llvm/lib/Transforms/Vectorize/VPlanTransforms.h15
-rw-r--r--llvm/test/Analysis/CostModel/AArch64/cast.ll10
-rw-r--r--llvm/test/Analysis/CostModel/AArch64/fptoi_sat.ll4
-rw-r--r--llvm/test/Analysis/CostModel/RISCV/rvv-vectorextract.ll169
-rw-r--r--llvm/test/Analysis/CostModel/RISCV/rvv-vectorinsert.ll335
-rw-r--r--llvm/test/Analysis/LoopAccessAnalysis/number-of-memchecks.ll2
-rw-r--r--llvm/test/Analysis/LoopAccessAnalysis/reverse-memcheck-bounds.ll2
-rw-r--r--llvm/test/CodeGen/AArch64/GlobalISel/legalize-xtn.mir24
-rw-r--r--llvm/test/CodeGen/AArch64/GlobalISel/legalizer-info-validation.mir3
-rw-r--r--llvm/test/CodeGen/AArch64/Redundantstore.ll2
-rw-r--r--llvm/test/CodeGen/AArch64/aarch64-a57-fp-load-balancing.ll4
-rw-r--r--llvm/test/CodeGen/AArch64/aarch64-addv.ll4
-rw-r--r--llvm/test/CodeGen/AArch64/aarch64-gep-opt.ll2
-rw-r--r--llvm/test/CodeGen/AArch64/aarch64-tbz.ll4
-rw-r--r--llvm/test/CodeGen/AArch64/aarch64-unroll-and-jam.ll2
-rw-r--r--llvm/test/CodeGen/AArch64/aarch64-vcvtfp2fxs-combine.ll2
-rw-r--r--llvm/test/CodeGen/AArch64/arm64-build-vector.ll6
-rw-r--r--llvm/test/CodeGen/AArch64/arm64-movi.ll2
-rw-r--r--llvm/test/CodeGen/AArch64/arm64-popcnt.ll4
-rw-r--r--llvm/test/CodeGen/AArch64/arm64-rev.ll4
-rw-r--r--llvm/test/CodeGen/AArch64/asm-large-immediate.ll2
-rw-r--r--llvm/test/CodeGen/AArch64/basic-block-sections-cold.ll51
-rw-r--r--llvm/test/CodeGen/AArch64/basic-block-sections-unsafe.ll121
-rw-r--r--llvm/test/CodeGen/AArch64/bf16-shuffle.ll6
-rw-r--r--llvm/test/CodeGen/AArch64/bf16.ll2
-rw-r--r--llvm/test/CodeGen/AArch64/bitreverse.ll2
-rw-r--r--llvm/test/CodeGen/AArch64/cmpwithshort.ll2
-rw-r--r--llvm/test/CodeGen/AArch64/complex-deinterleaving-i16-add-scalable.ll2
-rw-r--r--llvm/test/CodeGen/AArch64/complex-deinterleaving-i16-mul-scalable.ll2
-rw-r--r--llvm/test/CodeGen/AArch64/complex-deinterleaving-i32-add-scalable.ll2
-rw-r--r--llvm/test/CodeGen/AArch64/complex-deinterleaving-i32-mul-scalable.ll2
-rw-r--r--llvm/test/CodeGen/AArch64/complex-deinterleaving-i64-add-scalable.ll2
-rw-r--r--llvm/test/CodeGen/AArch64/complex-deinterleaving-i64-mul-scalable.ll2
-rw-r--r--llvm/test/CodeGen/AArch64/complex-deinterleaving-i8-add-scalable.ll2
-rw-r--r--llvm/test/CodeGen/AArch64/complex-deinterleaving-splat-scalable.ll2
-rw-r--r--llvm/test/CodeGen/AArch64/complex-deinterleaving-splat.ll2
-rw-r--r--llvm/test/CodeGen/AArch64/cond-br-tuning.ll4
-rw-r--r--llvm/test/CodeGen/AArch64/consthoist-gep.ll2
-rw-r--r--llvm/test/CodeGen/AArch64/dbg-assign-tag-offset-mix-loc.ll72
-rw-r--r--llvm/test/CodeGen/AArch64/dbg-assign-tag-offset.ll76
-rw-r--r--llvm/test/CodeGen/AArch64/extbinopload.ll2
-rw-r--r--llvm/test/CodeGen/AArch64/extract-sext-zext.ll4
-rw-r--r--llvm/test/CodeGen/AArch64/fabs-combine.ll4
-rw-r--r--llvm/test/CodeGen/AArch64/fabs.ll8
-rw-r--r--llvm/test/CodeGen/AArch64/faddsub.ll8
-rw-r--r--llvm/test/CodeGen/AArch64/fcmp.ll8
-rw-r--r--llvm/test/CodeGen/AArch64/fcopysign.ll4
-rw-r--r--llvm/test/CodeGen/AArch64/fcvt.ll8
-rw-r--r--llvm/test/CodeGen/AArch64/fcvt_combine.ll8
-rw-r--r--llvm/test/CodeGen/AArch64/fdiv.ll8
-rw-r--r--llvm/test/CodeGen/AArch64/fence-singlethread.ll2
-rw-r--r--llvm/test/CodeGen/AArch64/fexplog.ll4
-rw-r--r--llvm/test/CodeGen/AArch64/fminimummaximum.ll8
-rw-r--r--llvm/test/CodeGen/AArch64/fminmax.ll8
-rw-r--r--llvm/test/CodeGen/AArch64/fmla.ll8
-rw-r--r--llvm/test/CodeGen/AArch64/fmul.ll8
-rw-r--r--llvm/test/CodeGen/AArch64/fneg.ll8
-rw-r--r--llvm/test/CodeGen/AArch64/fp16_intrinsic_lane.ll2
-rw-r--r--llvm/test/CodeGen/AArch64/fp16_intrinsic_scalar_1op.ll2
-rw-r--r--llvm/test/CodeGen/AArch64/fp16_intrinsic_scalar_2op.ll2
-rw-r--r--llvm/test/CodeGen/AArch64/fp16_intrinsic_scalar_3op.ll2
-rw-r--r--llvm/test/CodeGen/AArch64/fp16_intrinsic_vector_1op.ll2
-rw-r--r--llvm/test/CodeGen/AArch64/fp16_intrinsic_vector_2op.ll2
-rw-r--r--llvm/test/CodeGen/AArch64/fp16_intrinsic_vector_3op.ll2
-rw-r--r--llvm/test/CodeGen/AArch64/fpext.ll4
-rw-r--r--llvm/test/CodeGen/AArch64/fpow.ll4
-rw-r--r--llvm/test/CodeGen/AArch64/fpowi.ll4
-rw-r--r--llvm/test/CodeGen/AArch64/fptoi.ll8
-rw-r--r--llvm/test/CodeGen/AArch64/fptrunc.ll4
-rw-r--r--llvm/test/CodeGen/AArch64/frem.ll8
-rw-r--r--llvm/test/CodeGen/AArch64/frintn.ll2
-rw-r--r--llvm/test/CodeGen/AArch64/fsincos.ll4
-rw-r--r--llvm/test/CodeGen/AArch64/fsqrt.ll8
-rw-r--r--llvm/test/CodeGen/AArch64/hints.ll2
-rw-r--r--llvm/test/CodeGen/AArch64/icmp.ll4
-rw-r--r--llvm/test/CodeGen/AArch64/insertextract.ll4
-rw-r--r--llvm/test/CodeGen/AArch64/intrinsics-memory-barrier.ll2
-rw-r--r--llvm/test/CodeGen/AArch64/itofp.ll8
-rw-r--r--llvm/test/CodeGen/AArch64/ldp-stp-scaled-unscaled-pairs.ll2
-rw-r--r--llvm/test/CodeGen/AArch64/legalize-bug-bogus-cpu.ll2
-rw-r--r--llvm/test/CodeGen/AArch64/merge-scoped-aa-store.ll2
-rw-r--r--llvm/test/CodeGen/AArch64/merge-store-dependency.ll2
-rw-r--r--llvm/test/CodeGen/AArch64/merge-store.ll2
-rw-r--r--llvm/test/CodeGen/AArch64/min-max-combine.ll8
-rw-r--r--llvm/test/CodeGen/AArch64/min-max.ll8
-rw-r--r--llvm/test/CodeGen/AArch64/misched-fusion-addadrp.ll3
-rw-r--r--llvm/test/CodeGen/AArch64/misched-fusion-addr.ll3
-rw-r--r--llvm/test/CodeGen/AArch64/misched-fusion-aes.ll3
-rw-r--r--llvm/test/CodeGen/AArch64/mul_pow2.ll4
-rw-r--r--llvm/test/CodeGen/AArch64/neon-extadd-extract.ll2
-rw-r--r--llvm/test/CodeGen/AArch64/neon-mov.ll50
-rw-r--r--llvm/test/CodeGen/AArch64/neon_rbit.ll2
-rw-r--r--llvm/test/CodeGen/AArch64/no-quad-ldp-stp.ll4
-rw-r--r--llvm/test/CodeGen/AArch64/nzcv-save.ll2
-rw-r--r--llvm/test/CodeGen/AArch64/pacbti-module-attrs.ll2
-rw-r--r--llvm/test/CodeGen/AArch64/postra-mi-sched.ll2
-rw-r--r--llvm/test/CodeGen/AArch64/rbit.ll2
-rw-r--r--llvm/test/CodeGen/AArch64/rcpc3-sve.ll4
-rw-r--r--llvm/test/CodeGen/AArch64/rcpc3.ll4
-rw-r--r--llvm/test/CodeGen/AArch64/rem_crash.ll2
-rw-r--r--llvm/test/CodeGen/AArch64/rotate.ll2
-rw-r--r--llvm/test/CodeGen/AArch64/setcc_knownbits.ll2
-rw-r--r--llvm/test/CodeGen/AArch64/sls-stackprotector-outliner.ll2
-rw-r--r--llvm/test/CodeGen/AArch64/srem-seteq-vec-nonsplat.ll60
-rw-r--r--llvm/test/CodeGen/AArch64/stack-probing-64k.ll4
-rw-r--r--llvm/test/CodeGen/AArch64/stack-probing-dynamic.ll4
-rw-r--r--llvm/test/CodeGen/AArch64/stack-probing-sve.ll4
-rw-r--r--llvm/test/CodeGen/AArch64/stack-probing.ll4
-rw-r--r--llvm/test/CodeGen/AArch64/sve-fcopysign.ll4
-rw-r--r--llvm/test/CodeGen/AArch64/sve2-fcopysign.ll4
-rw-r--r--llvm/test/CodeGen/AArch64/tailmerging_in_mbp.ll2
-rw-r--r--llvm/test/CodeGen/AArch64/tbz-tbnz.ll2
-rw-r--r--llvm/test/CodeGen/AArch64/urem-seteq-vec-nonzero.ll5
-rw-r--r--llvm/test/CodeGen/AArch64/urem-vector-lkk.ll14
-rw-r--r--llvm/test/CodeGen/AArch64/v3f-to-int.ll2
-rw-r--r--llvm/test/CodeGen/AArch64/v8.5a-neon-frint3264-intrinsic.ll2
-rw-r--r--llvm/test/CodeGen/AArch64/v8.5a-scalar-frint3264-intrinsic.ll2
-rw-r--r--llvm/test/CodeGen/AArch64/vecreduce-fadd-strict.ll8
-rw-r--r--llvm/test/CodeGen/AArch64/vecreduce-fadd.ll8
-rw-r--r--llvm/test/CodeGen/AArch64/vecreduce-fmul-strict.ll8
-rw-r--r--llvm/test/CodeGen/AArch64/vecreduce-fmul.ll8
-rw-r--r--llvm/test/CodeGen/AArch64/xar.ll4
-rw-r--r--llvm/test/CodeGen/AMDGPU/GlobalISel/atomicrmw_udec_wrap.ll124
-rw-r--r--llvm/test/CodeGen/AMDGPU/GlobalISel/atomicrmw_uinc_wrap.ll128
-rw-r--r--llvm/test/CodeGen/AMDGPU/GlobalISel/bitcast_38_i16.ll85
-rw-r--r--llvm/test/CodeGen/AMDGPU/GlobalISel/extractelement.ll126
-rw-r--r--llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-build-vector.mir123
-rw-r--r--llvm/test/CodeGen/AMDGPU/GlobalISel/memory-legalizer-atomic-fence.ll96
-rw-r--r--llvm/test/CodeGen/AMDGPU/atomic_optimizations_global_pointer.ll88
-rw-r--r--llvm/test/CodeGen/AMDGPU/atomicrmw-expand.ll2
-rw-r--r--llvm/test/CodeGen/AMDGPU/directive-amdgcn-target.ll12
-rw-r--r--llvm/test/CodeGen/AMDGPU/elf-header-flags-mach.ll4
-rw-r--r--llvm/test/CodeGen/AMDGPU/generic-targets-require-v6.ll12
-rw-r--r--llvm/test/CodeGen/AMDGPU/global-atomics-fp.ll36
-rw-r--r--llvm/test/CodeGen/AMDGPU/global-saddr-atomics-min-max-system.ll128
-rw-r--r--llvm/test/CodeGen/AMDGPU/global-saddr-atomics.ll248
-rw-r--r--llvm/test/CodeGen/AMDGPU/global-saddr-load.ll16
-rw-r--r--llvm/test/CodeGen/AMDGPU/hsa-generic-target-features.ll16
-rw-r--r--llvm/test/CodeGen/AMDGPU/insert-waitcnts-crash.ll2
-rw-r--r--llvm/test/CodeGen/AMDGPU/llvm.amdgcn.image.gather4.d16.dim.ll2
-rw-r--r--llvm/test/CodeGen/AMDGPU/llvm.amdgcn.image.sample.dim.ll2
-rw-r--r--llvm/test/CodeGen/AMDGPU/machine-sink-temporal-divergence-swdev407790.ll34
-rw-r--r--llvm/test/CodeGen/AMDGPU/memory-legalizer-fence.ll96
-rw-r--r--llvm/test/CodeGen/AMDGPU/memory-legalizer-flat-agent.ll544
-rw-r--r--llvm/test/CodeGen/AMDGPU/memory-legalizer-flat-system.ll544
-rw-r--r--llvm/test/CodeGen/AMDGPU/memory-legalizer-global-agent.ll544
-rw-r--r--llvm/test/CodeGen/AMDGPU/memory-legalizer-global-system.ll512
-rw-r--r--llvm/test/CodeGen/AMDGPU/preload-kernarg-header.ll9
-rw-r--r--llvm/test/CodeGen/AMDGPU/preload-kernargs.ll15978
-rw-r--r--llvm/test/CodeGen/AMDGPU/readsteadycounter.ll24
-rw-r--r--llvm/test/CodeGen/AMDGPU/splitkit-getsubrangeformask.ll251
-rw-r--r--llvm/test/CodeGen/Generic/replace-intrinsics-with-veclib.ll11
-rw-r--r--llvm/test/CodeGen/Hexagon/vector-zext-v4i8.ll112
-rw-r--r--llvm/test/CodeGen/Mips/hf1_body.ll4
-rw-r--r--llvm/test/CodeGen/NVPTX/intrinsics.ll12
-rw-r--r--llvm/test/CodeGen/PowerPC/licm-remat.ll2
-rw-r--r--llvm/test/CodeGen/RISCV/O3-pipeline.ll1
-rw-r--r--llvm/test/CodeGen/RISCV/calling-conv-ilp32e.ll116
-rw-r--r--llvm/test/CodeGen/RISCV/lack-of-signed-truncation-check.ll92
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-abs.ll235
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bitreverse.ll1129
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bswap.ll450
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-calling-conv-fastcc.ll541
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-calling-conv.ll1642
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ctlz.ll3424
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ctpop.ll1177
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-cttz.ll3300
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extload-truncstore.ll1476
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract-subvector.ll215
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-buildvec.ll16
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-conv.ll167
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-splat.ll234
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-vrgather.ll117
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp.ll4860
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp2i-sat.ll112
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp2i.ll972
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-i2fp.ll906
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert-subvector.ll258
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-buildvec.ll290
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-exttrunc.ll183
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-splat.ll827
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-vrgather.ll121
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int.ll4198
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-mask-buildvec.ll542
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-mask-load-store.ll6
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-mask-splat.ll125
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-stepvector.ll294
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vreductions-mask.ll386
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwadd.ll54
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwaddu.ll54
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwsubu.ll54
-rw-r--r--llvm/test/CodeGen/RISCV/signbit-test.ll30
-rw-r--r--llvm/test/CodeGen/RISCV/signed-truncation-check.ll104
-rw-r--r--llvm/test/CodeGen/RISCV/typepromotion-overflow.ll387
-rw-r--r--llvm/test/CodeGen/SPARC/64atomics.ll54
-rw-r--r--llvm/test/CodeGen/SPARC/atomicrmw-uinc-udec-wrap.ll120
-rw-r--r--llvm/test/CodeGen/WebAssembly/wasm-eh-prepare.ll2
-rw-r--r--llvm/test/CodeGen/X86/fold-sext-trunc.ll2
-rw-r--r--llvm/test/CodeGen/X86/fp128-libcalls-strict.ll702
-rw-r--r--llvm/test/CodeGen/X86/fp128-libcalls.ll406
-rw-r--r--llvm/test/CodeGen/X86/load-combine.ll3
-rw-r--r--llvm/test/CodeGen/X86/statepoint-relocate-undef.ll32
-rw-r--r--llvm/test/CodeGen/X86/vector-reduce-ctpop.ll460
-rw-r--r--llvm/test/DebugInfo/AArch64/select-optimize-trailing-dbg-records.ll63
-rw-r--r--llvm/test/DebugInfo/Generic/assignment-tracking/declare-to-assign/hwasan.ll3
-rw-r--r--llvm/test/DebugInfo/Generic/ipsccp-remap-assign-id.ll59
-rw-r--r--llvm/test/DebugInfo/X86/debug-names-dwarf64.ll10
-rw-r--r--llvm/test/DebugInfo/X86/debug-names-types.ll42
-rw-r--r--llvm/test/DebugInfo/X86/dwarfdump-str-offsets.s5
-rw-r--r--llvm/test/DebugInfo/X86/skeleton-unit-verify.s2
-rw-r--r--llvm/test/Instrumentation/HWAddressSanitizer/dbg-assign-tag-offset.ll60
-rw-r--r--llvm/test/MC/BPF/insn-unit.s3
-rw-r--r--llvm/test/MC/Disassembler/AMDGPU/gfx11_dasm_ldsdir.txt1
-rw-r--r--llvm/test/MC/Disassembler/AMDGPU/gfx12_dasm_vdsdir.txt1
-rw-r--r--llvm/test/MC/Mips/macro-la-pic.s22
-rw-r--r--llvm/test/TableGen/GlobalISelCombinerEmitter/builtins/match-table-replacerreg.td24
-rw-r--r--llvm/test/TableGen/GlobalISelCombinerEmitter/match-table-imms.td32
-rw-r--r--llvm/test/TableGen/GlobalISelCombinerEmitter/match-table-intrinsics.td5
-rw-r--r--llvm/test/TableGen/GlobalISelCombinerEmitter/match-table-patfrag-root.td4
-rw-r--r--llvm/test/TableGen/GlobalISelCombinerEmitter/match-table.td62
-rw-r--r--llvm/test/TableGen/GlobalISelEmitter.td2
-rw-r--r--llvm/test/Transforms/CallSiteSplitting/callsite-split-or-phi.ll2
-rw-r--r--llvm/test/Transforms/CallSiteSplitting/callsite-split.ll2
-rw-r--r--llvm/test/Transforms/CalledValuePropagation/simple-arguments.ll2
-rw-r--r--llvm/test/Transforms/CalledValuePropagation/simple-memory.ll2
-rw-r--r--llvm/test/Transforms/CalledValuePropagation/simple-select.ll2
-rw-r--r--llvm/test/Transforms/ConstantHoisting/AArch64/const-hoist-gep.ll2
-rw-r--r--llvm/test/Transforms/CorrelatedValuePropagation/basic.ll11
-rw-r--r--llvm/test/Transforms/CorrelatedValuePropagation/switch.ll301
-rw-r--r--llvm/test/Transforms/DeadStoreElimination/noop-stores.ll357
-rw-r--r--llvm/test/Transforms/InstCombine/canonicalize-low-bit-mask-v4-and-icmp-eq-to-icmp-ule.ll20
-rw-r--r--llvm/test/Transforms/InstCombine/canonicalize-low-bit-mask-v4-and-icmp-ne-to-icmp-ugt.ll20
-rw-r--r--llvm/test/Transforms/InstCombine/fpclass-check-idioms.ll323
-rw-r--r--llvm/test/Transforms/InstCombine/fpclass-from-dom-cond.ll437
-rw-r--r--llvm/test/Transforms/InstCombine/partally-redundant-left-shift-input-masking-after-truncation-variant-d.ll14
-rw-r--r--llvm/test/Transforms/InstCombine/partally-redundant-left-shift-input-masking-variant-d.ll10
-rw-r--r--llvm/test/Transforms/InstCombine/redundant-left-shift-input-masking-after-truncation-variant-d.ll10
-rw-r--r--llvm/test/Transforms/InstCombine/redundant-left-shift-input-masking-variant-d.ll24
-rw-r--r--llvm/test/Transforms/InstCombine/simplify-demanded-fpclass.ll2
-rw-r--r--llvm/test/Transforms/InstSimplify/compare.ll30
-rw-r--r--llvm/test/Transforms/InstSimplify/gc_relocate.ll27
-rw-r--r--llvm/test/Transforms/LICM/expr-reassociate-int.ll364
-rw-r--r--llvm/test/Transforms/LICM/sink-foldable.ll2
-rw-r--r--llvm/test/Transforms/LoopVectorize/AArch64/arbitrary-induction-step.ll2
-rw-r--r--llvm/test/Transforms/LoopVectorize/AArch64/backedge-overflow.ll2
-rw-r--r--llvm/test/Transforms/LoopVectorize/AArch64/interleaved_cost.ll2
-rw-r--r--llvm/test/Transforms/LoopVectorize/AArch64/low_trip_memcheck_cost.ll38
-rw-r--r--llvm/test/Transforms/LoopVectorize/X86/amdlibm-calls-finite.ll332
-rw-r--r--llvm/test/Transforms/LoopVectorize/X86/amdlibm-calls.ll869
-rw-r--r--llvm/test/Transforms/SLPVectorizer/X86/sin-sqrt.ll29
-rw-r--r--llvm/test/Transforms/SLPVectorizer/X86/vec3-base.ll317
-rw-r--r--llvm/test/Transforms/SLPVectorizer/X86/vec3-calls.ll60
-rw-r--r--llvm/test/Transforms/SLPVectorizer/X86/vec3-gather-some-loads.ll66
-rw-r--r--llvm/test/Transforms/SLPVectorizer/X86/vec3-reorder-reshuffle.ll513
-rw-r--r--llvm/test/Transforms/SeparateConstOffsetFromGEP/AMDGPU/reorder-gep-inbounds.ll51
-rw-r--r--llvm/test/Transforms/SeparateConstOffsetFromGEP/AMDGPU/reorder-gep.ll175
-rw-r--r--llvm/test/Transforms/SeparateConstOffsetFromGEP/NVPTX/lower-gep-reorder.ll65
-rw-r--r--llvm/test/Transforms/SeparateConstOffsetFromGEP/reorder-gep.ll188
-rw-r--r--llvm/test/Transforms/Util/add-TLI-mappings.ll23
-rw-r--r--llvm/test/lit.cfg.py1
-rw-r--r--llvm/test/tools/UpdateTestChecks/update_llc_test_checks/Inputs/lanai_isel.ll.expected4
-rw-r--r--llvm/test/tools/dsymutil/ARM/dwarf5-dwarf4-combination-macho.test8
-rw-r--r--llvm/test/tools/llvm-dwarfdump/X86/verify_file_encoding.yaml2
-rw-r--r--llvm/test/tools/llvm-dwarfutil/ELF/X86/dwarf5-macro.test7
-rw-r--r--llvm/test/tools/llvm-mca/AArch64/Ampere/Ampere1B/basic-instructions.s3724
-rw-r--r--llvm/test/tools/llvm-mca/AArch64/Ampere/Ampere1B/cssc-instructions.s76
-rw-r--r--llvm/test/tools/llvm-mca/AArch64/Ampere/Ampere1B/mte-instructions.s349
-rw-r--r--llvm/test/tools/llvm-mca/AArch64/Ampere/Ampere1B/neon-instructions.s3235
-rw-r--r--llvm/test/tools/llvm-mca/AArch64/Ampere/Ampere1B/shifted-register.s31
-rw-r--r--llvm/test/tools/llvm-mca/X86/IceLakeServer/resources-avx512.s218
-rw-r--r--llvm/test/tools/llvm-mca/X86/IceLakeServer/resources-avx512dq.s98
-rw-r--r--llvm/test/tools/llvm-objdump/ELF/AMDGPU/subtarget.ll8
-rw-r--r--llvm/test/tools/obj2yaml/ELF/bb-addr-map-pgo-analysis-map.yaml232
-rw-r--r--llvm/test/tools/yaml2obj/ELF/bb-addr-map-pgo-analysis-map.yaml83
-rw-r--r--llvm/tools/llvm-dwarfdump/llvm-dwarfdump.cpp2
-rw-r--r--llvm/tools/llvm-readobj/ELFDumper.cpp4
-rw-r--r--llvm/tools/obj2yaml/elf2yaml.cpp37
-rw-r--r--llvm/unittests/ADT/BitVectorTest.cpp3
-rw-r--r--llvm/unittests/CodeGen/LowLevelTypeTest.cpp3
-rw-r--r--llvm/unittests/ProfileData/CoverageMappingTest.cpp24
-rw-r--r--llvm/unittests/Support/CMakeLists.txt1
-rw-r--r--llvm/unittests/Support/ExponentialBackoffTest.cpp31
-rw-r--r--llvm/unittests/Target/SPIRV/CMakeLists.txt1
-rw-r--r--llvm/unittests/TargetParser/TargetParserTest.cpp8
-rw-r--r--llvm/utils/TableGen/AsmMatcherEmitter.cpp38
-rw-r--r--llvm/utils/TableGen/AsmWriterEmitter.cpp15
-rw-r--r--llvm/utils/TableGen/CTagsEmitter.cpp4
-rw-r--r--llvm/utils/TableGen/CodeEmitterGen.cpp4
-rw-r--r--llvm/utils/TableGen/CodeGenDAGPatterns.cpp191
-rw-r--r--llvm/utils/TableGen/CodeGenHwModes.cpp6
-rw-r--r--llvm/utils/TableGen/CodeGenInstAlias.cpp6
-rw-r--r--llvm/utils/TableGen/CodeGenInstruction.cpp8
-rw-r--r--llvm/utils/TableGen/CodeGenInstruction.h2
-rw-r--r--llvm/utils/TableGen/CodeGenRegisters.cpp104
-rw-r--r--llvm/utils/TableGen/CodeGenRegisters.h12
-rw-r--r--llvm/utils/TableGen/CodeGenSchedule.cpp6
-rw-r--r--llvm/utils/TableGen/CodeGenTarget.cpp4
-rw-r--r--llvm/utils/TableGen/DAGISelMatcherEmitter.cpp2
-rw-r--r--llvm/utils/TableGen/DAGISelMatcherGen.cpp6
-rw-r--r--llvm/utils/TableGen/DAGISelMatcherOpt.cpp4
-rw-r--r--llvm/utils/TableGen/DFAEmitter.cpp4
-rw-r--r--llvm/utils/TableGen/DXILEmitter.cpp162
-rw-r--r--llvm/utils/TableGen/DecoderEmitter.cpp20
-rw-r--r--llvm/utils/TableGen/FastISelEmitter.cpp2
-rw-r--r--llvm/utils/TableGen/GlobalISelCombinerEmitter.cpp4
-rw-r--r--llvm/utils/TableGen/GlobalISelEmitter.cpp4
-rw-r--r--llvm/utils/TableGen/GlobalISelMatchTable.cpp26
-rw-r--r--llvm/utils/TableGen/GlobalISelMatchTable.h2
-rw-r--r--llvm/utils/TableGen/InfoByHwMode.cpp4
-rw-r--r--llvm/utils/TableGen/InfoByHwMode.h4
-rw-r--r--llvm/utils/TableGen/IntrinsicEmitter.cpp2
-rw-r--r--llvm/utils/TableGen/OptParserEmitter.cpp4
-rw-r--r--llvm/utils/TableGen/SearchableTableEmitter.cpp6
-rw-r--r--llvm/utils/TableGen/SequenceToOffsetTable.h2
-rw-r--r--llvm/utils/TableGen/SubtargetEmitter.cpp12
-rw-r--r--llvm/utils/TableGen/WebAssemblyDisassemblerEmitter.cpp2
-rw-r--r--llvm/utils/TableGen/X86CompressEVEXTablesEmitter.cpp13
-rw-r--r--llvm/utils/TableGen/X86DisassemblerTables.cpp4
-rw-r--r--llvm/utils/TableGen/X86FoldTablesEmitter.cpp34
-rw-r--r--llvm/utils/gn/secondary/clang/lib/AST/BUILD.gn1
-rw-r--r--llvm/utils/gn/secondary/clang/lib/Frontend/BUILD.gn3
-rw-r--r--llvm/utils/gn/secondary/clang/lib/InstallAPI/BUILD.gn10
-rw-r--r--llvm/utils/gn/secondary/clang/test/BUILD.gn1
-rw-r--r--llvm/utils/gn/secondary/libcxx/include/BUILD.gn1
-rw-r--r--llvm/utils/gn/secondary/lld/ELF/BUILD.gn1
-rw-r--r--llvm/utils/gn/secondary/lldb/tools/lldb-dap/BUILD.gn1
-rw-r--r--llvm/utils/gn/secondary/llvm/lib/Support/BUILD.gn1
-rw-r--r--llvm/utils/gn/secondary/llvm/unittests/Support/BUILD.gn1
-rw-r--r--mlir/docs/TargetLLVMIR.md6
-rw-r--r--mlir/include/mlir-c/BuiltinTypes.h6
-rw-r--r--mlir/include/mlir-c/Dialect/LLVM.h61
-rw-r--r--mlir/include/mlir-c/Dialect/SparseTensor.h24
-rw-r--r--mlir/include/mlir/Analysis/Presburger/Barvinok.h54
-rw-r--r--mlir/include/mlir/Analysis/Presburger/GeneratingFunction.h2
-rw-r--r--mlir/include/mlir/Analysis/Presburger/IntegerRelation.h31
-rw-r--r--mlir/include/mlir/Analysis/Presburger/Matrix.h26
-rw-r--r--mlir/include/mlir/Analysis/Presburger/PresburgerRelation.h4
-rw-r--r--mlir/include/mlir/Analysis/Presburger/Simplex.h6
-rw-r--r--mlir/include/mlir/Analysis/Presburger/Utils.h2
-rw-r--r--mlir/include/mlir/Dialect/AMDGPU/CMakeLists.txt1
-rw-r--r--mlir/include/mlir/Dialect/AMDGPU/TransformOps/AMDGPUTransformOps.h48
-rw-r--r--mlir/include/mlir/Dialect/AMDGPU/TransformOps/AMDGPUTransformOps.td45
-rw-r--r--mlir/include/mlir/Dialect/AMDGPU/TransformOps/CMakeLists.txt4
-rw-r--r--mlir/include/mlir/Dialect/AMDGPU/Transforms/Transforms.h3
-rw-r--r--mlir/include/mlir/Dialect/LLVMIR/NVVMOps.td19
-rw-r--r--mlir/include/mlir/Dialect/LLVMIR/ROCDLOps.td16
-rw-r--r--mlir/include/mlir/Dialect/NVGPU/IR/NVGPU.td10
-rw-r--r--mlir/include/mlir/Dialect/OpenMP/OpenMPOps.td151
-rw-r--r--mlir/include/mlir/Dialect/SparseTensor/IR/Enums.h78
-rw-r--r--mlir/include/mlir/Dialect/Vector/Transforms/VectorRewritePatterns.h9
-rw-r--r--mlir/include/mlir/ExecutionEngine/SparseTensor/Storage.h8
-rw-r--r--mlir/include/mlir/IR/PatternMatch.h6
-rw-r--r--mlir/include/mlir/InitAllExtensions.h2
-rw-r--r--mlir/include/mlir/Transforms/DialectConversion.h49
-rw-r--r--mlir/lib/Analysis/Presburger/Barvinok.cpp304
-rw-r--r--mlir/lib/Analysis/Presburger/IntegerRelation.cpp26
-rw-r--r--mlir/lib/Analysis/Presburger/Matrix.cpp76
-rw-r--r--mlir/lib/Analysis/Presburger/PresburgerRelation.cpp6
-rw-r--r--mlir/lib/Analysis/Presburger/Simplex.cpp13
-rw-r--r--mlir/lib/Analysis/Presburger/Utils.cpp6
-rw-r--r--mlir/lib/Bindings/Python/DialectLLVM.cpp145
-rw-r--r--mlir/lib/Bindings/Python/DialectSparseTensor.cpp49
-rw-r--r--mlir/lib/Bindings/Python/IRTypes.cpp38
-rw-r--r--mlir/lib/CAPI/Dialect/LLVM.cpp68
-rw-r--r--mlir/lib/CAPI/Dialect/SparseTensor.cpp61
-rw-r--r--mlir/lib/CAPI/IR/BuiltinTypes.cpp8
-rw-r--r--mlir/lib/Conversion/NVGPUToNVVM/NVGPUToNVVM.cpp3
-rw-r--r--mlir/lib/Conversion/SCFToOpenMP/SCFToOpenMP.cpp36
-rw-r--r--mlir/lib/Conversion/VectorToLLVM/ConvertVectorToLLVM.cpp42
-rw-r--r--mlir/lib/Dialect/AMDGPU/CMakeLists.txt3
-rw-r--r--mlir/lib/Dialect/AMDGPU/TransformOps/AMDGPUTransformOps.cpp66
-rw-r--r--mlir/lib/Dialect/AMDGPU/TransformOps/CMakeLists.txt25
-rw-r--r--mlir/lib/Dialect/AMDGPU/Transforms/OptimizeSharedMemory.cpp48
-rw-r--r--mlir/lib/Dialect/Async/Transforms/AsyncToAsyncRuntime.cpp4
-rw-r--r--mlir/lib/Dialect/GPU/Pipelines/GPUToNVVMPipeline.cpp2
-rw-r--r--mlir/lib/Dialect/LLVMIR/IR/NVVMDialect.cpp7
-rw-r--r--mlir/lib/Dialect/Linalg/Transforms/Padding.cpp8
-rw-r--r--mlir/lib/Dialect/Math/Transforms/LegalizeToF32.cpp20
-rw-r--r--mlir/lib/Dialect/NVGPU/IR/NVGPUDialect.cpp3
-rw-r--r--mlir/lib/Dialect/NVGPU/TransformOps/NVGPUTransformOps.cpp3
-rw-r--r--mlir/lib/Dialect/OpenMP/IR/OpenMPDialect.cpp190
-rw-r--r--mlir/lib/Dialect/Tensor/IR/TensorOps.cpp64
-rw-r--r--mlir/lib/Dialect/Tosa/IR/TosaOps.cpp6
-rw-r--r--mlir/lib/Dialect/Transform/IR/TransformOps.cpp12
-rw-r--r--mlir/lib/Dialect/Utils/IndexingUtils.cpp3
-rw-r--r--mlir/lib/Dialect/Vector/Transforms/CMakeLists.txt1
-rw-r--r--mlir/lib/Dialect/Vector/Transforms/VectorLinearize.cpp97
-rw-r--r--mlir/lib/Dialect/Vector/Transforms/VectorTransforms.cpp14
-rw-r--r--mlir/lib/Dialect/Vector/Transforms/VectorUnroll.cpp4
-rw-r--r--mlir/lib/ExecutionEngine/SparseTensorRuntime.cpp6
-rw-r--r--mlir/lib/Interfaces/InferIntRangeInterface.cpp2
-rw-r--r--mlir/lib/Target/LLVMIR/AttrKindDetail.h3
-rw-r--r--mlir/lib/Target/LLVMIR/Dialect/OpenMP/OpenMPToLLVMIRTranslation.cpp38
-rw-r--r--mlir/lib/Transforms/Utils/DialectConversion.cpp646
-rw-r--r--mlir/python/CMakeLists.txt13
-rw-r--r--mlir/python/mlir/_mlir_libs/_mlir/ir.pyi28
-rw-r--r--mlir/python/mlir/dialects/llvm.py1
-rw-r--r--mlir/test/CAPI/llvm.c156
-rw-r--r--mlir/test/Conversion/NVGPUToNVVM/nvgpu-to-nvvm.mlir8
-rw-r--r--mlir/test/Conversion/OpenMPToLLVM/convert-to-llvmir.mlir14
-rw-r--r--mlir/test/Conversion/SCFToOpenMP/reductions.mlir30
-rw-r--r--mlir/test/Conversion/VectorToLLVM/vector-to-llvm.mlir37
-rw-r--r--mlir/test/Dialect/AMDGPU/transform_optimize_shmem_reads_writes.mlir64
-rw-r--r--mlir/test/Dialect/GPU/test-nvvm-pipeline.mlir30
-rw-r--r--mlir/test/Dialect/LLVMIR/nvvm.mlir12
-rw-r--r--mlir/test/Dialect/LLVMIR/rocdl.mlir12
-rw-r--r--mlir/test/Dialect/Linalg/invalid.mlir26
-rw-r--r--mlir/test/Dialect/Linalg/named-ops.mlir11
-rw-r--r--mlir/test/Dialect/Linalg/transform-op-mmt4d-to-fma.mlir69
-rw-r--r--mlir/test/Dialect/Linalg/vectorization-unsupported.mlir73
-rw-r--r--mlir/test/Dialect/Linalg/vectorization.mlir25
-rw-r--r--mlir/test/Dialect/NVGPU/tmaload-transform.mlir2
-rw-r--r--mlir/test/Dialect/OpenMP/invalid.mlir135
-rw-r--r--mlir/test/Dialect/OpenMP/ops.mlir89
-rw-r--r--mlir/test/Dialect/OpenMP/roundtrip.mlir21
-rw-r--r--mlir/test/Dialect/Tensor/canonicalize.mlir39
-rw-r--r--mlir/test/Dialect/Vector/linearize.mlir19
-rw-r--r--mlir/test/Dialect/Vector/vector-transfer-collapse-inner-most-dims.mlir19
-rwxr-xr-xmlir/test/Integration/Dialect/SparseTensor/CPU/sparse_pack_d.mlir134
-rw-r--r--mlir/test/Integration/Dialect/SparseTensor/python/test_SDDMM.py13
-rw-r--r--mlir/test/Integration/Dialect/SparseTensor/python/test_SpMM.py13
-rw-r--r--mlir/test/Integration/Dialect/SparseTensor/python/test_output.py19
-rw-r--r--mlir/test/Integration/Dialect/SparseTensor/python/test_stress.py6
-rw-r--r--mlir/test/Integration/GPU/CUDA/sm90/gemm_f32_f16_f16_128x128x128.mlir3
-rw-r--r--mlir/test/Integration/GPU/CUDA/sm90/gemm_pred_f32_f16_f16_128x128x128.mlir3
-rw-r--r--mlir/test/Integration/GPU/CUDA/sm90/tma_load_128x64_swizzle128b.mlir3
-rw-r--r--mlir/test/Integration/GPU/CUDA/sm90/tma_load_64x64_swizzle128b.mlir3
-rw-r--r--mlir/test/Integration/GPU/CUDA/sm90/tma_load_64x8_8x128_noswizzle.mlir3
-rw-r--r--mlir/test/Target/LLVMIR/nvvmir.mlir19
-rw-r--r--mlir/test/Target/LLVMIR/openmp-reduction.mlir44
-rw-r--r--mlir/test/Target/LLVMIR/rocdl.mlir16
-rw-r--r--mlir/test/Transforms/test-legalizer.mlir14
-rw-r--r--mlir/test/lib/Dialect/Test/TestPatterns.cpp20
-rw-r--r--mlir/test/lib/Dialect/Vector/TestVectorTransforms.cpp29
-rw-r--r--mlir/test/python/dialects/llvm.py84
-rw-r--r--mlir/test/python/dialects/sparse_tensor/dialect.py11
-rw-r--r--mlir/test/python/ir/builtin_types.py35
-rw-r--r--mlir/unittests/Analysis/Presburger/BarvinokTest.cpp75
-rw-r--r--openmp/runtime/src/kmp_csupport.cpp5
-rw-r--r--openmp/runtime/src/kmp_gsupport.cpp2
-rw-r--r--openmp/runtime/src/kmp_lock.cpp6
-rw-r--r--openmp/runtime/src/kmp_lock.h17
-rw-r--r--polly/lib/Transform/ScheduleOptimizer.cpp23
-rw-r--r--polly/test/ScheduleOptimizer/schedule_computeout.ll4
-rw-r--r--utils/bazel/llvm-project-overlay/clang/BUILD.bazel20
-rw-r--r--utils/bazel/llvm-project-overlay/libc/BUILD.bazel18
-rw-r--r--utils/bazel/llvm-project-overlay/libc/test/src/__support/BUILD.bazel8
-rw-r--r--utils/bazel/llvm-project-overlay/libc/test/src/__support/FPUtil/BUILD.bazel1
-rw-r--r--utils/bazel/llvm-project-overlay/mlir/BUILD.bazel72
1125 files changed, 61596 insertions, 36565 deletions
diff --git a/.ci/generate-buildkite-pipeline-premerge b/.ci/generate-buildkite-pipeline-premerge
index 4ebf304..c14ec46 100755
--- a/.ci/generate-buildkite-pipeline-premerge
+++ b/.ci/generate-buildkite-pipeline-premerge
@@ -233,7 +233,10 @@ linux_projects=$(add-dependencies ${linux_projects_to_test} | sort | uniq)
windows_projects_to_test=$(exclude-windows $(compute-projects-to-test ${modified_projects}))
windows_check_targets=$(check-targets ${windows_projects_to_test} | sort | uniq)
-windows_projects=$(add-dependencies ${windows_projects_to_test} | sort | uniq)
+# Temporary disable the windows job.
+# See https://discourse.llvm.org/t/rfc-future-of-windows-pre-commit-ci/76840
+#windows_projects=$(add-dependencies ${windows_projects_to_test} | sort | uniq)
+windows_projects=""
# Generate the appropriate pipeline
if [[ "${linux_projects}" != "" ]]; then
diff --git a/.github/workflows/llvm-project-tests.yml b/.github/workflows/llvm-project-tests.yml
index 68b4a68..43b9019 100644
--- a/.github/workflows/llvm-project-tests.yml
+++ b/.github/workflows/llvm-project-tests.yml
@@ -58,6 +58,10 @@ jobs:
lit-tests:
name: Lit Tests
runs-on: ${{ matrix.os }}
+ container:
+ image: ${{(startsWith(matrix.os, 'ubuntu') && 'ghcr.io/llvm/ci-ubuntu-22.04:latest') || null}}
+ volumes:
+ - /mnt/:/mnt/
strategy:
fail-fast: false
matrix:
@@ -77,6 +81,7 @@ jobs:
with:
python-version: ${{ inputs.python_version }}
- name: Install Ninja
+ if: runner.os != 'Linux'
uses: llvm/actions/install-ninja@main
# actions/checkout deletes any existing files in the new git directory,
# so this needs to either run before ccache-action or it has to use
@@ -108,8 +113,8 @@ jobs:
run: |
if [ "${{ runner.os }}" == "Linux" ]; then
builddir="/mnt/build/"
- sudo mkdir -p $builddir
- sudo chown `whoami`:`whoami` $builddir
+ mkdir -p $builddir
+ extra_cmake_args="-DCMAKE_CXX_COMPILER=clang++ -DCMAKE_C_COMPILER=clang"
else
builddir="$(pwd)"/build
fi
@@ -123,6 +128,7 @@ jobs:
-DLLDB_INCLUDE_TESTS=OFF \
-DCMAKE_C_COMPILER_LAUNCHER=sccache \
-DCMAKE_CXX_COMPILER_LAUNCHER=sccache \
+ $extra_cmake_args \
${{ inputs.extra_cmake_args }}
ninja -C "$builddir" '${{ inputs.build_target }}'
diff --git a/bolt/lib/Core/DebugData.cpp b/bolt/lib/Core/DebugData.cpp
index 8c3f6bd..2942f0b 100644
--- a/bolt/lib/Core/DebugData.cpp
+++ b/bolt/lib/Core/DebugData.cpp
@@ -230,7 +230,7 @@ template <typename DebugVector, typename ListEntry, typename DebugAddressEntry>
static bool emitWithBase(raw_ostream &OS, const DebugVector &Entries,
DebugAddrWriter &AddrWriter, DWARFUnit &CU,
uint32_t &Index, const ListEntry BaseAddressx,
- const ListEntry OffsetPair, const ListEntry EndOfList,
+ const ListEntry OffsetPair,
const std::function<void(uint32_t)> &Func) {
if (Entries.size() < 2)
return false;
@@ -241,7 +241,9 @@ static bool emitWithBase(raw_ostream &OS, const DebugVector &Entries,
const DebugAddressEntry &Entry = Entries[Index];
if (Entry.LowPC == 0)
break;
- assert(Base <= Entry.LowPC && "Entry base is higher than low PC");
+ // In case rnglists or loclists are not sorted.
+ if (Base > Entry.LowPC)
+ break;
uint32_t StartOffset = Entry.LowPC - Base;
uint32_t EndOffset = Entry.HighPC - Base;
if (encodeULEB128(EndOffset, TempBuffer) > 2)
@@ -266,8 +268,6 @@ static bool emitWithBase(raw_ostream &OS, const DebugVector &Entries,
encodeULEB128(OffsetEntry.EndOffset, OS);
Func(OffsetEntry.Index);
}
- support::endian::write(OS, static_cast<uint8_t>(EndOfList),
- llvm::endianness::little);
return true;
}
@@ -276,19 +276,17 @@ DebugRangeListsSectionWriter::addRanges(DebugAddressRangesVector &Ranges) {
std::lock_guard<std::mutex> Lock(WriterMutex);
RangeEntries.push_back(CurrentOffset);
- bool WrittenStartxLength = false;
std::sort(
Ranges.begin(), Ranges.end(),
[](const DebugAddressRange &R1, const DebugAddressRange &R2) -> bool {
return R1.LowPC < R2.LowPC;
});
for (unsigned I = 0; I < Ranges.size();) {
- WrittenStartxLength = false;
if (emitWithBase<DebugAddressRangesVector, dwarf::RnglistEntries,
- DebugAddressRange>(
- *CUBodyStream, Ranges, *AddrWriter, *CU, I,
- dwarf::DW_RLE_base_addressx, dwarf::DW_RLE_offset_pair,
- dwarf::DW_RLE_end_of_list, [](uint32_t Index) -> void {}))
+ DebugAddressRange>(*CUBodyStream, Ranges, *AddrWriter, *CU,
+ I, dwarf::DW_RLE_base_addressx,
+ dwarf::DW_RLE_offset_pair,
+ [](uint32_t Index) -> void {}))
continue;
const DebugAddressRange &Range = Ranges[I];
@@ -299,12 +297,11 @@ DebugRangeListsSectionWriter::addRanges(DebugAddressRangesVector &Ranges) {
encodeULEB128(Index, *CUBodyStream);
encodeULEB128(Range.HighPC - Range.LowPC, *CUBodyStream);
++I;
- WrittenStartxLength = true;
}
- if (WrittenStartxLength)
- support::endian::write(*CUBodyStream,
- static_cast<uint8_t>(dwarf::DW_RLE_end_of_list),
- llvm::endianness::little);
+
+ support::endian::write(*CUBodyStream,
+ static_cast<uint8_t>(dwarf::DW_RLE_end_of_list),
+ llvm::endianness::little);
CurrentOffset = CUBodyBuffer->size();
return RangeEntries.size() - 1;
}
@@ -688,7 +685,6 @@ static void writeDWARF5LocList(uint32_t &NumberOfEntries, DIEValue &AttrInfo,
}
std::vector<uint64_t> OffsetsArray;
- bool WrittenStartxLength = false;
auto writeExpression = [&](uint32_t Index) -> void {
const DebugLocationEntry &Entry = LocList[Index];
encodeULEB128(Entry.Expr.size(), LocBodyStream);
@@ -696,12 +692,11 @@ static void writeDWARF5LocList(uint32_t &NumberOfEntries, DIEValue &AttrInfo,
reinterpret_cast<const char *>(Entry.Expr.data()), Entry.Expr.size());
};
for (unsigned I = 0; I < LocList.size();) {
- WrittenStartxLength = false;
if (emitWithBase<DebugLocationsVector, dwarf::LoclistEntries,
- DebugLocationEntry>(
- LocBodyStream, LocList, AddrWriter, CU, I,
- dwarf::DW_LLE_base_addressx, dwarf::DW_LLE_offset_pair,
- dwarf::DW_LLE_end_of_list, writeExpression))
+ DebugLocationEntry>(LocBodyStream, LocList, AddrWriter, CU,
+ I, dwarf::DW_LLE_base_addressx,
+ dwarf::DW_LLE_offset_pair,
+ writeExpression))
continue;
const DebugLocationEntry &Entry = LocList[I];
@@ -713,13 +708,11 @@ static void writeDWARF5LocList(uint32_t &NumberOfEntries, DIEValue &AttrInfo,
encodeULEB128(Entry.HighPC - Entry.LowPC, LocBodyStream);
writeExpression(I);
++I;
- WrittenStartxLength = true;
}
- if (WrittenStartxLength)
- support::endian::write(LocBodyStream,
- static_cast<uint8_t>(dwarf::DW_LLE_end_of_list),
- llvm::endianness::little);
+ support::endian::write(LocBodyStream,
+ static_cast<uint8_t>(dwarf::DW_LLE_end_of_list),
+ llvm::endianness::little);
}
void DebugLoclistWriter::addList(DIEBuilder &DIEBldr, DIE &Die,
diff --git a/bolt/lib/Rewrite/DWARFRewriter.cpp b/bolt/lib/Rewrite/DWARFRewriter.cpp
index 27fa937..592b235 100644
--- a/bolt/lib/Rewrite/DWARFRewriter.cpp
+++ b/bolt/lib/Rewrite/DWARFRewriter.cpp
@@ -919,15 +919,10 @@ void DWARFRewriter::updateUnitDebugInfo(
DIEValue LowPCVal = Die->findAttribute(dwarf::DW_AT_low_pc);
DIEValue HighPCVal = Die->findAttribute(dwarf::DW_AT_high_pc);
if (FunctionRanges.empty()) {
- if (LowPCVal && HighPCVal) {
+ if (LowPCVal && HighPCVal)
FunctionRanges.push_back({0, HighPCVal.getDIEInteger().getValue()});
- } else {
- // I haven't seen this case, but who knows what other compilers
- // generate.
+ else
FunctionRanges.push_back({0, 1});
- errs() << "BOLT-WARNING: [internal-dwarf-error]: subprogram got GCed "
- "by the linker, DW_AT_ranges is used\n";
- }
}
if (FunctionRanges.size() == 1 && !opts::AlwaysConvertToRanges) {
diff --git a/bolt/test/X86/dwarf4-subprogram-single-gc-ranges.test b/bolt/test/X86/dwarf4-subprogram-single-gc-ranges.test
index 9080052..3e7e765 100644
--- a/bolt/test/X86/dwarf4-subprogram-single-gc-ranges.test
+++ b/bolt/test/X86/dwarf4-subprogram-single-gc-ranges.test
@@ -2,14 +2,12 @@
# RUN: llvm-mc -dwarf-version=4 -filetype=obj -triple x86_64-unknown-linux %p/Inputs/dwarf4-subprogram-single-gc-ranges-main.s -o %t1.o
# RUN: %clang %cflags %t1.o -o %t.exe -Wl,-q
-# RUN: llvm-bolt %t.exe -o %t.bolt --update-debug-sections &> %t1.txt
-# RUN: llvm-dwarfdump --show-form --verbose --debug-info %t.bolt >> %t1.txt
+# RUN: llvm-bolt %t.exe -o %t.bolt --update-debug-sections
+# RUN: llvm-dwarfdump --show-form --verbose --debug-info %t.bolt > %t1.txt
# RUN: cat %t1.txt | FileCheck --check-prefix=POSTCHECK %s
# This test checks BOLT correctly handles DW_TAG_subprogram with Ranges with single entry, when function was GCed.
-# POSTCHECK: BOLT-WARNING: [internal-dwarf-error]: subprogram got GCed by the linker, DW_AT_ranges is used
-
# POSTCHECK: DW_TAG_subprogram
# POSTCHECK-NEXT: DW_AT_frame_base
# POSTCHECK-NEXT: DW_AT_linkage_name
diff --git a/bolt/test/X86/dwarf5-empty-function-ranges.s b/bolt/test/X86/dwarf5-empty-function-ranges.s
new file mode 100644
index 0000000..bfa3178
--- /dev/null
+++ b/bolt/test/X86/dwarf5-empty-function-ranges.s
@@ -0,0 +1,538 @@
+# REQUIRES: system-linux
+
+# RUN: llvm-mc -dwarf-version=5 -filetype=obj -triple x86_64-unknown-linux %s -o %t1.o
+# RUN: %clang %cflags -dwarf-5 %t1.o -o %t.exe -Wl,-q -Wl,-gc-sections -fuse-ld=lld -Wl,--entry=main
+# RUN: llvm-bolt %t.exe -o %t.bolt --update-debug-sections
+# RUN: llvm-dwarfdump --debug-info %t.exe | FileCheck --check-prefix=PRECHECK %s
+# RUN: llvm-dwarfdump --debug-info %t.bolt | FileCheck --check-prefix=POSTCHECK %s
+
+# PRECHECK: DW_TAG_subprogram
+# PRECHECK-NEXT: DW_AT_ranges
+# PRECHECK-NEXT: [0x0000000000000000
+# PRECHECK-NEXT: [0x0000000000000000
+# PRECHECK-NEXT: [0x0000000000000000
+# PRECHECK-NEXT: [0x0000000000000000
+# PRECHECK-NEXT: DW_AT_frame_base
+# PRECHECK-NEXT: DW_AT_linkage_name ("_Z6helperi")
+# PRECHECK-NEXT: DW_AT_name ("helper")
+
+# POSTCHECK: DW_TAG_subprogram
+# POSTCHECK-NEXT: DW_AT_frame_base
+# POSTCHECK-NEXT: DW_AT_linkage_name ("_Z6helperi")
+# POSTCHECK-NEXT: DW_AT_name ("helper")
+# POSTCHECK-NEXT: DW_AT_decl_file
+# POSTCHECK-NEXT: DW_AT_decl_line
+# POSTCHECK-NEXT: DW_AT_type
+# POSTCHECK-NEXT: DW_AT_external
+# POSTCHECK-NEXT: DW_AT_low_pc (0x0000000000000000)
+# POSTCHECK-NEXT: DW_AT_high_pc (0x0000000000000001)
+
+## Tests BOLT path that handles DW_AT_ranges with no output function ranges.
+
+## clang++ main.cpp -O0 -fno-inline-functions -fbasic-block-sections=all -g2 -S
+## int helper(int argc) {
+## int x = argc;
+## if (x == 3)
+## x++;
+## else
+## x--;
+## return x;
+## }
+## int main(int argc, char *argv[]) {
+## int x = argc;
+## if (x == 3)
+## x++;
+## else
+## x--;
+## return x;
+## }
+
+ .text
+ .file "main.cpp"
+ .section .text._Z6helperi,"ax",@progbits
+ .globl _Z6helperi # -- Begin function _Z6helperi
+ .p2align 4, 0x90
+ .type _Z6helperi,@function
+_Z6helperi: # @_Z6helperi
+.Lfunc_begin0:
+ .file 0 "/repro2" "main.cpp" md5 0x888a2704226ec400f256aa9c2207456c
+ .loc 0 1 0 # main.cpp:1:0
+ .cfi_startproc
+# %bb.0: # %entry
+ pushq %rbp
+ .cfi_def_cfa_offset 16
+ .cfi_offset %rbp, -16
+ movq %rsp, %rbp
+ .cfi_def_cfa_register %rbp
+ movl %edi, -4(%rbp)
+.Ltmp0:
+ .loc 0 2 11 prologue_end # main.cpp:2:11
+ movl -4(%rbp), %eax
+ .loc 0 2 7 is_stmt 0 # main.cpp:2:7
+ movl %eax, -8(%rbp)
+.Ltmp1:
+ .loc 0 3 9 is_stmt 1 # main.cpp:3:9
+ cmpl $3, -8(%rbp)
+.Ltmp2:
+ .loc 0 3 7 is_stmt 0 # main.cpp:3:7
+ jne _Z6helperi.__part.2
+ jmp _Z6helperi.__part.1
+.LBB_END0_0:
+ .cfi_endproc
+ .section .text._Z6helperi,"ax",@progbits,unique,1
+_Z6helperi.__part.1: # %if.then
+ .cfi_startproc
+ .cfi_def_cfa %rbp, 16
+ .cfi_offset %rbp, -16
+ .loc 0 4 6 is_stmt 1 # main.cpp:4:6
+ movl -8(%rbp), %eax
+ addl $1, %eax
+ movl %eax, -8(%rbp)
+ .loc 0 4 5 is_stmt 0 # main.cpp:4:5
+ jmp _Z6helperi.__part.3
+.LBB_END0_1:
+ .size _Z6helperi.__part.1, .LBB_END0_1-_Z6helperi.__part.1
+ .cfi_endproc
+ .section .text._Z6helperi,"ax",@progbits,unique,2
+_Z6helperi.__part.2: # %if.else
+ .cfi_startproc
+ .cfi_def_cfa %rbp, 16
+ .cfi_offset %rbp, -16
+ .loc 0 6 6 is_stmt 1 # main.cpp:6:6
+ movl -8(%rbp), %eax
+ addl $-1, %eax
+ movl %eax, -8(%rbp)
+ jmp _Z6helperi.__part.3
+.LBB_END0_2:
+ .size _Z6helperi.__part.2, .LBB_END0_2-_Z6helperi.__part.2
+ .cfi_endproc
+ .section .text._Z6helperi,"ax",@progbits,unique,3
+_Z6helperi.__part.3: # %if.end
+ .cfi_startproc
+ .cfi_def_cfa %rbp, 16
+ .cfi_offset %rbp, -16
+ .loc 0 7 10 # main.cpp:7:10
+ movl -8(%rbp), %eax
+ .loc 0 7 3 epilogue_begin is_stmt 0 # main.cpp:7:3
+ popq %rbp
+ .cfi_def_cfa %rsp, 8
+ retq
+.LBB_END0_3:
+ .size _Z6helperi.__part.3, .LBB_END0_3-_Z6helperi.__part.3
+ .cfi_endproc
+ .section .text._Z6helperi,"ax",@progbits
+.Lfunc_end0:
+ .size _Z6helperi, .Lfunc_end0-_Z6helperi
+ # -- End function
+ .section .text.main,"ax",@progbits
+ .globl main # -- Begin function main
+ .p2align 4, 0x90
+ .type main,@function
+main: # @main
+.Lfunc_begin1:
+ .loc 0 9 0 is_stmt 1 # main.cpp:9:0
+ .cfi_startproc
+# %bb.0: # %entry
+ pushq %rbp
+ .cfi_def_cfa_offset 16
+ .cfi_offset %rbp, -16
+ movq %rsp, %rbp
+ .cfi_def_cfa_register %rbp
+ movl $0, -4(%rbp)
+ movl %edi, -8(%rbp)
+ movq %rsi, -16(%rbp)
+.Ltmp3:
+ .loc 0 10 11 prologue_end # main.cpp:10:11
+ movl -8(%rbp), %eax
+ .loc 0 10 7 is_stmt 0 # main.cpp:10:7
+ movl %eax, -20(%rbp)
+.Ltmp4:
+ .loc 0 11 9 is_stmt 1 # main.cpp:11:9
+ cmpl $3, -20(%rbp)
+.Ltmp5:
+ .loc 0 11 7 is_stmt 0 # main.cpp:11:7
+ jne main.__part.2
+ jmp main.__part.1
+.LBB_END1_0:
+ .cfi_endproc
+ .section .text.main,"ax",@progbits,unique,4
+main.__part.1: # %if.then
+ .cfi_startproc
+ .cfi_def_cfa %rbp, 16
+ .cfi_offset %rbp, -16
+ .loc 0 12 6 is_stmt 1 # main.cpp:12:6
+ movl -20(%rbp), %eax
+ addl $1, %eax
+ movl %eax, -20(%rbp)
+ .loc 0 12 5 is_stmt 0 # main.cpp:12:5
+ jmp main.__part.3
+.LBB_END1_1:
+ .size main.__part.1, .LBB_END1_1-main.__part.1
+ .cfi_endproc
+ .section .text.main,"ax",@progbits,unique,5
+main.__part.2: # %if.else
+ .cfi_startproc
+ .cfi_def_cfa %rbp, 16
+ .cfi_offset %rbp, -16
+ .loc 0 14 6 is_stmt 1 # main.cpp:14:6
+ movl -20(%rbp), %eax
+ addl $-1, %eax
+ movl %eax, -20(%rbp)
+ jmp main.__part.3
+.LBB_END1_2:
+ .size main.__part.2, .LBB_END1_2-main.__part.2
+ .cfi_endproc
+ .section .text.main,"ax",@progbits,unique,6
+main.__part.3: # %if.end
+ .cfi_startproc
+ .cfi_def_cfa %rbp, 16
+ .cfi_offset %rbp, -16
+ .loc 0 15 10 # main.cpp:15:10
+ movl -20(%rbp), %eax
+ .loc 0 15 3 epilogue_begin is_stmt 0 # main.cpp:15:3
+ popq %rbp
+ .cfi_def_cfa %rsp, 8
+ retq
+.LBB_END1_3:
+ .size main.__part.3, .LBB_END1_3-main.__part.3
+ .cfi_endproc
+ .section .text.main,"ax",@progbits
+.Lfunc_end1:
+ .size main, .Lfunc_end1-main
+ # -- End function
+ .section .debug_abbrev,"",@progbits
+ .byte 1 # Abbreviation Code
+ .byte 17 # DW_TAG_compile_unit
+ .byte 1 # DW_CHILDREN_yes
+ .byte 37 # DW_AT_producer
+ .byte 37 # DW_FORM_strx1
+ .byte 19 # DW_AT_language
+ .byte 5 # DW_FORM_data2
+ .byte 3 # DW_AT_name
+ .byte 37 # DW_FORM_strx1
+ .byte 114 # DW_AT_str_offsets_base
+ .byte 23 # DW_FORM_sec_offset
+ .byte 16 # DW_AT_stmt_list
+ .byte 23 # DW_FORM_sec_offset
+ .byte 27 # DW_AT_comp_dir
+ .byte 37 # DW_FORM_strx1
+ .byte 17 # DW_AT_low_pc
+ .byte 1 # DW_FORM_addr
+ .byte 85 # DW_AT_ranges
+ .byte 35 # DW_FORM_rnglistx
+ .byte 115 # DW_AT_addr_base
+ .byte 23 # DW_FORM_sec_offset
+ .byte 116 # DW_AT_rnglists_base
+ .byte 23 # DW_FORM_sec_offset
+ .byte 0 # EOM(1)
+ .byte 0 # EOM(2)
+ .byte 2 # Abbreviation Code
+ .byte 46 # DW_TAG_subprogram
+ .byte 1 # DW_CHILDREN_yes
+ .byte 85 # DW_AT_ranges
+ .byte 35 # DW_FORM_rnglistx
+ .byte 64 # DW_AT_frame_base
+ .byte 24 # DW_FORM_exprloc
+ .byte 110 # DW_AT_linkage_name
+ .byte 37 # DW_FORM_strx1
+ .byte 3 # DW_AT_name
+ .byte 37 # DW_FORM_strx1
+ .byte 58 # DW_AT_decl_file
+ .byte 11 # DW_FORM_data1
+ .byte 59 # DW_AT_decl_line
+ .byte 11 # DW_FORM_data1
+ .byte 73 # DW_AT_type
+ .byte 19 # DW_FORM_ref4
+ .byte 63 # DW_AT_external
+ .byte 25 # DW_FORM_flag_present
+ .byte 0 # EOM(1)
+ .byte 0 # EOM(2)
+ .byte 3 # Abbreviation Code
+ .byte 5 # DW_TAG_formal_parameter
+ .byte 0 # DW_CHILDREN_no
+ .byte 2 # DW_AT_location
+ .byte 24 # DW_FORM_exprloc
+ .byte 3 # DW_AT_name
+ .byte 37 # DW_FORM_strx1
+ .byte 58 # DW_AT_decl_file
+ .byte 11 # DW_FORM_data1
+ .byte 59 # DW_AT_decl_line
+ .byte 11 # DW_FORM_data1
+ .byte 73 # DW_AT_type
+ .byte 19 # DW_FORM_ref4
+ .byte 0 # EOM(1)
+ .byte 0 # EOM(2)
+ .byte 4 # Abbreviation Code
+ .byte 52 # DW_TAG_variable
+ .byte 0 # DW_CHILDREN_no
+ .byte 2 # DW_AT_location
+ .byte 24 # DW_FORM_exprloc
+ .byte 3 # DW_AT_name
+ .byte 37 # DW_FORM_strx1
+ .byte 58 # DW_AT_decl_file
+ .byte 11 # DW_FORM_data1
+ .byte 59 # DW_AT_decl_line
+ .byte 11 # DW_FORM_data1
+ .byte 73 # DW_AT_type
+ .byte 19 # DW_FORM_ref4
+ .byte 0 # EOM(1)
+ .byte 0 # EOM(2)
+ .byte 5 # Abbreviation Code
+ .byte 46 # DW_TAG_subprogram
+ .byte 1 # DW_CHILDREN_yes
+ .byte 85 # DW_AT_ranges
+ .byte 35 # DW_FORM_rnglistx
+ .byte 64 # DW_AT_frame_base
+ .byte 24 # DW_FORM_exprloc
+ .byte 3 # DW_AT_name
+ .byte 37 # DW_FORM_strx1
+ .byte 58 # DW_AT_decl_file
+ .byte 11 # DW_FORM_data1
+ .byte 59 # DW_AT_decl_line
+ .byte 11 # DW_FORM_data1
+ .byte 73 # DW_AT_type
+ .byte 19 # DW_FORM_ref4
+ .byte 63 # DW_AT_external
+ .byte 25 # DW_FORM_flag_present
+ .byte 0 # EOM(1)
+ .byte 0 # EOM(2)
+ .byte 6 # Abbreviation Code
+ .byte 36 # DW_TAG_base_type
+ .byte 0 # DW_CHILDREN_no
+ .byte 3 # DW_AT_name
+ .byte 37 # DW_FORM_strx1
+ .byte 62 # DW_AT_encoding
+ .byte 11 # DW_FORM_data1
+ .byte 11 # DW_AT_byte_size
+ .byte 11 # DW_FORM_data1
+ .byte 0 # EOM(1)
+ .byte 0 # EOM(2)
+ .byte 7 # Abbreviation Code
+ .byte 15 # DW_TAG_pointer_type
+ .byte 0 # DW_CHILDREN_no
+ .byte 73 # DW_AT_type
+ .byte 19 # DW_FORM_ref4
+ .byte 0 # EOM(1)
+ .byte 0 # EOM(2)
+ .byte 0 # EOM(3)
+ .section .debug_info,"",@progbits
+.Lcu_begin0:
+ .long .Ldebug_info_end0-.Ldebug_info_start0 # Length of Unit
+.Ldebug_info_start0:
+ .short 5 # DWARF version number
+ .byte 1 # DWARF Unit Type
+ .byte 8 # Address Size (in bytes)
+ .long .debug_abbrev # Offset Into Abbrev. Section
+ .byte 1 # Abbrev [1] 0xc:0x82 DW_TAG_compile_unit
+ .byte 0 # DW_AT_producer
+ .short 33 # DW_AT_language
+ .byte 1 # DW_AT_name
+ .long .Lstr_offsets_base0 # DW_AT_str_offsets_base
+ .long .Lline_table_start0 # DW_AT_stmt_list
+ .byte 2 # DW_AT_comp_dir
+ .quad 0 # DW_AT_low_pc
+ .byte 2 # DW_AT_ranges
+ .long .Laddr_table_base0 # DW_AT_addr_base
+ .long .Lrnglists_table_base0 # DW_AT_rnglists_base
+ .byte 2 # Abbrev [2] 0x2b:0x23 DW_TAG_subprogram
+ .byte 0 # DW_AT_ranges
+ .byte 1 # DW_AT_frame_base
+ .byte 86
+ .byte 3 # DW_AT_linkage_name
+ .byte 4 # DW_AT_name
+ .byte 0 # DW_AT_decl_file
+ .byte 1 # DW_AT_decl_line
+ .long 123 # DW_AT_type
+ # DW_AT_external
+ .byte 3 # Abbrev [3] 0x37:0xb DW_TAG_formal_parameter
+ .byte 2 # DW_AT_location
+ .byte 145
+ .byte 124
+ .byte 7 # DW_AT_name
+ .byte 0 # DW_AT_decl_file
+ .byte 1 # DW_AT_decl_line
+ .long 123 # DW_AT_type
+ .byte 4 # Abbrev [4] 0x42:0xb DW_TAG_variable
+ .byte 2 # DW_AT_location
+ .byte 145
+ .byte 120
+ .byte 8 # DW_AT_name
+ .byte 0 # DW_AT_decl_file
+ .byte 2 # DW_AT_decl_line
+ .long 123 # DW_AT_type
+ .byte 0 # End Of Children Mark
+ .byte 5 # Abbrev [5] 0x4e:0x2d DW_TAG_subprogram
+ .byte 1 # DW_AT_ranges
+ .byte 1 # DW_AT_frame_base
+ .byte 86
+ .byte 6 # DW_AT_name
+ .byte 0 # DW_AT_decl_file
+ .byte 9 # DW_AT_decl_line
+ .long 123 # DW_AT_type
+ # DW_AT_external
+ .byte 3 # Abbrev [3] 0x59:0xb DW_TAG_formal_parameter
+ .byte 2 # DW_AT_location
+ .byte 145
+ .byte 120
+ .byte 7 # DW_AT_name
+ .byte 0 # DW_AT_decl_file
+ .byte 9 # DW_AT_decl_line
+ .long 123 # DW_AT_type
+ .byte 3 # Abbrev [3] 0x64:0xb DW_TAG_formal_parameter
+ .byte 2 # DW_AT_location
+ .byte 145
+ .byte 112
+ .byte 9 # DW_AT_name
+ .byte 0 # DW_AT_decl_file
+ .byte 9 # DW_AT_decl_line
+ .long 127 # DW_AT_type
+ .byte 4 # Abbrev [4] 0x6f:0xb DW_TAG_variable
+ .byte 2 # DW_AT_location
+ .byte 145
+ .byte 108
+ .byte 8 # DW_AT_name
+ .byte 0 # DW_AT_decl_file
+ .byte 10 # DW_AT_decl_line
+ .long 123 # DW_AT_type
+ .byte 0 # End Of Children Mark
+ .byte 6 # Abbrev [6] 0x7b:0x4 DW_TAG_base_type
+ .byte 5 # DW_AT_name
+ .byte 5 # DW_AT_encoding
+ .byte 4 # DW_AT_byte_size
+ .byte 7 # Abbrev [7] 0x7f:0x5 DW_TAG_pointer_type
+ .long 132 # DW_AT_type
+ .byte 7 # Abbrev [7] 0x84:0x5 DW_TAG_pointer_type
+ .long 137 # DW_AT_type
+ .byte 6 # Abbrev [6] 0x89:0x4 DW_TAG_base_type
+ .byte 10 # DW_AT_name
+ .byte 6 # DW_AT_encoding
+ .byte 1 # DW_AT_byte_size
+ .byte 0 # End Of Children Mark
+.Ldebug_info_end0:
+ .section .debug_rnglists,"",@progbits
+ .long .Ldebug_list_header_end0-.Ldebug_list_header_start0 # Length
+.Ldebug_list_header_start0:
+ .short 5 # Version
+ .byte 8 # Address size
+ .byte 0 # Segment selector size
+ .long 3 # Offset entry count
+.Lrnglists_table_base0:
+ .long .Ldebug_ranges0-.Lrnglists_table_base0
+ .long .Ldebug_ranges1-.Lrnglists_table_base0
+ .long .Ldebug_ranges2-.Lrnglists_table_base0
+.Ldebug_ranges0:
+ .byte 3 # DW_RLE_startx_length
+ .byte 0 # start index
+ .uleb128 .LBB_END0_1-_Z6helperi.__part.1 # length
+ .byte 3 # DW_RLE_startx_length
+ .byte 1 # start index
+ .uleb128 .LBB_END0_2-_Z6helperi.__part.2 # length
+ .byte 3 # DW_RLE_startx_length
+ .byte 2 # start index
+ .uleb128 .LBB_END0_3-_Z6helperi.__part.3 # length
+ .byte 3 # DW_RLE_startx_length
+ .byte 3 # start index
+ .uleb128 .Lfunc_end0-.Lfunc_begin0 # length
+ .byte 0 # DW_RLE_end_of_list
+.Ldebug_ranges1:
+ .byte 3 # DW_RLE_startx_length
+ .byte 4 # start index
+ .uleb128 .LBB_END1_1-main.__part.1 # length
+ .byte 3 # DW_RLE_startx_length
+ .byte 5 # start index
+ .uleb128 .LBB_END1_2-main.__part.2 # length
+ .byte 3 # DW_RLE_startx_length
+ .byte 6 # start index
+ .uleb128 .LBB_END1_3-main.__part.3 # length
+ .byte 3 # DW_RLE_startx_length
+ .byte 7 # start index
+ .uleb128 .Lfunc_end1-.Lfunc_begin1 # length
+ .byte 0 # DW_RLE_end_of_list
+.Ldebug_ranges2:
+ .byte 3 # DW_RLE_startx_length
+ .byte 0 # start index
+ .uleb128 .LBB_END0_1-_Z6helperi.__part.1 # length
+ .byte 3 # DW_RLE_startx_length
+ .byte 1 # start index
+ .uleb128 .LBB_END0_2-_Z6helperi.__part.2 # length
+ .byte 3 # DW_RLE_startx_length
+ .byte 2 # start index
+ .uleb128 .LBB_END0_3-_Z6helperi.__part.3 # length
+ .byte 3 # DW_RLE_startx_length
+ .byte 3 # start index
+ .uleb128 .Lfunc_end0-.Lfunc_begin0 # length
+ .byte 3 # DW_RLE_startx_length
+ .byte 4 # start index
+ .uleb128 .LBB_END1_1-main.__part.1 # length
+ .byte 3 # DW_RLE_startx_length
+ .byte 5 # start index
+ .uleb128 .LBB_END1_2-main.__part.2 # length
+ .byte 3 # DW_RLE_startx_length
+ .byte 6 # start index
+ .uleb128 .LBB_END1_3-main.__part.3 # length
+ .byte 3 # DW_RLE_startx_length
+ .byte 7 # start index
+ .uleb128 .Lfunc_end1-.Lfunc_begin1 # length
+ .byte 0 # DW_RLE_end_of_list
+.Ldebug_list_header_end0:
+ .section .debug_str_offsets,"",@progbits
+ .long 48 # Length of String Offsets Set
+ .short 5
+ .short 0
+.Lstr_offsets_base0:
+ .section .debug_str,"MS",@progbits,1
+.Linfo_string0:
+ .asciz "clang version 19.0.0git (git@github.com:ayermolo/llvm-project.git a1d8664d409cac2a923176a8e9a731385bde279e)" # string offset=0
+.Linfo_string1:
+ .asciz "main.cpp" # string offset=108
+.Linfo_string2:
+ .asciz "/repro2" # string offset=117
+.Linfo_string3:
+ .asciz "_Z6helperi" # string offset=162
+.Linfo_string4:
+ .asciz "helper" # string offset=173
+.Linfo_string5:
+ .asciz "int" # string offset=180
+.Linfo_string6:
+ .asciz "main" # string offset=184
+.Linfo_string7:
+ .asciz "argc" # string offset=189
+.Linfo_string8:
+ .asciz "x" # string offset=194
+.Linfo_string9:
+ .asciz "argv" # string offset=196
+.Linfo_string10:
+ .asciz "char" # string offset=201
+ .section .debug_str_offsets,"",@progbits
+ .long .Linfo_string0
+ .long .Linfo_string1
+ .long .Linfo_string2
+ .long .Linfo_string3
+ .long .Linfo_string4
+ .long .Linfo_string5
+ .long .Linfo_string6
+ .long .Linfo_string7
+ .long .Linfo_string8
+ .long .Linfo_string9
+ .long .Linfo_string10
+ .section .debug_addr,"",@progbits
+ .long .Ldebug_addr_end0-.Ldebug_addr_start0 # Length of contribution
+.Ldebug_addr_start0:
+ .short 5 # DWARF version number
+ .byte 8 # Address size
+ .byte 0 # Segment selector size
+.Laddr_table_base0:
+ .quad _Z6helperi.__part.1
+ .quad _Z6helperi.__part.2
+ .quad _Z6helperi.__part.3
+ .quad .Lfunc_begin0
+ .quad main.__part.1
+ .quad main.__part.2
+ .quad main.__part.3
+ .quad .Lfunc_begin1
+.Ldebug_addr_end0:
+ .ident "clang version 19.0.0git (git@github.com:ayermolo/llvm-project.git a1d8664d409cac2a923176a8e9a731385bde279e)"
+ .section ".note.GNU-stack","",@progbits
+ .addrsig
+ .section .debug_line,"",@progbits
+.Lline_table_start0:
diff --git a/bolt/test/X86/dwarf5-loclist-out-of-order.s b/bolt/test/X86/dwarf5-loclist-out-of-order.s
new file mode 100644
index 0000000..acd0bfa
--- /dev/null
+++ b/bolt/test/X86/dwarf5-loclist-out-of-order.s
@@ -0,0 +1,485 @@
+# REQUIRES: system-linux
+
+# RUN: llvm-mc -dwarf-version=5 -filetype=obj -triple x86_64-unknown-linux %s -o %t1.o
+# RUN: %clang %cflags -dwarf-5 %t1.o -o %t.exe -Wl,-q
+# RUN: llvm-bolt %t.exe -o %t.bolt --update-debug-sections --skip-funcs=main
+# RUN: llvm-dwarfdump --show-form --verbose --debug-loclists --debug-addr %t.bolt > %t.txt
+# RUN: llvm-dwarfdump --show-form --verbose --debug-info %t.bolt >> %t.txt
+# RUN: cat %t.txt | FileCheck --check-prefix=POSTCHECK %s
+
+## Tests to make sure BOLT handles correctly locations that are out of order, and the function is not being processed.
+
+# POSTCHECK: DW_LLE_base_addressx
+# POSTCHECK-NEXT: DW_LLE_offset_pair
+# POSTCHECK-NEXT: DW_LLE_offset_pair
+# POSTCHECK-NEXT: DW_LLE_startx_length
+# POSTCHECK-NEXT: DW_LLE_end_of_list
+# POSTCHECK: DW_LLE_base_addressx
+# POSTCHECK-NEXT: DW_LLE_offset_pair
+# POSTCHECK-NEXT: DW_LLE_offset_pair
+# POSTCHECK-NEXT: DW_LLE_end_of_list
+
+# POSTCHECK: Addrs: [
+# POSTCHECK-NEXT: 0x[[#%.16x,ADDR:]]
+# POSTCHECK-NEXT: 0x
+# POSTCHECK-NEXT: 0x
+# POSTCHECK-NEXT: 0x[[#%.16x,ADDR1:]]
+# POSTCHECK-NEXT: 0x
+# POSTCHECK-NEXT: 0x
+
+# POSTCHECK: DW_TAG_formal_parameter
+# POSTCHECK: DW_TAG_formal_parameter
+# POSTCHECK-NEXT: DW_AT_location
+# POSTCHECK-NEXT: [0x[[#ADDR1]], 0x[[#ADDR1 + 0x1a]]): DW_OP_reg3 RBX
+# POSTCHECK-NEXT: [0x[[#ADDR1 + 0x1a]], 0x[[#ADDR1 + 0x1d]]): DW_OP_entry_value(DW_OP_reg5 RDI), DW_OP_stack_value
+# POSTCHECK-NEXT: [0x[[#ADDR]], 0x[[#ADDR + 0x12]]): DW_OP_reg5 RDI)
+
+## clang++ main.cpp -fno-inline-functions -g2 -O2 -S
+## void use(int * x) {
+## *x += 4;
+## }
+## int main(int argc, char *argv[]) {
+## int x = argc;
+## use(&x);
+## x = x + argc;
+## use(&x);
+## return x;
+## }
+
+## Test was manually modified to re-order locations.
+ .text
+ .file "main.cpp"
+ .globl _Z3usePi # -- Begin function _Z3usePi
+ .p2align 4, 0x90
+ .type _Z3usePi,@function
+_Z3usePi: # @_Z3usePi
+.Lfunc_begin0:
+ .file 0 "/repro" "main.cpp" md5 0xe24a1d6afb5e23ce0028f1f33bc08cd7
+ .cfi_startproc
+# %bb.0: # %entry
+ #DEBUG_VALUE: use:x <- $rdi
+ .loc 0 2 8 prologue_end # main.cpp:2:8
+ addl $4, (%rdi)
+ .loc 0 3 1 # main.cpp:3:1
+ retq
+.Ltmp0:
+.Lfunc_end0:
+ .size _Z3usePi, .Lfunc_end0-_Z3usePi
+ .cfi_endproc
+ # -- End function
+ .globl main # -- Begin function main
+ .p2align 4, 0x90
+ .type main,@function
+main: # @main
+.Lfunc_begin1:
+ .loc 0 4 0 # main.cpp:4:0
+ .cfi_startproc
+# %bb.0: # %entry
+ #DEBUG_VALUE: main:argc <- $edi
+ #DEBUG_VALUE: main:argv <- $rsi
+ pushq %r14
+ .cfi_def_cfa_offset 16
+ pushq %rbx
+ .cfi_def_cfa_offset 24
+ pushq %rax
+ .cfi_def_cfa_offset 32
+ .cfi_offset %rbx, -24
+ .cfi_offset %r14, -16
+ movl %edi, %ebx
+.Ltmp1:
+ .loc 0 5 7 prologue_end # main.cpp:5:7
+ movl %edi, 4(%rsp)
+ leaq 4(%rsp), %r14
+ .loc 0 6 3 # main.cpp:6:3
+ movq %r14, %rdi
+.Ltmp2:
+ #DEBUG_VALUE: main:argc <- $ebx
+ callq _Z3usePi
+.Ltmp3:
+ #DEBUG_VALUE: main:argv <- [DW_OP_LLVM_entry_value 1] $rsi
+ .loc 0 7 5 # main.cpp:7:5
+ addl %ebx, 4(%rsp)
+ .loc 0 8 3 # main.cpp:8:3
+ movq %r14, %rdi
+ callq _Z3usePi
+.Ltmp4:
+ .loc 0 9 10 # main.cpp:9:10
+ movl 4(%rsp), %eax
+ .loc 0 9 3 epilogue_begin is_stmt 0 # main.cpp:9:3
+ addq $8, %rsp
+ .cfi_def_cfa_offset 24
+ popq %rbx
+.Ltmp5:
+ #DEBUG_VALUE: main:argc <- [DW_OP_LLVM_entry_value 1] $edi
+ .cfi_def_cfa_offset 16
+ popq %r14
+ .cfi_def_cfa_offset 8
+ retq
+.Ltmp6:
+.Lfunc_end1:
+ .size main, .Lfunc_end1-main
+ .cfi_endproc
+ # -- End function
+ .section .debug_loclists,"",@progbits
+ .long .Ldebug_list_header_end0-.Ldebug_list_header_start0 # Length
+.Ldebug_list_header_start0:
+ .short 5 # Version
+ .byte 8 # Address size
+ .byte 0 # Segment selector size
+ .long 2 # Offset entry count
+.Lloclists_table_base0:
+ .long .Ldebug_loc0-.Lloclists_table_base0
+ .long .Ldebug_loc1-.Lloclists_table_base0
+.Ldebug_loc0:
+ .byte 4 # DW_LLE_offset_pair
+ .uleb128 .Ltmp2-.Lfunc_begin0 # starting offset
+ .uleb128 .Ltmp5-.Lfunc_begin0 # ending offset
+ .byte 1 # Loc expr size
+ .byte 83 # super-register DW_OP_reg3
+ .byte 4 # DW_LLE_offset_pair
+ .uleb128 .Ltmp5-.Lfunc_begin0 # starting offset
+ .uleb128 .Lfunc_end1-.Lfunc_begin0 # ending offset
+ .byte 4 # Loc expr size
+ .byte 163 # DW_OP_entry_value
+ .byte 1 # 1
+ .byte 85 # super-register DW_OP_reg5
+ .byte 159 # DW_OP_stack_value
+ .byte 4 # DW_LLE_offset_pair #manually moved out of order
+ .uleb128 .Lfunc_begin1-.Lfunc_begin0 # starting offset
+ .uleb128 .Ltmp2-.Lfunc_begin0 # ending offset
+ .byte 1 # Loc expr size
+ .byte 85 # super-register DW_OP_reg5
+ .byte 0 # DW_LLE_end_of_list
+.Ldebug_loc1:
+ .byte 4 # DW_LLE_offset_pair
+ .uleb128 .Lfunc_begin1-.Lfunc_begin0 # starting offset
+ .uleb128 .Ltmp3-.Lfunc_begin0 # ending offset
+ .byte 1 # Loc expr size
+ .byte 84 # DW_OP_reg4
+ .byte 4 # DW_LLE_offset_pair
+ .uleb128 .Ltmp3-.Lfunc_begin0 # starting offset
+ .uleb128 .Lfunc_end1-.Lfunc_begin0 # ending offset
+ .byte 4 # Loc expr size
+ .byte 163 # DW_OP_entry_value
+ .byte 1 # 1
+ .byte 84 # DW_OP_reg4
+ .byte 159 # DW_OP_stack_value
+ .byte 0 # DW_LLE_end_of_list
+.Ldebug_list_header_end0:
+ .section .debug_abbrev,"",@progbits
+ .byte 1 # Abbreviation Code
+ .byte 17 # DW_TAG_compile_unit
+ .byte 1 # DW_CHILDREN_yes
+ .byte 37 # DW_AT_producer
+ .byte 37 # DW_FORM_strx1
+ .byte 19 # DW_AT_language
+ .byte 5 # DW_FORM_data2
+ .byte 3 # DW_AT_name
+ .byte 37 # DW_FORM_strx1
+ .byte 114 # DW_AT_str_offsets_base
+ .byte 23 # DW_FORM_sec_offset
+ .byte 16 # DW_AT_stmt_list
+ .byte 23 # DW_FORM_sec_offset
+ .byte 27 # DW_AT_comp_dir
+ .byte 37 # DW_FORM_strx1
+ .byte 17 # DW_AT_low_pc
+ .byte 27 # DW_FORM_addrx
+ .byte 18 # DW_AT_high_pc
+ .byte 6 # DW_FORM_data4
+ .byte 115 # DW_AT_addr_base
+ .byte 23 # DW_FORM_sec_offset
+ .ascii "\214\001" # DW_AT_loclists_base
+ .byte 23 # DW_FORM_sec_offset
+ .byte 0 # EOM(1)
+ .byte 0 # EOM(2)
+ .byte 2 # Abbreviation Code
+ .byte 46 # DW_TAG_subprogram
+ .byte 1 # DW_CHILDREN_yes
+ .byte 17 # DW_AT_low_pc
+ .byte 27 # DW_FORM_addrx
+ .byte 18 # DW_AT_high_pc
+ .byte 6 # DW_FORM_data4
+ .byte 64 # DW_AT_frame_base
+ .byte 24 # DW_FORM_exprloc
+ .byte 122 # DW_AT_call_all_calls
+ .byte 25 # DW_FORM_flag_present
+ .byte 110 # DW_AT_linkage_name
+ .byte 37 # DW_FORM_strx1
+ .byte 3 # DW_AT_name
+ .byte 37 # DW_FORM_strx1
+ .byte 58 # DW_AT_decl_file
+ .byte 11 # DW_FORM_data1
+ .byte 59 # DW_AT_decl_line
+ .byte 11 # DW_FORM_data1
+ .byte 63 # DW_AT_external
+ .byte 25 # DW_FORM_flag_present
+ .byte 0 # EOM(1)
+ .byte 0 # EOM(2)
+ .byte 3 # Abbreviation Code
+ .byte 5 # DW_TAG_formal_parameter
+ .byte 0 # DW_CHILDREN_no
+ .byte 2 # DW_AT_location
+ .byte 24 # DW_FORM_exprloc
+ .byte 3 # DW_AT_name
+ .byte 37 # DW_FORM_strx1
+ .byte 58 # DW_AT_decl_file
+ .byte 11 # DW_FORM_data1
+ .byte 59 # DW_AT_decl_line
+ .byte 11 # DW_FORM_data1
+ .byte 73 # DW_AT_type
+ .byte 19 # DW_FORM_ref4
+ .byte 0 # EOM(1)
+ .byte 0 # EOM(2)
+ .byte 4 # Abbreviation Code
+ .byte 46 # DW_TAG_subprogram
+ .byte 1 # DW_CHILDREN_yes
+ .byte 17 # DW_AT_low_pc
+ .byte 27 # DW_FORM_addrx
+ .byte 18 # DW_AT_high_pc
+ .byte 6 # DW_FORM_data4
+ .byte 64 # DW_AT_frame_base
+ .byte 24 # DW_FORM_exprloc
+ .byte 122 # DW_AT_call_all_calls
+ .byte 25 # DW_FORM_flag_present
+ .byte 3 # DW_AT_name
+ .byte 37 # DW_FORM_strx1
+ .byte 58 # DW_AT_decl_file
+ .byte 11 # DW_FORM_data1
+ .byte 59 # DW_AT_decl_line
+ .byte 11 # DW_FORM_data1
+ .byte 73 # DW_AT_type
+ .byte 19 # DW_FORM_ref4
+ .byte 63 # DW_AT_external
+ .byte 25 # DW_FORM_flag_present
+ .byte 0 # EOM(1)
+ .byte 0 # EOM(2)
+ .byte 5 # Abbreviation Code
+ .byte 5 # DW_TAG_formal_parameter
+ .byte 0 # DW_CHILDREN_no
+ .byte 2 # DW_AT_location
+ .byte 34 # DW_FORM_loclistx
+ .byte 3 # DW_AT_name
+ .byte 37 # DW_FORM_strx1
+ .byte 58 # DW_AT_decl_file
+ .byte 11 # DW_FORM_data1
+ .byte 59 # DW_AT_decl_line
+ .byte 11 # DW_FORM_data1
+ .byte 73 # DW_AT_type
+ .byte 19 # DW_FORM_ref4
+ .byte 0 # EOM(1)
+ .byte 0 # EOM(2)
+ .byte 6 # Abbreviation Code
+ .byte 52 # DW_TAG_variable
+ .byte 0 # DW_CHILDREN_no
+ .byte 2 # DW_AT_location
+ .byte 24 # DW_FORM_exprloc
+ .byte 3 # DW_AT_name
+ .byte 37 # DW_FORM_strx1
+ .byte 58 # DW_AT_decl_file
+ .byte 11 # DW_FORM_data1
+ .byte 59 # DW_AT_decl_line
+ .byte 11 # DW_FORM_data1
+ .byte 73 # DW_AT_type
+ .byte 19 # DW_FORM_ref4
+ .byte 0 # EOM(1)
+ .byte 0 # EOM(2)
+ .byte 7 # Abbreviation Code
+ .byte 72 # DW_TAG_call_site
+ .byte 1 # DW_CHILDREN_yes
+ .byte 127 # DW_AT_call_origin
+ .byte 19 # DW_FORM_ref4
+ .byte 125 # DW_AT_call_return_pc
+ .byte 27 # DW_FORM_addrx
+ .byte 0 # EOM(1)
+ .byte 0 # EOM(2)
+ .byte 8 # Abbreviation Code
+ .byte 73 # DW_TAG_call_site_parameter
+ .byte 0 # DW_CHILDREN_no
+ .byte 2 # DW_AT_location
+ .byte 24 # DW_FORM_exprloc
+ .byte 126 # DW_AT_call_value
+ .byte 24 # DW_FORM_exprloc
+ .byte 0 # EOM(1)
+ .byte 0 # EOM(2)
+ .byte 9 # Abbreviation Code
+ .byte 36 # DW_TAG_base_type
+ .byte 0 # DW_CHILDREN_no
+ .byte 3 # DW_AT_name
+ .byte 37 # DW_FORM_strx1
+ .byte 62 # DW_AT_encoding
+ .byte 11 # DW_FORM_data1
+ .byte 11 # DW_AT_byte_size
+ .byte 11 # DW_FORM_data1
+ .byte 0 # EOM(1)
+ .byte 0 # EOM(2)
+ .byte 10 # Abbreviation Code
+ .byte 15 # DW_TAG_pointer_type
+ .byte 0 # DW_CHILDREN_no
+ .byte 73 # DW_AT_type
+ .byte 19 # DW_FORM_ref4
+ .byte 0 # EOM(1)
+ .byte 0 # EOM(2)
+ .byte 0 # EOM(3)
+ .section .debug_info,"",@progbits
+.Lcu_begin0:
+ .long .Ldebug_info_end0-.Ldebug_info_start0 # Length of Unit
+.Ldebug_info_start0:
+ .short 5 # DWARF version number
+ .byte 1 # DWARF Unit Type
+ .byte 8 # Address Size (in bytes)
+ .long .debug_abbrev # Offset Into Abbrev. Section
+ .byte 1 # Abbrev [1] 0xc:0x91 DW_TAG_compile_unit
+ .byte 0 # DW_AT_producer
+ .short 33 # DW_AT_language
+ .byte 1 # DW_AT_name
+ .long .Lstr_offsets_base0 # DW_AT_str_offsets_base
+ .long .Lline_table_start0 # DW_AT_stmt_list
+ .byte 2 # DW_AT_comp_dir
+ .byte 0 # DW_AT_low_pc
+ .long .Lfunc_end1-.Lfunc_begin0 # DW_AT_high_pc
+ .long .Laddr_table_base0 # DW_AT_addr_base
+ .long .Lloclists_table_base0 # DW_AT_loclists_base
+ .byte 2 # Abbrev [2] 0x27:0x17 DW_TAG_subprogram
+ .byte 0 # DW_AT_low_pc
+ .long .Lfunc_end0-.Lfunc_begin0 # DW_AT_high_pc
+ .byte 1 # DW_AT_frame_base
+ .byte 87
+ # DW_AT_call_all_calls
+ .byte 3 # DW_AT_linkage_name
+ .byte 4 # DW_AT_name
+ .byte 0 # DW_AT_decl_file
+ .byte 1 # DW_AT_decl_line
+ # DW_AT_external
+ .byte 3 # Abbrev [3] 0x33:0xa DW_TAG_formal_parameter
+ .byte 1 # DW_AT_location
+ .byte 85
+ .byte 7 # DW_AT_name
+ .byte 0 # DW_AT_decl_file
+ .byte 1 # DW_AT_decl_line
+ .long 137 # DW_AT_type
+ .byte 0 # End Of Children Mark
+ .byte 4 # Abbrev [4] 0x3e:0x47 DW_TAG_subprogram
+ .byte 1 # DW_AT_low_pc
+ .long .Lfunc_end1-.Lfunc_begin1 # DW_AT_high_pc
+ .byte 1 # DW_AT_frame_base
+ .byte 87
+ # DW_AT_call_all_calls
+ .byte 5 # DW_AT_name
+ .byte 0 # DW_AT_decl_file
+ .byte 4 # DW_AT_decl_line
+ .long 133 # DW_AT_type
+ # DW_AT_external
+ .byte 5 # Abbrev [5] 0x4d:0x9 DW_TAG_formal_parameter
+ .byte 0 # DW_AT_location
+ .byte 8 # DW_AT_name
+ .byte 0 # DW_AT_decl_file
+ .byte 4 # DW_AT_decl_line
+ .long 133 # DW_AT_type
+ .byte 5 # Abbrev [5] 0x56:0x9 DW_TAG_formal_parameter
+ .byte 1 # DW_AT_location
+ .byte 9 # DW_AT_name
+ .byte 0 # DW_AT_decl_file
+ .byte 4 # DW_AT_decl_line
+ .long 142 # DW_AT_type
+ .byte 6 # Abbrev [6] 0x5f:0xb DW_TAG_variable
+ .byte 2 # DW_AT_location
+ .byte 145
+ .byte 4
+ .byte 7 # DW_AT_name
+ .byte 0 # DW_AT_decl_file
+ .byte 5 # DW_AT_decl_line
+ .long 133 # DW_AT_type
+ .byte 7 # Abbrev [7] 0x6a:0xd DW_TAG_call_site
+ .long 39 # DW_AT_call_origin
+ .byte 2 # DW_AT_call_return_pc
+ .byte 8 # Abbrev [8] 0x70:0x6 DW_TAG_call_site_parameter
+ .byte 1 # DW_AT_location
+ .byte 85
+ .byte 2 # DW_AT_call_value
+ .byte 126
+ .byte 0
+ .byte 0 # End Of Children Mark
+ .byte 7 # Abbrev [7] 0x77:0xd DW_TAG_call_site
+ .long 39 # DW_AT_call_origin
+ .byte 3 # DW_AT_call_return_pc
+ .byte 8 # Abbrev [8] 0x7d:0x6 DW_TAG_call_site_parameter
+ .byte 1 # DW_AT_location
+ .byte 85
+ .byte 2 # DW_AT_call_value
+ .byte 126
+ .byte 0
+ .byte 0 # End Of Children Mark
+ .byte 0 # End Of Children Mark
+ .byte 9 # Abbrev [9] 0x85:0x4 DW_TAG_base_type
+ .byte 6 # DW_AT_name
+ .byte 5 # DW_AT_encoding
+ .byte 4 # DW_AT_byte_size
+ .byte 10 # Abbrev [10] 0x89:0x5 DW_TAG_pointer_type
+ .long 133 # DW_AT_type
+ .byte 10 # Abbrev [10] 0x8e:0x5 DW_TAG_pointer_type
+ .long 147 # DW_AT_type
+ .byte 10 # Abbrev [10] 0x93:0x5 DW_TAG_pointer_type
+ .long 152 # DW_AT_type
+ .byte 9 # Abbrev [9] 0x98:0x4 DW_TAG_base_type
+ .byte 10 # DW_AT_name
+ .byte 6 # DW_AT_encoding
+ .byte 1 # DW_AT_byte_size
+ .byte 0 # End Of Children Mark
+.Ldebug_info_end0:
+ .section .debug_str_offsets,"",@progbits
+ .long 48 # Length of String Offsets Set
+ .short 5
+ .short 0
+.Lstr_offsets_base0:
+ .section .debug_str,"MS",@progbits,1
+.Linfo_string0:
+ .asciz "clang version 19.0.0git (git@github.com:ayermolo/llvm-project.git a1e412af2bf4bf613021f72205f249ab2469f08b)" # string offset=0
+.Linfo_string1:
+ .asciz "main.cpp" # string offset=108
+.Linfo_string2:
+ .asciz "/repro" # string offset=117
+.Linfo_string3:
+ .asciz "_Z3usePi" # string offset=161
+.Linfo_string4:
+ .asciz "use" # string offset=170
+.Linfo_string5:
+ .asciz "main" # string offset=174
+.Linfo_string6:
+ .asciz "int" # string offset=179
+.Linfo_string7:
+ .asciz "x" # string offset=183
+.Linfo_string8:
+ .asciz "argc" # string offset=185
+.Linfo_string9:
+ .asciz "argv" # string offset=190
+.Linfo_string10:
+ .asciz "char" # string offset=195
+ .section .debug_str_offsets,"",@progbits
+ .long .Linfo_string0
+ .long .Linfo_string1
+ .long .Linfo_string2
+ .long .Linfo_string3
+ .long .Linfo_string4
+ .long .Linfo_string5
+ .long .Linfo_string6
+ .long .Linfo_string7
+ .long .Linfo_string8
+ .long .Linfo_string9
+ .long .Linfo_string10
+ .section .debug_addr,"",@progbits
+ .long .Ldebug_addr_end0-.Ldebug_addr_start0 # Length of contribution
+.Ldebug_addr_start0:
+ .short 5 # DWARF version number
+ .byte 8 # Address size
+ .byte 0 # Segment selector size
+.Laddr_table_base0:
+ .quad .Lfunc_begin0
+ .quad .Lfunc_begin1
+ .quad .Ltmp3
+ .quad .Ltmp4
+.Ldebug_addr_end0:
+ .ident "clang version 19.0.0git (git@github.com:ayermolo/llvm-project.git a1e412af2bf4bf613021f72205f249ab2469f08b)"
+ .section ".note.GNU-stack","",@progbits
+ .addrsig
+ .section .debug_line,"",@progbits
+.Lline_table_start0:
diff --git a/bolt/test/X86/dwarf5-subprogram-single-gc-ranges.test b/bolt/test/X86/dwarf5-subprogram-single-gc-ranges.test
index 04b7203..9f8f895 100644
--- a/bolt/test/X86/dwarf5-subprogram-single-gc-ranges.test
+++ b/bolt/test/X86/dwarf5-subprogram-single-gc-ranges.test
@@ -2,14 +2,12 @@
# RUN: llvm-mc -dwarf-version=5 -filetype=obj -triple x86_64-unknown-linux %p/Inputs/dwarf5-subprogram-single-gc-ranges-main.s -o %t1.o
# RUN: %clang %cflags %t1.o -o %t.exe -Wl,-q
-# RUN: llvm-bolt %t.exe -o %t.bolt --update-debug-sections &> %t1.txt
-# RUN: llvm-dwarfdump --show-form --verbose --debug-info %t.bolt >> %t1.txt
+# RUN: llvm-bolt %t.exe -o %t.bolt --update-debug-sections
+# RUN: llvm-dwarfdump --show-form --verbose --debug-info %t.bolt > %t1.txt
# RUN: cat %t1.txt | FileCheck --check-prefix=POSTCHECK %s
# This test checks BOLT correctly handles DW_TAG_subprogram with Ranges with single entry, when function was GCed.
-# POSTCHECK: BOLT-WARNING: [internal-dwarf-error]: subprogram got GCed by the linker, DW_AT_ranges is used
-
# POSTCHECK: DW_TAG_subprogram
# POSTCHECK-NEXT: DW_AT_frame_base
# POSTCHECK-NEXT: DW_AT_linkage_name
diff --git a/clang-tools-extra/clang-tidy/bugprone/UnusedLocalNonTrivialVariableCheck.cpp b/clang-tools-extra/clang-tidy/bugprone/UnusedLocalNonTrivialVariableCheck.cpp
index 1b763d2..37baae7 100644
--- a/clang-tools-extra/clang-tidy/bugprone/UnusedLocalNonTrivialVariableCheck.cpp
+++ b/clang-tools-extra/clang-tidy/bugprone/UnusedLocalNonTrivialVariableCheck.cpp
@@ -60,6 +60,7 @@ void UnusedLocalNonTrivialVariableCheck::registerMatchers(MatchFinder *Finder) {
varDecl(isLocalVarDecl(), unless(isReferenced()),
unless(isExceptionVariable()), hasLocalStorage(), isDefinition(),
unless(hasType(isReferenceType())), unless(hasType(isTrivial())),
+ unless(hasAttr(attr::Kind::Unused)),
hasType(hasUnqualifiedDesugaredType(
anyOf(recordType(hasDeclaration(namedDecl(
matchesAnyListedName(IncludeTypes),
diff --git a/clang-tools-extra/clang-tidy/modernize/UseOverrideCheck.cpp b/clang-tools-extra/clang-tidy/modernize/UseOverrideCheck.cpp
index e348968..fd5bd9f 100644
--- a/clang-tools-extra/clang-tidy/modernize/UseOverrideCheck.cpp
+++ b/clang-tools-extra/clang-tidy/modernize/UseOverrideCheck.cpp
@@ -7,6 +7,7 @@
//===----------------------------------------------------------------------===//
#include "UseOverrideCheck.h"
+#include "../utils/LexerUtils.h"
#include "clang/AST/ASTContext.h"
#include "clang/ASTMatchers/ASTMatchFinder.h"
#include "clang/Lex/Lexer.h"
@@ -228,9 +229,14 @@ void UseOverrideCheck::check(const MatchFinder::MatchResult &Result) {
if (HasVirtual) {
for (Token Tok : Tokens) {
if (Tok.is(tok::kw_virtual)) {
- Diag << FixItHint::CreateRemoval(CharSourceRange::getTokenRange(
- Tok.getLocation(), Tok.getLocation()));
- break;
+ std::optional<Token> NextToken =
+ utils::lexer::findNextTokenIncludingComments(
+ Tok.getEndLoc(), Sources, getLangOpts());
+ if (NextToken.has_value()) {
+ Diag << FixItHint::CreateRemoval(CharSourceRange::getCharRange(
+ Tok.getLocation(), NextToken->getLocation()));
+ break;
+ }
}
}
}
diff --git a/clang-tools-extra/clang-tidy/readability/RedundantInlineSpecifierCheck.cpp b/clang-tools-extra/clang-tidy/readability/RedundantInlineSpecifierCheck.cpp
index 0e8d17d..1693e5c 100644
--- a/clang-tools-extra/clang-tidy/readability/RedundantInlineSpecifierCheck.cpp
+++ b/clang-tools-extra/clang-tidy/readability/RedundantInlineSpecifierCheck.cpp
@@ -88,10 +88,14 @@ void RedundantInlineSpecifierCheck::registerMatchers(MatchFinder *Finder) {
this);
if (getLangOpts().CPlusPlus17) {
+ const auto IsPartOfRecordDecl = hasAncestor(recordDecl());
Finder->addMatcher(
- varDecl(isInlineSpecified(),
- anyOf(isInternalLinkage(StrictMode),
- allOf(isConstexpr(), hasAncestor(recordDecl()))))
+ varDecl(
+ isInlineSpecified(),
+ anyOf(allOf(isInternalLinkage(StrictMode),
+ unless(allOf(hasInitializer(expr()), IsPartOfRecordDecl,
+ isStaticStorageClass()))),
+ allOf(isConstexpr(), IsPartOfRecordDecl)))
.bind("var_decl"),
this);
}
diff --git a/clang-tools-extra/clangd/unittests/DiagnosticsTests.cpp b/clang-tools-extra/clangd/unittests/DiagnosticsTests.cpp
index f302dcf5..2f6dd06 100644
--- a/clang-tools-extra/clangd/unittests/DiagnosticsTests.cpp
+++ b/clang-tools-extra/clangd/unittests/DiagnosticsTests.cpp
@@ -898,6 +898,48 @@ TEST(DiagnosticTest, ClangTidySelfContainedDiags) {
withFix(equalToFix(ExpectedDFix))))));
}
+TEST(DiagnosticTest, ClangTidySelfContainedDiagsFormatting) {
+ Annotations Main(R"cpp(
+ class Interface {
+ public:
+ virtual void Reset1() = 0;
+ virtual void Reset2() = 0;
+ };
+ class A : public Interface {
+ // This will be marked by clangd to use override instead of virtual
+ $virtual1[[virtual ]]void $Reset1[[Reset1]]()$override1[[]];
+ $virtual2[[virtual ]]/**/void $Reset2[[Reset2]]()$override2[[]];
+ };
+ )cpp");
+ TestTU TU = TestTU::withCode(Main.code());
+ TU.ClangTidyProvider =
+ addTidyChecks("cppcoreguidelines-explicit-virtual-functions,");
+ clangd::Fix const ExpectedFix1{
+ "prefer using 'override' or (rarely) 'final' "
+ "instead of 'virtual'",
+ {TextEdit{Main.range("override1"), " override"},
+ TextEdit{Main.range("virtual1"), ""}},
+ {}};
+ clangd::Fix const ExpectedFix2{
+ "prefer using 'override' or (rarely) 'final' "
+ "instead of 'virtual'",
+ {TextEdit{Main.range("override2"), " override"},
+ TextEdit{Main.range("virtual2"), ""}},
+ {}};
+ // Note that in the Fix we expect the "virtual" keyword and the following
+ // whitespace to be deleted
+ EXPECT_THAT(TU.build().getDiagnostics(),
+ ifTidyChecks(UnorderedElementsAre(
+ AllOf(Diag(Main.range("Reset1"),
+ "prefer using 'override' or (rarely) 'final' "
+ "instead of 'virtual'"),
+ withFix(equalToFix(ExpectedFix1))),
+ AllOf(Diag(Main.range("Reset2"),
+ "prefer using 'override' or (rarely) 'final' "
+ "instead of 'virtual'"),
+ withFix(equalToFix(ExpectedFix2))))));
+}
+
TEST(DiagnosticsTest, Preprocessor) {
// This looks like a preamble, but there's an #else in the middle!
// Check that:
diff --git a/clang-tools-extra/docs/ReleaseNotes.rst b/clang-tools-extra/docs/ReleaseNotes.rst
index ee68c8f..a1b95d2 100644
--- a/clang-tools-extra/docs/ReleaseNotes.rst
+++ b/clang-tools-extra/docs/ReleaseNotes.rst
@@ -121,6 +121,10 @@ Changes in existing checks
<clang-tidy/checks/bugprone/too-small-loop-variable>` check by incorporating
better support for ``const`` loop boundaries.
+- Improved :doc:`bugprone-unused-local-non-trivial-variable
+ <clang-tidy/checks/bugprone/unused-local-non-trivial-variable>` check by
+ ignoring local variable with ``[maybe_unused]`` attribute.
+
- Cleaned up :doc:`cppcoreguidelines-prefer-member-initializer
<clang-tidy/checks/cppcoreguidelines/prefer-member-initializer>`
by removing enforcement of rule `C.48
@@ -160,6 +164,14 @@ Changes in existing checks
`AllowStringArrays` option, enabling the exclusion of array types with deduced
length initialized from string literals.
+- Improved :doc:`modernize-use-override
+ <clang-tidy/checks/modernize/use-override>` check to also remove any trailing
+ whitespace when deleting the ``virtual`` keyword.
+
+- Improved :doc:`readability-redundant-inline-specifier
+ <clang-tidy/checks/readability/redundant-inline-specifier>` check to properly
+ emit warnings for static data member with an in-class initializer.
+
Removed checks
^^^^^^^^^^^^^^
diff --git a/clang-tools-extra/docs/clang-tidy/checks/bugprone/unused-local-non-trivial-variable.rst b/clang-tools-extra/docs/clang-tidy/checks/bugprone/unused-local-non-trivial-variable.rst
index 7531f19..9f283de 100644
--- a/clang-tools-extra/docs/clang-tidy/checks/bugprone/unused-local-non-trivial-variable.rst
+++ b/clang-tools-extra/docs/clang-tidy/checks/bugprone/unused-local-non-trivial-variable.rst
@@ -11,6 +11,7 @@ The following types of variables are excluded from this check:
* exception variables in catch clauses
* static or thread local
* structured bindings
+* variables with ``[[maybe_unused]]`` attribute
This check can be configured to warn on all non-trivial variables by setting
`IncludeTypes` to `.*`, and excluding specific types using `ExcludeTypes`.
diff --git a/clang-tools-extra/test/clang-tidy/checkers/bugprone/unused-local-non-trivial-variable.cpp b/clang-tools-extra/test/clang-tidy/checkers/bugprone/unused-local-non-trivial-variable.cpp
index 19f2344..3fdc24b 100644
--- a/clang-tools-extra/test/clang-tidy/checkers/bugprone/unused-local-non-trivial-variable.cpp
+++ b/clang-tools-extra/test/clang-tidy/checkers/bugprone/unused-local-non-trivial-variable.cpp
@@ -77,6 +77,7 @@ T qux(T Generic) {
// CHECK-MESSAGES: :[[@LINE-1]]:22: warning: unused local variable 'TemplateType' of type 'async::Future<T>' [bugprone-unused-local-non-trivial-variable]
a::Future<T> AliasTemplateType;
// CHECK-MESSAGES: :[[@LINE-1]]:18: warning: unused local variable 'AliasTemplateType' of type 'a::Future<T>' (aka 'Future<type-parameter-0-0>') [bugprone-unused-local-non-trivial-variable]
+ [[maybe_unused]] async::Future<Units> MaybeUnused;
return Generic;
}
diff --git a/clang-tools-extra/test/clang-tidy/checkers/modernize/use-override.cpp b/clang-tools-extra/test/clang-tidy/checkers/modernize/use-override.cpp
index 55f226b..89d1aa4 100644
--- a/clang-tools-extra/test/clang-tidy/checkers/modernize/use-override.cpp
+++ b/clang-tools-extra/test/clang-tidy/checkers/modernize/use-override.cpp
@@ -27,6 +27,7 @@ struct Base {
virtual void f() = 0;
virtual void f2() const = 0;
virtual void g() = 0;
+ virtual void g2() = 0;
virtual void j() const;
virtual MustUseResultObject k();
@@ -126,6 +127,10 @@ public:
virtual void t() throw();
// CHECK-MESSAGES: :[[@LINE-1]]:16: warning: prefer using
// CHECK-FIXES: {{^}} void t() throw() override;
+
+ virtual /* */ void g2();
+ // CHECK-MESSAGES: :[[@LINE-1]]:33: warning: prefer using 'override' or (rarely) 'final' instead of 'virtual'
+ // CHECK-FIXES: {{^}} /* */ void g2() override;
};
// CHECK-MESSAGES-NOT: warning:
diff --git a/clang-tools-extra/test/clang-tidy/checkers/readability/redundant-inline-specifier.cpp b/clang-tools-extra/test/clang-tidy/checkers/readability/redundant-inline-specifier.cpp
index cdd98d8..14f9e88 100644
--- a/clang-tools-extra/test/clang-tidy/checkers/readability/redundant-inline-specifier.cpp
+++ b/clang-tools-extra/test/clang-tidy/checkers/readability/redundant-inline-specifier.cpp
@@ -135,3 +135,17 @@ INLINE_MACRO()
#define INLINE_KW inline
INLINE_KW void fn10() { }
+
+namespace {
+class A
+{
+public:
+ static inline float test = 3.0F;
+ static inline double test2 = 3.0;
+ static inline int test3 = 3;
+
+ static inline float test4;
+ // CHECK-MESSAGES-STRICT: :[[@LINE-1]]:10: warning: variable 'test4' has inline specifier but is implicitly inlined [readability-redundant-inline-specifier]
+ // CHECK-FIXES-STRICT: static float test4;
+};
+}
diff --git a/clang/docs/ClangFormatStyleOptions.rst b/clang/docs/ClangFormatStyleOptions.rst
index 5deeff0..fdf7bfa 100644
--- a/clang/docs/ClangFormatStyleOptions.rst
+++ b/clang/docs/ClangFormatStyleOptions.rst
@@ -1531,114 +1531,8 @@ the configuration (without a prefix: ``Auto``).
.. _AlwaysBreakAfterReturnType:
-**AlwaysBreakAfterReturnType** (``ReturnTypeBreakingStyle``) :versionbadge:`clang-format 3.8` :ref:`¶ <AlwaysBreakAfterReturnType>`
- The function declaration return type breaking style to use.
-
- Possible values:
-
- * ``RTBS_None`` (in configuration: ``None``)
- This is **deprecated**. See ``Automatic`` below.
-
- * ``RTBS_Automatic`` (in configuration: ``Automatic``)
- Break after return type based on ``PenaltyReturnTypeOnItsOwnLine``.
-
- .. code-block:: c++
-
- class A {
- int f() { return 0; };
- };
- int f();
- int f() { return 1; }
- int
- LongName::AnotherLongName();
-
- * ``RTBS_ExceptShortType`` (in configuration: ``ExceptShortType``)
- Same as ``Automatic`` above, except that there is no break after short
- return types.
-
- .. code-block:: c++
-
- class A {
- int f() { return 0; };
- };
- int f();
- int f() { return 1; }
- int LongName::
- AnotherLongName();
-
- * ``RTBS_All`` (in configuration: ``All``)
- Always break after the return type.
-
- .. code-block:: c++
-
- class A {
- int
- f() {
- return 0;
- };
- };
- int
- f();
- int
- f() {
- return 1;
- }
- int
- LongName::AnotherLongName();
-
- * ``RTBS_TopLevel`` (in configuration: ``TopLevel``)
- Always break after the return types of top-level functions.
-
- .. code-block:: c++
-
- class A {
- int f() { return 0; };
- };
- int
- f();
- int
- f() {
- return 1;
- }
- int
- LongName::AnotherLongName();
-
- * ``RTBS_AllDefinitions`` (in configuration: ``AllDefinitions``)
- Always break after the return type of function definitions.
-
- .. code-block:: c++
-
- class A {
- int
- f() {
- return 0;
- };
- };
- int f();
- int
- f() {
- return 1;
- }
- int
- LongName::AnotherLongName();
-
- * ``RTBS_TopLevelDefinitions`` (in configuration: ``TopLevelDefinitions``)
- Always break after the return type of top-level definitions.
-
- .. code-block:: c++
-
- class A {
- int f() { return 0; };
- };
- int f();
- int
- f() {
- return 1;
- }
- int
- LongName::AnotherLongName();
-
-
+**AlwaysBreakAfterReturnType** (``deprecated``) :versionbadge:`clang-format 3.8` :ref:`¶ <AlwaysBreakAfterReturnType>`
+ This option is renamed to ``BreakAfterReturnType``.
.. _AlwaysBreakBeforeMultilineStrings:
@@ -2219,6 +2113,117 @@ the configuration (without a prefix: ``Auto``).
@Mock
DataLoad loader;
+.. _BreakAfterReturnType:
+
+**BreakAfterReturnType** (``ReturnTypeBreakingStyle``) :versionbadge:`clang-format 19` :ref:`¶ <BreakAfterReturnType>`
+ The function declaration return type breaking style to use.
+
+ Possible values:
+
+ * ``RTBS_None`` (in configuration: ``None``)
+ This is **deprecated**. See ``Automatic`` below.
+
+ * ``RTBS_Automatic`` (in configuration: ``Automatic``)
+ Break after return type based on ``PenaltyReturnTypeOnItsOwnLine``.
+
+ .. code-block:: c++
+
+ class A {
+ int f() { return 0; };
+ };
+ int f();
+ int f() { return 1; }
+ int
+ LongName::AnotherLongName();
+
+ * ``RTBS_ExceptShortType`` (in configuration: ``ExceptShortType``)
+ Same as ``Automatic`` above, except that there is no break after short
+ return types.
+
+ .. code-block:: c++
+
+ class A {
+ int f() { return 0; };
+ };
+ int f();
+ int f() { return 1; }
+ int LongName::
+ AnotherLongName();
+
+ * ``RTBS_All`` (in configuration: ``All``)
+ Always break after the return type.
+
+ .. code-block:: c++
+
+ class A {
+ int
+ f() {
+ return 0;
+ };
+ };
+ int
+ f();
+ int
+ f() {
+ return 1;
+ }
+ int
+ LongName::AnotherLongName();
+
+ * ``RTBS_TopLevel`` (in configuration: ``TopLevel``)
+ Always break after the return types of top-level functions.
+
+ .. code-block:: c++
+
+ class A {
+ int f() { return 0; };
+ };
+ int
+ f();
+ int
+ f() {
+ return 1;
+ }
+ int
+ LongName::AnotherLongName();
+
+ * ``RTBS_AllDefinitions`` (in configuration: ``AllDefinitions``)
+ Always break after the return type of function definitions.
+
+ .. code-block:: c++
+
+ class A {
+ int
+ f() {
+ return 0;
+ };
+ };
+ int f();
+ int
+ f() {
+ return 1;
+ }
+ int
+ LongName::AnotherLongName();
+
+ * ``RTBS_TopLevelDefinitions`` (in configuration: ``TopLevelDefinitions``)
+ Always break after the return type of top-level definitions.
+
+ .. code-block:: c++
+
+ class A {
+ int f() { return 0; };
+ };
+ int f();
+ int
+ f() {
+ return 1;
+ }
+ int
+ LongName::AnotherLongName();
+
+
+
.. _BreakArrays:
**BreakArrays** (``Boolean``) :versionbadge:`clang-format 16` :ref:`¶ <BreakArrays>`
diff --git a/clang/docs/LanguageExtensions.rst b/clang/docs/LanguageExtensions.rst
index e911568..1db8026 100644
--- a/clang/docs/LanguageExtensions.rst
+++ b/clang/docs/LanguageExtensions.rst
@@ -2764,6 +2764,39 @@ Query for this feature with ``__has_builtin(__builtin_readcyclecounter)``. Note
that even if present, its use may depend on run-time privilege or other OS
controlled state.
+``__builtin_readsteadycounter``
+-------------------------------
+
+``__builtin_readsteadycounter`` is used to access the fixed frequency counter
+register (or a similar steady-rate clock) on those targets that support it.
+The function is similar to ``__builtin_readcyclecounter`` above except that the
+frequency is fixed, making it suitable for measuring elapsed time.
+
+**Syntax**:
+
+.. code-block:: c++
+
+ __builtin_readsteadycounter()
+
+**Example of Use**:
+
+.. code-block:: c++
+
+ unsigned long long t0 = __builtin_readsteadycounter();
+ do_something();
+ unsigned long long t1 = __builtin_readsteadycounter();
+ unsigned long long secs_to_do_something = (t1 - t0) / tick_rate;
+
+**Description**:
+
+The ``__builtin_readsteadycounter()`` builtin returns the frequency counter value.
+When not supported by the target, the return value is always zero. This builtin
+takes no arguments and produces an unsigned long long result. The builtin does
+not guarantee any particular frequency, only that it is stable. Knowledge of the
+counter's true frequency will need to be provided by the user.
+
+Query for this feature with ``__has_builtin(__builtin_readsteadycounter)``.
+
``__builtin_dump_struct``
-------------------------
diff --git a/clang/docs/ReleaseNotes.rst b/clang/docs/ReleaseNotes.rst
index 402a2f8..a745f20 100644
--- a/clang/docs/ReleaseNotes.rst
+++ b/clang/docs/ReleaseNotes.rst
@@ -56,6 +56,12 @@ Clang Frontend Potentially Breaking Changes
``ArrayRef<TemplateArgument>`` reduces AST memory usage by 0.4% when compiling clang, and is
expected to show similar improvements on other workloads.
+- The ``-Wgnu-binary-literal`` diagnostic group no longer controls any
+ diagnostics. Binary literals are no longer a GNU extension, they're now a C23
+ extension which is controlled via ``-pedantic`` or ``-Wc23-extensions``. Use
+ of ``-Wno-gnu-binary-literal`` will no longer silence this pedantic warning,
+ which may break existing uses with ``-Werror``.
+
Target OS macros extension
^^^^^^^^^^^^^^^^^^^^^^^^^^
A new Clang extension (see :ref:`here <target_os_detail>`) is enabled for
@@ -113,10 +119,15 @@ C Language Changes
C23 Feature Support
^^^^^^^^^^^^^^^^^^^
+- No longer diagnose use of binary literals as an extension in C23 mode. Fixes
+ `#72017 <https://github.com/llvm/llvm-project/issues/72017>`_.
Non-comprehensive list of changes in this release
-------------------------------------------------
+- Added ``__builtin_readsteadycounter`` for reading fixed frequency hardware
+ counters.
+
New Compiler Flags
------------------
@@ -156,6 +167,8 @@ Improvements to Clang's diagnostics
- The ``-Wshorten-64-to-32`` diagnostic is now grouped under ``-Wimplicit-int-conversion`` instead
of ``-Wconversion``. Fixes `#69444 <https://github.com/llvm/llvm-project/issues/69444>`_.
+- Clang now diagnoses friend declarations with an ``enum`` elaborated-type-specifier in language modes after C++98.
+
Improvements to Clang's time-trace
----------------------------------
@@ -228,6 +241,8 @@ Bug Fixes to C++ Support
or non-constant more accurately. Previously, only a subset of the initializer
elements were considered, misclassifying some initializers as constant. Fixes
some of (`#80510 <https://github.com/llvm/llvm-project/issues/80510>`).
+- Clang now ignores top-level cv-qualifiers on function parameters in template partial orderings.
+ (`#75404 <https://github.com/llvm/llvm-project/issues/75404>`_)
Bug Fixes to AST Handling
^^^^^^^^^^^^^^^^^^^^^^^^^
@@ -253,6 +268,8 @@ X86 Support
Arm and AArch64 Support
^^^^^^^^^^^^^^^^^^^^^^^
+- Fixed the incorrect definition of the __ARM_ARCH macro for architectures greater than or equal to v8.1.
+
Android Support
^^^^^^^^^^^^^^^
@@ -288,6 +305,12 @@ DWARF Support in Clang
Floating Point Support in Clang
-------------------------------
+Fixed Point Support in Clang
+----------------------------
+
+- Support fixed point precision macros according to ``7.18a.3`` of
+ `ISO/IEC TR 18037:2008 <https://standards.iso.org/ittf/PubliclyAvailableStandards/c051126_ISO_IEC_TR_18037_2008.zip>`_.
+
AST Matchers
------------
@@ -296,6 +319,8 @@ clang-format
- ``AlwaysBreakTemplateDeclarations`` is deprecated and renamed to
``BreakTemplateDeclarations``.
+- ``AlwaysBreakAfterReturnType`` is deprecated and renamed to
+ ``BreakAfterReturnType``.
libclang
--------
diff --git a/clang/include/clang-c/Index.h b/clang/include/clang-c/Index.h
index 6af4142..3f36206 100644
--- a/clang/include/clang-c/Index.h
+++ b/clang/include/clang-c/Index.h
@@ -2145,7 +2145,11 @@ enum CXCursorKind {
*/
CXCursor_OMPScopeDirective = 306,
- CXCursor_LastStmt = CXCursor_OMPScopeDirective,
+ /** OpenACC Compute Construct.
+ */
+ CXCursor_OpenACCComputeConstruct = 320,
+
+ CXCursor_LastStmt = CXCursor_OpenACCComputeConstruct,
/**
* Cursor that represents the translation unit itself.
diff --git a/clang/include/clang/AST/RecursiveASTVisitor.h b/clang/include/clang/AST/RecursiveASTVisitor.h
index 9da5206..5080551 100644
--- a/clang/include/clang/AST/RecursiveASTVisitor.h
+++ b/clang/include/clang/AST/RecursiveASTVisitor.h
@@ -34,6 +34,7 @@
#include "clang/AST/Stmt.h"
#include "clang/AST/StmtCXX.h"
#include "clang/AST/StmtObjC.h"
+#include "clang/AST/StmtOpenACC.h"
#include "clang/AST/StmtOpenMP.h"
#include "clang/AST/TemplateBase.h"
#include "clang/AST/TemplateName.h"
@@ -505,6 +506,9 @@ private:
bool VisitOMPClauseWithPostUpdate(OMPClauseWithPostUpdate *Node);
bool PostVisitStmt(Stmt *S);
+ bool TraverseOpenACCConstructStmt(OpenACCConstructStmt *S);
+ bool
+ TraverseOpenACCAssociatedStmtConstruct(OpenACCAssociatedStmtConstruct *S);
};
template <typename Derived>
@@ -3910,6 +3914,24 @@ bool RecursiveASTVisitor<Derived>::VisitOMPXBareClause(OMPXBareClause *C) {
return true;
}
+template <typename Derived>
+bool RecursiveASTVisitor<Derived>::TraverseOpenACCConstructStmt(
+ OpenACCConstructStmt *) {
+ // TODO OpenACC: When we implement clauses, ensure we traverse them here.
+ return true;
+}
+
+template <typename Derived>
+bool RecursiveASTVisitor<Derived>::TraverseOpenACCAssociatedStmtConstruct(
+ OpenACCAssociatedStmtConstruct *S) {
+ TRY_TO(TraverseOpenACCConstructStmt(S));
+ TRY_TO(TraverseStmt(S->getAssociatedStmt()));
+ return true;
+}
+
+DEF_TRAVERSE_STMT(OpenACCComputeConstruct,
+ { TRY_TO(TraverseOpenACCAssociatedStmtConstruct(S)); })
+
// FIXME: look at the following tricky-seeming exprs to see if we
// need to recurse on anything. These are ones that have methods
// returning decls or qualtypes or nestednamespecifier -- though I'm
diff --git a/clang/include/clang/AST/StmtOpenACC.h b/clang/include/clang/AST/StmtOpenACC.h
new file mode 100644
index 0000000..9424f4f
--- /dev/null
+++ b/clang/include/clang/AST/StmtOpenACC.h
@@ -0,0 +1,142 @@
+//===- StmtOpenACC.h - Classes for OpenACC directives ----------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+/// \file
+/// This file defines OpenACC AST classes for statement-level contructs.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_AST_STMTOPENACC_H
+#define LLVM_CLANG_AST_STMTOPENACC_H
+
+#include "clang/AST/Stmt.h"
+#include "clang/Basic/OpenACCKinds.h"
+#include "clang/Basic/SourceLocation.h"
+
+namespace clang {
+/// This is the base class for an OpenACC statement-level construct, other
+/// construct types are expected to inherit from this.
+class OpenACCConstructStmt : public Stmt {
+ friend class ASTStmtWriter;
+ friend class ASTStmtReader;
+ /// The directive kind. Each implementation of this interface should handle
+ /// specific kinds.
+ OpenACCDirectiveKind Kind = OpenACCDirectiveKind::Invalid;
+ /// The location of the directive statement, from the '#' to the last token of
+ /// the directive.
+ SourceRange Range;
+
+ // TODO OPENACC: Clauses should probably be collected in this class.
+
+protected:
+ OpenACCConstructStmt(StmtClass SC, OpenACCDirectiveKind K,
+ SourceLocation Start, SourceLocation End)
+ : Stmt(SC), Kind(K), Range(Start, End) {}
+
+public:
+ OpenACCDirectiveKind getDirectiveKind() const { return Kind; }
+
+ static bool classof(const Stmt *S) {
+ return S->getStmtClass() >= firstOpenACCConstructStmtConstant &&
+ S->getStmtClass() <= lastOpenACCConstructStmtConstant;
+ }
+
+ SourceLocation getBeginLoc() const { return Range.getBegin(); }
+ SourceLocation getEndLoc() const { return Range.getEnd(); }
+
+ child_range children() {
+ return child_range(child_iterator(), child_iterator());
+ }
+
+ const_child_range children() const {
+ return const_cast<OpenACCConstructStmt *>(this)->children();
+ }
+};
+
+/// This is a base class for any OpenACC statement-level constructs that have an
+/// associated statement. This class is not intended to be instantiated, but is
+/// a convenient place to hold the associated statement.
+class OpenACCAssociatedStmtConstruct : public OpenACCConstructStmt {
+ friend class ASTStmtWriter;
+ friend class ASTStmtReader;
+ template <typename Derived> friend class RecursiveASTVisitor;
+ Stmt *AssociatedStmt = nullptr;
+
+protected:
+ OpenACCAssociatedStmtConstruct(StmtClass SC, OpenACCDirectiveKind K,
+ SourceLocation Start, SourceLocation End)
+ : OpenACCConstructStmt(SC, K, Start, End) {}
+
+ void setAssociatedStmt(Stmt *S) { AssociatedStmt = S; }
+ Stmt *getAssociatedStmt() { return AssociatedStmt; }
+ const Stmt *getAssociatedStmt() const {
+ return const_cast<OpenACCAssociatedStmtConstruct *>(this)
+ ->getAssociatedStmt();
+ }
+
+public:
+ child_range children() {
+ if (getAssociatedStmt())
+ return child_range(&AssociatedStmt, &AssociatedStmt + 1);
+ return child_range(child_iterator(), child_iterator());
+ }
+
+ const_child_range children() const {
+ return const_cast<OpenACCAssociatedStmtConstruct *>(this)->children();
+ }
+};
+/// This class represents a compute construct, representing a 'Kind' of
+/// `parallel', 'serial', or 'kernel'. These constructs are associated with a
+/// 'structured block', defined as:
+///
+/// in C or C++, an executable statement, possibly compound, with a single
+/// entry at the top and a single exit at the bottom
+///
+/// At the moment there is no real motivation to have a different AST node for
+/// those three, as they are semantically identical, and have only minor
+/// differences in the permitted list of clauses, which can be differentiated by
+/// the 'Kind'.
+class OpenACCComputeConstruct : public OpenACCAssociatedStmtConstruct {
+ friend class ASTStmtWriter;
+ friend class ASTStmtReader;
+ friend class ASTContext;
+ OpenACCComputeConstruct()
+ : OpenACCAssociatedStmtConstruct(OpenACCComputeConstructClass,
+ OpenACCDirectiveKind::Invalid,
+ SourceLocation{}, SourceLocation{}) {}
+
+ OpenACCComputeConstruct(OpenACCDirectiveKind K, SourceLocation Start,
+ SourceLocation End)
+ : OpenACCAssociatedStmtConstruct(OpenACCComputeConstructClass, K, Start,
+ End) {
+ assert((K == OpenACCDirectiveKind::Parallel ||
+ K == OpenACCDirectiveKind::Serial ||
+ K == OpenACCDirectiveKind::Kernels) &&
+ "Only parallel, serial, and kernels constructs should be "
+ "represented by this type");
+ }
+
+ void setStructuredBlock(Stmt *S) { setAssociatedStmt(S); }
+
+public:
+ static bool classof(const Stmt *T) {
+ return T->getStmtClass() == OpenACCComputeConstructClass;
+ }
+
+ static OpenACCComputeConstruct *CreateEmpty(const ASTContext &C, EmptyShell);
+ static OpenACCComputeConstruct *Create(const ASTContext &C,
+ OpenACCDirectiveKind K,
+ SourceLocation BeginLoc,
+ SourceLocation EndLoc);
+
+ Stmt *getStructuredBlock() { return getAssociatedStmt(); }
+ const Stmt *getStructuredBlock() const {
+ return const_cast<OpenACCComputeConstruct *>(this)->getStructuredBlock();
+ }
+};
+} // namespace clang
+#endif // LLVM_CLANG_AST_STMTOPENACC_H
diff --git a/clang/include/clang/AST/StmtVisitor.h b/clang/include/clang/AST/StmtVisitor.h
index 3e51551..990aa2d 100644
--- a/clang/include/clang/AST/StmtVisitor.h
+++ b/clang/include/clang/AST/StmtVisitor.h
@@ -13,13 +13,14 @@
#ifndef LLVM_CLANG_AST_STMTVISITOR_H
#define LLVM_CLANG_AST_STMTVISITOR_H
-#include "clang/AST/ExprConcepts.h"
#include "clang/AST/ExprCXX.h"
+#include "clang/AST/ExprConcepts.h"
#include "clang/AST/ExprObjC.h"
#include "clang/AST/ExprOpenMP.h"
#include "clang/AST/Stmt.h"
#include "clang/AST/StmtCXX.h"
#include "clang/AST/StmtObjC.h"
+#include "clang/AST/StmtOpenACC.h"
#include "clang/AST/StmtOpenMP.h"
#include "clang/Basic/LLVM.h"
#include "llvm/ADT/STLExtras.h"
diff --git a/clang/include/clang/AST/TextNodeDumper.h b/clang/include/clang/AST/TextNodeDumper.h
index 3c4283f..de67f0b 100644
--- a/clang/include/clang/AST/TextNodeDumper.h
+++ b/clang/include/clang/AST/TextNodeDumper.h
@@ -401,6 +401,7 @@ public:
void
VisitLifetimeExtendedTemporaryDecl(const LifetimeExtendedTemporaryDecl *D);
void VisitHLSLBufferDecl(const HLSLBufferDecl *D);
+ void VisitOpenACCConstructStmt(const OpenACCConstructStmt *S);
};
} // namespace clang
diff --git a/clang/include/clang/Analysis/Analyses/UnsafeBufferUsageGadgets.def b/clang/include/clang/Analysis/Analyses/UnsafeBufferUsageGadgets.def
index 07f805e..3273c64 100644
--- a/clang/include/clang/Analysis/Analyses/UnsafeBufferUsageGadgets.def
+++ b/clang/include/clang/Analysis/Analyses/UnsafeBufferUsageGadgets.def
@@ -45,7 +45,8 @@ FIXABLE_GADGET(UPCAddressofArraySubscript) // '&DRE[any]' in an Unspecified Poin
FIXABLE_GADGET(UPCStandalonePointer)
FIXABLE_GADGET(UPCPreIncrement) // '++Ptr' in an Unspecified Pointer Context
FIXABLE_GADGET(UUCAddAssign) // 'Ptr += n' in an Unspecified Untyped Context
-FIXABLE_GADGET(PointerAssignment)
+FIXABLE_GADGET(PtrToPtrAssignment)
+FIXABLE_GADGET(CArrayToPtrAssignment)
FIXABLE_GADGET(PointerInit)
#undef FIXABLE_GADGET
diff --git a/clang/include/clang/Analysis/FlowSensitive/DataflowAnalysisContext.h b/clang/include/clang/Analysis/FlowSensitive/DataflowAnalysisContext.h
index 20e45cc..98bdf03 100644
--- a/clang/include/clang/Analysis/FlowSensitive/DataflowAnalysisContext.h
+++ b/clang/include/clang/Analysis/FlowSensitive/DataflowAnalysisContext.h
@@ -100,6 +100,8 @@ public:
/// to add to a `RecordStorageLocation` of a given type.
/// Typically, this is called from the constructor of a `DataflowAnalysis`
///
+ /// The field types returned by the callback may not have reference type.
+ ///
/// To maintain the invariant that all `RecordStorageLocation`s of a given
/// type have the same fields:
/// * The callback must always return the same result for a given type
@@ -205,8 +207,17 @@ public:
/// type.
llvm::StringMap<QualType> getSyntheticFields(QualType Type) {
assert(Type->isRecordType());
- if (SyntheticFieldCallback)
- return SyntheticFieldCallback(Type);
+ if (SyntheticFieldCallback) {
+ llvm::StringMap<QualType> Result = SyntheticFieldCallback(Type);
+ // Synthetic fields are not allowed to have reference type.
+ assert([&Result] {
+ for (const auto &Entry : Result)
+ if (Entry.getValue()->isReferenceType())
+ return false;
+ return true;
+ }());
+ return Result;
+ }
return {};
}
diff --git a/clang/include/clang/Analysis/FlowSensitive/DataflowEnvironment.h b/clang/include/clang/Analysis/FlowSensitive/DataflowEnvironment.h
index 5c737a5..0aecc74 100644
--- a/clang/include/clang/Analysis/FlowSensitive/DataflowEnvironment.h
+++ b/clang/include/clang/Analysis/FlowSensitive/DataflowEnvironment.h
@@ -681,6 +681,14 @@ private:
llvm::DenseSet<QualType> &Visited,
int Depth, int &CreatedValuesCount);
+ /// Initializes the fields (including synthetic fields) of `Loc` with values,
+ /// unless values of the field type are not supported or we hit one of the
+ /// limits at which we stop producing values (controlled by `Visited`,
+ /// `Depth`, and `CreatedValuesCount`).
+ void initializeFieldsWithValues(RecordStorageLocation &Loc,
+ llvm::DenseSet<QualType> &Visited, int Depth,
+ int &CreatedValuesCount);
+
/// Shared implementation of `createObject()` overloads.
/// `D` and `InitExpr` may be null.
StorageLocation &createObjectInternal(const ValueDecl *D, QualType Ty,
diff --git a/clang/include/clang/Basic/Attr.td b/clang/include/clang/Basic/Attr.td
index 45a29e7..7e643b8 100644
--- a/clang/include/clang/Basic/Attr.td
+++ b/clang/include/clang/Basic/Attr.td
@@ -2891,6 +2891,13 @@ def Suppress : DeclOrStmtAttr {
let Spellings = [CXX11<"gsl", "suppress">, Clang<"suppress">];
let Args = [VariadicStringArgument<"DiagnosticIdentifiers">];
let Accessors = [Accessor<"isGSL", [CXX11<"gsl", "suppress">]>];
+ // There's no fundamental reason why we can't simply accept all Decls
+ // but let's make a short list so that to avoid supporting something weird
+ // by accident. We can always expand the list later.
+ let Subjects = SubjectList<[
+ Stmt, Var, Field, ObjCProperty, Function, ObjCMethod, Record, ObjCInterface,
+ ObjCImplementation, Namespace, Empty
+ ], ErrorDiag, "variables, functions, structs, interfaces, and namespaces">;
let Documentation = [SuppressDocs];
}
diff --git a/clang/include/clang/Basic/AttrDocs.td b/clang/include/clang/Basic/AttrDocs.td
index 8d36909..b96fbdd 100644
--- a/clang/include/clang/Basic/AttrDocs.td
+++ b/clang/include/clang/Basic/AttrDocs.td
@@ -5321,6 +5321,29 @@ Putting the attribute on a compound statement suppresses all warnings in scope:
}
}
+The attribute can also be placed on entire declarations of functions, classes,
+variables, member variables, and so on, to suppress warnings related
+to the declarations themselves. When used this way, the attribute additionally
+suppresses all warnings in the lexical scope of the declaration:
+
+.. code-block:: c++
+
+ class [[clang::suppress]] C {
+ int foo() {
+ int *x = nullptr;
+ ...
+ return *x; // warnings suppressed in the entire class scope
+ }
+
+ int bar();
+ };
+
+ int C::bar() {
+ int *x = nullptr;
+ ...
+ return *x; // warning NOT suppressed! - not lexically nested in 'class C{}'
+ }
+
Some static analysis warnings are accompanied by one or more notes, and the
line of code against which the warning is emitted isn't necessarily the best
for suppression purposes. In such cases the tools are allowed to implement
diff --git a/clang/include/clang/Basic/Builtins.td b/clang/include/clang/Basic/Builtins.td
index 31a2bde..193d585 100644
--- a/clang/include/clang/Basic/Builtins.td
+++ b/clang/include/clang/Basic/Builtins.td
@@ -1110,6 +1110,12 @@ def ReadCycleCounter : Builtin {
let Prototype = "unsigned long long int()";
}
+def ReadSteadyCounter : Builtin {
+ let Spellings = ["__builtin_readsteadycounter"];
+ let Attributes = [NoThrow];
+ let Prototype = "unsigned long long int()";
+}
+
def Trap : Builtin {
let Spellings = ["__builtin_trap"];
let Attributes = [NoThrow, NoReturn];
diff --git a/clang/include/clang/Basic/DiagnosticDriverKinds.td b/clang/include/clang/Basic/DiagnosticDriverKinds.td
index b13181f..0807d88 100644
--- a/clang/include/clang/Basic/DiagnosticDriverKinds.td
+++ b/clang/include/clang/Basic/DiagnosticDriverKinds.td
@@ -804,4 +804,7 @@ def warn_android_unversioned_fallback : Warning<
def err_drv_triple_version_invalid : Error<
"version '%0' in target triple '%1' is invalid">;
+
+def err_drv_installapi_unsupported : Error<
+ "InstallAPI is not supported for '%0'">;
}
diff --git a/clang/include/clang/Basic/DiagnosticGroups.td b/clang/include/clang/Basic/DiagnosticGroups.td
index 975eca0a..7679b85 100644
--- a/clang/include/clang/Basic/DiagnosticGroups.td
+++ b/clang/include/clang/Basic/DiagnosticGroups.td
@@ -44,10 +44,8 @@ def DeprecatedModuleDotMap : DiagGroup<"deprecated-module-dot-map">;
def FrameworkHdrAtImport : DiagGroup<"atimport-in-framework-header">;
def CXX14BinaryLiteral : DiagGroup<"c++14-binary-literal">;
def CXXPre14CompatBinaryLiteral : DiagGroup<"c++98-c++11-compat-binary-literal">;
-def GNUBinaryLiteral : DiagGroup<"gnu-binary-literal">;
def BinaryLiteral : DiagGroup<"binary-literal", [CXX14BinaryLiteral,
- CXXPre14CompatBinaryLiteral,
- GNUBinaryLiteral]>;
+ CXXPre14CompatBinaryLiteral]>;
def GNUCompoundLiteralInitializer : DiagGroup<"gnu-compound-literal-initializer">;
def SingleBitBitFieldConstantConversion :
DiagGroup<"single-bit-bitfield-constant-conversion">;
@@ -1176,10 +1174,13 @@ def C23 : DiagGroup<"c23-extensions">;
def : DiagGroup<"c2x-extensions", [C23]>;
+// Previously supported warning group which is no longer pertinent as binary
+// literals are a C++14 and C23 extension now instead of a GNU extension.
+def GNUBinaryLiteral : DiagGroup<"gnu-binary-literal">;
+
// A warning group for warnings about GCC extensions.
def GNU : DiagGroup<"gnu", [GNUAlignofExpression, GNUAnonymousStruct,
- GNUAutoType,
- GNUBinaryLiteral, GNUCaseRange,
+ GNUAutoType, GNUBinaryLiteral, GNUCaseRange,
GNUComplexInteger, GNUCompoundLiteralInitializer,
GNUConditionalOmittedOperand, GNUDesignator,
GNUEmptyStruct,
diff --git a/clang/include/clang/Basic/DiagnosticLexKinds.td b/clang/include/clang/Basic/DiagnosticLexKinds.td
index 75ca2fa..1354543 100644
--- a/clang/include/clang/Basic/DiagnosticLexKinds.td
+++ b/clang/include/clang/Basic/DiagnosticLexKinds.td
@@ -246,7 +246,10 @@ def warn_cxx17_hex_literal : Warning<
"C++ standards before C++17">,
InGroup<CXXPre17CompatPedantic>, DefaultIgnore;
def ext_binary_literal : Extension<
- "binary integer literals are a GNU extension">, InGroup<GNUBinaryLiteral>;
+ "binary integer literals are a C23 extension">, InGroup<C23>;
+def warn_c23_compat_binary_literal : Warning<
+ "binary integer literals are incompatible with C standards before C23">,
+ InGroup<CPre23Compat>, DefaultIgnore;
def ext_binary_literal_cxx14 : Extension<
"binary integer literals are a C++14 extension">, InGroup<CXX14BinaryLiteral>;
def warn_cxx11_compat_binary_literal : Warning<
diff --git a/clang/include/clang/Basic/DiagnosticSemaKinds.td b/clang/include/clang/Basic/DiagnosticSemaKinds.td
index 754733a..40b47c3 100644
--- a/clang/include/clang/Basic/DiagnosticSemaKinds.td
+++ b/clang/include/clang/Basic/DiagnosticSemaKinds.td
@@ -1637,10 +1637,10 @@ def err_inline_namespace_std : Error<
def err_unexpected_friend : Error<
"friends can only be classes or functions">;
def ext_enum_friend : ExtWarn<
- "befriending enumeration type %0 is a C++11 extension">, InGroup<CXX11>;
-def warn_cxx98_compat_enum_friend : Warning<
- "befriending enumeration type %0 is incompatible with C++98">,
- InGroup<CXX98Compat>, DefaultIgnore;
+ "elaborated enum specifier cannot be declared as a friend">,
+ InGroup<DiagGroup<"friend-enum">>;
+def note_enum_friend : Note<
+ "remove 'enum%select{| struct| class}0' to befriend an enum">;
def ext_nonclass_type_friend : ExtWarn<
"non-class friend type %0 is a C++11 extension">, InGroup<CXX11>;
def warn_cxx98_compat_nonclass_type_friend : Warning<
diff --git a/clang/include/clang/Basic/FileManager.h b/clang/include/clang/Basic/FileManager.h
index 997c17a..2245fd7 100644
--- a/clang/include/clang/Basic/FileManager.h
+++ b/clang/include/clang/Basic/FileManager.h
@@ -283,7 +283,7 @@ public:
bool RequiresNullTerminator = true);
llvm::ErrorOr<std::unique_ptr<llvm::MemoryBuffer>>
getBufferForFile(StringRef Filename, bool isVolatile = false,
- bool RequiresNullTerminator = true) {
+ bool RequiresNullTerminator = true) const {
return getBufferForFileImpl(Filename, /*FileSize=*/-1, isVolatile,
RequiresNullTerminator);
}
@@ -291,7 +291,7 @@ public:
private:
llvm::ErrorOr<std::unique_ptr<llvm::MemoryBuffer>>
getBufferForFileImpl(StringRef Filename, int64_t FileSize, bool isVolatile,
- bool RequiresNullTerminator);
+ bool RequiresNullTerminator) const;
public:
/// Get the 'stat' information for the given \p Path.
diff --git a/clang/include/clang/Basic/IdentifierTable.h b/clang/include/clang/Basic/IdentifierTable.h
index fa8969e..a091639 100644
--- a/clang/include/clang/Basic/IdentifierTable.h
+++ b/clang/include/clang/Basic/IdentifierTable.h
@@ -84,28 +84,28 @@ using IdentifierLocPair = std::pair<IdentifierInfo *, SourceLocation>;
/// of a pointer to one of these classes.
enum { IdentifierInfoAlignment = 8 };
-static constexpr int ObjCOrBuiltinIDBits = 16;
+static constexpr int InterestingIdentifierBits = 16;
-/// The "layout" of ObjCOrBuiltinID is:
+/// The "layout" of InterestingIdentifier is:
/// - ObjCKeywordKind enumerators
-/// - InterestingIdentifierKind enumerators
+/// - NotableIdentifierKind enumerators
/// - Builtin::ID enumerators
-/// - NonSpecialIdentifier
-enum class ObjCKeywordOrInterestingOrBuiltin {
+/// - NotInterestingIdentifier
+enum class InterestingIdentifier {
#define OBJC_AT_KEYWORD(X) objc_##X,
#include "clang/Basic/TokenKinds.def"
NUM_OBJC_KEYWORDS,
-#define INTERESTING_IDENTIFIER(X) X,
+#define NOTABLE_IDENTIFIER(X) X,
#include "clang/Basic/TokenKinds.def"
- NUM_OBJC_KEYWORDS_AND_INTERESTING_IDENTIFIERS,
+ NUM_OBJC_KEYWORDS_AND_NOTABLE_IDENTIFIERS,
NotBuiltin,
#define BUILTIN(ID, TYPE, ATTRS) BI##ID,
#include "clang/Basic/Builtins.inc"
FirstTSBuiltin,
- NonSpecialIdentifier = 65534
+ NotInterestingIdentifier = 65534
};
/// One of these records is kept for each identifier that
@@ -121,8 +121,8 @@ class alignas(IdentifierInfoAlignment) IdentifierInfo {
LLVM_PREFERRED_TYPE(tok::TokenKind)
unsigned TokenID : 9;
- LLVM_PREFERRED_TYPE(ObjCKeywordOrInterestingOrBuiltin)
- unsigned ObjCOrBuiltinID : ObjCOrBuiltinIDBits;
+ LLVM_PREFERRED_TYPE(InterestingIdentifier)
+ unsigned InterestingIdentifierID : InterestingIdentifierBits;
// True if there is a #define for this.
LLVM_PREFERRED_TYPE(bool)
@@ -205,8 +205,8 @@ class alignas(IdentifierInfoAlignment) IdentifierInfo {
IdentifierInfo()
: TokenID(tok::identifier),
- ObjCOrBuiltinID(llvm::to_underlying(
- ObjCKeywordOrInterestingOrBuiltin::NonSpecialIdentifier)),
+ InterestingIdentifierID(llvm::to_underlying(
+ InterestingIdentifier::NotInterestingIdentifier)),
HasMacro(false), HadMacro(false), IsExtension(false),
IsFutureCompatKeyword(false), IsPoisoned(false),
IsCPPOperatorKeyword(false), NeedsHandleIdentifier(false),
@@ -341,71 +341,63 @@ public:
///
/// For example, 'class' will return tok::objc_class if ObjC is enabled.
tok::ObjCKeywordKind getObjCKeywordID() const {
- assert(0 == llvm::to_underlying(
- ObjCKeywordOrInterestingOrBuiltin::objc_not_keyword));
- auto Value =
- static_cast<ObjCKeywordOrInterestingOrBuiltin>(ObjCOrBuiltinID);
- if (Value < ObjCKeywordOrInterestingOrBuiltin::NUM_OBJC_KEYWORDS)
- return static_cast<tok::ObjCKeywordKind>(ObjCOrBuiltinID);
+ assert(0 == llvm::to_underlying(InterestingIdentifier::objc_not_keyword));
+ auto Value = static_cast<InterestingIdentifier>(InterestingIdentifierID);
+ if (Value < InterestingIdentifier::NUM_OBJC_KEYWORDS)
+ return static_cast<tok::ObjCKeywordKind>(InterestingIdentifierID);
return tok::objc_not_keyword;
}
void setObjCKeywordID(tok::ObjCKeywordKind ID) {
- assert(0 == llvm::to_underlying(
- ObjCKeywordOrInterestingOrBuiltin::objc_not_keyword));
- ObjCOrBuiltinID = ID;
+ assert(0 == llvm::to_underlying(InterestingIdentifier::objc_not_keyword));
+ InterestingIdentifierID = ID;
assert(getObjCKeywordID() == ID && "ID too large for field!");
}
/// Return a value indicating whether this is a builtin function.
unsigned getBuiltinID() const {
- auto Value =
- static_cast<ObjCKeywordOrInterestingOrBuiltin>(ObjCOrBuiltinID);
- if (Value > ObjCKeywordOrInterestingOrBuiltin::
- NUM_OBJC_KEYWORDS_AND_INTERESTING_IDENTIFIERS &&
- Value != ObjCKeywordOrInterestingOrBuiltin::NonSpecialIdentifier) {
+ auto Value = static_cast<InterestingIdentifier>(InterestingIdentifierID);
+ if (Value >
+ InterestingIdentifier::NUM_OBJC_KEYWORDS_AND_NOTABLE_IDENTIFIERS &&
+ Value != InterestingIdentifier::NotInterestingIdentifier) {
auto FirstBuiltin =
- llvm::to_underlying(ObjCKeywordOrInterestingOrBuiltin::NotBuiltin);
- return static_cast<Builtin::ID>(ObjCOrBuiltinID - FirstBuiltin);
+ llvm::to_underlying(InterestingIdentifier::NotBuiltin);
+ return static_cast<Builtin::ID>(InterestingIdentifierID - FirstBuiltin);
}
return Builtin::ID::NotBuiltin;
}
void setBuiltinID(unsigned ID) {
assert(ID != Builtin::ID::NotBuiltin);
- auto FirstBuiltin =
- llvm::to_underlying(ObjCKeywordOrInterestingOrBuiltin::NotBuiltin);
- ObjCOrBuiltinID = ID + FirstBuiltin;
+ auto FirstBuiltin = llvm::to_underlying(InterestingIdentifier::NotBuiltin);
+ InterestingIdentifierID = ID + FirstBuiltin;
assert(getBuiltinID() == ID && "ID too large for field!");
}
void clearBuiltinID() {
- ObjCOrBuiltinID = llvm::to_underlying(
- ObjCKeywordOrInterestingOrBuiltin::NonSpecialIdentifier);
- }
-
- tok::InterestingIdentifierKind getInterestingIdentifierID() const {
- auto Value =
- static_cast<ObjCKeywordOrInterestingOrBuiltin>(ObjCOrBuiltinID);
- if (Value > ObjCKeywordOrInterestingOrBuiltin::NUM_OBJC_KEYWORDS &&
- Value < ObjCKeywordOrInterestingOrBuiltin::
- NUM_OBJC_KEYWORDS_AND_INTERESTING_IDENTIFIERS) {
- auto FirstInterestingIdentifier =
- 1 + llvm::to_underlying(
- ObjCKeywordOrInterestingOrBuiltin::NUM_OBJC_KEYWORDS);
- return static_cast<tok::InterestingIdentifierKind>(
- ObjCOrBuiltinID - FirstInterestingIdentifier);
+ InterestingIdentifierID =
+ llvm::to_underlying(InterestingIdentifier::NotInterestingIdentifier);
+ }
+
+ tok::NotableIdentifierKind getNotableIdentifierID() const {
+ auto Value = static_cast<InterestingIdentifier>(InterestingIdentifierID);
+ if (Value > InterestingIdentifier::NUM_OBJC_KEYWORDS &&
+ Value <
+ InterestingIdentifier::NUM_OBJC_KEYWORDS_AND_NOTABLE_IDENTIFIERS) {
+ auto FirstNotableIdentifier =
+ 1 + llvm::to_underlying(InterestingIdentifier::NUM_OBJC_KEYWORDS);
+ return static_cast<tok::NotableIdentifierKind>(InterestingIdentifierID -
+ FirstNotableIdentifier);
}
- return tok::not_interesting;
+ return tok::not_notable;
}
- void setInterestingIdentifierID(unsigned ID) {
- assert(ID != tok::not_interesting);
- auto FirstInterestingIdentifier =
- 1 + llvm::to_underlying(
- ObjCKeywordOrInterestingOrBuiltin::NUM_OBJC_KEYWORDS);
- ObjCOrBuiltinID = ID + FirstInterestingIdentifier;
- assert(getInterestingIdentifierID() == ID && "ID too large for field!");
+ void setNotableIdentifierID(unsigned ID) {
+ assert(ID != tok::not_notable);
+ auto FirstNotableIdentifier =
+ 1 + llvm::to_underlying(InterestingIdentifier::NUM_OBJC_KEYWORDS);
+ InterestingIdentifierID = ID + FirstNotableIdentifier;
+ assert(getNotableIdentifierID() == ID && "ID too large for field!");
}
- unsigned getObjCOrBuiltinID() const { return ObjCOrBuiltinID; }
- void setObjCOrBuiltinID(unsigned ID) { ObjCOrBuiltinID = ID; }
+ unsigned getObjCOrBuiltinID() const { return InterestingIdentifierID; }
+ void setObjCOrBuiltinID(unsigned ID) { InterestingIdentifierID = ID; }
/// get/setExtension - Initialize information about whether or not this
/// language token is an extension. This controls extension warnings, and is
diff --git a/clang/include/clang/Basic/OpenACCKinds.h b/clang/include/clang/Basic/OpenACCKinds.h
index afdd0e8..4456f4a 100644
--- a/clang/include/clang/Basic/OpenACCKinds.h
+++ b/clang/include/clang/Basic/OpenACCKinds.h
@@ -16,6 +16,7 @@
#include "clang/Basic/Diagnostic.h"
#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/raw_ostream.h"
namespace clang {
// Represents the Construct/Directive kind of a pragma directive. Note the
@@ -65,8 +66,9 @@ enum class OpenACCDirectiveKind {
Invalid,
};
-inline const StreamingDiagnostic &operator<<(const StreamingDiagnostic &Out,
- OpenACCDirectiveKind K) {
+template <typename StreamTy>
+inline StreamTy &PrintOpenACCDirectiveKind(StreamTy &Out,
+ OpenACCDirectiveKind K) {
switch (K) {
case OpenACCDirectiveKind::Parallel:
return Out << "parallel";
@@ -134,6 +136,16 @@ inline const StreamingDiagnostic &operator<<(const StreamingDiagnostic &Out,
llvm_unreachable("Uncovered directive kind");
}
+inline const StreamingDiagnostic &operator<<(const StreamingDiagnostic &Out,
+ OpenACCDirectiveKind K) {
+ return PrintOpenACCDirectiveKind(Out, K);
+}
+
+inline llvm::raw_ostream &operator<<(llvm::raw_ostream &Out,
+ OpenACCDirectiveKind K) {
+ return PrintOpenACCDirectiveKind(Out, K);
+}
+
enum class OpenACCAtomicKind {
Read,
Write,
@@ -253,8 +265,8 @@ enum class OpenACCClauseKind {
Invalid,
};
-inline const StreamingDiagnostic &operator<<(const StreamingDiagnostic &Out,
- OpenACCClauseKind K) {
+template <typename StreamTy>
+inline StreamTy &PrintOpenACCClauseKind(StreamTy &Out, OpenACCClauseKind K) {
switch (K) {
case OpenACCClauseKind::Finalize:
return Out << "finalize";
@@ -387,6 +399,17 @@ inline const StreamingDiagnostic &operator<<(const StreamingDiagnostic &Out,
}
llvm_unreachable("Uncovered clause kind");
}
+
+inline const StreamingDiagnostic &operator<<(const StreamingDiagnostic &Out,
+ OpenACCClauseKind K) {
+ return PrintOpenACCClauseKind(Out, K);
+}
+
+inline llvm::raw_ostream &operator<<(llvm::raw_ostream &Out,
+ OpenACCClauseKind K) {
+ return PrintOpenACCClauseKind(Out, K);
+}
+
enum class OpenACCDefaultClauseKind {
/// 'none' option.
None,
diff --git a/clang/include/clang/Basic/StmtNodes.td b/clang/include/clang/Basic/StmtNodes.td
index 9d03800..b4e3ae5 100644
--- a/clang/include/clang/Basic/StmtNodes.td
+++ b/clang/include/clang/Basic/StmtNodes.td
@@ -296,3 +296,9 @@ def OMPTargetTeamsGenericLoopDirective : StmtNode<OMPLoopDirective>;
def OMPParallelGenericLoopDirective : StmtNode<OMPLoopDirective>;
def OMPTargetParallelGenericLoopDirective : StmtNode<OMPLoopDirective>;
def OMPErrorDirective : StmtNode<OMPExecutableDirective>;
+
+// OpenACC Constructs.
+def OpenACCConstructStmt : StmtNode<Stmt, /*abstract=*/1>;
+def OpenACCAssociatedStmtConstruct
+ : StmtNode<OpenACCConstructStmt, /*abstract=*/1>;
+def OpenACCComputeConstruct : StmtNode<OpenACCAssociatedStmtConstruct>;
diff --git a/clang/include/clang/Basic/TokenKinds.def b/clang/include/clang/Basic/TokenKinds.def
index 23817cd..2046ab9 100644
--- a/clang/include/clang/Basic/TokenKinds.def
+++ b/clang/include/clang/Basic/TokenKinds.def
@@ -85,8 +85,8 @@
#ifndef PRAGMA_ANNOTATION
#define PRAGMA_ANNOTATION(X) ANNOTATION(X)
#endif
-#ifndef INTERESTING_IDENTIFIER
-#define INTERESTING_IDENTIFIER(X)
+#ifndef NOTABLE_IDENTIFIER
+#define NOTABLE_IDENTIFIER(X)
#endif
//===----------------------------------------------------------------------===//
@@ -808,15 +808,15 @@ OBJC_AT_KEYWORD(import)
OBJC_AT_KEYWORD(available)
//===----------------------------------------------------------------------===//
-// Interesting identifiers.
+// Notable identifiers.
//===----------------------------------------------------------------------===//
-INTERESTING_IDENTIFIER(not_interesting)
-INTERESTING_IDENTIFIER(FILE)
-INTERESTING_IDENTIFIER(jmp_buf)
-INTERESTING_IDENTIFIER(sigjmp_buf)
-INTERESTING_IDENTIFIER(ucontext_t)
-INTERESTING_IDENTIFIER(float_t)
-INTERESTING_IDENTIFIER(double_t)
+NOTABLE_IDENTIFIER(not_notable)
+NOTABLE_IDENTIFIER(FILE)
+NOTABLE_IDENTIFIER(jmp_buf)
+NOTABLE_IDENTIFIER(sigjmp_buf)
+NOTABLE_IDENTIFIER(ucontext_t)
+NOTABLE_IDENTIFIER(float_t)
+NOTABLE_IDENTIFIER(double_t)
// TODO: What to do about context-sensitive keywords like:
// bycopy/byref/in/inout/oneway/out?
@@ -1011,4 +1011,4 @@ ANNOTATION(repl_input_end)
#undef TOK
#undef C99_KEYWORD
#undef C23_KEYWORD
-#undef INTERESTING_IDENTIFIER
+#undef NOTABLE_IDENTIFIER
diff --git a/clang/include/clang/Basic/TokenKinds.h b/clang/include/clang/Basic/TokenKinds.h
index 7529b92..e5183a2 100644
--- a/clang/include/clang/Basic/TokenKinds.h
+++ b/clang/include/clang/Basic/TokenKinds.h
@@ -44,12 +44,12 @@ enum ObjCKeywordKind {
NUM_OBJC_KEYWORDS
};
-/// Provides a namespace for interesting identifers such as float_t and
+/// Provides a namespace for notable identifers such as float_t and
/// double_t.
-enum InterestingIdentifierKind {
-#define INTERESTING_IDENTIFIER(X) X,
+enum NotableIdentifierKind {
+#define NOTABLE_IDENTIFIER(X) X,
#include "clang/Basic/TokenKinds.def"
- NUM_INTERESTING_IDENTIFIERS
+ NUM_NOTABLE_IDENTIFIERS
};
/// Defines the possible values of an on-off-switch (C99 6.10.6p2).
diff --git a/clang/include/clang/Driver/Action.h b/clang/include/clang/Driver/Action.h
index 04fa8b0..2768e2f 100644
--- a/clang/include/clang/Driver/Action.h
+++ b/clang/include/clang/Driver/Action.h
@@ -59,6 +59,7 @@ public:
PreprocessJobClass,
PrecompileJobClass,
ExtractAPIJobClass,
+ InstallAPIJobClass,
AnalyzeJobClass,
MigrateJobClass,
CompileJobClass,
@@ -448,6 +449,17 @@ public:
void addHeaderInput(Action *Input) { getInputs().push_back(Input); }
};
+class InstallAPIJobAction : public JobAction {
+ void anchor() override;
+
+public:
+ InstallAPIJobAction(Action *Input, types::ID OutputType);
+
+ static bool classof(const Action *A) {
+ return A->getKind() == InstallAPIJobClass;
+ }
+};
+
class AnalyzeJobAction : public JobAction {
void anchor() override;
diff --git a/clang/include/clang/Driver/Options.td b/clang/include/clang/Driver/Options.td
index 31e8571..b302afd 100644
--- a/clang/include/clang/Driver/Options.td
+++ b/clang/include/clang/Driver/Options.td
@@ -309,6 +309,8 @@ class AnalyzerOpts<string base>
: KeyPathAndMacro<"AnalyzerOpts->", base, "ANALYZER_"> {}
class MigratorOpts<string base>
: KeyPathAndMacro<"MigratorOpts.", base, "MIGRATOR_"> {}
+class InstallAPIOpts<string base>
+ : KeyPathAndMacro<"InstallAPIOpts.", base, "INSTALLAPI_"> {}
// A boolean option which is opt-in in CC1. The positive option exists in CC1 and
// Args.hasArg(OPT_ffoo) can be used to check that the flag is enabled.
@@ -1114,7 +1116,8 @@ def config_user_dir_EQ : Joined<["--"], "config-user-dir=">,
def coverage : Flag<["-", "--"], "coverage">, Group<Link_Group>,
Visibility<[ClangOption, CLOption]>;
def cpp_precomp : Flag<["-"], "cpp-precomp">, Group<clang_ignored_f_Group>;
-def current__version : JoinedOrSeparate<["-"], "current_version">;
+def current__version : JoinedOrSeparate<["-"], "current_version">,
+ Visibility<[ClangOption, CC1Option]>;
def cxx_isystem : JoinedOrSeparate<["-"], "cxx-isystem">, Group<clang_i_Group>,
HelpText<"Add directory to the C++ SYSTEM include search path">,
Visibility<[ClangOption, CC1Option]>,
@@ -1529,6 +1532,9 @@ def static_libsan : Flag<["-"], "static-libsan">,
HelpText<"Statically link the sanitizer runtime (Not supported for ASan, TSan or UBSan on darwin)">;
def : Flag<["-"], "shared-libasan">, Alias<shared_libsan>;
def fasm : Flag<["-"], "fasm">, Group<f_Group>;
+def installapi : Flag<["-"], "installapi">,
+ Visibility<[ClangOption, CC1Option]>, Group<Action_Group>,
+ HelpText<"Create a text-based stub file by scanning header files">;
defm assume_unique_vtables : BoolFOption<"assume-unique-vtables",
CodeGenOpts<"AssumeUniqueVTables">, DefaultTrue,
@@ -3198,10 +3204,10 @@ def fno_experimental_isel : Flag<["-"], "fno-experimental-isel">, Group<f_clang_
def fveclib : Joined<["-"], "fveclib=">, Group<f_Group>,
Visibility<[ClangOption, CC1Option, FlangOption, FC1Option]>,
HelpText<"Use the given vector functions library">,
- Values<"Accelerate,libmvec,MASSV,SVML,SLEEF,Darwin_libsystem_m,ArmPL,none">,
+ Values<"Accelerate,libmvec,MASSV,SVML,SLEEF,Darwin_libsystem_m,ArmPL,AMDLIBM,none">,
NormalizedValuesScope<"llvm::driver::VectorLibrary">,
NormalizedValues<["Accelerate", "LIBMVEC", "MASSV", "SVML", "SLEEF",
- "Darwin_libsystem_m", "ArmPL", "NoLibrary"]>,
+ "Darwin_libsystem_m", "ArmPL", "AMDLIBM", "NoLibrary"]>,
MarshallingInfoEnum<CodeGenOpts<"VecLib">, "NoLibrary">;
def fno_lax_vector_conversions : Flag<["-"], "fno-lax-vector-conversions">, Group<f_Group>,
Alias<flax_vector_conversions_EQ>, AliasArgs<["none"]>;
@@ -4291,7 +4297,9 @@ def verify_pch : Flag<["-"], "verify-pch">, Group<Action_Group>,
Visibility<[ClangOption, CC1Option]>,
HelpText<"Load and verify that a pre-compiled header file is not stale">;
def init : Separate<["-"], "init">;
-def install__name : Separate<["-"], "install_name">;
+def install__name : Separate<["-"], "install_name">,
+ Visibility<[ClangOption, CC1Option]>,
+ MarshallingInfoString<InstallAPIOpts<"InstallName">>;
def iprefix : JoinedOrSeparate<["-"], "iprefix">, Group<clang_i_Group>,
Visibility<[ClangOption, CC1Option]>,
HelpText<"Set the -iwithprefix/-iwithprefixbefore prefix">, MetaVarName<"<dir>">;
@@ -4414,7 +4422,7 @@ def mwatchsimulator_version_min_EQ : Joined<["-"], "mwatchsimulator-version-min=
def march_EQ : Joined<["-"], "march=">, Group<m_Group>,
Flags<[TargetSpecific]>, Visibility<[ClangOption, CLOption, DXCOption, FlangOption]>,
HelpText<"For a list of available architectures for the target use '-mcpu=help'">;
-def masm_EQ : Joined<["-"], "masm=">, Group<m_Group>;
+def masm_EQ : Joined<["-"], "masm=">, Group<m_Group>, Visibility<[ClangOption, FlangOption]>;
def inline_asm_EQ : Joined<["-"], "inline-asm=">, Group<m_Group>,
Visibility<[ClangOption, CC1Option]>,
Values<"att,intel">,
@@ -5339,7 +5347,7 @@ def print_rocm_search_dirs : Flag<["-", "--"], "print-rocm-search-dirs">,
HelpText<"Print the paths used for finding ROCm installation">,
Visibility<[ClangOption, CLOption]>;
def print_runtime_dir : Flag<["-", "--"], "print-runtime-dir">,
- HelpText<"Print the directory pathname containing clangs runtime libraries">,
+ HelpText<"Print the directory pathname containing Clang's runtime libraries">,
Visibility<[ClangOption, CLOption]>;
def print_diagnostic_options : Flag<["-", "--"], "print-diagnostic-options">,
HelpText<"Print all of Clang's warning options">,
@@ -8466,8 +8474,8 @@ def _SLASH_ZW : CLJoined<"ZW">;
// clang-dxc Options
//===----------------------------------------------------------------------===//
-def dxc_Group : OptionGroup<"<clang-dxc options>">, Visibility<[DXCOption]>,
- HelpText<"dxc compatibility options">;
+def dxc_Group : OptionGroup<"clang-dxc options">, Visibility<[DXCOption]>,
+ HelpText<"dxc compatibility options.">;
class DXCFlag<string name> : Option<["/", "-"], name, KIND_FLAG>,
Group<dxc_Group>, Visibility<[DXCOption]>;
class DXCJoinedOrSeparate<string name> : Option<["/", "-"], name,
diff --git a/clang/include/clang/Driver/Types.def b/clang/include/clang/Driver/Types.def
index f72c27e..570a534 100644
--- a/clang/include/clang/Driver/Types.def
+++ b/clang/include/clang/Driver/Types.def
@@ -94,6 +94,7 @@ TYPE("lto-bc", LTO_BC, INVALID, "o", phases
TYPE("ast", AST, INVALID, "ast", phases::Compile, phases::Backend, phases::Assemble, phases::Link)
TYPE("ifs", IFS, INVALID, "ifs", phases::IfsMerge)
TYPE("ifs-cpp", IFS_CPP, INVALID, "ifs", phases::Compile, phases::IfsMerge)
+TYPE("tbd", TextAPI, INVALID, "tbd", phases::Precompile)
TYPE("pcm", ModuleFile, INVALID, "pcm", phases::Compile, phases::Backend, phases::Assemble, phases::Link)
TYPE("header-unit", HeaderUnit, INVALID, "pcm", phases::Compile, phases::Backend, phases::Assemble, phases::Link)
TYPE("plist", Plist, INVALID, "plist", phases::Compile, phases::Backend, phases::Assemble, phases::Link)
diff --git a/clang/include/clang/Format/Format.h b/clang/include/clang/Format/Format.h
index d9c18e5..e9b2160 100644
--- a/clang/include/clang/Format/Format.h
+++ b/clang/include/clang/Format/Format.h
@@ -1010,9 +1010,10 @@ struct FormatStyle {
/// \version 3.7
DefinitionReturnTypeBreakingStyle AlwaysBreakAfterDefinitionReturnType;
- /// The function declaration return type breaking style to use.
+ /// This option is renamed to ``BreakAfterReturnType``.
/// \version 3.8
- ReturnTypeBreakingStyle AlwaysBreakAfterReturnType;
+ /// @deprecated
+ // ReturnTypeBreakingStyle AlwaysBreakAfterReturnType;
/// If ``true``, always break before multiline string literals.
///
@@ -1576,6 +1577,10 @@ struct FormatStyle {
/// \version 16
AttributeBreakingStyle BreakAfterAttributes;
+ /// The function declaration return type breaking style to use.
+ /// \version 19
+ ReturnTypeBreakingStyle BreakAfterReturnType;
+
/// If ``true``, clang-format will always break after a Json array ``[``
/// otherwise it will scan until the closing ``]`` to determine if it should
/// add newlines between elements (prettier compatible).
@@ -4819,7 +4824,6 @@ struct FormatStyle {
R.AllowShortIfStatementsOnASingleLine &&
AllowShortLambdasOnASingleLine == R.AllowShortLambdasOnASingleLine &&
AllowShortLoopsOnASingleLine == R.AllowShortLoopsOnASingleLine &&
- AlwaysBreakAfterReturnType == R.AlwaysBreakAfterReturnType &&
AlwaysBreakBeforeMultilineStrings ==
R.AlwaysBreakBeforeMultilineStrings &&
AttributeMacros == R.AttributeMacros &&
@@ -4830,6 +4834,7 @@ struct FormatStyle {
BreakAdjacentStringLiterals == R.BreakAdjacentStringLiterals &&
BreakAfterAttributes == R.BreakAfterAttributes &&
BreakAfterJavaFieldAnnotations == R.BreakAfterJavaFieldAnnotations &&
+ BreakAfterReturnType == R.BreakAfterReturnType &&
BreakArrays == R.BreakArrays &&
BreakBeforeBinaryOperators == R.BreakBeforeBinaryOperators &&
BreakBeforeBraces == R.BreakBeforeBraces &&
diff --git a/clang/include/clang/Frontend/CompilerInstance.h b/clang/include/clang/Frontend/CompilerInstance.h
index ac2f940..6eb7972 100644
--- a/clang/include/clang/Frontend/CompilerInstance.h
+++ b/clang/include/clang/Frontend/CompilerInstance.h
@@ -294,6 +294,13 @@ public:
return Invocation->getFrontendOpts();
}
+ InstallAPIOptions &getInstallAPIOpts() {
+ return Invocation->getInstallAPIOpts();
+ }
+ const InstallAPIOptions &getInstallAPIOpts() const {
+ return Invocation->getInstallAPIOpts();
+ }
+
HeaderSearchOptions &getHeaderSearchOpts() {
return Invocation->getHeaderSearchOpts();
}
diff --git a/clang/include/clang/Frontend/CompilerInvocation.h b/clang/include/clang/Frontend/CompilerInvocation.h
index c6528779..a01d969 100644
--- a/clang/include/clang/Frontend/CompilerInvocation.h
+++ b/clang/include/clang/Frontend/CompilerInvocation.h
@@ -18,11 +18,12 @@
#include "clang/Basic/LangStandard.h"
#include "clang/Frontend/DependencyOutputOptions.h"
#include "clang/Frontend/FrontendOptions.h"
+#include "clang/Frontend/InstallAPIOptions.h"
#include "clang/Frontend/MigratorOptions.h"
#include "clang/Frontend/PreprocessorOutputOptions.h"
#include "clang/StaticAnalyzer/Core/AnalyzerOptions.h"
-#include "llvm/ADT/IntrusiveRefCntPtr.h"
#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/IntrusiveRefCntPtr.h"
#include <memory>
#include <string>
@@ -111,6 +112,9 @@ protected:
/// Options controlling preprocessed output.
std::shared_ptr<PreprocessorOutputOptions> PreprocessorOutputOpts;
+ /// Options controlling InstallAPI operations and output.
+ std::shared_ptr<InstallAPIOptions> InstallAPIOpts;
+
/// Dummy tag type whose instance can be passed into the constructor to
/// prevent creation of the reference-counted option objects.
struct EmptyConstructor {};
@@ -145,6 +149,7 @@ public:
const PreprocessorOutputOptions &getPreprocessorOutputOpts() const {
return *PreprocessorOutputOpts;
}
+ const InstallAPIOptions &getInstallAPIOpts() const { return *InstallAPIOpts; }
/// @}
/// Command line generation.
@@ -237,6 +242,7 @@ public:
using CompilerInvocationBase::getFrontendOpts;
using CompilerInvocationBase::getDependencyOutputOpts;
using CompilerInvocationBase::getPreprocessorOutputOpts;
+ using CompilerInvocationBase::getInstallAPIOpts;
/// @}
/// Mutable getters.
@@ -258,6 +264,7 @@ public:
PreprocessorOutputOptions &getPreprocessorOutputOpts() {
return *PreprocessorOutputOpts;
}
+ InstallAPIOptions &getInstallAPIOpts() { return *InstallAPIOpts; }
/// @}
/// Base class internals.
diff --git a/clang/include/clang/Frontend/FrontendActions.h b/clang/include/clang/Frontend/FrontendActions.h
index fcce31ac..b822925 100644
--- a/clang/include/clang/Frontend/FrontendActions.h
+++ b/clang/include/clang/Frontend/FrontendActions.h
@@ -130,6 +130,16 @@ protected:
bool shouldEraseOutputFiles() override;
};
+class InstallAPIAction : public ASTFrontendAction {
+protected:
+ std::unique_ptr<ASTConsumer> CreateASTConsumer(CompilerInstance &CI,
+ StringRef InFile) override;
+
+public:
+ static std::unique_ptr<llvm::raw_pwrite_stream>
+ CreateOutputFile(CompilerInstance &CI, StringRef InFile);
+};
+
class GenerateInterfaceStubsAction : public ASTFrontendAction {
protected:
std::unique_ptr<ASTConsumer> CreateASTConsumer(CompilerInstance &CI,
diff --git a/clang/include/clang/Frontend/FrontendOptions.h b/clang/include/clang/Frontend/FrontendOptions.h
index 53a8681..62d16ba 100644
--- a/clang/include/clang/Frontend/FrontendOptions.h
+++ b/clang/include/clang/Frontend/FrontendOptions.h
@@ -100,6 +100,9 @@ enum ActionKind {
/// Only execute frontend initialization.
InitOnly,
+ // Create TextAPI stub.
+ InstallAPI,
+
/// Dump information about a module file.
ModuleFileInfo,
diff --git a/clang/include/clang/Frontend/InstallAPIOptions.h b/clang/include/clang/Frontend/InstallAPIOptions.h
new file mode 100644
index 0000000..cf65a33
--- /dev/null
+++ b/clang/include/clang/Frontend/InstallAPIOptions.h
@@ -0,0 +1,28 @@
+//===--- InstallAPIOptions.h ------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_FRONTEND_INSTALLAPIOPTIONS_H
+#define LLVM_CLANG_FRONTEND_INSTALLAPIOPTIONS_H
+
+#include "llvm/TextAPI/PackedVersion.h"
+
+namespace clang {
+
+/// InstallAPIOptions - Options for controlling InstallAPI verification and
+/// TextAPI output.
+class InstallAPIOptions {
+public:
+ /// The install name which is apart of the library's ID.
+ std::string InstallName;
+
+ /// The current version which is apart of the library's ID.
+ llvm::MachO::PackedVersion CurrentVersion;
+};
+} // namespace clang
+
+#endif
diff --git a/clang/include/clang/InstallAPI/Context.h b/clang/include/clang/InstallAPI/Context.h
new file mode 100644
index 0000000..a1ff7c1
--- /dev/null
+++ b/clang/include/clang/InstallAPI/Context.h
@@ -0,0 +1,65 @@
+//===- InstallAPI/Context.h -------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Top level types for interacting with the generic clang driver and frontend
+// for InstallAPI operations.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_INSTALLAPI_CONTEXT_H
+#define LLVM_CLANG_INSTALLAPI_CONTEXT_H
+
+#include "clang/AST/ASTConsumer.h"
+#include "clang/Basic/Diagnostic.h"
+#include "llvm/ADT/IntrusiveRefCntPtr.h"
+#include "llvm/TextAPI/InterfaceFile.h"
+#include "llvm/TextAPI/RecordVisitor.h"
+#include "llvm/TextAPI/RecordsSlice.h"
+
+namespace clang {
+namespace installapi {
+
+/// Struct used for generating validating InstallAPI.
+/// The attributes captured represent all necessary information
+/// to generate TextAPI output.
+struct InstallAPIContext {
+
+ /// Library attributes that are typically passed as linker inputs.
+ llvm::MachO::RecordsSlice::BinaryAttrs BA;
+
+ /// Active target triple to parse.
+ llvm::Triple TargetTriple{};
+
+ /// Output stream to write TextAPI file to.
+ std::unique_ptr<llvm::raw_pwrite_stream> OS = nullptr;
+
+ /// DiagnosticsEngine to report errors.
+ llvm::IntrusiveRefCntPtr<DiagnosticsEngine> Diags = nullptr;
+
+ /// File Path of output location.
+ StringRef OutputLoc{};
+
+ /// What encoding to write output as.
+ llvm::MachO::FileType FT = llvm::MachO::FileType::TBD_V5;
+};
+
+class InstallAPIConsumer : public ASTConsumer {
+public:
+ InstallAPIConsumer(InstallAPIContext InstallAPICtx)
+ : Ctx(std::move(InstallAPICtx)) {}
+
+ void HandleTranslationUnit(ASTContext &ASTContext) override;
+
+private:
+ InstallAPIContext Ctx;
+};
+
+} // namespace installapi
+} // namespace clang
+
+#endif // LLVM_CLANG_INSTALLAPI_CONTEXT_H
diff --git a/clang/include/clang/Sema/DeclSpec.h b/clang/include/clang/Sema/DeclSpec.h
index d161147..316e807 100644
--- a/clang/include/clang/Sema/DeclSpec.h
+++ b/clang/include/clang/Sema/DeclSpec.h
@@ -346,10 +346,7 @@ public:
// FIXME: Attributes should be included here.
};
- enum FriendSpecified : bool {
- No,
- Yes,
- };
+ enum FriendSpecified : bool { No, Yes };
private:
// storage-class-specifier
@@ -400,7 +397,7 @@ private:
// friend-specifier
LLVM_PREFERRED_TYPE(bool)
- unsigned Friend_specified : 1;
+ unsigned FriendSpecifiedFirst : 1;
// constexpr-specifier
LLVM_PREFERRED_TYPE(ConstexprSpecKind)
@@ -491,7 +488,7 @@ public:
TypeSpecPipe(false), TypeSpecSat(false), ConstrainedAuto(false),
TypeQualifiers(TQ_unspecified), FS_inline_specified(false),
FS_forceinline_specified(false), FS_virtual_specified(false),
- FS_noreturn_specified(false), Friend_specified(false),
+ FS_noreturn_specified(false), FriendSpecifiedFirst(false),
ConstexprSpecifier(
static_cast<unsigned>(ConstexprSpecKind::Unspecified)),
Attrs(attrFactory), writtenBS(), ObjCQualifiers(nullptr) {}
@@ -818,9 +815,11 @@ public:
const char *&PrevSpec, unsigned &DiagID);
FriendSpecified isFriendSpecified() const {
- return static_cast<FriendSpecified>(Friend_specified);
+ return static_cast<FriendSpecified>(FriendLoc.isValid());
}
+ bool isFriendSpecifiedFirst() const { return FriendSpecifiedFirst; }
+
SourceLocation getFriendSpecLoc() const { return FriendLoc; }
bool isModulePrivateSpecified() const { return ModulePrivateLoc.isValid(); }
diff --git a/clang/include/clang/Sema/Sema.h b/clang/include/clang/Sema/Sema.h
index ed933f2..978949a 100644
--- a/clang/include/clang/Sema/Sema.h
+++ b/clang/include/clang/Sema/Sema.h
@@ -8039,9 +8039,6 @@ public:
SourceLocation RParenLoc, bool Failed);
void DiagnoseStaticAssertDetails(const Expr *E);
- FriendDecl *CheckFriendTypeDecl(SourceLocation LocStart,
- SourceLocation FriendLoc,
- TypeSourceInfo *TSInfo);
Decl *ActOnFriendTypeDecl(Scope *S, const DeclSpec &DS,
MultiTemplateParamsArg TemplateParams);
NamedDecl *ActOnFriendFunctionDecl(Scope *S, Declarator &D,
diff --git a/clang/include/clang/Serialization/ASTBitCodes.h b/clang/include/clang/Serialization/ASTBitCodes.h
index 9de9251..f31efa5 100644
--- a/clang/include/clang/Serialization/ASTBitCodes.h
+++ b/clang/include/clang/Serialization/ASTBitCodes.h
@@ -2018,6 +2018,9 @@ enum StmtCode {
// SYCLUniqueStableNameExpr
EXPR_SYCL_UNIQUE_STABLE_NAME,
+
+ // OpenACC Constructs
+ STMT_OPENACC_COMPUTE_CONSTRUCT,
};
/// The kinds of designators that can occur in a
diff --git a/clang/lib/AST/ASTStructuralEquivalence.cpp b/clang/lib/AST/ASTStructuralEquivalence.cpp
index 3b7ebbb..fe6e03c 100644
--- a/clang/lib/AST/ASTStructuralEquivalence.cpp
+++ b/clang/lib/AST/ASTStructuralEquivalence.cpp
@@ -74,6 +74,7 @@
#include "clang/AST/ExprOpenMP.h"
#include "clang/AST/NestedNameSpecifier.h"
#include "clang/AST/StmtObjC.h"
+#include "clang/AST/StmtOpenACC.h"
#include "clang/AST/StmtOpenMP.h"
#include "clang/AST/TemplateBase.h"
#include "clang/AST/TemplateName.h"
diff --git a/clang/lib/AST/CMakeLists.txt b/clang/lib/AST/CMakeLists.txt
index ebcb395..49dcf2e 100644
--- a/clang/lib/AST/CMakeLists.txt
+++ b/clang/lib/AST/CMakeLists.txt
@@ -112,6 +112,7 @@ add_clang_library(clangAST
StmtCXX.cpp
StmtIterator.cpp
StmtObjC.cpp
+ StmtOpenACC.cpp
StmtOpenMP.cpp
StmtPrinter.cpp
StmtProfile.cpp
diff --git a/clang/lib/AST/Interp/ByteCodeExprGen.cpp b/clang/lib/AST/Interp/ByteCodeExprGen.cpp
index 8a2c1e5..9887659 100644
--- a/clang/lib/AST/Interp/ByteCodeExprGen.cpp
+++ b/clang/lib/AST/Interp/ByteCodeExprGen.cpp
@@ -167,7 +167,9 @@ bool ByteCodeExprGen<Emitter>::VisitCastExpr(const CastExpr *CE) {
return this->emitNull(classifyPrim(CE->getType()), CE);
case CK_PointerToIntegral: {
- // TODO: Discard handling.
+ if (DiscardResult)
+ return this->discard(SubExpr);
+
if (!this->visit(SubExpr))
return false;
@@ -1827,8 +1829,19 @@ bool ByteCodeExprGen<Emitter>::VisitCXXConstructExpr(
return false;
}
- if (!this->emitCall(Func, E))
- return false;
+ if (Func->isVariadic()) {
+ uint32_t VarArgSize = 0;
+ unsigned NumParams = Func->getNumWrittenParams();
+ for (unsigned I = NumParams, N = E->getNumArgs(); I != N; ++I) {
+ VarArgSize +=
+ align(primSize(classify(E->getArg(I)->getType()).value_or(PT_Ptr)));
+ }
+ if (!this->emitCallVar(Func, VarArgSize, E))
+ return false;
+ } else {
+ if (!this->emitCall(Func, 0, E))
+ return false;
+ }
// Immediately call the destructor if we have to.
if (DiscardResult) {
@@ -1861,7 +1874,7 @@ bool ByteCodeExprGen<Emitter>::VisitCXXConstructExpr(
return false;
}
- if (!this->emitCall(Func, E))
+ if (!this->emitCall(Func, 0, E))
return false;
}
return true;
@@ -2047,7 +2060,7 @@ bool ByteCodeExprGen<Emitter>::VisitCXXInheritedCtorInitExpr(
Offset += align(primSize(PT));
}
- return this->emitCall(F, E);
+ return this->emitCall(F, 0, E);
}
template <class Emitter>
@@ -2057,6 +2070,79 @@ bool ByteCodeExprGen<Emitter>::VisitExpressionTraitExpr(
return this->emitConstBool(E->getValue(), E);
}
+template <class Emitter>
+bool ByteCodeExprGen<Emitter>::VisitCXXUuidofExpr(const CXXUuidofExpr *E) {
+ if (DiscardResult)
+ return true;
+ assert(!Initializing);
+
+ std::optional<unsigned> GlobalIndex = P.getOrCreateGlobal(E->getGuidDecl());
+ if (!GlobalIndex)
+ return false;
+ if (!this->emitGetPtrGlobal(*GlobalIndex, E))
+ return false;
+
+ const Record *R = this->getRecord(E->getType());
+ assert(R);
+
+ const APValue &V = E->getGuidDecl()->getAsAPValue();
+ if (V.getKind() == APValue::None)
+ return true;
+
+ assert(V.isStruct());
+ assert(V.getStructNumBases() == 0);
+ // FIXME: This could be useful in visitAPValue, too.
+ for (unsigned I = 0, N = V.getStructNumFields(); I != N; ++I) {
+ const APValue &F = V.getStructField(I);
+ const Record::Field *RF = R->getField(I);
+
+ if (F.isInt()) {
+ PrimType T = classifyPrim(RF->Decl->getType());
+ if (!this->visitAPValue(F, T, E))
+ return false;
+ if (!this->emitInitField(T, RF->Offset, E))
+ return false;
+ } else if (F.isArray()) {
+ assert(RF->Desc->isPrimitiveArray());
+ const auto *ArrType = RF->Decl->getType()->getAsArrayTypeUnsafe();
+ PrimType ElemT = classifyPrim(ArrType->getElementType());
+ assert(ArrType);
+
+ if (!this->emitDupPtr(E))
+ return false;
+ if (!this->emitGetPtrField(RF->Offset, E))
+ return false;
+
+ for (unsigned A = 0, AN = F.getArraySize(); A != AN; ++A) {
+ if (!this->visitAPValue(F.getArrayInitializedElt(A), ElemT, E))
+ return false;
+ if (!this->emitInitElem(ElemT, A, E))
+ return false;
+ }
+
+ if (!this->emitPopPtr(E))
+ return false;
+ } else {
+ assert(false && "I don't think this should be possible");
+ }
+ }
+
+ return this->emitInitPtr(E);
+}
+
+template <class Emitter>
+bool ByteCodeExprGen<Emitter>::VisitRequiresExpr(const RequiresExpr *E) {
+ assert(classifyPrim(E->getType()) == PT_Bool);
+ return this->emitConstBool(E->isSatisfied(), E);
+}
+
+template <class Emitter>
+bool ByteCodeExprGen<Emitter>::VisitConceptSpecializationExpr(
+ const ConceptSpecializationExpr *E) {
+ assert(classifyPrim(E->getType()) == PT_Bool);
+ return this->emitConstBool(E->isSatisfied(), E);
+}
+
template <class Emitter> bool ByteCodeExprGen<Emitter>::discard(const Expr *E) {
if (E->containsErrors())
return false;
@@ -2771,20 +2857,38 @@ bool ByteCodeExprGen<Emitter>::VisitCallExpr(const CallExpr *E) {
// and if the function has RVO, we already have the pointer on the stack to
// write the result into.
if (IsVirtual && !HasQualifier) {
- if (!this->emitCallVirt(Func, E))
+ uint32_t VarArgSize = 0;
+ unsigned NumParams = Func->getNumWrittenParams();
+ for (unsigned I = NumParams, N = E->getNumArgs(); I != N; ++I)
+ VarArgSize += align(primSize(classify(E->getArg(I)).value_or(PT_Ptr)));
+
+ if (!this->emitCallVirt(Func, VarArgSize, E))
+ return false;
+ } else if (Func->isVariadic()) {
+ uint32_t VarArgSize = 0;
+ unsigned NumParams = Func->getNumWrittenParams();
+ for (unsigned I = NumParams, N = E->getNumArgs(); I != N; ++I)
+ VarArgSize += align(primSize(classify(E->getArg(I)).value_or(PT_Ptr)));
+ if (!this->emitCallVar(Func, VarArgSize, E))
return false;
} else {
- if (!this->emitCall(Func, E))
+ if (!this->emitCall(Func, 0, E))
return false;
}
} else {
// Indirect call. Visit the callee, which will leave a FunctionPointer on
// the stack. Cleanup of the returned value if necessary will be done after
// the function call completed.
+
+ // Sum the size of all args from the call expr.
+ uint32_t ArgSize = 0;
+ for (unsigned I = 0, N = E->getNumArgs(); I != N; ++I)
+ ArgSize += align(primSize(classify(E->getArg(I)).value_or(PT_Ptr)));
+
if (!this->visit(E->getCallee()))
return false;
- if (!this->emitCallPtr(E))
+ if (!this->emitCallPtr(ArgSize, E))
return false;
}
@@ -3311,7 +3415,7 @@ bool ByteCodeExprGen<Emitter>::emitRecordDestruction(const Descriptor *Desc) {
assert(DtorFunc->getNumParams() == 1);
if (!this->emitDupPtr(SourceInfo{}))
return false;
- if (!this->emitCall(DtorFunc, SourceInfo{}))
+ if (!this->emitCall(DtorFunc, 0, SourceInfo{}))
return false;
}
}
diff --git a/clang/lib/AST/Interp/ByteCodeExprGen.h b/clang/lib/AST/Interp/ByteCodeExprGen.h
index ae216f5..eeb56dc 100644
--- a/clang/lib/AST/Interp/ByteCodeExprGen.h
+++ b/clang/lib/AST/Interp/ByteCodeExprGen.h
@@ -113,6 +113,9 @@ public:
bool VisitObjCBoolLiteralExpr(const ObjCBoolLiteralExpr *E);
bool VisitCXXInheritedCtorInitExpr(const CXXInheritedCtorInitExpr *E);
bool VisitExpressionTraitExpr(const ExpressionTraitExpr *E);
+ bool VisitCXXUuidofExpr(const CXXUuidofExpr *E);
+ bool VisitRequiresExpr(const RequiresExpr *E);
+ bool VisitConceptSpecializationExpr(const ConceptSpecializationExpr *E);
protected:
bool visitExpr(const Expr *E) override;
diff --git a/clang/lib/AST/Interp/ByteCodeStmtGen.cpp b/clang/lib/AST/Interp/ByteCodeStmtGen.cpp
index bedcc78d..7e2043f 100644
--- a/clang/lib/AST/Interp/ByteCodeStmtGen.cpp
+++ b/clang/lib/AST/Interp/ByteCodeStmtGen.cpp
@@ -126,7 +126,7 @@ bool ByteCodeStmtGen<Emitter>::emitLambdaStaticInvokerBody(
return false;
}
- if (!this->emitCall(Func, LambdaCallOp))
+ if (!this->emitCall(Func, 0, LambdaCallOp))
return false;
this->emitCleanup();
diff --git a/clang/lib/AST/Interp/Context.cpp b/clang/lib/AST/Interp/Context.cpp
index 5f5a662..7396db2 100644
--- a/clang/lib/AST/Interp/Context.cpp
+++ b/clang/lib/AST/Interp/Context.cpp
@@ -209,7 +209,8 @@ bool Context::Run(State &Parent, const Function *Func, APValue &Result) {
{
InterpState State(Parent, *P, Stk, *this);
- State.Current = new InterpFrame(State, Func, /*Caller=*/nullptr, {});
+ State.Current = new InterpFrame(State, Func, /*Caller=*/nullptr, CodePtr(),
+ Func->getArgSize());
if (Interpret(State, Result)) {
assert(Stk.empty());
return true;
diff --git a/clang/lib/AST/Interp/EvalEmitter.cpp b/clang/lib/AST/Interp/EvalEmitter.cpp
index 945b78d7..c1e4ce3 100644
--- a/clang/lib/AST/Interp/EvalEmitter.cpp
+++ b/clang/lib/AST/Interp/EvalEmitter.cpp
@@ -22,7 +22,7 @@ EvalEmitter::EvalEmitter(Context &Ctx, Program &P, State &Parent,
: Ctx(Ctx), P(P), S(Parent, P, Stk, Ctx, this), EvalResult(&Ctx) {
// Create a dummy frame for the interpreter which does not have locals.
S.Current =
- new InterpFrame(S, /*Func=*/nullptr, /*Caller=*/nullptr, CodePtr());
+ new InterpFrame(S, /*Func=*/nullptr, /*Caller=*/nullptr, CodePtr(), 0);
}
EvalEmitter::~EvalEmitter() {
diff --git a/clang/lib/AST/Interp/Function.h b/clang/lib/AST/Interp/Function.h
index 7c3e0f6..6500e01 100644
--- a/clang/lib/AST/Interp/Function.h
+++ b/clang/lib/AST/Interp/Function.h
@@ -183,6 +183,16 @@ public:
unsigned getNumParams() const { return ParamTypes.size(); }
+ /// Returns the number of parameter this function takes when it's called,
+ /// i.e excluding the instance pointer and the RVO pointer.
+ unsigned getNumWrittenParams() const {
+ assert(getNumParams() >= (hasThisPointer() + hasRVO()));
+ return getNumParams() - hasThisPointer() - hasRVO();
+ }
+ unsigned getWrittenArgSize() const {
+ return ArgSize - (align(primSize(PT_Ptr)) * (hasThisPointer() + hasRVO()));
+ }
+
unsigned getParamOffset(unsigned ParamIndex) const {
return ParamOffsets[ParamIndex];
}
diff --git a/clang/lib/AST/Interp/Interp.cpp b/clang/lib/AST/Interp/Interp.cpp
index 683151f..2338f88 100644
--- a/clang/lib/AST/Interp/Interp.cpp
+++ b/clang/lib/AST/Interp/Interp.cpp
@@ -169,16 +169,27 @@ void cleanupAfterFunctionCall(InterpState &S, CodePtr OpPC) {
// CallExpr we're look for is at the return PC of the current function, i.e.
// in the caller.
// This code path should be executed very rarely.
- const auto *CE =
- cast<CallExpr>(S.Current->Caller->getExpr(S.Current->getRetPC()));
- unsigned FixedParams = CurFunc->getNumParams();
- int32_t ArgsToPop = CE->getNumArgs() - FixedParams;
- assert(ArgsToPop >= 0);
- for (int32_t I = ArgsToPop - 1; I >= 0; --I) {
- const Expr *A = CE->getArg(FixedParams + I);
+ unsigned NumVarArgs;
+ const Expr *const *Args = nullptr;
+ unsigned NumArgs = 0;
+ const Expr *CallSite = S.Current->Caller->getExpr(S.Current->getRetPC());
+ if (const auto *CE = dyn_cast<CallExpr>(CallSite)) {
+ Args = CE->getArgs();
+ NumArgs = CE->getNumArgs();
+ } else if (const auto *CE = dyn_cast<CXXConstructExpr>(CallSite)) {
+ Args = CE->getArgs();
+ NumArgs = CE->getNumArgs();
+ } else
+ assert(false && "Can't get arguments from that expression type");
+
+ assert(NumArgs >= CurFunc->getNumWrittenParams());
+ NumVarArgs = NumArgs - CurFunc->getNumWrittenParams();
+ for (unsigned I = 0; I != NumVarArgs; ++I) {
+ const Expr *A = Args[NumArgs - 1 - I];
popArg(S, A);
}
}
+
// And in any case, remove the fixed parameters (the non-variadic ones)
// at the end.
S.Current->popArgs();
diff --git a/clang/lib/AST/Interp/Interp.h b/clang/lib/AST/Interp/Interp.h
index e2fda18e..5bbb9f1 100644
--- a/clang/lib/AST/Interp/Interp.h
+++ b/clang/lib/AST/Interp/Interp.h
@@ -1915,10 +1915,60 @@ inline bool ArrayDecay(InterpState &S, CodePtr OpPC) {
return false;
}
-inline bool Call(InterpState &S, CodePtr OpPC, const Function *Func) {
+inline bool CallVar(InterpState &S, CodePtr OpPC, const Function *Func,
+ uint32_t VarArgSize) {
if (Func->hasThisPointer()) {
- size_t ThisOffset =
- Func->getArgSize() - (Func->hasRVO() ? primSize(PT_Ptr) : 0);
+ size_t ArgSize = Func->getArgSize() + VarArgSize;
+ size_t ThisOffset = ArgSize - (Func->hasRVO() ? primSize(PT_Ptr) : 0);
+ const Pointer &ThisPtr = S.Stk.peek<Pointer>(ThisOffset);
+
+ // If the current function is a lambda static invoker and
+ // the function we're about to call is a lambda call operator,
+ // skip the CheckInvoke, since the ThisPtr is a null pointer
+ // anyway.
+ if (!(S.Current->getFunction() &&
+ S.Current->getFunction()->isLambdaStaticInvoker() &&
+ Func->isLambdaCallOperator())) {
+ if (!CheckInvoke(S, OpPC, ThisPtr))
+ return false;
+ }
+
+ if (S.checkingPotentialConstantExpression())
+ return false;
+ }
+
+ if (!CheckCallable(S, OpPC, Func))
+ return false;
+
+ if (!CheckCallDepth(S, OpPC))
+ return false;
+
+ auto NewFrame = std::make_unique<InterpFrame>(S, Func, OpPC, VarArgSize);
+ InterpFrame *FrameBefore = S.Current;
+ S.Current = NewFrame.get();
+
+ APValue CallResult;
+ // Note that we cannot assert(CallResult.hasValue()) here since
+ // Ret() above only sets the APValue if the curent frame doesn't
+ // have a caller set.
+ if (Interpret(S, CallResult)) {
+ NewFrame.release(); // Frame was delete'd already.
+ assert(S.Current == FrameBefore);
+ return true;
+ }
+
+ // Interpreting the function failed somehow. Reset to
+ // previous state.
+ S.Current = FrameBefore;
+ return false;
+
+ return false;
+}
+inline bool Call(InterpState &S, CodePtr OpPC, const Function *Func,
+ uint32_t VarArgSize) {
+ if (Func->hasThisPointer()) {
+ size_t ArgSize = Func->getArgSize() + VarArgSize;
+ size_t ThisOffset = ArgSize - (Func->hasRVO() ? primSize(PT_Ptr) : 0);
const Pointer &ThisPtr = S.Stk.peek<Pointer>(ThisOffset);
@@ -1943,7 +1993,7 @@ inline bool Call(InterpState &S, CodePtr OpPC, const Function *Func) {
if (!CheckCallDepth(S, OpPC))
return false;
- auto NewFrame = std::make_unique<InterpFrame>(S, Func, OpPC);
+ auto NewFrame = std::make_unique<InterpFrame>(S, Func, OpPC, VarArgSize);
InterpFrame *FrameBefore = S.Current;
S.Current = NewFrame.get();
@@ -1963,11 +2013,12 @@ inline bool Call(InterpState &S, CodePtr OpPC, const Function *Func) {
return false;
}
-inline bool CallVirt(InterpState &S, CodePtr OpPC, const Function *Func) {
+inline bool CallVirt(InterpState &S, CodePtr OpPC, const Function *Func,
+ uint32_t VarArgSize) {
assert(Func->hasThisPointer());
assert(Func->isVirtual());
- size_t ThisOffset =
- Func->getArgSize() - (Func->hasRVO() ? primSize(PT_Ptr) : 0);
+ size_t ArgSize = Func->getArgSize() + VarArgSize;
+ size_t ThisOffset = ArgSize - (Func->hasRVO() ? primSize(PT_Ptr) : 0);
Pointer &ThisPtr = S.Stk.peek<Pointer>(ThisOffset);
const CXXRecordDecl *DynamicDecl =
@@ -1998,7 +2049,7 @@ inline bool CallVirt(InterpState &S, CodePtr OpPC, const Function *Func) {
}
}
- return Call(S, OpPC, Func);
+ return Call(S, OpPC, Func, VarArgSize);
}
inline bool CallBI(InterpState &S, CodePtr &PC, const Function *Func,
@@ -2016,17 +2067,19 @@ inline bool CallBI(InterpState &S, CodePtr &PC, const Function *Func,
return false;
}
-inline bool CallPtr(InterpState &S, CodePtr OpPC) {
+inline bool CallPtr(InterpState &S, CodePtr OpPC, uint32_t ArgSize) {
const FunctionPointer &FuncPtr = S.Stk.pop<FunctionPointer>();
const Function *F = FuncPtr.getFunction();
- if (!F || !F->isConstexpr())
- return false;
+ assert(F);
+
+ assert(ArgSize >= F->getWrittenArgSize());
+ uint32_t VarArgSize = ArgSize - F->getWrittenArgSize();
if (F->isVirtual())
- return CallVirt(S, OpPC, F);
+ return CallVirt(S, OpPC, F, VarArgSize);
- return Call(S, OpPC, F);
+ return Call(S, OpPC, F, VarArgSize);
}
inline bool GetFnPtr(InterpState &S, CodePtr OpPC, const Function *Func) {
diff --git a/clang/lib/AST/Interp/InterpFrame.cpp b/clang/lib/AST/Interp/InterpFrame.cpp
index dd05dac..f69ff06 100644
--- a/clang/lib/AST/Interp/InterpFrame.cpp
+++ b/clang/lib/AST/Interp/InterpFrame.cpp
@@ -22,10 +22,10 @@ using namespace clang;
using namespace clang::interp;
InterpFrame::InterpFrame(InterpState &S, const Function *Func,
- InterpFrame *Caller, CodePtr RetPC)
+ InterpFrame *Caller, CodePtr RetPC, unsigned ArgSize)
: Caller(Caller), S(S), Depth(Caller ? Caller->Depth + 1 : 0), Func(Func),
- RetPC(RetPC), ArgSize(Func ? Func->getArgSize() : 0),
- Args(static_cast<char *>(S.Stk.top())), FrameOffset(S.Stk.size()) {
+ RetPC(RetPC), ArgSize(ArgSize), Args(static_cast<char *>(S.Stk.top())),
+ FrameOffset(S.Stk.size()) {
if (!Func)
return;
@@ -43,8 +43,9 @@ InterpFrame::InterpFrame(InterpState &S, const Function *Func,
}
}
-InterpFrame::InterpFrame(InterpState &S, const Function *Func, CodePtr RetPC)
- : InterpFrame(S, Func, S.Current, RetPC) {
+InterpFrame::InterpFrame(InterpState &S, const Function *Func, CodePtr RetPC,
+ unsigned VarArgSize)
+ : InterpFrame(S, Func, S.Current, RetPC, Func->getArgSize() + VarArgSize) {
// As per our calling convention, the this pointer is
// part of the ArgSize.
// If the function has RVO, the RVO pointer is first.
@@ -228,10 +229,16 @@ SourceInfo InterpFrame::getSource(CodePtr PC) const {
}
const Expr *InterpFrame::getExpr(CodePtr PC) const {
+ if (Func && (!Func->hasBody() || Func->getDecl()->isImplicit()) && Caller)
+ return Caller->getExpr(RetPC);
+
return S.getExpr(Func, PC);
}
SourceLocation InterpFrame::getLocation(CodePtr PC) const {
+ if (Func && (!Func->hasBody() || Func->getDecl()->isImplicit()) && Caller)
+ return Caller->getLocation(RetPC);
+
return S.getLocation(Func, PC);
}
diff --git a/clang/lib/AST/Interp/InterpFrame.h b/clang/lib/AST/Interp/InterpFrame.h
index cba4f95..322d5dc 100644
--- a/clang/lib/AST/Interp/InterpFrame.h
+++ b/clang/lib/AST/Interp/InterpFrame.h
@@ -32,13 +32,14 @@ public:
/// Creates a new frame for a method call.
InterpFrame(InterpState &S, const Function *Func, InterpFrame *Caller,
- CodePtr RetPC);
+ CodePtr RetPC, unsigned ArgSize);
/// Creates a new frame with the values that make sense.
/// I.e., the caller is the current frame of S,
/// the This() pointer is the current Pointer on the top of S's stack,
/// and the RVO pointer is before that.
- InterpFrame(InterpState &S, const Function *Func, CodePtr RetPC);
+ InterpFrame(InterpState &S, const Function *Func, CodePtr RetPC,
+ unsigned VarArgSize = 0);
/// Destroys the frame, killing all live pointers to stack slots.
~InterpFrame();
diff --git a/clang/lib/AST/Interp/Opcodes.td b/clang/lib/AST/Interp/Opcodes.td
index 7f5bd7e..f1b0894 100644
--- a/clang/lib/AST/Interp/Opcodes.td
+++ b/clang/lib/AST/Interp/Opcodes.td
@@ -191,12 +191,12 @@ def NoRet : Opcode {}
def Call : Opcode {
- let Args = [ArgFunction];
+ let Args = [ArgFunction, ArgUint32];
let Types = [];
}
def CallVirt : Opcode {
- let Args = [ArgFunction];
+ let Args = [ArgFunction, ArgUint32];
let Types = [];
}
@@ -206,7 +206,12 @@ def CallBI : Opcode {
}
def CallPtr : Opcode {
- let Args = [];
+ let Args = [ArgUint32];
+ let Types = [];
+}
+
+def CallVar : Opcode {
+ let Args = [ArgFunction, ArgUint32];
let Types = [];
}
diff --git a/clang/lib/AST/Interp/Pointer.cpp b/clang/lib/AST/Interp/Pointer.cpp
index 8a0a155..3f85635 100644
--- a/clang/lib/AST/Interp/Pointer.cpp
+++ b/clang/lib/AST/Interp/Pointer.cpp
@@ -232,11 +232,7 @@ std::optional<APValue> Pointer::toRValue(const Context &Ctx) const {
// Primitive values.
if (std::optional<PrimType> T = Ctx.classify(Ty)) {
- if (T == PT_Ptr || T == PT_FnPtr) {
- R = Ptr.toAPValue();
- } else {
- TYPE_SWITCH(*T, R = Ptr.deref<T>().toAPValue());
- }
+ TYPE_SWITCH(*T, R = Ptr.deref<T>().toAPValue());
return true;
}
diff --git a/clang/lib/AST/Interp/Program.cpp b/clang/lib/AST/Interp/Program.cpp
index b2b478a..964c037 100644
--- a/clang/lib/AST/Interp/Program.cpp
+++ b/clang/lib/AST/Interp/Program.cpp
@@ -169,7 +169,7 @@ std::optional<unsigned> Program::createGlobal(const ValueDecl *VD,
if (const auto *Var = dyn_cast<VarDecl>(VD)) {
IsStatic = Context::shouldBeGloballyIndexed(VD);
IsExtern = !Var->getAnyInitializer();
- } else if (isa<UnnamedGlobalConstantDecl>(VD)) {
+ } else if (isa<UnnamedGlobalConstantDecl, MSGuidDecl>(VD)) {
IsStatic = true;
IsExtern = false;
} else {
diff --git a/clang/lib/AST/Interp/Program.h b/clang/lib/AST/Interp/Program.h
index 1734268..364a63d 100644
--- a/clang/lib/AST/Interp/Program.h
+++ b/clang/lib/AST/Interp/Program.h
@@ -86,7 +86,7 @@ public:
std::optional<unsigned> getOrCreateDummy(const ValueDecl *VD);
/// Creates a global and returns its index.
- std::optional<unsigned> createGlobal(const ValueDecl *VD, const Expr *E);
+ std::optional<unsigned> createGlobal(const ValueDecl *VD, const Expr *Init);
/// Creates a global from a lifetime-extended temporary.
std::optional<unsigned> createGlobal(const Expr *E);
diff --git a/clang/lib/AST/Interp/Source.cpp b/clang/lib/AST/Interp/Source.cpp
index 4e032c9..45cd0ad 100644
--- a/clang/lib/AST/Interp/Source.cpp
+++ b/clang/lib/AST/Interp/Source.cpp
@@ -33,7 +33,7 @@ SourceRange SourceInfo::getRange() const {
}
const Expr *SourceInfo::asExpr() const {
- if (auto *S = Source.dyn_cast<const Stmt *>())
+ if (const auto *S = Source.dyn_cast<const Stmt *>())
return dyn_cast<Expr>(S);
return nullptr;
}
diff --git a/clang/lib/AST/Stmt.cpp b/clang/lib/AST/Stmt.cpp
index afd0588..fe59d60 100644
--- a/clang/lib/AST/Stmt.cpp
+++ b/clang/lib/AST/Stmt.cpp
@@ -23,6 +23,7 @@
#include "clang/AST/ExprOpenMP.h"
#include "clang/AST/StmtCXX.h"
#include "clang/AST/StmtObjC.h"
+#include "clang/AST/StmtOpenACC.h"
#include "clang/AST/StmtOpenMP.h"
#include "clang/AST/Type.h"
#include "clang/Basic/CharInfo.h"
diff --git a/clang/lib/AST/StmtOpenACC.cpp b/clang/lib/AST/StmtOpenACC.cpp
new file mode 100644
index 0000000..1a99c24
--- /dev/null
+++ b/clang/lib/AST/StmtOpenACC.cpp
@@ -0,0 +1,33 @@
+//===--- StmtOpenACC.cpp - Classes for OpenACC Constructs -----------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the subclesses of Stmt class declared in StmtOpenACC.h
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/AST/StmtOpenACC.h"
+#include "clang/AST/ASTContext.h"
+using namespace clang;
+
+OpenACCComputeConstruct *
+OpenACCComputeConstruct::CreateEmpty(const ASTContext &C, EmptyShell) {
+ void *Mem = C.Allocate(sizeof(OpenACCComputeConstruct),
+ alignof(OpenACCComputeConstruct));
+ auto *Inst = new (Mem) OpenACCComputeConstruct;
+ return Inst;
+}
+
+OpenACCComputeConstruct *
+OpenACCComputeConstruct::Create(const ASTContext &C, OpenACCDirectiveKind K,
+ SourceLocation BeginLoc,
+ SourceLocation EndLoc) {
+ void *Mem = C.Allocate(sizeof(OpenACCComputeConstruct),
+ alignof(OpenACCComputeConstruct));
+ auto *Inst = new (Mem) OpenACCComputeConstruct(K, BeginLoc, EndLoc);
+ return Inst;
+}
diff --git a/clang/lib/AST/StmtPrinter.cpp b/clang/lib/AST/StmtPrinter.cpp
index 1df040e..d66c3cc 100644
--- a/clang/lib/AST/StmtPrinter.cpp
+++ b/clang/lib/AST/StmtPrinter.cpp
@@ -1138,6 +1138,15 @@ void StmtPrinter::VisitOMPTargetParallelGenericLoopDirective(
}
//===----------------------------------------------------------------------===//
+// OpenACC construct printing methods
+//===----------------------------------------------------------------------===//
+void StmtPrinter::VisitOpenACCComputeConstruct(OpenACCComputeConstruct *S) {
+ Indent() << "#pragma acc " << S->getDirectiveKind();
+ // TODO OpenACC: Print Clauses.
+ PrintStmt(S->getStructuredBlock());
+}
+
+//===----------------------------------------------------------------------===//
// Expr printing methods.
//===----------------------------------------------------------------------===//
diff --git a/clang/lib/AST/StmtProfile.cpp b/clang/lib/AST/StmtProfile.cpp
index 1b817cf..b545ff4 100644
--- a/clang/lib/AST/StmtProfile.cpp
+++ b/clang/lib/AST/StmtProfile.cpp
@@ -2441,6 +2441,13 @@ void StmtProfiler::VisitTemplateArgument(const TemplateArgument &Arg) {
}
}
+void StmtProfiler::VisitOpenACCComputeConstruct(
+ const OpenACCComputeConstruct *S) {
+ // VisitStmt handles children, so the AssociatedStmt is handled.
+ VisitStmt(S);
+ // TODO OpenACC: Visit Clauses.
+}
+
void Stmt::Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Context,
bool Canonical, bool ProfileLambdaExpr) const {
StmtProfilerWithPointers Profiler(ID, Context, Canonical, ProfileLambdaExpr);
diff --git a/clang/lib/AST/TextNodeDumper.cpp b/clang/lib/AST/TextNodeDumper.cpp
index 0000d26d..b683eb1 100644
--- a/clang/lib/AST/TextNodeDumper.cpp
+++ b/clang/lib/AST/TextNodeDumper.cpp
@@ -2668,3 +2668,8 @@ void TextNodeDumper::VisitHLSLBufferDecl(const HLSLBufferDecl *D) {
OS << " tbuffer";
dumpName(D);
}
+
+void TextNodeDumper::VisitOpenACCConstructStmt(const OpenACCConstructStmt *S) {
+ OS << " " << S->getDirectiveKind();
+ // TODO OpenACC: Dump clauses as well.
+}
diff --git a/clang/lib/Analysis/FlowSensitive/DataflowEnvironment.cpp b/clang/lib/Analysis/FlowSensitive/DataflowEnvironment.cpp
index 24811de..d487944 100644
--- a/clang/lib/Analysis/FlowSensitive/DataflowEnvironment.cpp
+++ b/clang/lib/Analysis/FlowSensitive/DataflowEnvironment.cpp
@@ -887,34 +887,10 @@ Value *Environment::createValueUnlessSelfReferential(
if (Type->isRecordType()) {
CreatedValuesCount++;
- llvm::DenseMap<const ValueDecl *, StorageLocation *> FieldLocs;
- for (const FieldDecl *Field : DACtx->getModeledFields(Type)) {
- assert(Field != nullptr);
+ auto &Loc = cast<RecordStorageLocation>(createStorageLocation(Type));
+ initializeFieldsWithValues(Loc, Visited, Depth, CreatedValuesCount);
- QualType FieldType = Field->getType();
-
- FieldLocs.insert(
- {Field, &createLocAndMaybeValue(FieldType, Visited, Depth + 1,
- CreatedValuesCount)});
- }
-
- RecordStorageLocation::SyntheticFieldMap SyntheticFieldLocs;
- for (const auto &Entry : DACtx->getSyntheticFields(Type)) {
- SyntheticFieldLocs.insert(
- {Entry.getKey(),
- &createLocAndMaybeValue(Entry.getValue(), Visited, Depth + 1,
- CreatedValuesCount)});
- }
-
- RecordStorageLocation &Loc = DACtx->createRecordStorageLocation(
- Type, std::move(FieldLocs), std::move(SyntheticFieldLocs));
- RecordValue &RecordVal = create<RecordValue>(Loc);
-
- // As we already have a storage location for the `RecordValue`, we can and
- // should associate them in the environment.
- setValue(Loc, RecordVal);
-
- return &RecordVal;
+ return &refreshRecordValue(Loc, *this);
}
return nullptr;
@@ -943,6 +919,50 @@ Environment::createLocAndMaybeValue(QualType Ty,
return Loc;
}
+void Environment::initializeFieldsWithValues(RecordStorageLocation &Loc,
+ llvm::DenseSet<QualType> &Visited,
+ int Depth,
+ int &CreatedValuesCount) {
+ auto initField = [&](QualType FieldType, StorageLocation &FieldLoc) {
+ if (FieldType->isRecordType()) {
+ auto &FieldRecordLoc = cast<RecordStorageLocation>(FieldLoc);
+ setValue(FieldRecordLoc, create<RecordValue>(FieldRecordLoc));
+ initializeFieldsWithValues(FieldRecordLoc, Visited, Depth + 1,
+ CreatedValuesCount);
+ } else {
+ if (!Visited.insert(FieldType.getCanonicalType()).second)
+ return;
+ if (Value *Val = createValueUnlessSelfReferential(
+ FieldType, Visited, Depth + 1, CreatedValuesCount))
+ setValue(FieldLoc, *Val);
+ Visited.erase(FieldType.getCanonicalType());
+ }
+ };
+
+ for (const auto &[Field, FieldLoc] : Loc.children()) {
+ assert(Field != nullptr);
+ QualType FieldType = Field->getType();
+
+ if (FieldType->isReferenceType()) {
+ Loc.setChild(*Field,
+ &createLocAndMaybeValue(FieldType, Visited, Depth + 1,
+ CreatedValuesCount));
+ } else {
+ assert(FieldLoc != nullptr);
+ initField(FieldType, *FieldLoc);
+ }
+ }
+ for (const auto &[FieldName, FieldLoc] : Loc.synthetic_fields()) {
+ assert(FieldLoc != nullptr);
+ QualType FieldType = FieldLoc->getType();
+
+ // Synthetic fields cannot have reference type, so we don't need to deal
+ // with this case.
+ assert(!FieldType->isReferenceType());
+ initField(FieldType, Loc.getSyntheticField(FieldName));
+ }
+}
+
StorageLocation &Environment::createObjectInternal(const ValueDecl *D,
QualType Ty,
const Expr *InitExpr) {
diff --git a/clang/lib/Analysis/FlowSensitive/Transfer.cpp b/clang/lib/Analysis/FlowSensitive/Transfer.cpp
index a098471..f0b15f4 100644
--- a/clang/lib/Analysis/FlowSensitive/Transfer.cpp
+++ b/clang/lib/Analysis/FlowSensitive/Transfer.cpp
@@ -535,7 +535,19 @@ public:
return;
copyRecord(*LocSrc, *LocDst, Env);
- Env.setStorageLocation(*S, *LocDst);
+
+ // If the expr is a glvalue, we can reasonably assume the operator is
+ // returning T& and thus we can assign it `LocDst`.
+ if (S->isGLValue()) {
+ Env.setStorageLocation(*S, *LocDst);
+ } else if (S->getType()->isRecordType()) {
+ // Make sure that we have a `RecordValue` for this expression so that
+ // `Environment::getResultObjectLocation()` is able to return a location
+ // for it.
+ if (Env.getValue(*S) == nullptr)
+ refreshRecordValue(*S, Env);
+ }
+
return;
}
diff --git a/clang/lib/Analysis/UnsafeBufferUsage.cpp b/clang/lib/Analysis/UnsafeBufferUsage.cpp
index 3c2a6fd..769c6d9 100644
--- a/clang/lib/Analysis/UnsafeBufferUsage.cpp
+++ b/clang/lib/Analysis/UnsafeBufferUsage.cpp
@@ -7,11 +7,14 @@
//===----------------------------------------------------------------------===//
#include "clang/Analysis/Analyses/UnsafeBufferUsage.h"
+#include "clang/AST/ASTContext.h"
#include "clang/AST/Decl.h"
#include "clang/AST/Expr.h"
#include "clang/AST/RecursiveASTVisitor.h"
+#include "clang/AST/Stmt.h"
#include "clang/AST/StmtVisitor.h"
#include "clang/ASTMatchers/ASTMatchFinder.h"
+#include "clang/ASTMatchers/ASTMatchers.h"
#include "clang/Basic/CharInfo.h"
#include "clang/Basic/SourceLocation.h"
#include "clang/Lex/Lexer.h"
@@ -281,10 +284,13 @@ isInUnspecifiedPointerContext(internal::Matcher<Stmt> InnerMatcher) {
// 4. the operand of a pointer subtraction operation
// (i.e., computing the distance between two pointers); or ...
- auto CallArgMatcher =
- callExpr(forEachArgumentWithParam(InnerMatcher,
- hasPointerType() /* array also decays to pointer type*/),
- unless(callee(functionDecl(hasAttr(attr::UnsafeBufferUsage)))));
+ // clang-format off
+ auto CallArgMatcher = callExpr(
+ forEachArgumentWithParamType(
+ InnerMatcher,
+ isAnyPointer() /* array also decays to pointer type*/),
+ unless(callee(
+ functionDecl(hasAttr(attr::UnsafeBufferUsage)))));
auto CastOperandMatcher =
castExpr(anyOf(hasCastKind(CastKind::CK_PointerToIntegral),
@@ -306,6 +312,7 @@ isInUnspecifiedPointerContext(internal::Matcher<Stmt> InnerMatcher) {
hasRHS(hasPointerType())),
eachOf(hasLHS(InnerMatcher),
hasRHS(InnerMatcher)));
+ // clang-format on
return stmt(anyOf(CallArgMatcher, CastOperandMatcher, CompOperandMatcher,
PtrSubtractionMatcher));
@@ -402,6 +409,39 @@ AST_MATCHER(CXXConstructExpr, isSafeSpanTwoParamConstruct) {
}
return false;
}
+
+AST_MATCHER(ArraySubscriptExpr, isSafeArraySubscript) {
+ // FIXME: Proper solution:
+ // - refactor Sema::CheckArrayAccess
+ // - split safe/OOB/unknown decision logic from diagnostics emitting code
+ // - e. g. "Try harder to find a NamedDecl to point at in the note."
+ // already duplicated
+ // - call both from Sema and from here
+
+ const auto *BaseDRE =
+ dyn_cast<DeclRefExpr>(Node.getBase()->IgnoreParenImpCasts());
+ if (!BaseDRE)
+ return false;
+ if (!BaseDRE->getDecl())
+ return false;
+ const auto *CATy = Finder->getASTContext().getAsConstantArrayType(
+ BaseDRE->getDecl()->getType());
+ if (!CATy)
+ return false;
+ const APInt ArrSize = CATy->getSize();
+
+ if (const auto *IdxLit = dyn_cast<IntegerLiteral>(Node.getIdx())) {
+ const APInt ArrIdx = IdxLit->getValue();
+ // FIXME: ArrIdx.isNegative() we could immediately emit an error as that's a
+ // bug
+ if (ArrIdx.isNonNegative() &&
+ ArrIdx.getLimitedValue() < ArrSize.getLimitedValue())
+ return true;
+ }
+
+ return false;
+}
+
} // namespace clang::ast_matchers
namespace {
@@ -594,16 +634,16 @@ public:
}
static Matcher matcher() {
- // FIXME: What if the index is integer literal 0? Should this be
- // a safe gadget in this case?
- // clang-format off
+ // clang-format off
return stmt(arraySubscriptExpr(
hasBase(ignoringParenImpCasts(
anyOf(hasPointerType(), hasArrayType()))),
- unless(hasIndex(
- anyOf(integerLiteral(equals(0)), arrayInitIndexExpr())
- )))
- .bind(ArraySubscrTag));
+ unless(anyOf(
+ isSafeArraySubscript(),
+ hasIndex(
+ anyOf(integerLiteral(equals(0)), arrayInitIndexExpr())
+ )
+ ))).bind(ArraySubscrTag));
// clang-format on
}
@@ -762,7 +802,8 @@ public:
/// \code
/// p = q;
/// \endcode
-class PointerAssignmentGadget : public FixableGadget {
+/// where both `p` and `q` are pointers.
+class PtrToPtrAssignmentGadget : public FixableGadget {
private:
static constexpr const char *const PointerAssignLHSTag = "ptrLHS";
static constexpr const char *const PointerAssignRHSTag = "ptrRHS";
@@ -770,13 +811,13 @@ private:
const DeclRefExpr * PtrRHS; // the RHS pointer expression in `PA`
public:
- PointerAssignmentGadget(const MatchFinder::MatchResult &Result)
- : FixableGadget(Kind::PointerAssignment),
- PtrLHS(Result.Nodes.getNodeAs<DeclRefExpr>(PointerAssignLHSTag)),
- PtrRHS(Result.Nodes.getNodeAs<DeclRefExpr>(PointerAssignRHSTag)) {}
+ PtrToPtrAssignmentGadget(const MatchFinder::MatchResult &Result)
+ : FixableGadget(Kind::PtrToPtrAssignment),
+ PtrLHS(Result.Nodes.getNodeAs<DeclRefExpr>(PointerAssignLHSTag)),
+ PtrRHS(Result.Nodes.getNodeAs<DeclRefExpr>(PointerAssignRHSTag)) {}
static bool classof(const Gadget *G) {
- return G->getKind() == Kind::PointerAssignment;
+ return G->getKind() == Kind::PtrToPtrAssignment;
}
static Matcher matcher() {
@@ -811,6 +852,60 @@ public:
}
};
+/// An assignment expression of the form:
+/// \code
+/// ptr = array;
+/// \endcode
+/// where `p` is a pointer and `array` is a constant size array.
+class CArrayToPtrAssignmentGadget : public FixableGadget {
+private:
+ static constexpr const char *const PointerAssignLHSTag = "ptrLHS";
+ static constexpr const char *const PointerAssignRHSTag = "ptrRHS";
+ const DeclRefExpr *PtrLHS; // the LHS pointer expression in `PA`
+ const DeclRefExpr *PtrRHS; // the RHS pointer expression in `PA`
+
+public:
+ CArrayToPtrAssignmentGadget(const MatchFinder::MatchResult &Result)
+ : FixableGadget(Kind::CArrayToPtrAssignment),
+ PtrLHS(Result.Nodes.getNodeAs<DeclRefExpr>(PointerAssignLHSTag)),
+ PtrRHS(Result.Nodes.getNodeAs<DeclRefExpr>(PointerAssignRHSTag)) {}
+
+ static bool classof(const Gadget *G) {
+ return G->getKind() == Kind::CArrayToPtrAssignment;
+ }
+
+ static Matcher matcher() {
+ auto PtrAssignExpr = binaryOperator(
+ allOf(hasOperatorName("="),
+ hasRHS(ignoringParenImpCasts(
+ declRefExpr(hasType(hasCanonicalType(constantArrayType())),
+ toSupportedVariable())
+ .bind(PointerAssignRHSTag))),
+ hasLHS(declRefExpr(hasPointerType(), toSupportedVariable())
+ .bind(PointerAssignLHSTag))));
+
+ return stmt(isInUnspecifiedUntypedContext(PtrAssignExpr));
+ }
+
+ virtual std::optional<FixItList>
+ getFixits(const FixitStrategy &S) const override;
+
+ virtual const Stmt *getBaseStmt() const override {
+ // FIXME: This should be the binary operator, assuming that this method
+ // makes sense at all on a FixableGadget.
+ return PtrLHS;
+ }
+
+ virtual DeclUseList getClaimedVarUseSites() const override {
+ return DeclUseList{PtrLHS, PtrRHS};
+ }
+
+ virtual std::optional<std::pair<const VarDecl *, const VarDecl *>>
+ getStrategyImplications() const override {
+ return {};
+ }
+};
+
/// A call of a function or method that performs unchecked buffer operations
/// over one of its pointer parameters.
class UnsafeBufferUsageAttrGadget : public WarningGadget {
@@ -1434,7 +1529,7 @@ bool clang::internal::anyConflict(const SmallVectorImpl<FixItHint> &FixIts,
}
std::optional<FixItList>
-PointerAssignmentGadget::getFixits(const FixitStrategy &S) const {
+PtrToPtrAssignmentGadget::getFixits(const FixitStrategy &S) const {
const auto *LeftVD = cast<VarDecl>(PtrLHS->getDecl());
const auto *RightVD = cast<VarDecl>(PtrRHS->getDecl());
switch (S.lookup(LeftVD)) {
@@ -1453,6 +1548,42 @@ PointerAssignmentGadget::getFixits(const FixitStrategy &S) const {
return std::nullopt;
}
+/// \returns fixit that adds .data() call after \DRE.
+static inline std::optional<FixItList> createDataFixit(const ASTContext &Ctx,
+ const DeclRefExpr *DRE);
+
+std::optional<FixItList>
+CArrayToPtrAssignmentGadget::getFixits(const FixitStrategy &S) const {
+ const auto *LeftVD = cast<VarDecl>(PtrLHS->getDecl());
+ const auto *RightVD = cast<VarDecl>(PtrRHS->getDecl());
+ // TLDR: Implementing fixits for non-Wontfix strategy on both LHS and RHS is
+ // non-trivial.
+ //
+ // CArrayToPtrAssignmentGadget doesn't have strategy implications because
+ // constant size array propagates its bounds. Because of that LHS and RHS are
+ // addressed by two different fixits.
+ //
+ // At the same time FixitStrategy S doesn't reflect what group a fixit belongs
+ // to and can't be generally relied on in multi-variable Fixables!
+ //
+ // E. g. If an instance of this gadget is fixing variable on LHS then the
+ // variable on RHS is fixed by a different fixit and its strategy for LHS
+ // fixit is as if Wontfix.
+ //
+ // The only exception is Wontfix strategy for a given variable as that is
+ // valid for any fixit produced for the given input source code.
+ if (S.lookup(LeftVD) == FixitStrategy::Kind::Span) {
+ if (S.lookup(RightVD) == FixitStrategy::Kind::Wontfix) {
+ return FixItList{};
+ }
+ } else if (S.lookup(LeftVD) == FixitStrategy::Kind::Wontfix) {
+ if (S.lookup(RightVD) == FixitStrategy::Kind::Array) {
+ return createDataFixit(RightVD->getASTContext(), PtrRHS);
+ }
+ }
+ return std::nullopt;
+}
+
std::optional<FixItList>
PointerInitGadget::getFixits(const FixitStrategy &S) const {
const auto *LeftVD = PtrInitLHS;
@@ -1870,27 +2001,33 @@ PointerDereferenceGadget::getFixits(const FixitStrategy &S) const {
return std::nullopt;
}
+static inline std::optional<FixItList> createDataFixit(const ASTContext &Ctx,
+ const DeclRefExpr *DRE) {
+ const SourceManager &SM = Ctx.getSourceManager();
+ // Inserts the .data() after the DRE
+ std::optional<SourceLocation> EndOfOperand =
+ getPastLoc(DRE, SM, Ctx.getLangOpts());
+
+ if (EndOfOperand)
+ return FixItList{{FixItHint::CreateInsertion(*EndOfOperand, ".data()")}};
+
+ return std::nullopt;
+}
+
// Generates fix-its replacing an expression of the form UPC(DRE) with
// `DRE.data()`
std::optional<FixItList>
UPCStandalonePointerGadget::getFixits(const FixitStrategy &S) const {
const auto VD = cast<VarDecl>(Node->getDecl());
switch (S.lookup(VD)) {
+ case FixitStrategy::Kind::Array:
case FixitStrategy::Kind::Span: {
- ASTContext &Ctx = VD->getASTContext();
- SourceManager &SM = Ctx.getSourceManager();
- // Inserts the .data() after the DRE
- std::optional<SourceLocation> EndOfOperand =
- getPastLoc(Node, SM, Ctx.getLangOpts());
-
- if (EndOfOperand)
- return FixItList{{FixItHint::CreateInsertion(*EndOfOperand, ".data()")}};
+ return createDataFixit(VD->getASTContext(), Node);
// FIXME: Points inside a macro expansion.
break;
}
case FixitStrategy::Kind::Wontfix:
case FixitStrategy::Kind::Iterator:
- case FixitStrategy::Kind::Array:
return std::nullopt;
case FixitStrategy::Kind::Vector:
llvm_unreachable("unsupported strategies for FixableGadgets");
diff --git a/clang/lib/Basic/FileManager.cpp b/clang/lib/Basic/FileManager.cpp
index 6097a27..cd520a6 100644
--- a/clang/lib/Basic/FileManager.cpp
+++ b/clang/lib/Basic/FileManager.cpp
@@ -547,7 +547,7 @@ FileManager::getBufferForFile(FileEntryRef FE, bool isVolatile,
llvm::ErrorOr<std::unique_ptr<llvm::MemoryBuffer>>
FileManager::getBufferForFileImpl(StringRef Filename, int64_t FileSize,
bool isVolatile,
- bool RequiresNullTerminator) {
+ bool RequiresNullTerminator) const {
if (FileSystemOpts.WorkingDir.empty())
return FS->getBufferForFile(Filename, FileSize, RequiresNullTerminator,
isVolatile);
diff --git a/clang/lib/Basic/IdentifierTable.cpp b/clang/lib/Basic/IdentifierTable.cpp
index d0d8316..a9b07ac 100644
--- a/clang/lib/Basic/IdentifierTable.cpp
+++ b/clang/lib/Basic/IdentifierTable.cpp
@@ -36,7 +36,7 @@ using namespace clang;
// A check to make sure the ObjCOrBuiltinID has sufficient room to store the
// largest possible target/aux-target combination. If we exceed this, we likely
// need to just change the ObjCOrBuiltinIDBits value in IdentifierTable.h.
-static_assert(2 * LargestBuiltinID < (2 << (ObjCOrBuiltinIDBits - 1)),
+static_assert(2 * LargestBuiltinID < (2 << (InterestingIdentifierBits - 1)),
"Insufficient ObjCOrBuiltinID Bits");
//===----------------------------------------------------------------------===//
@@ -280,13 +280,13 @@ static void AddObjCKeyword(StringRef Name,
Table.get(Name).setObjCKeywordID(ObjCID);
}
-static void AddInterestingIdentifier(StringRef Name,
- tok::InterestingIdentifierKind BTID,
- IdentifierTable &Table) {
- // Don't add 'not_interesting' identifier.
- if (BTID != tok::not_interesting) {
+static void AddNotableIdentifier(StringRef Name,
+ tok::NotableIdentifierKind BTID,
+ IdentifierTable &Table) {
+ // Don't add 'not_notable' identifier.
+ if (BTID != tok::not_notable) {
IdentifierInfo &Info = Table.get(Name, tok::identifier);
- Info.setInterestingIdentifierID(BTID);
+ Info.setNotableIdentifierID(BTID);
}
}
@@ -306,8 +306,8 @@ void IdentifierTable::AddKeywords(const LangOptions &LangOpts) {
#define OBJC_AT_KEYWORD(NAME) \
if (LangOpts.ObjC) \
AddObjCKeyword(StringRef(#NAME), tok::objc_##NAME, *this);
-#define INTERESTING_IDENTIFIER(NAME) \
- AddInterestingIdentifier(StringRef(#NAME), tok::NAME, *this);
+#define NOTABLE_IDENTIFIER(NAME) \
+ AddNotableIdentifier(StringRef(#NAME), tok::NAME, *this);
#define TESTING_KEYWORD(NAME, FLAGS)
#include "clang/Basic/TokenKinds.def"
diff --git a/clang/lib/Basic/Targets/AArch64.cpp b/clang/lib/Basic/Targets/AArch64.cpp
index 6803296..dd0218e 100644
--- a/clang/lib/Basic/Targets/AArch64.cpp
+++ b/clang/lib/Basic/Targets/AArch64.cpp
@@ -367,8 +367,20 @@ void AArch64TargetInfo::getTargetDefines(const LangOptions &Opts,
// ACLE predefines. Many can only have one possible value on v8 AArch64.
Builder.defineMacro("__ARM_ACLE", "200");
- Builder.defineMacro("__ARM_ARCH",
- std::to_string(ArchInfo->Version.getMajor()));
+
+ // __ARM_ARCH is defined as an integer value indicating the current ARM ISA.
+ // For ISAs up to and including v8, __ARM_ARCH is equal to the major version
+ // number. For ISAs from v8.1 onwards, __ARM_ARCH is scaled up to include the
+ // minor version number, e.g. for ARM architecture ARMvX.Y:
+ // __ARM_ARCH = X * 100 + Y.
+ if (ArchInfo->Version.getMajor() == 8 && ArchInfo->Version.getMinor() == 0)
+ Builder.defineMacro("__ARM_ARCH",
+ std::to_string(ArchInfo->Version.getMajor()));
+ else
+ Builder.defineMacro("__ARM_ARCH",
+ std::to_string(ArchInfo->Version.getMajor() * 100 +
+ ArchInfo->Version.getMinor().value()));
+
Builder.defineMacro("__ARM_ARCH_PROFILE",
std::string("'") + (char)ArchInfo->Profile + "'");
diff --git a/clang/lib/Basic/Targets/AMDGPU.cpp b/clang/lib/Basic/Targets/AMDGPU.cpp
index 10cba6b..5742885 100644
--- a/clang/lib/Basic/Targets/AMDGPU.cpp
+++ b/clang/lib/Basic/Targets/AMDGPU.cpp
@@ -285,10 +285,9 @@ void AMDGPUTargetInfo::getTargetDefines(const LangOptions &Opts,
: getArchNameR600(GPUKind));
// Sanitize the name of generic targets.
- // e.g. gfx10.1-generic -> gfx10_1_generic
+ // e.g. gfx10-1-generic -> gfx10_1_generic
if (GPUKind >= llvm::AMDGPU::GK_AMDGCN_GENERIC_FIRST &&
GPUKind <= llvm::AMDGPU::GK_AMDGCN_GENERIC_LAST) {
- std::replace(CanonName.begin(), CanonName.end(), '.', '_');
std::replace(CanonName.begin(), CanonName.end(), '-', '_');
}
diff --git a/clang/lib/Basic/Targets/ARM.cpp b/clang/lib/Basic/Targets/ARM.cpp
index 55b7155..cd7fb95 100644
--- a/clang/lib/Basic/Targets/ARM.cpp
+++ b/clang/lib/Basic/Targets/ARM.cpp
@@ -130,6 +130,7 @@ void ARMTargetInfo::setArchInfo(llvm::ARM::ArchKind Kind) {
SubArch = llvm::ARM::getSubArch(ArchKind);
ArchProfile = llvm::ARM::parseArchProfile(SubArch);
ArchVersion = llvm::ARM::parseArchVersion(SubArch);
+ ArchMinorVersion = llvm::ARM::parseArchMinorVersion(SubArch);
// cache CPU related strings
CPUAttr = getCPUAttr();
@@ -736,9 +737,16 @@ void ARMTargetInfo::getTargetDefines(const LangOptions &Opts,
if (!CPUAttr.empty())
Builder.defineMacro("__ARM_ARCH_" + CPUAttr + "__");
- // ACLE 6.4.1 ARM/Thumb instruction set architecture
- // __ARM_ARCH is defined as an integer value indicating the current ARM ISA
- Builder.defineMacro("__ARM_ARCH", Twine(ArchVersion));
+ // __ARM_ARCH is defined as an integer value indicating the current ARM ISA.
+ // For ISAs up to and including v8, __ARM_ARCH is equal to the major version
+ // number. For ISAs from v8.1 onwards, __ARM_ARCH is scaled up to include the
+ // minor version number, e.g. for ARM architecture ARMvX.Y:
+ // __ARM_ARCH = X * 100 + Y.
+ if (ArchVersion >= 9 || ArchMinorVersion != 0)
+ Builder.defineMacro("__ARM_ARCH",
+ Twine(ArchVersion * 100 + ArchMinorVersion));
+ else
+ Builder.defineMacro("__ARM_ARCH", Twine(ArchVersion));
if (ArchVersion >= 8) {
// ACLE 6.5.7 Crypto Extension
diff --git a/clang/lib/Basic/Targets/ARM.h b/clang/lib/Basic/Targets/ARM.h
index 71322a0..df06e4d 100644
--- a/clang/lib/Basic/Targets/ARM.h
+++ b/clang/lib/Basic/Targets/ARM.h
@@ -60,6 +60,7 @@ class LLVM_LIBRARY_VISIBILITY ARMTargetInfo : public TargetInfo {
llvm::ARM::ArchKind ArchKind = llvm::ARM::ArchKind::ARMV4T;
llvm::ARM::ProfileKind ArchProfile;
unsigned ArchVersion;
+ unsigned ArchMinorVersion;
LLVM_PREFERRED_TYPE(FPUMode)
unsigned FPU : 5;
diff --git a/clang/lib/CMakeLists.txt b/clang/lib/CMakeLists.txt
index 1526d65..0cac864 100644
--- a/clang/lib/CMakeLists.txt
+++ b/clang/lib/CMakeLists.txt
@@ -23,6 +23,7 @@ add_subdirectory(Tooling)
add_subdirectory(DirectoryWatcher)
add_subdirectory(Index)
add_subdirectory(IndexSerialization)
+add_subdirectory(InstallAPI)
add_subdirectory(StaticAnalyzer)
add_subdirectory(Format)
if(CLANG_INCLUDE_TESTS)
diff --git a/clang/lib/CodeGen/BackendConsumer.h b/clang/lib/CodeGen/BackendConsumer.h
index 72a814c..fd0f198 100644
--- a/clang/lib/CodeGen/BackendConsumer.h
+++ b/clang/lib/CodeGen/BackendConsumer.h
@@ -34,6 +34,7 @@ class BackendConsumer : public ASTConsumer {
const CodeGenOptions &CodeGenOpts;
const TargetOptions &TargetOpts;
const LangOptions &LangOpts;
+ const FileManager &FileMgr;
std::unique_ptr<raw_pwrite_stream> AsmOutStream;
ASTContext *Context;
IntrusiveRefCntPtr<llvm::vfs::FileSystem> FS;
@@ -74,8 +75,8 @@ public:
const HeaderSearchOptions &HeaderSearchOpts,
const PreprocessorOptions &PPOpts,
const CodeGenOptions &CodeGenOpts,
- const TargetOptions &TargetOpts,
- const LangOptions &LangOpts, const std::string &InFile,
+ const TargetOptions &TargetOpts, const LangOptions &LangOpts,
+ const FileManager &FileMgr, const std::string &InFile,
SmallVector<LinkModule, 4> LinkModules,
std::unique_ptr<raw_pwrite_stream> OS, llvm::LLVMContext &C,
CoverageSourceInfo *CoverageInfo = nullptr);
@@ -88,8 +89,8 @@ public:
const HeaderSearchOptions &HeaderSearchOpts,
const PreprocessorOptions &PPOpts,
const CodeGenOptions &CodeGenOpts,
- const TargetOptions &TargetOpts,
- const LangOptions &LangOpts, llvm::Module *Module,
+ const TargetOptions &TargetOpts, const LangOptions &LangOpts,
+ const FileManager &FileMgr, llvm::Module *Module,
SmallVector<LinkModule, 4> LinkModules, llvm::LLVMContext &C,
CoverageSourceInfo *CoverageInfo = nullptr);
@@ -111,10 +112,13 @@ public:
void AssignInheritanceModel(CXXRecordDecl *RD) override;
void HandleVTable(CXXRecordDecl *RD) override;
-
- // Links each entry in LinkModules into our module. Returns true on error.
+ // Links each entry in LinkModules into our module. Returns true on error.
bool LinkInModules(llvm::Module *M, bool ShouldLinkFiles = true);
+ // Load a bitcode module from -mlink-builtin-bitcode option using
+ // methods from a BackendConsumer instead of CompilerInstance
+ bool ReloadModules(llvm::Module *M);
+
/// Get the best possible source location to represent a diagnostic that
/// may have associated debug info.
const FullSourceLoc getBestLocationFromDebugLoc(
diff --git a/clang/lib/CodeGen/CGBuiltin.cpp b/clang/lib/CodeGen/CGBuiltin.cpp
index a7a410d..9bc6046 100644
--- a/clang/lib/CodeGen/CGBuiltin.cpp
+++ b/clang/lib/CodeGen/CGBuiltin.cpp
@@ -3443,6 +3443,10 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
Function *F = CGM.getIntrinsic(Intrinsic::readcyclecounter);
return RValue::get(Builder.CreateCall(F));
}
+ case Builtin::BI__builtin_readsteadycounter: {
+ Function *F = CGM.getIntrinsic(Intrinsic::readsteadycounter);
+ return RValue::get(Builder.CreateCall(F));
+ }
case Builtin::BI__builtin___clear_cache: {
Value *Begin = EmitScalarExpr(E->getArg(0));
Value *End = EmitScalarExpr(E->getArg(1));
@@ -5908,8 +5912,6 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
}
}
- assert(ArgValue->getType()->canLosslesslyBitCastTo(PTy) &&
- "Must be able to losslessly bit cast to param");
// Cast vector type (e.g., v256i32) to x86_amx, this only happen
// in amx intrinsics.
if (PTy->isX86_AMXTy())
@@ -5939,8 +5941,6 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
}
}
- assert(V->getType()->canLosslesslyBitCastTo(RetTy) &&
- "Must be able to losslessly bit cast result type");
// Cast x86_amx to vector type (e.g., v256i32), this only happen
// in amx intrinsics.
if (V->getType()->isX86_AMXTy())
diff --git a/clang/lib/CodeGen/CGCall.cpp b/clang/lib/CodeGen/CGCall.cpp
index cd26a3d..d05cf1c 100644
--- a/clang/lib/CodeGen/CGCall.cpp
+++ b/clang/lib/CodeGen/CGCall.cpp
@@ -1301,27 +1301,25 @@ static llvm::Value *CreateCoercedLoad(Address Src, llvm::Type *Ty,
// If coercing a fixed vector to a scalable vector for ABI compatibility, and
// the types match, use the llvm.vector.insert intrinsic to perform the
// conversion.
- if (auto *ScalableDst = dyn_cast<llvm::ScalableVectorType>(Ty)) {
- if (auto *FixedSrc = dyn_cast<llvm::FixedVectorType>(SrcTy)) {
- // If we are casting a fixed i8 vector to a scalable 16 x i1 predicate
+ if (auto *ScalableDstTy = dyn_cast<llvm::ScalableVectorType>(Ty)) {
+ if (auto *FixedSrcTy = dyn_cast<llvm::FixedVectorType>(SrcTy)) {
+ // If we are casting a fixed i8 vector to a scalable i1 predicate
// vector, use a vector insert and bitcast the result.
- bool NeedsBitcast = false;
- auto PredType =
- llvm::ScalableVectorType::get(CGF.Builder.getInt1Ty(), 16);
- llvm::Type *OrigType = Ty;
- if (ScalableDst == PredType &&
- FixedSrc->getElementType() == CGF.Builder.getInt8Ty()) {
- ScalableDst = llvm::ScalableVectorType::get(CGF.Builder.getInt8Ty(), 2);
- NeedsBitcast = true;
+ if (ScalableDstTy->getElementType()->isIntegerTy(1) &&
+ ScalableDstTy->getElementCount().isKnownMultipleOf(8) &&
+ FixedSrcTy->getElementType()->isIntegerTy(8)) {
+ ScalableDstTy = llvm::ScalableVectorType::get(
+ FixedSrcTy->getElementType(),
+ ScalableDstTy->getElementCount().getKnownMinValue() / 8);
}
- if (ScalableDst->getElementType() == FixedSrc->getElementType()) {
+ if (ScalableDstTy->getElementType() == FixedSrcTy->getElementType()) {
auto *Load = CGF.Builder.CreateLoad(Src);
- auto *UndefVec = llvm::UndefValue::get(ScalableDst);
+ auto *UndefVec = llvm::UndefValue::get(ScalableDstTy);
auto *Zero = llvm::Constant::getNullValue(CGF.CGM.Int64Ty);
llvm::Value *Result = CGF.Builder.CreateInsertVector(
- ScalableDst, UndefVec, Load, Zero, "cast.scalable");
- if (NeedsBitcast)
- Result = CGF.Builder.CreateBitCast(Result, OrigType);
+ ScalableDstTy, UndefVec, Load, Zero, "cast.scalable");
+ if (ScalableDstTy != Ty)
+ Result = CGF.Builder.CreateBitCast(Result, Ty);
return Result;
}
}
@@ -3199,13 +3197,14 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
llvm::Value *Coerced = Fn->getArg(FirstIRArg);
if (auto *VecTyFrom =
dyn_cast<llvm::ScalableVectorType>(Coerced->getType())) {
- // If we are casting a scalable 16 x i1 predicate vector to a fixed i8
+ // If we are casting a scalable i1 predicate vector to a fixed i8
// vector, bitcast the source and use a vector extract.
- auto PredType =
- llvm::ScalableVectorType::get(Builder.getInt1Ty(), 16);
- if (VecTyFrom == PredType &&
+ if (VecTyFrom->getElementType()->isIntegerTy(1) &&
+ VecTyFrom->getElementCount().isKnownMultipleOf(8) &&
VecTyTo->getElementType() == Builder.getInt8Ty()) {
- VecTyFrom = llvm::ScalableVectorType::get(Builder.getInt8Ty(), 2);
+ VecTyFrom = llvm::ScalableVectorType::get(
+ VecTyTo->getElementType(),
+ VecTyFrom->getElementCount().getKnownMinValue() / 8);
Coerced = Builder.CreateBitCast(Coerced, VecTyFrom);
}
if (VecTyFrom->getElementType() == VecTyTo->getElementType()) {
@@ -5877,12 +5876,13 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
// If coercing a fixed vector from a scalable vector for ABI
// compatibility, and the types match, use the llvm.vector.extract
// intrinsic to perform the conversion.
- if (auto *FixedDst = dyn_cast<llvm::FixedVectorType>(RetIRTy)) {
+ if (auto *FixedDstTy = dyn_cast<llvm::FixedVectorType>(RetIRTy)) {
llvm::Value *V = CI;
- if (auto *ScalableSrc = dyn_cast<llvm::ScalableVectorType>(V->getType())) {
- if (FixedDst->getElementType() == ScalableSrc->getElementType()) {
+ if (auto *ScalableSrcTy =
+ dyn_cast<llvm::ScalableVectorType>(V->getType())) {
+ if (FixedDstTy->getElementType() == ScalableSrcTy->getElementType()) {
llvm::Value *Zero = llvm::Constant::getNullValue(CGM.Int64Ty);
- V = Builder.CreateExtractVector(FixedDst, V, Zero, "cast.fixed");
+ V = Builder.CreateExtractVector(FixedDstTy, V, Zero, "cast.fixed");
return RValue::get(V);
}
}
diff --git a/clang/lib/CodeGen/CGExprScalar.cpp b/clang/lib/CodeGen/CGExprScalar.cpp
index fa03163..aa805f2 100644
--- a/clang/lib/CodeGen/CGExprScalar.cpp
+++ b/clang/lib/CodeGen/CGExprScalar.cpp
@@ -2137,26 +2137,24 @@ Value *ScalarExprEmitter::VisitCastExpr(CastExpr *CE) {
// If Src is a fixed vector and Dst is a scalable vector, and both have the
// same element type, use the llvm.vector.insert intrinsic to perform the
// bitcast.
- if (const auto *FixedSrc = dyn_cast<llvm::FixedVectorType>(SrcTy)) {
- if (const auto *ScalableDst = dyn_cast<llvm::ScalableVectorType>(DstTy)) {
- // If we are casting a fixed i8 vector to a scalable 16 x i1 predicate
+ if (auto *FixedSrcTy = dyn_cast<llvm::FixedVectorType>(SrcTy)) {
+ if (auto *ScalableDstTy = dyn_cast<llvm::ScalableVectorType>(DstTy)) {
+ // If we are casting a fixed i8 vector to a scalable i1 predicate
// vector, use a vector insert and bitcast the result.
- bool NeedsBitCast = false;
- auto PredType = llvm::ScalableVectorType::get(Builder.getInt1Ty(), 16);
- llvm::Type *OrigType = DstTy;
- if (ScalableDst == PredType &&
- FixedSrc->getElementType() == Builder.getInt8Ty()) {
- DstTy = llvm::ScalableVectorType::get(Builder.getInt8Ty(), 2);
- ScalableDst = cast<llvm::ScalableVectorType>(DstTy);
- NeedsBitCast = true;
+ if (ScalableDstTy->getElementType()->isIntegerTy(1) &&
+ ScalableDstTy->getElementCount().isKnownMultipleOf(8) &&
+ FixedSrcTy->getElementType()->isIntegerTy(8)) {
+ ScalableDstTy = llvm::ScalableVectorType::get(
+ FixedSrcTy->getElementType(),
+ ScalableDstTy->getElementCount().getKnownMinValue() / 8);
}
- if (FixedSrc->getElementType() == ScalableDst->getElementType()) {
- llvm::Value *UndefVec = llvm::UndefValue::get(DstTy);
+ if (FixedSrcTy->getElementType() == ScalableDstTy->getElementType()) {
+ llvm::Value *UndefVec = llvm::UndefValue::get(ScalableDstTy);
llvm::Value *Zero = llvm::Constant::getNullValue(CGF.CGM.Int64Ty);
llvm::Value *Result = Builder.CreateInsertVector(
- DstTy, UndefVec, Src, Zero, "cast.scalable");
- if (NeedsBitCast)
- Result = Builder.CreateBitCast(Result, OrigType);
+ ScalableDstTy, UndefVec, Src, Zero, "cast.scalable");
+ if (Result->getType() != DstTy)
+ Result = Builder.CreateBitCast(Result, DstTy);
return Result;
}
}
@@ -2165,18 +2163,19 @@ Value *ScalarExprEmitter::VisitCastExpr(CastExpr *CE) {
// If Src is a scalable vector and Dst is a fixed vector, and both have the
// same element type, use the llvm.vector.extract intrinsic to perform the
// bitcast.
- if (const auto *ScalableSrc = dyn_cast<llvm::ScalableVectorType>(SrcTy)) {
- if (const auto *FixedDst = dyn_cast<llvm::FixedVectorType>(DstTy)) {
- // If we are casting a scalable 16 x i1 predicate vector to a fixed i8
+ if (auto *ScalableSrcTy = dyn_cast<llvm::ScalableVectorType>(SrcTy)) {
+ if (auto *FixedDstTy = dyn_cast<llvm::FixedVectorType>(DstTy)) {
+ // If we are casting a scalable i1 predicate vector to a fixed i8
// vector, bitcast the source and use a vector extract.
- auto PredType = llvm::ScalableVectorType::get(Builder.getInt1Ty(), 16);
- if (ScalableSrc == PredType &&
- FixedDst->getElementType() == Builder.getInt8Ty()) {
- SrcTy = llvm::ScalableVectorType::get(Builder.getInt8Ty(), 2);
- ScalableSrc = cast<llvm::ScalableVectorType>(SrcTy);
- Src = Builder.CreateBitCast(Src, SrcTy);
+ if (ScalableSrcTy->getElementType()->isIntegerTy(1) &&
+ ScalableSrcTy->getElementCount().isKnownMultipleOf(8) &&
+ FixedDstTy->getElementType()->isIntegerTy(8)) {
+ ScalableSrcTy = llvm::ScalableVectorType::get(
+ FixedDstTy->getElementType(),
+ ScalableSrcTy->getElementCount().getKnownMinValue() / 8);
+ Src = Builder.CreateBitCast(Src, ScalableSrcTy);
}
- if (ScalableSrc->getElementType() == FixedDst->getElementType()) {
+ if (ScalableSrcTy->getElementType() == FixedDstTy->getElementType()) {
llvm::Value *Zero = llvm::Constant::getNullValue(CGF.CGM.Int64Ty);
return Builder.CreateExtractVector(DstTy, Src, Zero, "cast.fixed");
}
diff --git a/clang/lib/CodeGen/CGStmt.cpp b/clang/lib/CodeGen/CGStmt.cpp
index beff0ad..af51875 100644
--- a/clang/lib/CodeGen/CGStmt.cpp
+++ b/clang/lib/CodeGen/CGStmt.cpp
@@ -435,6 +435,9 @@ void CodeGenFunction::EmitStmt(const Stmt *S, ArrayRef<const Attr *> Attrs) {
case Stmt::OMPParallelMaskedDirectiveClass:
EmitOMPParallelMaskedDirective(cast<OMPParallelMaskedDirective>(*S));
break;
+ case Stmt::OpenACCComputeConstructClass:
+ EmitOpenACCComputeConstruct(cast<OpenACCComputeConstruct>(*S));
+ break;
}
}
diff --git a/clang/lib/CodeGen/CodeGenAction.cpp b/clang/lib/CodeGen/CodeGenAction.cpp
index f8038497..bb9aaba 100644
--- a/clang/lib/CodeGen/CodeGenAction.cpp
+++ b/clang/lib/CodeGen/CodeGenAction.cpp
@@ -109,56 +109,52 @@ static void reportOptRecordError(Error E, DiagnosticsEngine &Diags,
});
}
-BackendConsumer::BackendConsumer(BackendAction Action, DiagnosticsEngine &Diags,
- IntrusiveRefCntPtr<llvm::vfs::FileSystem> VFS,
- const HeaderSearchOptions &HeaderSearchOpts,
- const PreprocessorOptions &PPOpts,
- const CodeGenOptions &CodeGenOpts,
- const TargetOptions &TargetOpts,
- const LangOptions &LangOpts,
- const std::string &InFile,
- SmallVector<LinkModule, 4> LinkModules,
- std::unique_ptr<raw_pwrite_stream> OS,
- LLVMContext &C,
- CoverageSourceInfo *CoverageInfo)
- : Diags(Diags), Action(Action), HeaderSearchOpts(HeaderSearchOpts),
- CodeGenOpts(CodeGenOpts), TargetOpts(TargetOpts), LangOpts(LangOpts),
- AsmOutStream(std::move(OS)), Context(nullptr), FS(VFS),
- LLVMIRGeneration("irgen", "LLVM IR Generation Time"),
- LLVMIRGenerationRefCount(0),
- Gen(CreateLLVMCodeGen(Diags, InFile, std::move(VFS), HeaderSearchOpts,
- PPOpts, CodeGenOpts, C, CoverageInfo)),
- LinkModules(std::move(LinkModules)) {
- TimerIsEnabled = CodeGenOpts.TimePasses;
- llvm::TimePassesIsEnabled = CodeGenOpts.TimePasses;
- llvm::TimePassesPerRun = CodeGenOpts.TimePassesPerRun;
+BackendConsumer::BackendConsumer(
+ BackendAction Action, DiagnosticsEngine &Diags,
+ IntrusiveRefCntPtr<llvm::vfs::FileSystem> VFS,
+ const HeaderSearchOptions &HeaderSearchOpts,
+ const PreprocessorOptions &PPOpts, const CodeGenOptions &CodeGenOpts,
+ const TargetOptions &TargetOpts, const LangOptions &LangOpts,
+ const FileManager &FileMgr, const std::string &InFile,
+ SmallVector<LinkModule, 4> LinkModules,
+ std::unique_ptr<raw_pwrite_stream> OS, LLVMContext &C,
+ CoverageSourceInfo *CoverageInfo)
+ : Diags(Diags), Action(Action), HeaderSearchOpts(HeaderSearchOpts),
+ CodeGenOpts(CodeGenOpts), TargetOpts(TargetOpts), LangOpts(LangOpts),
+ FileMgr(FileMgr), AsmOutStream(std::move(OS)), Context(nullptr), FS(VFS),
+ LLVMIRGeneration("irgen", "LLVM IR Generation Time"),
+ LLVMIRGenerationRefCount(0),
+ Gen(CreateLLVMCodeGen(Diags, InFile, std::move(VFS), HeaderSearchOpts,
+ PPOpts, CodeGenOpts, C, CoverageInfo)),
+ LinkModules(std::move(LinkModules)) {
+ TimerIsEnabled = CodeGenOpts.TimePasses;
+ llvm::TimePassesIsEnabled = CodeGenOpts.TimePasses;
+ llvm::TimePassesPerRun = CodeGenOpts.TimePassesPerRun;
}
// This constructor is used in installing an empty BackendConsumer
// to use the clang diagnostic handler for IR input files. It avoids
// initializing the OS field.
-BackendConsumer::BackendConsumer(BackendAction Action, DiagnosticsEngine &Diags,
- IntrusiveRefCntPtr<llvm::vfs::FileSystem> VFS,
- const HeaderSearchOptions &HeaderSearchOpts,
- const PreprocessorOptions &PPOpts,
- const CodeGenOptions &CodeGenOpts,
- const TargetOptions &TargetOpts,
- const LangOptions &LangOpts,
- llvm::Module *Module,
- SmallVector<LinkModule, 4> LinkModules,
- LLVMContext &C,
- CoverageSourceInfo *CoverageInfo)
- : Diags(Diags), Action(Action), HeaderSearchOpts(HeaderSearchOpts),
- CodeGenOpts(CodeGenOpts), TargetOpts(TargetOpts), LangOpts(LangOpts),
- Context(nullptr), FS(VFS),
- LLVMIRGeneration("irgen", "LLVM IR Generation Time"),
- LLVMIRGenerationRefCount(0),
- Gen(CreateLLVMCodeGen(Diags, "", std::move(VFS), HeaderSearchOpts,
- PPOpts, CodeGenOpts, C, CoverageInfo)),
- LinkModules(std::move(LinkModules)), CurLinkModule(Module) {
- TimerIsEnabled = CodeGenOpts.TimePasses;
- llvm::TimePassesIsEnabled = CodeGenOpts.TimePasses;
- llvm::TimePassesPerRun = CodeGenOpts.TimePassesPerRun;
+BackendConsumer::BackendConsumer(
+ BackendAction Action, DiagnosticsEngine &Diags,
+ IntrusiveRefCntPtr<llvm::vfs::FileSystem> VFS,
+ const HeaderSearchOptions &HeaderSearchOpts,
+ const PreprocessorOptions &PPOpts, const CodeGenOptions &CodeGenOpts,
+ const TargetOptions &TargetOpts, const LangOptions &LangOpts,
+ const FileManager &FileMgr, llvm::Module *Module,
+ SmallVector<LinkModule, 4> LinkModules, LLVMContext &C,
+ CoverageSourceInfo *CoverageInfo)
+ : Diags(Diags), Action(Action), HeaderSearchOpts(HeaderSearchOpts),
+ CodeGenOpts(CodeGenOpts), TargetOpts(TargetOpts), LangOpts(LangOpts),
+ FileMgr(FileMgr), Context(nullptr), FS(VFS),
+ LLVMIRGeneration("irgen", "LLVM IR Generation Time"),
+ LLVMIRGenerationRefCount(0),
+ Gen(CreateLLVMCodeGen(Diags, "", std::move(VFS), HeaderSearchOpts, PPOpts,
+ CodeGenOpts, C, CoverageInfo)),
+ LinkModules(std::move(LinkModules)), CurLinkModule(Module) {
+ TimerIsEnabled = CodeGenOpts.TimePasses;
+ llvm::TimePassesIsEnabled = CodeGenOpts.TimePasses;
+ llvm::TimePassesPerRun = CodeGenOpts.TimePassesPerRun;
}
llvm::Module* BackendConsumer::getModule() const {
@@ -233,9 +229,37 @@ void BackendConsumer::HandleInterestingDecl(DeclGroupRef D) {
HandleTopLevelDecl(D);
}
+bool BackendConsumer::ReloadModules(llvm::Module *M) {
+ for (const CodeGenOptions::BitcodeFileToLink &F :
+ CodeGenOpts.LinkBitcodeFiles) {
+ auto BCBuf = FileMgr.getBufferForFile(F.Filename);
+ if (!BCBuf) {
+ Diags.Report(diag::err_cannot_open_file)
+ << F.Filename << BCBuf.getError().message();
+ LinkModules.clear();
+ return true;
+ }
+
+ LLVMContext &Ctx = getModule()->getContext();
+ Expected<std::unique_ptr<llvm::Module>> ModuleOrErr =
+ getOwningLazyBitcodeModule(std::move(*BCBuf), Ctx);
+
+ if (!ModuleOrErr) {
+ handleAllErrors(ModuleOrErr.takeError(), [&](ErrorInfoBase &EIB) {
+ Diags.Report(diag::err_cannot_open_file) << F.Filename << EIB.message();
+ });
+ LinkModules.clear();
+ return true;
+ }
+ LinkModules.push_back({std::move(ModuleOrErr.get()), F.PropagateAttrs,
+ F.Internalize, F.LinkFlags});
+ }
+
+ return false; // success
+}
+
// Links each entry in LinkModules into our module. Returns true on error.
bool BackendConsumer::LinkInModules(llvm::Module *M, bool ShouldLinkFiles) {
-
for (auto &LM : LinkModules) {
assert(LM.Module && "LinkModule does not actually have a module");
@@ -257,37 +281,22 @@ bool BackendConsumer::LinkInModules(llvm::Module *M, bool ShouldLinkFiles) {
CurLinkModule = LM.Module.get();
bool Err;
- auto DoLink = [&](auto &Mod) {
- if (LM.Internalize) {
- Err = Linker::linkModules(
- *M, std::move(Mod), LM.LinkFlags,
- [](llvm::Module &M, const llvm::StringSet<> &GVS) {
- internalizeModule(M, [&GVS](const llvm::GlobalValue &GV) {
- return !GV.hasName() || (GVS.count(GV.getName()) == 0);
- });
+ if (LM.Internalize) {
+ Err = Linker::linkModules(
+ *M, std::move(LM.Module), LM.LinkFlags,
+ [](llvm::Module &M, const llvm::StringSet<> &GVS) {
+ internalizeModule(M, [&GVS](const llvm::GlobalValue &GV) {
+ return !GV.hasName() || (GVS.count(GV.getName()) == 0);
});
- } else
- Err = Linker::linkModules(*M, std::move(Mod), LM.LinkFlags);
- };
-
- // Create a Clone to move to the linker, which preserves the original
- // linking modules, allowing them to be linked again in the future
- if (ClRelinkBuiltinBitcodePostop) {
- // TODO: If CloneModule() is updated to support cloning of unmaterialized
- // modules, we can remove this
- if (Error E = CurLinkModule->materializeAll())
- return false;
-
- std::unique_ptr<llvm::Module> Clone = llvm::CloneModule(*LM.Module);
+ });
+ } else
+ Err = Linker::linkModules(*M, std::move(LM.Module), LM.LinkFlags);
- DoLink(Clone);
- }
- // Otherwise we can link (and clean up) the original modules
- else {
- DoLink(LM.Module);
- }
+ if (Err)
+ return true;
}
+ LinkModules.clear();
return false; // success
}
@@ -1037,8 +1046,9 @@ CodeGenAction::CreateASTConsumer(CompilerInstance &CI, StringRef InFile) {
std::unique_ptr<BackendConsumer> Result(new BackendConsumer(
BA, CI.getDiagnostics(), &CI.getVirtualFileSystem(),
CI.getHeaderSearchOpts(), CI.getPreprocessorOpts(), CI.getCodeGenOpts(),
- CI.getTargetOpts(), CI.getLangOpts(), std::string(InFile),
- std::move(LinkModules), std::move(OS), *VMContext, CoverageInfo));
+ CI.getTargetOpts(), CI.getLangOpts(), CI.getFileManager(),
+ std::string(InFile), std::move(LinkModules), std::move(OS), *VMContext,
+ CoverageInfo));
BEConsumer = Result.get();
// Enable generating macro debug info only when debug info is not disabled and
@@ -1199,7 +1209,7 @@ void CodeGenAction::ExecuteAction() {
BackendConsumer Result(BA, CI.getDiagnostics(), &CI.getVirtualFileSystem(),
CI.getHeaderSearchOpts(), CI.getPreprocessorOpts(),
CI.getCodeGenOpts(), CI.getTargetOpts(),
- CI.getLangOpts(), TheModule.get(),
+ CI.getLangOpts(), CI.getFileManager(), TheModule.get(),
std::move(LinkModules), *VMContext, nullptr);
// Link in each pending link module.
diff --git a/clang/lib/CodeGen/CodeGenFunction.h b/clang/lib/CodeGen/CodeGenFunction.h
index fc9b328..caa6a32 100644
--- a/clang/lib/CodeGen/CodeGenFunction.h
+++ b/clang/lib/CodeGen/CodeGenFunction.h
@@ -26,6 +26,7 @@
#include "clang/AST/ExprCXX.h"
#include "clang/AST/ExprObjC.h"
#include "clang/AST/ExprOpenMP.h"
+#include "clang/AST/StmtOpenACC.h"
#include "clang/AST/StmtOpenMP.h"
#include "clang/AST/Type.h"
#include "clang/Basic/ABI.h"
@@ -3840,6 +3841,15 @@ private:
void EmitSections(const OMPExecutableDirective &S);
public:
+ //===--------------------------------------------------------------------===//
+ // OpenACC Emission
+ //===--------------------------------------------------------------------===//
+ void EmitOpenACCComputeConstruct(const OpenACCComputeConstruct &S) {
+ // TODO OpenACC: Implement this. It is currently implemented as a 'no-op',
+ // simply emitting its structured block, but in the future we will implement
+ // some sort of IR.
+ EmitStmt(S.getStructuredBlock());
+ }
//===--------------------------------------------------------------------===//
// LValue Expression Emission
diff --git a/clang/lib/CodeGen/CodeGenModule.cpp b/clang/lib/CodeGen/CodeGenModule.cpp
index 2f923d5..c984260 100644
--- a/clang/lib/CodeGen/CodeGenModule.cpp
+++ b/clang/lib/CodeGen/CodeGenModule.cpp
@@ -68,6 +68,7 @@
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ConvertUTF.h"
#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/RISCVISAInfo.h"
#include "llvm/Support/TimeProfiler.h"
#include "llvm/Support/xxhash.h"
#include "llvm/TargetParser/Triple.h"
@@ -1057,6 +1058,19 @@ void CodeGenModule::Release() {
llvm::LLVMContext &Ctx = TheModule.getContext();
getModule().addModuleFlag(llvm::Module::Error, "target-abi",
llvm::MDString::get(Ctx, ABIStr));
+
+ // Add the canonical ISA string as metadata so the backend can set the ELF
+ // attributes correctly. We use AppendUnique so LTO will keep all of the
+ // unique ISA strings that were linked together.
+ const std::vector<std::string> &Features =
+ getTarget().getTargetOpts().Features;
+ auto ParseResult =
+ llvm::RISCVISAInfo::parseFeatures(T.isRISCV64() ? 64 : 32, Features);
+ if (!errorToBool(ParseResult.takeError()))
+ getModule().addModuleFlag(
+ llvm::Module::AppendUnique, "riscv-isa",
+ llvm::MDNode::get(
+ Ctx, llvm::MDString::get(Ctx, (*ParseResult)->toString())));
}
if (CodeGenOpts.SanitizeCfiCrossDso) {
diff --git a/clang/lib/CodeGen/CodeGenPGO.cpp b/clang/lib/CodeGen/CodeGenPGO.cpp
index 5d7c384..48c5e68 100644
--- a/clang/lib/CodeGen/CodeGenPGO.cpp
+++ b/clang/lib/CodeGen/CodeGenPGO.cpp
@@ -165,8 +165,7 @@ struct MapRegionCounters : public RecursiveASTVisitor<MapRegionCounters> {
llvm::DenseMap<const Stmt *, unsigned> &CounterMap;
/// The next bitmap byte index to assign.
unsigned NextMCDCBitmapIdx;
- /// The map of statements to MC/DC bitmap coverage objects.
- llvm::DenseMap<const Stmt *, unsigned> &MCDCBitmapMap;
+ MCDC::State &MCDCState;
/// Maximum number of supported MC/DC conditions in a boolean expression.
unsigned MCDCMaxCond;
/// The profile version.
@@ -176,11 +175,11 @@ struct MapRegionCounters : public RecursiveASTVisitor<MapRegionCounters> {
MapRegionCounters(PGOHashVersion HashVersion, uint64_t ProfileVersion,
llvm::DenseMap<const Stmt *, unsigned> &CounterMap,
- llvm::DenseMap<const Stmt *, unsigned> &MCDCBitmapMap,
- unsigned MCDCMaxCond, DiagnosticsEngine &Diag)
+ MCDC::State &MCDCState, unsigned MCDCMaxCond,
+ DiagnosticsEngine &Diag)
: NextCounter(0), Hash(HashVersion), CounterMap(CounterMap),
- NextMCDCBitmapIdx(0), MCDCBitmapMap(MCDCBitmapMap),
- MCDCMaxCond(MCDCMaxCond), ProfileVersion(ProfileVersion), Diag(Diag) {}
+ NextMCDCBitmapIdx(0), MCDCState(MCDCState), MCDCMaxCond(MCDCMaxCond),
+ ProfileVersion(ProfileVersion), Diag(Diag) {}
// Blocks and lambdas are handled as separate functions, so we need not
// traverse them in the parent context.
@@ -309,7 +308,7 @@ struct MapRegionCounters : public RecursiveASTVisitor<MapRegionCounters> {
// Otherwise, allocate the number of bytes required for the bitmap
// based on the number of conditions. Must be at least 1-byte long.
- MCDCBitmapMap[BinOp] = NextMCDCBitmapIdx;
+ MCDCState.BitmapMap[BinOp] = NextMCDCBitmapIdx;
unsigned SizeInBits = std::max<unsigned>(1L << NumCond, CHAR_BIT);
NextMCDCBitmapIdx += SizeInBits / CHAR_BIT;
}
@@ -987,10 +986,9 @@ void CodeGenPGO::mapRegionCounters(const Decl *D) {
unsigned MCDCMaxConditions = (CGM.getCodeGenOpts().MCDCCoverage) ? 6 : 0;
RegionCounterMap.reset(new llvm::DenseMap<const Stmt *, unsigned>);
- RegionMCDCBitmapMap.reset(new llvm::DenseMap<const Stmt *, unsigned>);
+ RegionMCDCState.reset(new MCDC::State);
MapRegionCounters Walker(HashVersion, ProfileVersion, *RegionCounterMap,
- *RegionMCDCBitmapMap, MCDCMaxConditions,
- CGM.getDiags());
+ *RegionMCDCState, MCDCMaxConditions, CGM.getDiags());
if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D))
Walker.TraverseDecl(const_cast<FunctionDecl *>(FD));
else if (const ObjCMethodDecl *MD = dyn_cast_or_null<ObjCMethodDecl>(D))
@@ -1001,7 +999,7 @@ void CodeGenPGO::mapRegionCounters(const Decl *D) {
Walker.TraverseDecl(const_cast<CapturedDecl *>(CD));
assert(Walker.NextCounter > 0 && "no entry counter mapped for decl");
NumRegionCounters = Walker.NextCounter;
- MCDCBitmapBytes = Walker.NextMCDCBitmapIdx;
+ RegionMCDCState->BitmapBytes = Walker.NextMCDCBitmapIdx;
FunctionHash = Walker.Hash.finalize();
}
@@ -1033,11 +1031,10 @@ void CodeGenPGO::emitCounterRegionMapping(const Decl *D) {
std::string CoverageMapping;
llvm::raw_string_ostream OS(CoverageMapping);
- RegionCondIDMap.reset(new llvm::DenseMap<const Stmt *, unsigned>);
+ RegionCondIDMap.reset(new llvm::DenseMap<const Stmt *, int16_t>);
CoverageMappingGen MappingGen(
*CGM.getCoverageMapping(), CGM.getContext().getSourceManager(),
- CGM.getLangOpts(), RegionCounterMap.get(), RegionMCDCBitmapMap.get(),
- RegionCondIDMap.get());
+ CGM.getLangOpts(), RegionCounterMap.get(), RegionMCDCState.get());
MappingGen.emitCounterMapping(D, OS);
OS.flush();
@@ -1119,7 +1116,7 @@ bool CodeGenPGO::canEmitMCDCCoverage(const CGBuilderTy &Builder) {
}
void CodeGenPGO::emitMCDCParameters(CGBuilderTy &Builder) {
- if (!canEmitMCDCCoverage(Builder) || !RegionMCDCBitmapMap)
+ if (!canEmitMCDCCoverage(Builder) || !RegionMCDCState)
return;
auto *I8PtrTy = llvm::PointerType::getUnqual(CGM.getLLVMContext());
@@ -1129,7 +1126,7 @@ void CodeGenPGO::emitMCDCParameters(CGBuilderTy &Builder) {
// anything.
llvm::Value *Args[3] = {llvm::ConstantExpr::getBitCast(FuncNameVar, I8PtrTy),
Builder.getInt64(FunctionHash),
- Builder.getInt32(MCDCBitmapBytes)};
+ Builder.getInt32(RegionMCDCState->BitmapBytes)};
Builder.CreateCall(
CGM.getIntrinsic(llvm::Intrinsic::instrprof_mcdc_parameters), Args);
}
@@ -1137,13 +1134,13 @@ void CodeGenPGO::emitMCDCParameters(CGBuilderTy &Builder) {
void CodeGenPGO::emitMCDCTestVectorBitmapUpdate(CGBuilderTy &Builder,
const Expr *S,
Address MCDCCondBitmapAddr) {
- if (!canEmitMCDCCoverage(Builder) || !RegionMCDCBitmapMap)
+ if (!canEmitMCDCCoverage(Builder) || !RegionMCDCState)
return;
S = S->IgnoreParens();
- auto ExprMCDCBitmapMapIterator = RegionMCDCBitmapMap->find(S);
- if (ExprMCDCBitmapMapIterator == RegionMCDCBitmapMap->end())
+ auto ExprMCDCBitmapMapIterator = RegionMCDCState->BitmapMap.find(S);
+ if (ExprMCDCBitmapMapIterator == RegionMCDCState->BitmapMap.end())
return;
// Extract the ID of the global bitmap associated with this expression.
@@ -1157,7 +1154,7 @@ void CodeGenPGO::emitMCDCTestVectorBitmapUpdate(CGBuilderTy &Builder,
// index represents an executed test vector.
llvm::Value *Args[5] = {llvm::ConstantExpr::getBitCast(FuncNameVar, I8PtrTy),
Builder.getInt64(FunctionHash),
- Builder.getInt32(MCDCBitmapBytes),
+ Builder.getInt32(RegionMCDCState->BitmapBytes),
Builder.getInt32(MCDCTestVectorBitmapID),
MCDCCondBitmapAddr.getPointer()};
Builder.CreateCall(
@@ -1166,12 +1163,12 @@ void CodeGenPGO::emitMCDCTestVectorBitmapUpdate(CGBuilderTy &Builder,
void CodeGenPGO::emitMCDCCondBitmapReset(CGBuilderTy &Builder, const Expr *S,
Address MCDCCondBitmapAddr) {
- if (!canEmitMCDCCoverage(Builder) || !RegionMCDCBitmapMap)
+ if (!canEmitMCDCCoverage(Builder) || !RegionMCDCState)
return;
S = S->IgnoreParens();
- if (RegionMCDCBitmapMap->find(S) == RegionMCDCBitmapMap->end())
+ if (!RegionMCDCState->BitmapMap.contains(S))
return;
// Emit intrinsic that resets a dedicated temporary value on the stack to 0.
@@ -1181,7 +1178,7 @@ void CodeGenPGO::emitMCDCCondBitmapReset(CGBuilderTy &Builder, const Expr *S,
void CodeGenPGO::emitMCDCCondBitmapUpdate(CGBuilderTy &Builder, const Expr *S,
Address MCDCCondBitmapAddr,
llvm::Value *Val) {
- if (!canEmitMCDCCoverage(Builder) || !RegionCondIDMap)
+ if (!canEmitMCDCCoverage(Builder) || !RegionMCDCState)
return;
// Even though, for simplicity, parentheses and unary logical-NOT operators
@@ -1193,13 +1190,13 @@ void CodeGenPGO::emitMCDCCondBitmapUpdate(CGBuilderTy &Builder, const Expr *S,
// also make debugging a bit easier.
S = CodeGenFunction::stripCond(S);
- auto ExprMCDCConditionIDMapIterator = RegionCondIDMap->find(S);
- if (ExprMCDCConditionIDMapIterator == RegionCondIDMap->end())
+ auto ExprMCDCConditionIDMapIterator = RegionMCDCState->CondIDMap.find(S);
+ if (ExprMCDCConditionIDMapIterator == RegionMCDCState->CondIDMap.end())
return;
// Extract the ID of the condition we are setting in the bitmap.
- unsigned CondID = ExprMCDCConditionIDMapIterator->second;
- assert(CondID > 0 && "Condition has no ID!");
+ auto CondID = ExprMCDCConditionIDMapIterator->second;
+ assert(CondID >= 0 && "Condition has no ID!");
auto *I8PtrTy = llvm::PointerType::getUnqual(CGM.getLLVMContext());
@@ -1208,7 +1205,7 @@ void CodeGenPGO::emitMCDCCondBitmapUpdate(CGBuilderTy &Builder, const Expr *S,
// the resulting value is used to update the boolean expression's bitmap.
llvm::Value *Args[5] = {llvm::ConstantExpr::getBitCast(FuncNameVar, I8PtrTy),
Builder.getInt64(FunctionHash),
- Builder.getInt32(CondID - 1),
+ Builder.getInt32(CondID),
MCDCCondBitmapAddr.getPointer(), Val};
Builder.CreateCall(
CGM.getIntrinsic(llvm::Intrinsic::instrprof_mcdc_condbitmap_update),
diff --git a/clang/lib/CodeGen/CodeGenPGO.h b/clang/lib/CodeGen/CodeGenPGO.h
index 6596b6c..369bf05 100644
--- a/clang/lib/CodeGen/CodeGenPGO.h
+++ b/clang/lib/CodeGen/CodeGenPGO.h
@@ -16,6 +16,7 @@
#include "CGBuilder.h"
#include "CodeGenModule.h"
#include "CodeGenTypes.h"
+#include "MCDCState.h"
#include "llvm/ProfileData/InstrProfReader.h"
#include <array>
#include <memory>
@@ -33,21 +34,20 @@ private:
std::array <unsigned, llvm::IPVK_Last + 1> NumValueSites;
unsigned NumRegionCounters;
- unsigned MCDCBitmapBytes;
uint64_t FunctionHash;
std::unique_ptr<llvm::DenseMap<const Stmt *, unsigned>> RegionCounterMap;
std::unique_ptr<llvm::DenseMap<const Stmt *, unsigned>> RegionMCDCBitmapMap;
- std::unique_ptr<llvm::DenseMap<const Stmt *, unsigned>> RegionCondIDMap;
+ std::unique_ptr<llvm::DenseMap<const Stmt *, int16_t>> RegionCondIDMap;
std::unique_ptr<llvm::DenseMap<const Stmt *, uint64_t>> StmtCountMap;
std::unique_ptr<llvm::InstrProfRecord> ProfRecord;
+ std::unique_ptr<MCDC::State> RegionMCDCState;
std::vector<uint64_t> RegionCounts;
uint64_t CurrentRegionCount;
public:
CodeGenPGO(CodeGenModule &CGModule)
: CGM(CGModule), FuncNameVar(nullptr), NumValueSites({{0}}),
- NumRegionCounters(0), MCDCBitmapBytes(0), FunctionHash(0),
- CurrentRegionCount(0) {}
+ NumRegionCounters(0), FunctionHash(0), CurrentRegionCount(0) {}
/// Whether or not we have PGO region data for the current function. This is
/// false both when we have no data at all and when our data has been
diff --git a/clang/lib/CodeGen/CoverageMappingGen.cpp b/clang/lib/CodeGen/CoverageMappingGen.cpp
index 0c43317..fdf821a 100644
--- a/clang/lib/CodeGen/CoverageMappingGen.cpp
+++ b/clang/lib/CodeGen/CoverageMappingGen.cpp
@@ -95,9 +95,6 @@ void CoverageSourceInfo::updateNextTokLoc(SourceLocation Loc) {
}
namespace {
-using MCDCConditionID = CounterMappingRegion::MCDCConditionID;
-using MCDCParameters = CounterMappingRegion::MCDCParameters;
-
/// A region of source code that can be mapped to a counter.
class SourceMappingRegion {
/// Primary Counter that is also used for Branch Regions for "True" branches.
@@ -107,7 +104,7 @@ class SourceMappingRegion {
std::optional<Counter> FalseCount;
/// Parameters used for Modified Condition/Decision Coverage
- MCDCParameters MCDCParams;
+ mcdc::Parameters MCDCParams;
/// The region's starting location.
std::optional<SourceLocation> LocStart;
@@ -131,7 +128,7 @@ public:
SkippedRegion(false) {}
SourceMappingRegion(Counter Count, std::optional<Counter> FalseCount,
- MCDCParameters MCDCParams,
+ mcdc::Parameters MCDCParams,
std::optional<SourceLocation> LocStart,
std::optional<SourceLocation> LocEnd,
bool GapRegion = false)
@@ -139,7 +136,7 @@ public:
LocStart(LocStart), LocEnd(LocEnd), GapRegion(GapRegion),
SkippedRegion(false) {}
- SourceMappingRegion(MCDCParameters MCDCParams,
+ SourceMappingRegion(mcdc::Parameters MCDCParams,
std::optional<SourceLocation> LocStart,
std::optional<SourceLocation> LocEnd)
: MCDCParams(MCDCParams), LocStart(LocStart), LocEnd(LocEnd),
@@ -185,9 +182,19 @@ public:
bool isBranch() const { return FalseCount.has_value(); }
- bool isMCDCDecision() const { return MCDCParams.NumConditions != 0; }
+ bool isMCDCDecision() const {
+ const auto *DecisionParams =
+ std::get_if<mcdc::DecisionParameters>(&MCDCParams);
+ assert(!DecisionParams || DecisionParams->NumConditions > 0);
+ return DecisionParams;
+ }
+
+ const auto &getMCDCDecisionParams() const {
+ return CounterMappingRegion::getParams<const mcdc::DecisionParameters>(
+ MCDCParams);
+ }
- const MCDCParameters &getMCDCParams() const { return MCDCParams; }
+ const mcdc::Parameters &getMCDCParams() const { return MCDCParams; }
};
/// Spelling locations for the start and end of a source region.
@@ -483,13 +490,13 @@ public:
SR.ColumnEnd));
} else if (Region.isBranch()) {
MappingRegions.push_back(CounterMappingRegion::makeBranchRegion(
- Region.getCounter(), Region.getFalseCounter(),
- Region.getMCDCParams(), *CovFileID, SR.LineStart, SR.ColumnStart,
- SR.LineEnd, SR.ColumnEnd));
+ Region.getCounter(), Region.getFalseCounter(), *CovFileID,
+ SR.LineStart, SR.ColumnStart, SR.LineEnd, SR.ColumnEnd,
+ Region.getMCDCParams()));
} else if (Region.isMCDCDecision()) {
MappingRegions.push_back(CounterMappingRegion::makeDecisionRegion(
- Region.getMCDCParams(), *CovFileID, SR.LineStart, SR.ColumnStart,
- SR.LineEnd, SR.ColumnEnd));
+ Region.getMCDCDecisionParams(), *CovFileID, SR.LineStart,
+ SR.ColumnStart, SR.LineEnd, SR.ColumnEnd));
} else {
MappingRegions.push_back(CounterMappingRegion::makeRegion(
Region.getCounter(), *CovFileID, SR.LineStart, SR.ColumnStart,
@@ -586,11 +593,6 @@ struct EmptyCoverageMappingBuilder : public CoverageMappingBuilder {
/// creation.
struct MCDCCoverageBuilder {
- struct DecisionIDPair {
- MCDCConditionID TrueID = 0;
- MCDCConditionID FalseID = 0;
- };
-
/// The AST walk recursively visits nested logical-AND or logical-OR binary
/// operator nodes and then visits their LHS and RHS children nodes. As this
/// happens, the algorithm will assign IDs to each operator's LHS and RHS side
@@ -681,14 +683,15 @@ struct MCDCCoverageBuilder {
private:
CodeGenModule &CGM;
- llvm::SmallVector<DecisionIDPair> DecisionStack;
- llvm::DenseMap<const Stmt *, MCDCConditionID> &CondIDs;
- llvm::DenseMap<const Stmt *, unsigned> &MCDCBitmapMap;
- MCDCConditionID NextID = 1;
+ llvm::SmallVector<mcdc::ConditionIDs> DecisionStack;
+ MCDC::State &MCDCState;
+ llvm::DenseMap<const Stmt *, mcdc::ConditionID> &CondIDs;
+ mcdc::ConditionID NextID = 0;
bool NotMapped = false;
- /// Represent a sentinel value of [0,0] for the bottom of DecisionStack.
- static constexpr DecisionIDPair DecisionStackSentinel{0, 0};
+ /// Represent a sentinel value as a pair of final decisions for the bottom
+ // of DecisionStack.
+ static constexpr mcdc::ConditionIDs DecisionStackSentinel{-1, -1};
/// Is this a logical-AND operation?
bool isLAnd(const BinaryOperator *E) const {
@@ -696,38 +699,36 @@ private:
}
public:
- MCDCCoverageBuilder(CodeGenModule &CGM,
- llvm::DenseMap<const Stmt *, MCDCConditionID> &CondIDMap,
- llvm::DenseMap<const Stmt *, unsigned> &MCDCBitmapMap)
- : CGM(CGM), DecisionStack(1, DecisionStackSentinel), CondIDs(CondIDMap),
- MCDCBitmapMap(MCDCBitmapMap) {}
+ MCDCCoverageBuilder(CodeGenModule &CGM, MCDC::State &MCDCState)
+ : CGM(CGM), DecisionStack(1, DecisionStackSentinel), MCDCState(MCDCState),
+ CondIDs(MCDCState.CondIDMap) {}
/// Return whether the build of the control flow map is at the top-level
/// (root) of a logical operator nest in a boolean expression prior to the
/// assignment of condition IDs.
- bool isIdle() const { return (NextID == 1 && !NotMapped); }
+ bool isIdle() const { return (NextID == 0 && !NotMapped); }
/// Return whether any IDs have been assigned in the build of the control
/// flow map, indicating that the map is being generated for this boolean
/// expression.
- bool isBuilding() const { return (NextID > 1); }
+ bool isBuilding() const { return (NextID > 0); }
/// Set the given condition's ID.
- void setCondID(const Expr *Cond, MCDCConditionID ID) {
+ void setCondID(const Expr *Cond, mcdc::ConditionID ID) {
CondIDs[CodeGenFunction::stripCond(Cond)] = ID;
}
/// Return the ID of a given condition.
- MCDCConditionID getCondID(const Expr *Cond) const {
+ mcdc::ConditionID getCondID(const Expr *Cond) const {
auto I = CondIDs.find(CodeGenFunction::stripCond(Cond));
if (I == CondIDs.end())
- return 0;
+ return -1;
else
return I->second;
}
/// Return the LHS Decision ([0,0] if not set).
- const DecisionIDPair &back() const { return DecisionStack.back(); }
+ const mcdc::ConditionIDs &back() const { return DecisionStack.back(); }
/// Push the binary operator statement to track the nest level and assign IDs
/// to the operator's LHS and RHS. The RHS may be a larger subtree that is
@@ -737,14 +738,15 @@ public:
return;
// If binary expression is disqualified, don't do mapping.
- if (!isBuilding() && !MCDCBitmapMap.contains(CodeGenFunction::stripCond(E)))
+ if (!isBuilding() &&
+ !MCDCState.BitmapMap.contains(CodeGenFunction::stripCond(E)))
NotMapped = true;
// Don't go any further if we don't need to map condition IDs.
if (NotMapped)
return;
- const DecisionIDPair &ParentDecision = DecisionStack.back();
+ const mcdc::ConditionIDs &ParentDecision = DecisionStack.back();
// If the operator itself has an assigned ID, this means it represents a
// larger subtree. In this case, assign that ID to its LHS node. Its RHS
@@ -755,23 +757,23 @@ public:
setCondID(E->getLHS(), NextID++);
// Assign a ID+1 for the RHS.
- MCDCConditionID RHSid = NextID++;
+ mcdc::ConditionID RHSid = NextID++;
setCondID(E->getRHS(), RHSid);
// Push the LHS decision IDs onto the DecisionStack.
if (isLAnd(E))
- DecisionStack.push_back({RHSid, ParentDecision.FalseID});
+ DecisionStack.push_back({ParentDecision[false], RHSid});
else
- DecisionStack.push_back({ParentDecision.TrueID, RHSid});
+ DecisionStack.push_back({RHSid, ParentDecision[true]});
}
/// Pop and return the LHS Decision ([0,0] if not set).
- DecisionIDPair pop() {
+ mcdc::ConditionIDs pop() {
if (!CGM.getCodeGenOpts().MCDCCoverage || NotMapped)
return DecisionStack.front();
assert(DecisionStack.size() > 1);
- DecisionIDPair D = DecisionStack.back();
+ mcdc::ConditionIDs D = DecisionStack.back();
DecisionStack.pop_back();
return D;
}
@@ -788,15 +790,15 @@ public:
// Reset state if not doing mapping.
if (NotMapped) {
NotMapped = false;
- assert(NextID == 1);
+ assert(NextID == 0);
return 0;
}
// Set number of conditions and reset.
- unsigned TotalConds = NextID - 1;
+ unsigned TotalConds = NextID;
// Reset ID back to beginning.
- NextID = 1;
+ NextID = 0;
return TotalConds;
}
@@ -810,8 +812,7 @@ struct CounterCoverageMappingBuilder
/// The map of statements to count values.
llvm::DenseMap<const Stmt *, unsigned> &CounterMap;
- /// The map of statements to bitmap coverage object values.
- llvm::DenseMap<const Stmt *, unsigned> &MCDCBitmapMap;
+ MCDC::State &MCDCState;
/// A stack of currently live regions.
llvm::SmallVector<SourceMappingRegion> RegionStack;
@@ -855,7 +856,7 @@ struct CounterCoverageMappingBuilder
return Counter::getCounter(CounterMap[S]);
}
- unsigned getRegionBitmap(const Stmt *S) { return MCDCBitmapMap[S]; }
+ unsigned getRegionBitmap(const Stmt *S) { return MCDCState.BitmapMap[S]; }
/// Push a region onto the stack.
///
@@ -865,8 +866,7 @@ struct CounterCoverageMappingBuilder
std::optional<SourceLocation> StartLoc = std::nullopt,
std::optional<SourceLocation> EndLoc = std::nullopt,
std::optional<Counter> FalseCount = std::nullopt,
- MCDCConditionID ID = 0, MCDCConditionID TrueID = 0,
- MCDCConditionID FalseID = 0) {
+ const mcdc::Parameters &BranchParams = std::monostate()) {
if (StartLoc && !FalseCount) {
MostRecentLocation = *StartLoc;
@@ -885,19 +885,17 @@ struct CounterCoverageMappingBuilder
StartLoc = std::nullopt;
if (EndLoc && EndLoc->isInvalid())
EndLoc = std::nullopt;
- RegionStack.emplace_back(Count, FalseCount,
- MCDCParameters{0, 0, ID, TrueID, FalseID},
- StartLoc, EndLoc);
+ RegionStack.emplace_back(Count, FalseCount, BranchParams, StartLoc, EndLoc);
return RegionStack.size() - 1;
}
- size_t pushRegion(unsigned BitmapIdx, unsigned Conditions,
+ size_t pushRegion(unsigned BitmapIdx, uint16_t Conditions,
std::optional<SourceLocation> StartLoc = std::nullopt,
std::optional<SourceLocation> EndLoc = std::nullopt) {
- RegionStack.emplace_back(MCDCParameters{BitmapIdx, Conditions}, StartLoc,
- EndLoc);
+ RegionStack.emplace_back(mcdc::DecisionParameters{BitmapIdx, Conditions},
+ StartLoc, EndLoc);
return RegionStack.size() - 1;
}
@@ -1024,15 +1022,12 @@ struct CounterCoverageMappingBuilder
return (Cond->EvaluateAsInt(Result, CVM.getCodeGenModule().getContext()));
}
- using MCDCDecisionIDPair = MCDCCoverageBuilder::DecisionIDPair;
-
/// Create a Branch Region around an instrumentable condition for coverage
/// and add it to the function's SourceRegions. A branch region tracks a
/// "True" counter and a "False" counter for boolean expressions that
/// result in the generation of a branch.
- void
- createBranchRegion(const Expr *C, Counter TrueCnt, Counter FalseCnt,
- const MCDCDecisionIDPair &IDPair = MCDCDecisionIDPair()) {
+ void createBranchRegion(const Expr *C, Counter TrueCnt, Counter FalseCnt,
+ const mcdc::ConditionIDs &Conds = {}) {
// Check for NULL conditions.
if (!C)
return;
@@ -1042,9 +1037,10 @@ struct CounterCoverageMappingBuilder
// function's SourceRegions) because it doesn't apply to any other source
// code other than the Condition.
if (CodeGenFunction::isInstrumentedCondition(C)) {
- MCDCConditionID ID = MCDCBuilder.getCondID(C);
- MCDCConditionID TrueID = IDPair.TrueID;
- MCDCConditionID FalseID = IDPair.FalseID;
+ mcdc::Parameters BranchParams;
+ mcdc::ConditionID ID = MCDCBuilder.getCondID(C);
+ if (ID >= 0)
+ BranchParams = mcdc::BranchParameters{ID, Conds};
// If a condition can fold to true or false, the corresponding branch
// will be removed. Create a region with both counters hard-coded to
@@ -1054,11 +1050,11 @@ struct CounterCoverageMappingBuilder
// CodeGenFunction.c always returns false, but that is very heavy-handed.
if (ConditionFoldsToBool(C))
popRegions(pushRegion(Counter::getZero(), getStart(C), getEnd(C),
- Counter::getZero(), ID, TrueID, FalseID));
+ Counter::getZero(), BranchParams));
else
// Otherwise, create a region with the True counter and False counter.
- popRegions(pushRegion(TrueCnt, getStart(C), getEnd(C), FalseCnt, ID,
- TrueID, FalseID));
+ popRegions(pushRegion(TrueCnt, getStart(C), getEnd(C), FalseCnt,
+ BranchParams));
}
}
@@ -1149,12 +1145,9 @@ struct CounterCoverageMappingBuilder
// we've seen this region.
if (StartLocs.insert(Loc).second) {
if (I.isBranch())
- SourceRegions.emplace_back(
- I.getCounter(), I.getFalseCounter(),
- MCDCParameters{0, 0, I.getMCDCParams().ID,
- I.getMCDCParams().TrueID,
- I.getMCDCParams().FalseID},
- Loc, getEndOfFileOrMacro(Loc), I.isBranch());
+ SourceRegions.emplace_back(I.getCounter(), I.getFalseCounter(),
+ I.getMCDCParams(), Loc,
+ getEndOfFileOrMacro(Loc), I.isBranch());
else
SourceRegions.emplace_back(I.getCounter(), Loc,
getEndOfFileOrMacro(Loc));
@@ -1337,12 +1330,9 @@ struct CounterCoverageMappingBuilder
CounterCoverageMappingBuilder(
CoverageMappingModuleGen &CVM,
llvm::DenseMap<const Stmt *, unsigned> &CounterMap,
- llvm::DenseMap<const Stmt *, unsigned> &MCDCBitmapMap,
- llvm::DenseMap<const Stmt *, MCDCConditionID> &CondIDMap,
- SourceManager &SM, const LangOptions &LangOpts)
+ MCDC::State &MCDCState, SourceManager &SM, const LangOptions &LangOpts)
: CoverageMappingBuilder(CVM, SM, LangOpts), CounterMap(CounterMap),
- MCDCBitmapMap(MCDCBitmapMap),
- MCDCBuilder(CVM.getCodeGenModule(), CondIDMap, MCDCBitmapMap) {}
+ MCDCState(MCDCState), MCDCBuilder(CVM.getCodeGenModule(), MCDCState) {}
/// Write the mapping data to the output stream
void write(llvm::raw_ostream &OS) {
@@ -2120,9 +2110,10 @@ static void dump(llvm::raw_ostream &OS, StringRef FunctionName,
OS << "File " << R.FileID << ", " << R.LineStart << ":" << R.ColumnStart
<< " -> " << R.LineEnd << ":" << R.ColumnEnd << " = ";
- if (R.Kind == CounterMappingRegion::MCDCDecisionRegion) {
- OS << "M:" << R.MCDCParams.BitmapIdx;
- OS << ", C:" << R.MCDCParams.NumConditions;
+ if (const auto *DecisionParams =
+ std::get_if<mcdc::DecisionParameters>(&R.MCDCParams)) {
+ OS << "M:" << DecisionParams->BitmapIdx;
+ OS << ", C:" << DecisionParams->NumConditions;
} else {
Ctx.dump(R.Count, OS);
@@ -2133,9 +2124,11 @@ static void dump(llvm::raw_ostream &OS, StringRef FunctionName,
}
}
- if (R.Kind == CounterMappingRegion::MCDCBranchRegion) {
- OS << " [" << R.MCDCParams.ID << "," << R.MCDCParams.TrueID;
- OS << "," << R.MCDCParams.FalseID << "] ";
+ if (const auto *BranchParams =
+ std::get_if<mcdc::BranchParameters>(&R.MCDCParams)) {
+ OS << " [" << BranchParams->ID + 1 << ","
+ << BranchParams->Conds[true] + 1;
+ OS << "," << BranchParams->Conds[false] + 1 << "] ";
}
if (R.Kind == CounterMappingRegion::ExpansionRegion)
@@ -2344,9 +2337,9 @@ unsigned CoverageMappingModuleGen::getFileID(FileEntryRef File) {
void CoverageMappingGen::emitCounterMapping(const Decl *D,
llvm::raw_ostream &OS) {
- assert(CounterMap && MCDCBitmapMap);
- CounterCoverageMappingBuilder Walker(CVM, *CounterMap, *MCDCBitmapMap,
- *CondIDMap, SM, LangOpts);
+ assert(CounterMap && MCDCState);
+ CounterCoverageMappingBuilder Walker(CVM, *CounterMap, *MCDCState, SM,
+ LangOpts);
Walker.VisitDecl(D);
Walker.write(OS);
}
diff --git a/clang/lib/CodeGen/CoverageMappingGen.h b/clang/lib/CodeGen/CoverageMappingGen.h
index 62cea17..f7c59c4 100644
--- a/clang/lib/CodeGen/CoverageMappingGen.h
+++ b/clang/lib/CodeGen/CoverageMappingGen.h
@@ -91,6 +91,10 @@ namespace CodeGen {
class CodeGenModule;
+namespace MCDC {
+struct State;
+}
+
/// Organizes the cross-function state that is used while generating
/// code coverage mapping data.
class CoverageMappingModuleGen {
@@ -150,22 +154,20 @@ class CoverageMappingGen {
SourceManager &SM;
const LangOptions &LangOpts;
llvm::DenseMap<const Stmt *, unsigned> *CounterMap;
- llvm::DenseMap<const Stmt *, unsigned> *MCDCBitmapMap;
- llvm::DenseMap<const Stmt *, unsigned> *CondIDMap;
+ MCDC::State *MCDCState;
public:
CoverageMappingGen(CoverageMappingModuleGen &CVM, SourceManager &SM,
const LangOptions &LangOpts)
: CVM(CVM), SM(SM), LangOpts(LangOpts), CounterMap(nullptr),
- MCDCBitmapMap(nullptr), CondIDMap(nullptr) {}
+ MCDCState(nullptr) {}
CoverageMappingGen(CoverageMappingModuleGen &CVM, SourceManager &SM,
const LangOptions &LangOpts,
llvm::DenseMap<const Stmt *, unsigned> *CounterMap,
- llvm::DenseMap<const Stmt *, unsigned> *MCDCBitmapMap,
- llvm::DenseMap<const Stmt *, unsigned> *CondIDMap)
+ MCDC::State *MCDCState)
: CVM(CVM), SM(SM), LangOpts(LangOpts), CounterMap(CounterMap),
- MCDCBitmapMap(MCDCBitmapMap), CondIDMap(CondIDMap) {}
+ MCDCState(MCDCState) {}
/// Emit the coverage mapping data which maps the regions of
/// code to counters that will be used to find the execution
diff --git a/clang/lib/CodeGen/LinkInModulesPass.cpp b/clang/lib/CodeGen/LinkInModulesPass.cpp
index 6ce2b94..929539c 100644
--- a/clang/lib/CodeGen/LinkInModulesPass.cpp
+++ b/clang/lib/CodeGen/LinkInModulesPass.cpp
@@ -14,6 +14,10 @@
#include "LinkInModulesPass.h"
#include "BackendConsumer.h"
+#include "clang/Basic/CodeGenOptions.h"
+#include "clang/Basic/FileManager.h"
+#include "clang/Basic/SourceManager.h"
+
using namespace llvm;
LinkInModulesPass::LinkInModulesPass(clang::BackendConsumer *BC,
@@ -21,9 +25,15 @@ LinkInModulesPass::LinkInModulesPass(clang::BackendConsumer *BC,
: BC(BC), ShouldLinkFiles(ShouldLinkFiles) {}
PreservedAnalyses LinkInModulesPass::run(Module &M, ModuleAnalysisManager &AM) {
+ if (!BC)
+ return PreservedAnalyses::all();
+
+ // Re-load bitcode modules from files
+ if (BC->ReloadModules(&M))
+ report_fatal_error("Bitcode module re-loading failed, aborted!");
- if (BC && BC->LinkInModules(&M, ShouldLinkFiles))
- report_fatal_error("Bitcode module linking failed, compilation aborted!");
+ if (BC->LinkInModules(&M, ShouldLinkFiles))
+ report_fatal_error("Bitcode module re-linking failed, aborted!");
return PreservedAnalyses::all();
}
diff --git a/clang/lib/CodeGen/MCDCState.h b/clang/lib/CodeGen/MCDCState.h
new file mode 100644
index 0000000..e6e3923
--- /dev/null
+++ b/clang/lib/CodeGen/MCDCState.h
@@ -0,0 +1,36 @@
+//===---- MCDCState.h - Per-Function MC/DC state ----------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Per-Function MC/DC state for PGO
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_LIB_CODEGEN_MCDCSTATE_H
+#define LLVM_CLANG_LIB_CODEGEN_MCDCSTATE_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ProfileData/Coverage/MCDCTypes.h"
+
+namespace clang {
+class Stmt;
+} // namespace clang
+
+namespace clang::CodeGen::MCDC {
+
+using namespace llvm::coverage::mcdc;
+
+/// Per-Function MC/DC state
+struct State {
+ unsigned BitmapBytes = 0;
+ llvm::DenseMap<const Stmt *, unsigned> BitmapMap;
+ llvm::DenseMap<const Stmt *, ConditionID> CondIDMap;
+};
+
+} // namespace clang::CodeGen::MCDC
+
+#endif // LLVM_CLANG_LIB_CODEGEN_MCDCSTATE_H
diff --git a/clang/lib/Driver/Action.cpp b/clang/lib/Driver/Action.cpp
index 849bf60..7b1a1bb 100644
--- a/clang/lib/Driver/Action.cpp
+++ b/clang/lib/Driver/Action.cpp
@@ -32,6 +32,8 @@ const char *Action::getClassName(ActionClass AC) {
case CompileJobClass: return "compiler";
case BackendJobClass: return "backend";
case AssembleJobClass: return "assembler";
+ case InstallAPIJobClass:
+ return "installapi";
case IfsMergeJobClass: return "interface-stub-merger";
case LinkJobClass: return "linker";
case LipoJobClass: return "lipo";
@@ -362,6 +364,11 @@ void ExtractAPIJobAction::anchor() {}
ExtractAPIJobAction::ExtractAPIJobAction(Action *Inputs, types::ID OutputType)
: JobAction(ExtractAPIJobClass, Inputs, OutputType) {}
+void InstallAPIJobAction::anchor() {}
+
+InstallAPIJobAction::InstallAPIJobAction(Action *Inputs, types::ID OutputType)
+ : JobAction(InstallAPIJobClass, Inputs, OutputType) {}
+
void AnalyzeJobAction::anchor() {}
AnalyzeJobAction::AnalyzeJobAction(Action *Input, types::ID OutputType)
diff --git a/clang/lib/Driver/Driver.cpp b/clang/lib/Driver/Driver.cpp
index 00e1407..cf84ef2 100644
--- a/clang/lib/Driver/Driver.cpp
+++ b/clang/lib/Driver/Driver.cpp
@@ -4189,6 +4189,11 @@ void Driver::BuildActions(Compilation &C, DerivedArgList &Args,
break;
}
+ if (auto *IAA = dyn_cast<InstallAPIJobAction>(Current)) {
+ Current = nullptr;
+ break;
+ }
+
// FIXME: Should we include any prior module file outputs as inputs of
// later actions in the same command line?
@@ -4319,6 +4324,13 @@ void Driver::BuildActions(Compilation &C, DerivedArgList &Args,
if (!MergerInputs.empty())
Actions.push_back(
C.MakeAction<IfsMergeJobAction>(MergerInputs, types::TY_Image));
+ } else if (Args.hasArg(options::OPT_installapi)) {
+ // TODO: Lift restriction once operation can handle multiple inputs.
+ assert(Inputs.size() == 1 && "InstallAPI action can only handle 1 input");
+ const auto [InputType, InputArg] = Inputs.front();
+ Action *Current = C.MakeAction<InputAction>(*InputArg, InputType);
+ Actions.push_back(
+ C.MakeAction<InstallAPIJobAction>(Current, types::TY_TextAPI));
}
for (auto Opt : {options::OPT_print_supported_cpus,
@@ -4762,6 +4774,8 @@ Action *Driver::ConstructPhaseAction(
return C.MakeAction<VerifyPCHJobAction>(Input, types::TY_Nothing);
if (Args.hasArg(options::OPT_extract_api))
return C.MakeAction<ExtractAPIJobAction>(Input, types::TY_API_INFO);
+ if (Args.hasArg(options::OPT_installapi))
+ return C.MakeAction<InstallAPIJobAction>(Input, types::TY_TextAPI);
return C.MakeAction<CompileJobAction>(Input, types::TY_LLVM_BC);
}
case phases::Backend: {
@@ -6441,7 +6455,7 @@ bool Driver::ShouldUseClangCompiler(const JobAction &JA) const {
// And say "no" if this is not a kind of action clang understands.
if (!isa<PreprocessJobAction>(JA) && !isa<PrecompileJobAction>(JA) &&
!isa<CompileJobAction>(JA) && !isa<BackendJobAction>(JA) &&
- !isa<ExtractAPIJobAction>(JA))
+ !isa<ExtractAPIJobAction>(JA) && !isa<InstallAPIJobAction>(JA))
return false;
return true;
diff --git a/clang/lib/Driver/ToolChain.cpp b/clang/lib/Driver/ToolChain.cpp
index 3880305..657577c 100644
--- a/clang/lib/Driver/ToolChain.cpp
+++ b/clang/lib/Driver/ToolChain.cpp
@@ -532,6 +532,7 @@ Tool *ToolChain::getTool(Action::ActionClass AC) const {
case Action::PrecompileJobClass:
case Action::PreprocessJobClass:
case Action::ExtractAPIJobClass:
+ case Action::InstallAPIJobClass:
case Action::AnalyzeJobClass:
case Action::MigrateJobClass:
case Action::VerifyPCHJobClass:
diff --git a/clang/lib/Driver/ToolChains/Clang.cpp b/clang/lib/Driver/ToolChains/Clang.cpp
index 4459d86..47305f7 100644
--- a/clang/lib/Driver/ToolChains/Clang.cpp
+++ b/clang/lib/Driver/ToolChains/Clang.cpp
@@ -4939,6 +4939,17 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
if (Arg *ExtractAPIIgnoresFileArg =
Args.getLastArg(options::OPT_extract_api_ignores_EQ))
ExtractAPIIgnoresFileArg->render(Args, CmdArgs);
+ } else if (isa<InstallAPIJobAction>(JA)) {
+ if (!Triple.isOSDarwin())
+ D.Diag(diag::err_drv_installapi_unsupported) << Triple.str();
+
+ CmdArgs.push_back("-installapi");
+ // Add necessary library arguments for InstallAPI.
+ if (const Arg *A = Args.getLastArg(options::OPT_install__name))
+ A->render(Args, CmdArgs);
+ if (const Arg *A = Args.getLastArg(options::OPT_current__version))
+ A->render(Args, CmdArgs);
+
} else {
assert((isa<CompileJobAction>(JA) || isa<BackendJobAction>(JA)) &&
"Invalid action for clang tool.");
diff --git a/clang/lib/Driver/ToolChains/Flang.cpp b/clang/lib/Driver/ToolChains/Flang.cpp
index 23da08a..6168b42d 100644
--- a/clang/lib/Driver/ToolChains/Flang.cpp
+++ b/clang/lib/Driver/ToolChains/Flang.cpp
@@ -249,6 +249,20 @@ void Flang::AddRISCVTargetArgs(const ArgList &Args,
}
}
+void Flang::AddX86_64TargetArgs(const ArgList &Args,
+ ArgStringList &CmdArgs) const {
+ if (Arg *A = Args.getLastArg(options::OPT_masm_EQ)) {
+ StringRef Value = A->getValue();
+ if (Value == "intel" || Value == "att") {
+ CmdArgs.push_back(Args.MakeArgString("-mllvm"));
+ CmdArgs.push_back(Args.MakeArgString("-x86-asm-syntax=" + Value));
+ } else {
+ getToolChain().getDriver().Diag(diag::err_drv_unsupported_option_argument)
+ << A->getSpelling() << Value;
+ }
+ }
+}
+
static void addVSDefines(const ToolChain &TC, const ArgList &Args,
ArgStringList &CmdArgs) {
@@ -374,6 +388,7 @@ void Flang::addTargetOptions(const ArgList &Args,
break;
case llvm::Triple::x86_64:
getTargetFeatures(D, Triple, Args, CmdArgs, /*ForAs*/ false);
+ AddX86_64TargetArgs(Args, CmdArgs);
break;
}
diff --git a/clang/lib/Driver/ToolChains/Flang.h b/clang/lib/Driver/ToolChains/Flang.h
index ec2e545..9f5e26b 100644
--- a/clang/lib/Driver/ToolChains/Flang.h
+++ b/clang/lib/Driver/ToolChains/Flang.h
@@ -77,6 +77,13 @@ private:
void AddRISCVTargetArgs(const llvm::opt::ArgList &Args,
llvm::opt::ArgStringList &CmdArgs) const;
+ /// Add specific options for X86_64 target.
+ ///
+ /// \param [in] Args The list of input driver arguments
+ /// \param [out] CmdArgs The list of output command arguments
+ void AddX86_64TargetArgs(const llvm::opt::ArgList &Args,
+ llvm::opt::ArgStringList &CmdArgs) const;
+
/// Extract offload options from the driver arguments and add them to
/// the command arguments.
/// \param [in] C The current compilation for the driver invocation
diff --git a/clang/lib/Format/ContinuationIndenter.cpp b/clang/lib/Format/ContinuationIndenter.cpp
index 0b2ef97..159d130 100644
--- a/clang/lib/Format/ContinuationIndenter.cpp
+++ b/clang/lib/Format/ContinuationIndenter.cpp
@@ -329,12 +329,12 @@ bool ContinuationIndenter::canBreak(const LineState &State) {
// Don't break after very short return types (e.g. "void") as that is often
// unexpected.
if (Current.is(TT_FunctionDeclarationName)) {
- if (Style.AlwaysBreakAfterReturnType == FormatStyle::RTBS_None &&
+ if (Style.BreakAfterReturnType == FormatStyle::RTBS_None &&
State.Column < 6) {
return false;
}
- if (Style.AlwaysBreakAfterReturnType == FormatStyle::RTBS_ExceptShortType) {
+ if (Style.BreakAfterReturnType == FormatStyle::RTBS_ExceptShortType) {
assert(State.Column >= State.FirstIndent);
if (State.Column - State.FirstIndent < 6)
return false;
@@ -597,7 +597,7 @@ bool ContinuationIndenter::mustBreak(const LineState &State) {
!State.Line->ReturnTypeWrapped &&
// Don't break before a C# function when no break after return type.
(!Style.isCSharp() ||
- Style.AlwaysBreakAfterReturnType > FormatStyle::RTBS_ExceptShortType) &&
+ Style.BreakAfterReturnType > FormatStyle::RTBS_ExceptShortType) &&
// Don't always break between a JavaScript `function` and the function
// name.
!Style.isJavaScript() && Previous.isNot(tok::kw_template) &&
diff --git a/clang/lib/Format/Format.cpp b/clang/lib/Format/Format.cpp
index d2cc466..e67b210 100644
--- a/clang/lib/Format/Format.cpp
+++ b/clang/lib/Format/Format.cpp
@@ -877,6 +877,7 @@ template <> struct MappingTraits<FormatStyle> {
if (!IO.outputting()) {
IO.mapOptional("AlignEscapedNewlinesLeft", Style.AlignEscapedNewlines);
IO.mapOptional("AllowAllConstructorInitializersOnNextLine", OnNextLine);
+ IO.mapOptional("AlwaysBreakAfterReturnType", Style.BreakAfterReturnType);
IO.mapOptional("AlwaysBreakTemplateDeclarations",
Style.BreakTemplateDeclarations);
IO.mapOptional("BreakBeforeInheritanceComma",
@@ -941,8 +942,6 @@ template <> struct MappingTraits<FormatStyle> {
Style.AllowShortLoopsOnASingleLine);
IO.mapOptional("AlwaysBreakAfterDefinitionReturnType",
Style.AlwaysBreakAfterDefinitionReturnType);
- IO.mapOptional("AlwaysBreakAfterReturnType",
- Style.AlwaysBreakAfterReturnType);
IO.mapOptional("AlwaysBreakBeforeMultilineStrings",
Style.AlwaysBreakBeforeMultilineStrings);
IO.mapOptional("AttributeMacros", Style.AttributeMacros);
@@ -957,6 +956,7 @@ template <> struct MappingTraits<FormatStyle> {
IO.mapOptional("BreakAfterAttributes", Style.BreakAfterAttributes);
IO.mapOptional("BreakAfterJavaFieldAnnotations",
Style.BreakAfterJavaFieldAnnotations);
+ IO.mapOptional("BreakAfterReturnType", Style.BreakAfterReturnType);
IO.mapOptional("BreakArrays", Style.BreakArrays);
IO.mapOptional("BreakBeforeBinaryOperators",
Style.BreakBeforeBinaryOperators);
@@ -1126,17 +1126,16 @@ template <> struct MappingTraits<FormatStyle> {
Style.WhitespaceSensitiveMacros);
// If AlwaysBreakAfterDefinitionReturnType was specified but
- // AlwaysBreakAfterReturnType was not, initialize the latter from the
- // former for backwards compatibility.
+ // BreakAfterReturnType was not, initialize the latter from the former for
+ // backwards compatibility.
if (Style.AlwaysBreakAfterDefinitionReturnType != FormatStyle::DRTBS_None &&
- Style.AlwaysBreakAfterReturnType == FormatStyle::RTBS_None) {
+ Style.BreakAfterReturnType == FormatStyle::RTBS_None) {
if (Style.AlwaysBreakAfterDefinitionReturnType ==
FormatStyle::DRTBS_All) {
- Style.AlwaysBreakAfterReturnType = FormatStyle::RTBS_AllDefinitions;
+ Style.BreakAfterReturnType = FormatStyle::RTBS_AllDefinitions;
} else if (Style.AlwaysBreakAfterDefinitionReturnType ==
FormatStyle::DRTBS_TopLevel) {
- Style.AlwaysBreakAfterReturnType =
- FormatStyle::RTBS_TopLevelDefinitions;
+ Style.BreakAfterReturnType = FormatStyle::RTBS_TopLevelDefinitions;
}
}
@@ -1407,29 +1406,28 @@ static void expandPresetsSpacesInParens(FormatStyle &Expanded) {
FormatStyle getLLVMStyle(FormatStyle::LanguageKind Language) {
FormatStyle LLVMStyle;
- LLVMStyle.InheritsParentConfig = false;
- LLVMStyle.Language = Language;
LLVMStyle.AccessModifierOffset = -2;
- LLVMStyle.AlignEscapedNewlines = FormatStyle::ENAS_Right;
LLVMStyle.AlignAfterOpenBracket = FormatStyle::BAS_Align;
LLVMStyle.AlignArrayOfStructures = FormatStyle::AIAS_None;
- LLVMStyle.AlignOperands = FormatStyle::OAS_Align;
LLVMStyle.AlignConsecutiveAssignments = {};
- LLVMStyle.AlignConsecutiveAssignments.Enabled = false;
- LLVMStyle.AlignConsecutiveAssignments.AcrossEmptyLines = false;
LLVMStyle.AlignConsecutiveAssignments.AcrossComments = false;
+ LLVMStyle.AlignConsecutiveAssignments.AcrossEmptyLines = false;
LLVMStyle.AlignConsecutiveAssignments.AlignCompound = false;
LLVMStyle.AlignConsecutiveAssignments.AlignFunctionPointers = false;
+ LLVMStyle.AlignConsecutiveAssignments.Enabled = false;
LLVMStyle.AlignConsecutiveAssignments.PadOperators = true;
LLVMStyle.AlignConsecutiveBitFields = {};
LLVMStyle.AlignConsecutiveDeclarations = {};
LLVMStyle.AlignConsecutiveMacros = {};
LLVMStyle.AlignConsecutiveShortCaseStatements = {};
+ LLVMStyle.AlignEscapedNewlines = FormatStyle::ENAS_Right;
+ LLVMStyle.AlignOperands = FormatStyle::OAS_Align;
LLVMStyle.AlignTrailingComments = {};
LLVMStyle.AlignTrailingComments.Kind = FormatStyle::TCAS_Always;
LLVMStyle.AlignTrailingComments.OverEmptyLines = 0;
LLVMStyle.AllowAllArgumentsOnNextLine = true;
LLVMStyle.AllowAllParametersOfDeclarationOnNextLine = true;
+ LLVMStyle.AllowBreakBeforeNoexceptSpecifier = FormatStyle::BBNSS_Never;
LLVMStyle.AllowShortBlocksOnASingleLine = FormatStyle::SBS_Never;
LLVMStyle.AllowShortCaseLabelsOnASingleLine = false;
LLVMStyle.AllowShortCompoundRequirementOnASingleLine = true;
@@ -1438,14 +1436,12 @@ FormatStyle getLLVMStyle(FormatStyle::LanguageKind Language) {
LLVMStyle.AllowShortIfStatementsOnASingleLine = FormatStyle::SIS_Never;
LLVMStyle.AllowShortLambdasOnASingleLine = FormatStyle::SLS_All;
LLVMStyle.AllowShortLoopsOnASingleLine = false;
- LLVMStyle.AlwaysBreakAfterReturnType = FormatStyle::RTBS_None;
LLVMStyle.AlwaysBreakAfterDefinitionReturnType = FormatStyle::DRTBS_None;
LLVMStyle.AlwaysBreakBeforeMultilineStrings = false;
- LLVMStyle.BreakTemplateDeclarations = FormatStyle::BTDS_MultiLine;
LLVMStyle.AttributeMacros.push_back("__capability");
- LLVMStyle.BitFieldColonSpacing = FormatStyle::BFCS_Both;
LLVMStyle.BinPackArguments = true;
LLVMStyle.BinPackParameters = true;
+ LLVMStyle.BitFieldColonSpacing = FormatStyle::BFCS_Both;
LLVMStyle.BracedInitializerIndentWidth = std::nullopt;
LLVMStyle.BraceWrapping = {/*AfterCaseLabel=*/false,
/*AfterClass=*/false,
@@ -1468,16 +1464,17 @@ FormatStyle getLLVMStyle(FormatStyle::LanguageKind Language) {
LLVMStyle.BreakAdjacentStringLiterals = true;
LLVMStyle.BreakAfterAttributes = FormatStyle::ABS_Leave;
LLVMStyle.BreakAfterJavaFieldAnnotations = false;
+ LLVMStyle.BreakAfterReturnType = FormatStyle::RTBS_None;
LLVMStyle.BreakArrays = true;
LLVMStyle.BreakBeforeBinaryOperators = FormatStyle::BOS_None;
LLVMStyle.BreakBeforeBraces = FormatStyle::BS_Attach;
LLVMStyle.BreakBeforeConceptDeclarations = FormatStyle::BBCDS_Always;
LLVMStyle.BreakBeforeInlineASMColon = FormatStyle::BBIAS_OnlyMultiline;
- LLVMStyle.AllowBreakBeforeNoexceptSpecifier = FormatStyle::BBNSS_Never;
LLVMStyle.BreakBeforeTernaryOperators = true;
LLVMStyle.BreakConstructorInitializers = FormatStyle::BCIS_BeforeColon;
LLVMStyle.BreakInheritanceList = FormatStyle::BILS_BeforeColon;
LLVMStyle.BreakStringLiterals = true;
+ LLVMStyle.BreakTemplateDeclarations = FormatStyle::BTDS_MultiLine;
LLVMStyle.ColumnLimit = 80;
LLVMStyle.CommentPragmas = "^ IWYU pragma:";
LLVMStyle.CompactNamespaces = false;
@@ -1494,22 +1491,23 @@ FormatStyle getLLVMStyle(FormatStyle::LanguageKind Language) {
LLVMStyle.ForEachMacros.push_back("Q_FOREACH");
LLVMStyle.ForEachMacros.push_back("BOOST_FOREACH");
LLVMStyle.IfMacros.push_back("KJ_IF_MAYBE");
+ LLVMStyle.IncludeStyle.IncludeBlocks = tooling::IncludeStyle::IBS_Preserve;
LLVMStyle.IncludeStyle.IncludeCategories = {
{"^\"(llvm|llvm-c|clang|clang-c)/", 2, 0, false},
{"^(<|\"(gtest|gmock|isl|json)/)", 3, 0, false},
{".*", 1, 0, false}};
LLVMStyle.IncludeStyle.IncludeIsMainRegex = "(Test)?$";
- LLVMStyle.IncludeStyle.IncludeBlocks = tooling::IncludeStyle::IBS_Preserve;
LLVMStyle.IncludeStyle.MainIncludeChar = tooling::IncludeStyle::MICD_Quote;
LLVMStyle.IndentAccessModifiers = false;
- LLVMStyle.IndentCaseLabels = false;
LLVMStyle.IndentCaseBlocks = false;
+ LLVMStyle.IndentCaseLabels = false;
LLVMStyle.IndentExternBlock = FormatStyle::IEBS_AfterExternBlock;
LLVMStyle.IndentGotoLabels = true;
LLVMStyle.IndentPPDirectives = FormatStyle::PPDIS_None;
LLVMStyle.IndentRequiresClause = true;
LLVMStyle.IndentWidth = 2;
LLVMStyle.IndentWrappedFunctionNames = false;
+ LLVMStyle.InheritsParentConfig = false;
LLVMStyle.InsertBraces = false;
LLVMStyle.InsertNewlineAtEOF = false;
LLVMStyle.InsertTrailingCommas = FormatStyle::TCS_None;
@@ -1522,6 +1520,7 @@ FormatStyle getLLVMStyle(FormatStyle::LanguageKind Language) {
LLVMStyle.KeepEmptyLinesAtEOF = false;
LLVMStyle.KeepEmptyLinesAtTheStartOfBlocks = true;
LLVMStyle.LambdaBodyIndentation = FormatStyle::LBI_Signature;
+ LLVMStyle.Language = Language;
LLVMStyle.LineEnding = FormatStyle::LE_DeriveLF;
LLVMStyle.MaxEmptyLinesToKeep = 1;
LLVMStyle.NamespaceIndentation = FormatStyle::NI_None;
@@ -1551,7 +1550,9 @@ FormatStyle getLLVMStyle(FormatStyle::LanguageKind Language) {
LLVMStyle.SpaceAfterLogicalNot = false;
LLVMStyle.SpaceAfterTemplateKeyword = true;
LLVMStyle.SpaceAroundPointerQualifiers = FormatStyle::SAPQ_Default;
+ LLVMStyle.SpaceBeforeAssignmentOperators = true;
LLVMStyle.SpaceBeforeCaseColon = false;
+ LLVMStyle.SpaceBeforeCpp11BracedList = false;
LLVMStyle.SpaceBeforeCtorInitializerColon = true;
LLVMStyle.SpaceBeforeInheritanceColon = true;
LLVMStyle.SpaceBeforeJsonColon = false;
@@ -1561,8 +1562,6 @@ FormatStyle getLLVMStyle(FormatStyle::LanguageKind Language) {
LLVMStyle.SpaceBeforeParensOptions.AfterForeachMacros = true;
LLVMStyle.SpaceBeforeParensOptions.AfterIfMacros = true;
LLVMStyle.SpaceBeforeRangeBasedForLoopColon = true;
- LLVMStyle.SpaceBeforeAssignmentOperators = true;
- LLVMStyle.SpaceBeforeCpp11BracedList = false;
LLVMStyle.SpaceBeforeSquareBrackets = false;
LLVMStyle.SpaceInEmptyBlock = false;
LLVMStyle.SpacesBeforeTrailingComments = 1;
@@ -1585,16 +1584,16 @@ FormatStyle getLLVMStyle(FormatStyle::LanguageKind Language) {
LLVMStyle.WhitespaceSensitiveMacros.push_back("STRINGIZE");
LLVMStyle.PenaltyBreakAssignment = prec::Assignment;
+ LLVMStyle.PenaltyBreakBeforeFirstCallParameter = 19;
LLVMStyle.PenaltyBreakComment = 300;
LLVMStyle.PenaltyBreakFirstLessLess = 120;
- LLVMStyle.PenaltyBreakString = 1000;
- LLVMStyle.PenaltyExcessCharacter = 1000000;
- LLVMStyle.PenaltyReturnTypeOnItsOwnLine = 60;
- LLVMStyle.PenaltyBreakBeforeFirstCallParameter = 19;
LLVMStyle.PenaltyBreakOpenParenthesis = 0;
LLVMStyle.PenaltyBreakScopeResolution = 500;
+ LLVMStyle.PenaltyBreakString = 1000;
LLVMStyle.PenaltyBreakTemplateDeclaration = prec::Relational;
+ LLVMStyle.PenaltyExcessCharacter = 1000000;
LLVMStyle.PenaltyIndentedWhitespace = 0;
+ LLVMStyle.PenaltyReturnTypeOnItsOwnLine = 60;
// Defaults that differ when not C++.
switch (Language) {
@@ -1633,12 +1632,12 @@ FormatStyle getGoogleStyle(FormatStyle::LanguageKind Language) {
GoogleStyle.AlwaysBreakBeforeMultilineStrings = true;
GoogleStyle.BreakTemplateDeclarations = FormatStyle::BTDS_Yes;
GoogleStyle.DerivePointerAlignment = true;
+ GoogleStyle.IncludeStyle.IncludeBlocks = tooling::IncludeStyle::IBS_Regroup;
GoogleStyle.IncludeStyle.IncludeCategories = {{"^<ext/.*\\.h>", 2, 0, false},
{"^<.*\\.h>", 1, 0, false},
{"^<.*", 2, 0, false},
{".*", 3, 0, false}};
GoogleStyle.IncludeStyle.IncludeIsMainRegex = "([-_](test|unittest))?$";
- GoogleStyle.IncludeStyle.IncludeBlocks = tooling::IncludeStyle::IBS_Regroup;
GoogleStyle.IndentCaseLabels = true;
GoogleStyle.KeepEmptyLinesAtTheStartOfBlocks = false;
GoogleStyle.ObjCBinPackProtocolList = FormatStyle::BPS_Never;
@@ -1693,8 +1692,8 @@ FormatStyle getGoogleStyle(FormatStyle::LanguageKind Language) {
GoogleStyle.SpacesBeforeTrailingComments = 2;
GoogleStyle.Standard = FormatStyle::LS_Auto;
- GoogleStyle.PenaltyReturnTypeOnItsOwnLine = 200;
GoogleStyle.PenaltyBreakBeforeFirstCallParameter = 1;
+ GoogleStyle.PenaltyReturnTypeOnItsOwnLine = 200;
if (Language == FormatStyle::LK_Java) {
GoogleStyle.AlignAfterOpenBracket = FormatStyle::BAS_DontAlign;
@@ -1722,22 +1721,22 @@ FormatStyle getGoogleStyle(FormatStyle::LanguageKind Language) {
// TODO: enable once decided, in particular re disabling bin packing.
// https://google.github.io/styleguide/jsguide.html#features-arrays-trailing-comma
// GoogleStyle.InsertTrailingCommas = FormatStyle::TCS_Wrapped;
+ GoogleStyle.JavaScriptQuotes = FormatStyle::JSQS_Single;
+ GoogleStyle.JavaScriptWrapImports = false;
GoogleStyle.MaxEmptyLinesToKeep = 3;
GoogleStyle.NamespaceIndentation = FormatStyle::NI_All;
GoogleStyle.SpacesInContainerLiterals = false;
- GoogleStyle.JavaScriptQuotes = FormatStyle::JSQS_Single;
- GoogleStyle.JavaScriptWrapImports = false;
} else if (Language == FormatStyle::LK_Proto) {
GoogleStyle.AllowShortFunctionsOnASingleLine = FormatStyle::SFS_Empty;
GoogleStyle.AlwaysBreakBeforeMultilineStrings = false;
- GoogleStyle.SpacesInContainerLiterals = false;
- GoogleStyle.Cpp11BracedListStyle = false;
// This affects protocol buffer options specifications and text protos.
// Text protos are currently mostly formatted inside C++ raw string literals
// and often the current breaking behavior of string literals is not
// beneficial there. Investigate turning this on once proper string reflow
// has been implemented.
GoogleStyle.BreakStringLiterals = false;
+ GoogleStyle.Cpp11BracedListStyle = false;
+ GoogleStyle.SpacesInContainerLiterals = false;
} else if (Language == FormatStyle::LK_ObjC) {
GoogleStyle.AlwaysBreakBeforeMultilineStrings = false;
GoogleStyle.ColumnLimit = 100;
@@ -1821,15 +1820,15 @@ FormatStyle getMozillaStyle() {
FormatStyle MozillaStyle = getLLVMStyle();
MozillaStyle.AllowAllParametersOfDeclarationOnNextLine = false;
MozillaStyle.AllowShortFunctionsOnASingleLine = FormatStyle::SFS_Inline;
- MozillaStyle.AlwaysBreakAfterReturnType = FormatStyle::RTBS_TopLevel;
MozillaStyle.AlwaysBreakAfterDefinitionReturnType =
FormatStyle::DRTBS_TopLevel;
- MozillaStyle.BreakTemplateDeclarations = FormatStyle::BTDS_Yes;
- MozillaStyle.BinPackParameters = false;
MozillaStyle.BinPackArguments = false;
+ MozillaStyle.BinPackParameters = false;
+ MozillaStyle.BreakAfterReturnType = FormatStyle::RTBS_TopLevel;
MozillaStyle.BreakBeforeBraces = FormatStyle::BS_Mozilla;
MozillaStyle.BreakConstructorInitializers = FormatStyle::BCIS_BeforeComma;
MozillaStyle.BreakInheritanceList = FormatStyle::BILS_BeforeComma;
+ MozillaStyle.BreakTemplateDeclarations = FormatStyle::BTDS_Yes;
MozillaStyle.ConstructorInitializerIndentWidth = 2;
MozillaStyle.ContinuationIndentWidth = 2;
MozillaStyle.Cpp11BracedListStyle = false;
@@ -1854,8 +1853,8 @@ FormatStyle getWebKitStyle() {
Style.BreakBeforeBinaryOperators = FormatStyle::BOS_All;
Style.BreakBeforeBraces = FormatStyle::BS_WebKit;
Style.BreakConstructorInitializers = FormatStyle::BCIS_BeforeComma;
- Style.Cpp11BracedListStyle = false;
Style.ColumnLimit = 0;
+ Style.Cpp11BracedListStyle = false;
Style.FixNamespaceComments = false;
Style.IndentWidth = 4;
Style.NamespaceIndentation = FormatStyle::NI_Inner;
@@ -1870,12 +1869,12 @@ FormatStyle getWebKitStyle() {
FormatStyle getGNUStyle() {
FormatStyle Style = getLLVMStyle();
Style.AlwaysBreakAfterDefinitionReturnType = FormatStyle::DRTBS_All;
- Style.AlwaysBreakAfterReturnType = FormatStyle::RTBS_AllDefinitions;
+ Style.BreakAfterReturnType = FormatStyle::RTBS_AllDefinitions;
Style.BreakBeforeBinaryOperators = FormatStyle::BOS_All;
Style.BreakBeforeBraces = FormatStyle::BS_GNU;
Style.BreakBeforeTernaryOperators = true;
- Style.Cpp11BracedListStyle = false;
Style.ColumnLimit = 79;
+ Style.Cpp11BracedListStyle = false;
Style.FixNamespaceComments = false;
Style.SpaceBeforeParens = FormatStyle::SBPO_Always;
Style.Standard = FormatStyle::LS_Cpp03;
@@ -1907,7 +1906,7 @@ FormatStyle getMicrosoftStyle(FormatStyle::LanguageKind Language) {
Style.AllowShortIfStatementsOnASingleLine = FormatStyle::SIS_Never;
Style.AllowShortLoopsOnASingleLine = false;
Style.AlwaysBreakAfterDefinitionReturnType = FormatStyle::DRTBS_None;
- Style.AlwaysBreakAfterReturnType = FormatStyle::RTBS_None;
+ Style.BreakAfterReturnType = FormatStyle::RTBS_None;
return Style;
}
diff --git a/clang/lib/Format/FormatTokenLexer.cpp b/clang/lib/Format/FormatTokenLexer.cpp
index 492e7e9..036f7e6 100644
--- a/clang/lib/Format/FormatTokenLexer.cpp
+++ b/clang/lib/Format/FormatTokenLexer.cpp
@@ -13,7 +13,11 @@
//===----------------------------------------------------------------------===//
#include "FormatTokenLexer.h"
-#include "TokenAnalyzer.h"
+#include "FormatToken.h"
+#include "clang/Basic/SourceLocation.h"
+#include "clang/Basic/SourceManager.h"
+#include "clang/Format/Format.h"
+#include "llvm/Support/Regex.h"
namespace clang {
namespace format {
@@ -24,12 +28,12 @@ FormatTokenLexer::FormatTokenLexer(
llvm::SpecificBumpPtrAllocator<FormatToken> &Allocator,
IdentifierTable &IdentTable)
: FormatTok(nullptr), IsFirstToken(true), StateStack({LexerState::NORMAL}),
- Column(Column), TrailingWhitespace(0), SourceMgr(SourceMgr), ID(ID),
+ Column(Column), TrailingWhitespace(0),
+ LangOpts(getFormattingLangOpts(Style)), SourceMgr(SourceMgr), ID(ID),
Style(Style), IdentTable(IdentTable), Keywords(IdentTable),
Encoding(Encoding), Allocator(Allocator), FirstInLineIndex(0),
FormattingDisabled(false), MacroBlockBeginRegex(Style.MacroBlockBegin),
MacroBlockEndRegex(Style.MacroBlockEnd) {
- assert(LangOpts.CPlusPlus);
Lex.reset(new Lexer(ID, SourceMgr.getBufferOrFake(ID), SourceMgr, LangOpts));
Lex->SetKeepWhitespaceMode(true);
@@ -1438,7 +1442,7 @@ void FormatTokenLexer::readRawToken(FormatToken &Tok) {
void FormatTokenLexer::resetLexer(unsigned Offset) {
StringRef Buffer = SourceMgr.getBufferData(ID);
- assert(LangOpts.CPlusPlus);
+ LangOpts = getFormattingLangOpts(Style);
Lex.reset(new Lexer(SourceMgr.getLocForStartOfFile(ID), LangOpts,
Buffer.begin(), Buffer.begin() + Offset, Buffer.end()));
Lex->SetKeepWhitespaceMode(true);
diff --git a/clang/lib/Format/FormatTokenLexer.h b/clang/lib/Format/FormatTokenLexer.h
index ca91c5b..65dd733 100644
--- a/clang/lib/Format/FormatTokenLexer.h
+++ b/clang/lib/Format/FormatTokenLexer.h
@@ -17,9 +17,14 @@
#include "Encoding.h"
#include "FormatToken.h"
+#include "clang/Basic/LangOptions.h"
+#include "clang/Basic/SourceLocation.h"
+#include "clang/Basic/SourceManager.h"
+#include "clang/Format/Format.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/StringSet.h"
+#include "llvm/Support/Regex.h"
#include <stack>
@@ -115,6 +120,7 @@ private:
unsigned Column;
unsigned TrailingWhitespace;
std::unique_ptr<Lexer> Lex;
+ LangOptions LangOpts;
const SourceManager &SourceMgr;
FileID ID;
const FormatStyle &Style;
diff --git a/clang/lib/Format/IntegerLiteralSeparatorFixer.cpp b/clang/lib/Format/IntegerLiteralSeparatorFixer.cpp
index 3c2cedd..87823ae 100644
--- a/clang/lib/Format/IntegerLiteralSeparatorFixer.cpp
+++ b/clang/lib/Format/IntegerLiteralSeparatorFixer.cpp
@@ -79,7 +79,7 @@ IntegerLiteralSeparatorFixer::process(const Environment &Env,
AffectedRangeManager AffectedRangeMgr(SourceMgr, Env.getCharRanges());
const auto ID = Env.getFileID();
- assert(LangOpts.CPlusPlus);
+ const auto LangOpts = getFormattingLangOpts(Style);
Lexer Lex(ID, SourceMgr.getBufferOrFake(ID), SourceMgr, LangOpts);
Lex.SetCommentRetentionState(true);
diff --git a/clang/lib/Format/TokenAnalyzer.cpp b/clang/lib/Format/TokenAnalyzer.cpp
index f9d1fdb..bd648c4 100644
--- a/clang/lib/Format/TokenAnalyzer.cpp
+++ b/clang/lib/Format/TokenAnalyzer.cpp
@@ -35,8 +35,6 @@
namespace clang {
namespace format {
-LangOptions LangOpts;
-
// FIXME: Instead of printing the diagnostic we should store it and have a
// better way to return errors through the format APIs.
class FatalDiagnosticConsumer : public DiagnosticConsumer {
@@ -101,11 +99,9 @@ TokenAnalyzer::TokenAnalyzer(const Environment &Env, const FormatStyle &Style)
std::pair<tooling::Replacements, unsigned>
TokenAnalyzer::process(bool SkipAnnotation) {
- LangOpts = getFormattingLangOpts(Style);
-
tooling::Replacements Result;
llvm::SpecificBumpPtrAllocator<FormatToken> Allocator;
- IdentifierTable IdentTable(LangOpts);
+ IdentifierTable IdentTable(getFormattingLangOpts(Style));
FormatTokenLexer Lex(Env.getSourceManager(), Env.getFileID(),
Env.getFirstStartColumn(), Style, Encoding, Allocator,
IdentTable);
diff --git a/clang/lib/Format/TokenAnalyzer.h b/clang/lib/Format/TokenAnalyzer.h
index 18c1431..4086dab 100644
--- a/clang/lib/Format/TokenAnalyzer.h
+++ b/clang/lib/Format/TokenAnalyzer.h
@@ -34,8 +34,6 @@
namespace clang {
namespace format {
-extern LangOptions LangOpts;
-
class Environment {
public:
// This sets up an virtual file system with file \p FileName containing the
diff --git a/clang/lib/Format/TokenAnnotator.cpp b/clang/lib/Format/TokenAnnotator.cpp
index b9a000f..ac876bf 100644
--- a/clang/lib/Format/TokenAnnotator.cpp
+++ b/clang/lib/Format/TokenAnnotator.cpp
@@ -1856,6 +1856,8 @@ private:
case tok::pp_elif:
Contexts.back().IsExpression = true;
next();
+ if (CurrentToken)
+ CurrentToken->SpacesRequiredBefore = true;
parseLine();
break;
default:
@@ -3726,14 +3728,13 @@ static bool isFunctionDeclarationName(bool IsCpp, const FormatToken &Current,
bool TokenAnnotator::mustBreakForReturnType(const AnnotatedLine &Line) const {
assert(Line.MightBeFunctionDecl);
- if ((Style.AlwaysBreakAfterReturnType == FormatStyle::RTBS_TopLevel ||
- Style.AlwaysBreakAfterReturnType ==
- FormatStyle::RTBS_TopLevelDefinitions) &&
+ if ((Style.BreakAfterReturnType == FormatStyle::RTBS_TopLevel ||
+ Style.BreakAfterReturnType == FormatStyle::RTBS_TopLevelDefinitions) &&
Line.Level > 0) {
return false;
}
- switch (Style.AlwaysBreakAfterReturnType) {
+ switch (Style.BreakAfterReturnType) {
case FormatStyle::RTBS_None:
case FormatStyle::RTBS_Automatic:
case FormatStyle::RTBS_ExceptShortType:
diff --git a/clang/lib/Format/UnwrappedLineParser.cpp b/clang/lib/Format/UnwrappedLineParser.cpp
index d84914c..8f6453a 100644
--- a/clang/lib/Format/UnwrappedLineParser.cpp
+++ b/clang/lib/Format/UnwrappedLineParser.cpp
@@ -2518,7 +2518,7 @@ bool UnwrappedLineParser::parseParens(TokenType AmpAmpTokenType) {
parseChildBlock();
break;
case tok::r_paren:
- if (!MightBeStmtExpr &&
+ if (!MightBeStmtExpr && !Line->InMacroBody &&
Style.RemoveParentheses > FormatStyle::RPS_Leave) {
const auto *Prev = LeftParen->Previous;
const auto *Next = Tokens->peekNextToken();
diff --git a/clang/lib/Frontend/CMakeLists.txt b/clang/lib/Frontend/CMakeLists.txt
index a916667..f443d88 100644
--- a/clang/lib/Frontend/CMakeLists.txt
+++ b/clang/lib/Frontend/CMakeLists.txt
@@ -7,6 +7,7 @@ set(LLVM_LINK_COMPONENTS
ProfileData
Support
TargetParser
+ TextAPI
)
add_clang_library(clangFrontend
@@ -27,6 +28,7 @@ add_clang_library(clangFrontend
HeaderIncludeGen.cpp
InitPreprocessor.cpp
LayoutOverrideSource.cpp
+ InstallAPIConsumer.cpp
LogDiagnosticPrinter.cpp
ModuleDependencyCollector.cpp
MultiplexConsumer.cpp
@@ -53,6 +55,7 @@ add_clang_library(clangFrontend
clangBasic
clangDriver
clangEdit
+ clangInstallAPI
clangLex
clangParse
clangSema
diff --git a/clang/lib/Frontend/CompilerInvocation.cpp b/clang/lib/Frontend/CompilerInvocation.cpp
index 8d7b75b..bcb3124 100644
--- a/clang/lib/Frontend/CompilerInvocation.cpp
+++ b/clang/lib/Frontend/CompilerInvocation.cpp
@@ -149,7 +149,8 @@ CompilerInvocationBase::CompilerInvocationBase()
FSOpts(std::make_shared<FileSystemOptions>()),
FrontendOpts(std::make_shared<FrontendOptions>()),
DependencyOutputOpts(std::make_shared<DependencyOutputOptions>()),
- PreprocessorOutputOpts(std::make_shared<PreprocessorOutputOptions>()) {}
+ PreprocessorOutputOpts(std::make_shared<PreprocessorOutputOptions>()),
+ InstallAPIOpts(std::make_shared<InstallAPIOptions>()) {}
CompilerInvocationBase &
CompilerInvocationBase::deep_copy_assign(const CompilerInvocationBase &X) {
@@ -167,6 +168,7 @@ CompilerInvocationBase::deep_copy_assign(const CompilerInvocationBase &X) {
FrontendOpts = make_shared_copy(X.getFrontendOpts());
DependencyOutputOpts = make_shared_copy(X.getDependencyOutputOpts());
PreprocessorOutputOpts = make_shared_copy(X.getPreprocessorOutputOpts());
+ InstallAPIOpts = make_shared_copy(X.getInstallAPIOpts());
}
return *this;
}
@@ -187,6 +189,7 @@ CompilerInvocationBase::shallow_copy_assign(const CompilerInvocationBase &X) {
FrontendOpts = X.FrontendOpts;
DependencyOutputOpts = X.DependencyOutputOpts;
PreprocessorOutputOpts = X.PreprocessorOutputOpts;
+ InstallAPIOpts = X.InstallAPIOpts;
}
return *this;
}
@@ -2158,6 +2161,34 @@ bool CompilerInvocation::ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args,
return Diags.getNumErrors() == NumErrorsBefore;
}
+static bool ParseInstallAPIArgs(InstallAPIOptions &Opts, ArgList &Args,
+ DiagnosticsEngine &Diags,
+ frontend::ActionKind Action) {
+ unsigned NumErrorsBefore = Diags.getNumErrors();
+
+ InstallAPIOptions &InstallAPIOpts = Opts;
+#define INSTALLAPI_OPTION_WITH_MARSHALLING(...) \
+ PARSE_OPTION_WITH_MARSHALLING(Args, Diags, __VA_ARGS__)
+#include "clang/Driver/Options.inc"
+#undef INSTALLAPI_OPTION_WITH_MARSHALLING
+ if (Arg *A = Args.getLastArg(options::OPT_current__version))
+ Opts.CurrentVersion.parse64(A->getValue());
+
+ return Diags.getNumErrors() == NumErrorsBefore;
+}
+
+static void GenerateInstallAPIArgs(const InstallAPIOptions &Opts,
+ ArgumentConsumer Consumer) {
+ const InstallAPIOptions &InstallAPIOpts = Opts;
+#define INSTALLAPI_OPTION_WITH_MARSHALLING(...) \
+ GENERATE_OPTION_WITH_MARSHALLING(Consumer, __VA_ARGS__)
+#include "clang/Driver/Options.inc"
+#undef INSTALLAPI_OPTION_WITH_MARSHALLING
+ if (!Opts.CurrentVersion.empty())
+ GenerateArg(Consumer, OPT_current__version,
+ std::string(Opts.CurrentVersion));
+}
+
static void GenerateDependencyOutputArgs(const DependencyOutputOptions &Opts,
ArgumentConsumer Consumer) {
const DependencyOutputOptions &DependencyOutputOpts = Opts;
@@ -2557,6 +2588,7 @@ static const auto &getFrontendActionTable() {
{frontend::GeneratePCH, OPT_emit_pch},
{frontend::GenerateInterfaceStubs, OPT_emit_interface_stubs},
{frontend::InitOnly, OPT_init_only},
+ {frontend::InstallAPI, OPT_installapi},
{frontend::ParseSyntaxOnly, OPT_fsyntax_only},
{frontend::ModuleFileInfo, OPT_module_file_info},
{frontend::VerifyPCH, OPT_verify_pch},
@@ -4280,6 +4312,7 @@ static bool isStrictlyPreprocessorAction(frontend::ActionKind Action) {
case frontend::GenerateHeaderUnit:
case frontend::GeneratePCH:
case frontend::GenerateInterfaceStubs:
+ case frontend::InstallAPI:
case frontend::ParseSyntaxOnly:
case frontend::ModuleFileInfo:
case frontend::VerifyPCH:
@@ -4654,6 +4687,11 @@ bool CompilerInvocation::CreateFromArgsImpl(
Res.getDependencyOutputOpts().Targets.empty())
Diags.Report(diag::err_fe_dependency_file_requires_MT);
+ if (Args.hasArg(OPT_installapi)) {
+ ParseInstallAPIArgs(Res.getInstallAPIOpts(), Args, Diags,
+ Res.getFrontendOpts().ProgramAction);
+ }
+
// If sanitizer is enabled, disable OPT_ffine_grained_bitfield_accesses.
if (Res.getCodeGenOpts().FineGrainedBitfieldAccesses &&
!Res.getLangOpts().Sanitize.empty()) {
@@ -4844,6 +4882,7 @@ void CompilerInvocationBase::generateCC1CommandLine(
GeneratePreprocessorOutputArgs(getPreprocessorOutputOpts(), Consumer,
getFrontendOpts().ProgramAction);
GenerateDependencyOutputArgs(getDependencyOutputOpts(), Consumer);
+ GenerateInstallAPIArgs(getInstallAPIOpts(), Consumer);
}
std::vector<std::string> CompilerInvocationBase::getCC1CommandLine() const {
diff --git a/clang/lib/Frontend/InitPreprocessor.cpp b/clang/lib/Frontend/InitPreprocessor.cpp
index 877e205..1b250cd 100644
--- a/clang/lib/Frontend/InitPreprocessor.cpp
+++ b/clang/lib/Frontend/InitPreprocessor.cpp
@@ -768,6 +768,60 @@ void InitializeOpenCLFeatureTestMacros(const TargetInfo &TI,
Builder.defineMacro("__opencl_c_int64");
}
+llvm::SmallString<32> ConstructFixedPointLiteral(llvm::APFixedPoint Val,
+ llvm::StringRef Suffix) {
+ if (Val.isSigned() && Val == llvm::APFixedPoint::getMin(Val.getSemantics())) {
+ // When representing the min value of a signed fixed point type in source
+ // code, we cannot simply write `-<lowest value>`. For example, the min
+ // value of a `short _Fract` cannot be written as `-1.0hr`. This is because
+ // the parser will read this (and really any negative numerical literal) as
+ // a UnaryOperator that owns a FixedPointLiteral with a positive value
+ // rather than just a FixedPointLiteral with a negative value. Compiling
+ // `-1.0hr` results in an overflow to the maximal value of that fixed point
+ // type. The correct way to represent a signed min value is to instead split
+ // it into two halves, like `(-0.5hr-0.5hr)` which is what the standard
+ // defines SFRACT_MIN as.
+ llvm::SmallString<32> Literal;
+ Literal.push_back('(');
+ llvm::SmallString<32> HalfStr =
+ ConstructFixedPointLiteral(Val.shr(1), Suffix);
+ Literal += HalfStr;
+ Literal += HalfStr;
+ Literal.push_back(')');
+ return Literal;
+ }
+
+ llvm::SmallString<32> Str(Val.toString());
+ Str += Suffix;
+ return Str;
+}
+
+void DefineFixedPointMacros(const TargetInfo &TI, MacroBuilder &Builder,
+ llvm::StringRef TypeName, llvm::StringRef Suffix,
+ unsigned Width, unsigned Scale, bool Signed) {
+ // Saturation doesn't affect the size or scale of a fixed point type, so we
+ // don't need it here.
+ llvm::FixedPointSemantics FXSema(
+ Width, Scale, Signed, /*IsSaturated=*/false,
+ !Signed && TI.doUnsignedFixedPointTypesHavePadding());
+ llvm::SmallString<32> MacroPrefix("__");
+ MacroPrefix += TypeName;
+ Builder.defineMacro(MacroPrefix + "_EPSILON__",
+ ConstructFixedPointLiteral(
+ llvm::APFixedPoint::getEpsilon(FXSema), Suffix));
+ Builder.defineMacro(MacroPrefix + "_FBIT__", Twine(Scale));
+ Builder.defineMacro(
+ MacroPrefix + "_MAX__",
+ ConstructFixedPointLiteral(llvm::APFixedPoint::getMax(FXSema), Suffix));
+
+ // ISO/IEC TR 18037:2008 doesn't specify MIN macros for unsigned types since
+ // they're all just zero.
+ if (Signed)
+ Builder.defineMacro(
+ MacroPrefix + "_MIN__",
+ ConstructFixedPointLiteral(llvm::APFixedPoint::getMin(FXSema), Suffix));
+}
+
static void InitializePredefinedMacros(const TargetInfo &TI,
const LangOptions &LangOpts,
const FrontendOptions &FEOpts,
@@ -1097,6 +1151,47 @@ static void InitializePredefinedMacros(const TargetInfo &TI,
TI.getTypeWidth(TI.getIntMaxType()) &&
"uintmax_t and intmax_t have different widths?");
+ if (LangOpts.FixedPoint) {
+ // Each unsigned type has the same width as their signed type.
+ DefineFixedPointMacros(TI, Builder, "SFRACT", "HR", TI.getShortFractWidth(),
+ TI.getShortFractScale(), /*Signed=*/true);
+ DefineFixedPointMacros(TI, Builder, "USFRACT", "UHR",
+ TI.getShortFractWidth(),
+ TI.getUnsignedShortFractScale(), /*Signed=*/false);
+ DefineFixedPointMacros(TI, Builder, "FRACT", "R", TI.getFractWidth(),
+ TI.getFractScale(), /*Signed=*/true);
+ DefineFixedPointMacros(TI, Builder, "UFRACT", "UR", TI.getFractWidth(),
+ TI.getUnsignedFractScale(), /*Signed=*/false);
+ DefineFixedPointMacros(TI, Builder, "LFRACT", "LR", TI.getLongFractWidth(),
+ TI.getLongFractScale(), /*Signed=*/true);
+ DefineFixedPointMacros(TI, Builder, "ULFRACT", "ULR",
+ TI.getLongFractWidth(),
+ TI.getUnsignedLongFractScale(), /*Signed=*/false);
+ DefineFixedPointMacros(TI, Builder, "SACCUM", "HK", TI.getShortAccumWidth(),
+ TI.getShortAccumScale(), /*Signed=*/true);
+ DefineFixedPointMacros(TI, Builder, "USACCUM", "UHK",
+ TI.getShortAccumWidth(),
+ TI.getUnsignedShortAccumScale(), /*Signed=*/false);
+ DefineFixedPointMacros(TI, Builder, "ACCUM", "K", TI.getAccumWidth(),
+ TI.getAccumScale(), /*Signed=*/true);
+ DefineFixedPointMacros(TI, Builder, "UACCUM", "UK", TI.getAccumWidth(),
+ TI.getUnsignedAccumScale(), /*Signed=*/false);
+ DefineFixedPointMacros(TI, Builder, "LACCUM", "LK", TI.getLongAccumWidth(),
+ TI.getLongAccumScale(), /*Signed=*/true);
+ DefineFixedPointMacros(TI, Builder, "ULACCUM", "ULK",
+ TI.getLongAccumWidth(),
+ TI.getUnsignedLongAccumScale(), /*Signed=*/false);
+
+ Builder.defineMacro("__SACCUM_IBIT__", Twine(TI.getShortAccumIBits()));
+ Builder.defineMacro("__USACCUM_IBIT__",
+ Twine(TI.getUnsignedShortAccumIBits()));
+ Builder.defineMacro("__ACCUM_IBIT__", Twine(TI.getAccumIBits()));
+ Builder.defineMacro("__UACCUM_IBIT__", Twine(TI.getUnsignedAccumIBits()));
+ Builder.defineMacro("__LACCUM_IBIT__", Twine(TI.getLongAccumIBits()));
+ Builder.defineMacro("__ULACCUM_IBIT__",
+ Twine(TI.getUnsignedLongAccumIBits()));
+ }
+
if (TI.hasFloat16Type())
DefineFloatMacros(Builder, "FLT16", &TI.getHalfFormat(), "F16");
DefineFloatMacros(Builder, "FLT", &TI.getFloatFormat(), "F");
diff --git a/clang/lib/Frontend/InstallAPIConsumer.cpp b/clang/lib/Frontend/InstallAPIConsumer.cpp
new file mode 100644
index 0000000..c0f22c1
--- /dev/null
+++ b/clang/lib/Frontend/InstallAPIConsumer.cpp
@@ -0,0 +1,43 @@
+//===--- InstallAPIConsumer.cpp -------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Frontend/CompilerInstance.h"
+#include "clang/Frontend/FrontendActions.h"
+#include "clang/InstallAPI/Context.h"
+
+using namespace clang;
+using namespace clang::installapi;
+
+std::unique_ptr<ASTConsumer>
+InstallAPIAction::CreateASTConsumer(CompilerInstance &CI, StringRef InFile) {
+ const InstallAPIOptions &Opts = CI.getInstallAPIOpts();
+ InstallAPIContext Ctx;
+ Ctx.BA.InstallName = Opts.InstallName;
+ Ctx.BA.AppExtensionSafe = CI.getLangOpts().AppExt;
+ Ctx.BA.CurrentVersion = Opts.CurrentVersion;
+ // InstallAPI requires two level namespacing.
+ Ctx.BA.TwoLevelNamespace = true;
+ Ctx.TargetTriple = CI.getTarget().getTriple();
+
+ Ctx.Diags = &CI.getDiagnostics();
+ Ctx.OutputLoc = CI.getFrontendOpts().OutputFile;
+ Ctx.OS = CreateOutputFile(CI, InFile);
+ if (!Ctx.OS)
+ return nullptr;
+ return std::make_unique<InstallAPIConsumer>(std::move(Ctx));
+}
+
+std::unique_ptr<llvm::raw_pwrite_stream>
+InstallAPIAction::CreateOutputFile(CompilerInstance &CI, StringRef InFile) {
+ std::unique_ptr<raw_pwrite_stream> OS =
+ CI.createDefaultOutputFile(/*Binary=*/false, InFile, /*Extension=*/"tbd",
+ /*RemoveFileOnSignal=*/false);
+ if (!OS)
+ return nullptr;
+ return OS;
+}
diff --git a/clang/lib/FrontendTool/ExecuteCompilerInvocation.cpp b/clang/lib/FrontendTool/ExecuteCompilerInvocation.cpp
index 925879a..a47c474 100644
--- a/clang/lib/FrontendTool/ExecuteCompilerInvocation.cpp
+++ b/clang/lib/FrontendTool/ExecuteCompilerInvocation.cpp
@@ -71,6 +71,8 @@ CreateFrontendBaseAction(CompilerInstance &CI) {
case GenerateInterfaceStubs:
return std::make_unique<GenerateInterfaceStubsAction>();
case InitOnly: return std::make_unique<InitOnlyAction>();
+ case InstallAPI:
+ return std::make_unique<InstallAPIAction>();
case ParseSyntaxOnly: return std::make_unique<SyntaxOnlyAction>();
case ModuleFileInfo: return std::make_unique<DumpModuleInfoAction>();
case VerifyPCH: return std::make_unique<VerifyPCHAction>();
diff --git a/clang/lib/Headers/hlsl/hlsl_basic_types.h b/clang/lib/Headers/hlsl/hlsl_basic_types.h
index 9ea605c..e96fa90 100644
--- a/clang/lib/Headers/hlsl/hlsl_basic_types.h
+++ b/clang/lib/Headers/hlsl/hlsl_basic_types.h
@@ -12,6 +12,13 @@
namespace hlsl {
// built-in scalar data types:
+/// \typedef template<typename Ty, int Size> using vector = Ty
+/// __attribute__((ext_vector_type(Size)))
+///
+/// \tparam Ty The base type of the vector may be any builtin integral or
+/// floating point type.
+/// \tparam Size The size of the vector may be any value between 1 and 4.
+
#ifdef __HLSL_ENABLE_16_BIT
// 16-bit integer.
typedef unsigned short uint16_t;
diff --git a/clang/lib/Headers/hlsl/hlsl_intrinsics.h b/clang/lib/Headers/hlsl/hlsl_intrinsics.h
index da153d8..a8b36d2 100644
--- a/clang/lib/Headers/hlsl/hlsl_intrinsics.h
+++ b/clang/lib/Headers/hlsl/hlsl_intrinsics.h
@@ -24,22 +24,35 @@ namespace hlsl {
//===----------------------------------------------------------------------===//
// abs builtins
//===----------------------------------------------------------------------===//
+
+/// \fn T abs(T Val)
+/// \brief Returns the absolute value of the input value, \a Val.
+/// \param Val The input value.
+
#ifdef __HLSL_ENABLE_16_BIT
+_HLSL_AVAILABILITY(shadermodel, 6.2)
_HLSL_BUILTIN_ALIAS(__builtin_elementwise_abs)
int16_t abs(int16_t);
+_HLSL_AVAILABILITY(shadermodel, 6.2)
_HLSL_BUILTIN_ALIAS(__builtin_elementwise_abs)
int16_t2 abs(int16_t2);
+_HLSL_AVAILABILITY(shadermodel, 6.2)
_HLSL_BUILTIN_ALIAS(__builtin_elementwise_abs)
int16_t3 abs(int16_t3);
+_HLSL_AVAILABILITY(shadermodel, 6.2)
_HLSL_BUILTIN_ALIAS(__builtin_elementwise_abs)
int16_t4 abs(int16_t4);
-_HLSL_BUILTIN_ALIAS(__builtin_elementwise_abs)
+_HLSL_AVAILABILITY(shadermodel, 6.2)
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_abs)
half abs(half);
+_HLSL_AVAILABILITY(shadermodel, 6.2)
_HLSL_BUILTIN_ALIAS(__builtin_elementwise_abs)
half2 abs(half2);
+_HLSL_AVAILABILITY(shadermodel, 6.2)
_HLSL_BUILTIN_ALIAS(__builtin_elementwise_abs)
half3 abs(half3);
+_HLSL_AVAILABILITY(shadermodel, 6.2)
_HLSL_BUILTIN_ALIAS(__builtin_elementwise_abs)
half4 abs(half4);
#endif
@@ -83,13 +96,23 @@ double4 abs(double4);
//===----------------------------------------------------------------------===//
// ceil builtins
//===----------------------------------------------------------------------===//
+
+/// \fn T ceil(T Val)
+/// \brief Returns the smallest integer value that is greater than or equal to
+/// the input value, \a Val.
+/// \param Val The input value.
+
#ifdef __HLSL_ENABLE_16_BIT
+_HLSL_AVAILABILITY(shadermodel, 6.2)
_HLSL_BUILTIN_ALIAS(__builtin_elementwise_ceil)
half ceil(half);
+_HLSL_AVAILABILITY(shadermodel, 6.2)
_HLSL_BUILTIN_ALIAS(__builtin_elementwise_ceil)
half2 ceil(half2);
+_HLSL_AVAILABILITY(shadermodel, 6.2)
_HLSL_BUILTIN_ALIAS(__builtin_elementwise_ceil)
half3 ceil(half3);
+_HLSL_AVAILABILITY(shadermodel, 6.2)
_HLSL_BUILTIN_ALIAS(__builtin_elementwise_ceil)
half4 ceil(half4);
#endif
@@ -115,13 +138,22 @@ double4 ceil(double4);
//===----------------------------------------------------------------------===//
// cos builtins
//===----------------------------------------------------------------------===//
+
+/// \fn T cos(T Val)
+/// \brief Returns the cosine of the input value, \a Val.
+/// \param Val The input value.
+
#ifdef __HLSL_ENABLE_16_BIT
+_HLSL_AVAILABILITY(shadermodel, 6.2)
_HLSL_BUILTIN_ALIAS(__builtin_elementwise_cos)
half cos(half);
+_HLSL_AVAILABILITY(shadermodel, 6.2)
_HLSL_BUILTIN_ALIAS(__builtin_elementwise_cos)
half2 cos(half2);
+_HLSL_AVAILABILITY(shadermodel, 6.2)
_HLSL_BUILTIN_ALIAS(__builtin_elementwise_cos)
half3 cos(half3);
+_HLSL_AVAILABILITY(shadermodel, 6.2)
_HLSL_BUILTIN_ALIAS(__builtin_elementwise_cos)
half4 cos(half4);
#endif
@@ -147,13 +179,23 @@ double4 cos(double4);
//===----------------------------------------------------------------------===//
// floor builtins
//===----------------------------------------------------------------------===//
+
+/// \fn T floor(T Val)
+/// \brief Returns the largest integer that is less than or equal to the input
+/// value, \a Val.
+/// \param Val The input value.
+
#ifdef __HLSL_ENABLE_16_BIT
+_HLSL_AVAILABILITY(shadermodel, 6.2)
_HLSL_BUILTIN_ALIAS(__builtin_elementwise_floor)
half floor(half);
+_HLSL_AVAILABILITY(shadermodel, 6.2)
_HLSL_BUILTIN_ALIAS(__builtin_elementwise_floor)
half2 floor(half2);
+_HLSL_AVAILABILITY(shadermodel, 6.2)
_HLSL_BUILTIN_ALIAS(__builtin_elementwise_floor)
half3 floor(half3);
+_HLSL_AVAILABILITY(shadermodel, 6.2)
_HLSL_BUILTIN_ALIAS(__builtin_elementwise_floor)
half4 floor(half4);
#endif
@@ -179,13 +221,25 @@ double4 floor(double4);
//===----------------------------------------------------------------------===//
// log builtins
//===----------------------------------------------------------------------===//
+
+/// \fn T log(T Val)
+/// \brief The base-e logarithm of the input value, \a Val parameter.
+/// \param Val The input value.
+///
+/// If \a Val is negative, this result is undefined. If \a Val is 0, this
+/// function returns negative infinity.
+
#ifdef __HLSL_ENABLE_16_BIT
+_HLSL_AVAILABILITY(shadermodel, 6.2)
_HLSL_BUILTIN_ALIAS(__builtin_elementwise_log)
half log(half);
+_HLSL_AVAILABILITY(shadermodel, 6.2)
_HLSL_BUILTIN_ALIAS(__builtin_elementwise_log)
half2 log(half2);
+_HLSL_AVAILABILITY(shadermodel, 6.2)
_HLSL_BUILTIN_ALIAS(__builtin_elementwise_log)
half3 log(half3);
+_HLSL_AVAILABILITY(shadermodel, 6.2)
_HLSL_BUILTIN_ALIAS(__builtin_elementwise_log)
half4 log(half4);
#endif
@@ -211,13 +265,25 @@ double4 log(double4);
//===----------------------------------------------------------------------===//
// log10 builtins
//===----------------------------------------------------------------------===//
+
+/// \fn T log10(T Val)
+/// \brief The base-10 logarithm of the input value, \a Val parameter.
+/// \param Val The input value.
+///
+/// If \a Val is negative, this result is undefined. If \a Val is 0, this
+/// function returns negative infinity.
+
#ifdef __HLSL_ENABLE_16_BIT
+_HLSL_AVAILABILITY(shadermodel, 6.2)
_HLSL_BUILTIN_ALIAS(__builtin_elementwise_log10)
half log10(half);
+_HLSL_AVAILABILITY(shadermodel, 6.2)
_HLSL_BUILTIN_ALIAS(__builtin_elementwise_log10)
half2 log10(half2);
+_HLSL_AVAILABILITY(shadermodel, 6.2)
_HLSL_BUILTIN_ALIAS(__builtin_elementwise_log10)
half3 log10(half3);
+_HLSL_AVAILABILITY(shadermodel, 6.2)
_HLSL_BUILTIN_ALIAS(__builtin_elementwise_log10)
half4 log10(half4);
#endif
@@ -243,13 +309,25 @@ double4 log10(double4);
//===----------------------------------------------------------------------===//
// log2 builtins
//===----------------------------------------------------------------------===//
+
+/// \fn T log2(T Val)
+/// \brief The base-2 logarithm of the input value, \a Val parameter.
+/// \param Val The input value.
+///
+/// If \a Val is negative, this result is undefined. If \a Val is 0, this
+/// function returns negative infinity.
+
#ifdef __HLSL_ENABLE_16_BIT
+_HLSL_AVAILABILITY(shadermodel, 6.2)
_HLSL_BUILTIN_ALIAS(__builtin_elementwise_log2)
half log2(half);
+_HLSL_AVAILABILITY(shadermodel, 6.2)
_HLSL_BUILTIN_ALIAS(__builtin_elementwise_log2)
half2 log2(half2);
+_HLSL_AVAILABILITY(shadermodel, 6.2)
_HLSL_BUILTIN_ALIAS(__builtin_elementwise_log2)
half3 log2(half3);
+_HLSL_AVAILABILITY(shadermodel, 6.2)
_HLSL_BUILTIN_ALIAS(__builtin_elementwise_log2)
half4 log2(half4);
#endif
@@ -275,31 +353,49 @@ double4 log2(double4);
//===----------------------------------------------------------------------===//
// max builtins
//===----------------------------------------------------------------------===//
+
+/// \fn T max(T X, T Y)
+/// \brief Return the greater of \a X and \a Y.
+/// \param X The X input value.
+/// \param Y The Y input value.
+
#ifdef __HLSL_ENABLE_16_BIT
+_HLSL_AVAILABILITY(shadermodel, 6.2)
_HLSL_BUILTIN_ALIAS(__builtin_elementwise_max)
half max(half, half);
+_HLSL_AVAILABILITY(shadermodel, 6.2)
_HLSL_BUILTIN_ALIAS(__builtin_elementwise_max)
half2 max(half2, half2);
+_HLSL_AVAILABILITY(shadermodel, 6.2)
_HLSL_BUILTIN_ALIAS(__builtin_elementwise_max)
half3 max(half3, half3);
+_HLSL_AVAILABILITY(shadermodel, 6.2)
_HLSL_BUILTIN_ALIAS(__builtin_elementwise_max)
half4 max(half4, half4);
+_HLSL_AVAILABILITY(shadermodel, 6.2)
_HLSL_BUILTIN_ALIAS(__builtin_elementwise_max)
int16_t max(int16_t, int16_t);
+_HLSL_AVAILABILITY(shadermodel, 6.2)
_HLSL_BUILTIN_ALIAS(__builtin_elementwise_max)
int16_t2 max(int16_t2, int16_t2);
+_HLSL_AVAILABILITY(shadermodel, 6.2)
_HLSL_BUILTIN_ALIAS(__builtin_elementwise_max)
int16_t3 max(int16_t3, int16_t3);
+_HLSL_AVAILABILITY(shadermodel, 6.2)
_HLSL_BUILTIN_ALIAS(__builtin_elementwise_max)
int16_t4 max(int16_t4, int16_t4);
+_HLSL_AVAILABILITY(shadermodel, 6.2)
_HLSL_BUILTIN_ALIAS(__builtin_elementwise_max)
uint16_t max(uint16_t, uint16_t);
+_HLSL_AVAILABILITY(shadermodel, 6.2)
_HLSL_BUILTIN_ALIAS(__builtin_elementwise_max)
uint16_t2 max(uint16_t2, uint16_t2);
+_HLSL_AVAILABILITY(shadermodel, 6.2)
_HLSL_BUILTIN_ALIAS(__builtin_elementwise_max)
uint16_t3 max(uint16_t3, uint16_t3);
+_HLSL_AVAILABILITY(shadermodel, 6.2)
_HLSL_BUILTIN_ALIAS(__builtin_elementwise_max)
uint16_t4 max(uint16_t4, uint16_t4);
#endif
@@ -361,31 +457,49 @@ double4 max(double4, double4);
//===----------------------------------------------------------------------===//
// min builtins
//===----------------------------------------------------------------------===//
+
+/// \fn T min(T X, T Y)
+/// \brief Return the lesser of \a X and \a Y.
+/// \param X The X input value.
+/// \param Y The Y input value.
+
#ifdef __HLSL_ENABLE_16_BIT
+_HLSL_AVAILABILITY(shadermodel, 6.2)
_HLSL_BUILTIN_ALIAS(__builtin_elementwise_min)
half min(half, half);
+_HLSL_AVAILABILITY(shadermodel, 6.2)
_HLSL_BUILTIN_ALIAS(__builtin_elementwise_min)
half2 min(half2, half2);
+_HLSL_AVAILABILITY(shadermodel, 6.2)
_HLSL_BUILTIN_ALIAS(__builtin_elementwise_min)
half3 min(half3, half3);
+_HLSL_AVAILABILITY(shadermodel, 6.2)
_HLSL_BUILTIN_ALIAS(__builtin_elementwise_min)
half4 min(half4, half4);
+_HLSL_AVAILABILITY(shadermodel, 6.2)
_HLSL_BUILTIN_ALIAS(__builtin_elementwise_min)
int16_t min(int16_t, int16_t);
+_HLSL_AVAILABILITY(shadermodel, 6.2)
_HLSL_BUILTIN_ALIAS(__builtin_elementwise_min)
int16_t2 min(int16_t2, int16_t2);
+_HLSL_AVAILABILITY(shadermodel, 6.2)
_HLSL_BUILTIN_ALIAS(__builtin_elementwise_min)
int16_t3 min(int16_t3, int16_t3);
+_HLSL_AVAILABILITY(shadermodel, 6.2)
_HLSL_BUILTIN_ALIAS(__builtin_elementwise_min)
int16_t4 min(int16_t4, int16_t4);
+_HLSL_AVAILABILITY(shadermodel, 6.2)
_HLSL_BUILTIN_ALIAS(__builtin_elementwise_min)
uint16_t min(uint16_t, uint16_t);
+_HLSL_AVAILABILITY(shadermodel, 6.2)
_HLSL_BUILTIN_ALIAS(__builtin_elementwise_min)
uint16_t2 min(uint16_t2, uint16_t2);
+_HLSL_AVAILABILITY(shadermodel, 6.2)
_HLSL_BUILTIN_ALIAS(__builtin_elementwise_min)
uint16_t3 min(uint16_t3, uint16_t3);
+_HLSL_AVAILABILITY(shadermodel, 6.2)
_HLSL_BUILTIN_ALIAS(__builtin_elementwise_min)
uint16_t4 min(uint16_t4, uint16_t4);
#endif
@@ -447,13 +561,23 @@ double4 min(double4, double4);
//===----------------------------------------------------------------------===//
// pow builtins
//===----------------------------------------------------------------------===//
+
+/// \fn T pow(T Val, T Pow)
+/// \brief Return the value \a Val, raised to the power \a Pow.
+/// \param Val The input value.
+/// \param Pow The specified power.
+
#ifdef __HLSL_ENABLE_16_BIT
+_HLSL_AVAILABILITY(shadermodel, 6.2)
_HLSL_BUILTIN_ALIAS(__builtin_elementwise_pow)
half pow(half, half);
+_HLSL_AVAILABILITY(shadermodel, 6.2)
_HLSL_BUILTIN_ALIAS(__builtin_elementwise_pow)
half2 pow(half2, half2);
+_HLSL_AVAILABILITY(shadermodel, 6.2)
_HLSL_BUILTIN_ALIAS(__builtin_elementwise_pow)
half3 pow(half3, half3);
+_HLSL_AVAILABILITY(shadermodel, 6.2)
_HLSL_BUILTIN_ALIAS(__builtin_elementwise_pow)
half4 pow(half4, half4);
#endif
@@ -479,22 +603,35 @@ double4 pow(double4, double4);
//===----------------------------------------------------------------------===//
// reversebits builtins
//===----------------------------------------------------------------------===//
+
+/// \fn T reversebits(T Val)
+/// \brief Return the value \a Val with the bit order reversed.
+/// \param Val The input value.
+
#ifdef __HLSL_ENABLE_16_BIT
+_HLSL_AVAILABILITY(shadermodel, 6.2)
_HLSL_BUILTIN_ALIAS(__builtin_elementwise_bitreverse)
int16_t reversebits(int16_t);
+_HLSL_AVAILABILITY(shadermodel, 6.2)
_HLSL_BUILTIN_ALIAS(__builtin_elementwise_bitreverse)
int16_t2 reversebits(int16_t2);
+_HLSL_AVAILABILITY(shadermodel, 6.2)
_HLSL_BUILTIN_ALIAS(__builtin_elementwise_bitreverse)
int16_t3 reversebits(int16_t3);
+_HLSL_AVAILABILITY(shadermodel, 6.2)
_HLSL_BUILTIN_ALIAS(__builtin_elementwise_bitreverse)
int16_t4 reversebits(int16_t4);
+_HLSL_AVAILABILITY(shadermodel, 6.2)
_HLSL_BUILTIN_ALIAS(__builtin_elementwise_bitreverse)
uint16_t reversebits(uint16_t);
+_HLSL_AVAILABILITY(shadermodel, 6.2)
_HLSL_BUILTIN_ALIAS(__builtin_elementwise_bitreverse)
uint16_t2 reversebits(uint16_t2);
+_HLSL_AVAILABILITY(shadermodel, 6.2)
_HLSL_BUILTIN_ALIAS(__builtin_elementwise_bitreverse)
uint16_t3 reversebits(uint16_t3);
+_HLSL_AVAILABILITY(shadermodel, 6.2)
_HLSL_BUILTIN_ALIAS(__builtin_elementwise_bitreverse)
uint16_t4 reversebits(uint16_t4);
#endif
@@ -538,13 +675,22 @@ uint64_t4 reversebits(uint64_t4);
//===----------------------------------------------------------------------===//
// sin builtins
//===----------------------------------------------------------------------===//
+
+/// \fn T sin(T Val)
+/// \brief Returns the sine of the input value, \a Val.
+/// \param Val The input value.
+
#ifdef __HLSL_ENABLE_16_BIT
+_HLSL_AVAILABILITY(shadermodel, 6.2)
_HLSL_BUILTIN_ALIAS(__builtin_elementwise_sin)
half sin(half);
+_HLSL_AVAILABILITY(shadermodel, 6.2)
_HLSL_BUILTIN_ALIAS(__builtin_elementwise_sin)
half2 sin(half2);
+_HLSL_AVAILABILITY(shadermodel, 6.2)
_HLSL_BUILTIN_ALIAS(__builtin_elementwise_sin)
half3 sin(half3);
+_HLSL_AVAILABILITY(shadermodel, 6.2)
_HLSL_BUILTIN_ALIAS(__builtin_elementwise_sin)
half4 sin(half4);
#endif
@@ -570,7 +716,13 @@ double4 sin(double4);
//===----------------------------------------------------------------------===//
// sqrt builtins
//===----------------------------------------------------------------------===//
+
+/// \fn T sqrt(T Val)
+/// \brief Returns the square root of the input value, \a Val.
+/// \param Val The input value.
+
#ifdef __HLSL_ENABLE_16_BIT
+_HLSL_AVAILABILITY(shadermodel, 6.2)
_HLSL_BUILTIN_ALIAS(__builtin_sqrtf16)
half sqrt(half In);
#endif
@@ -584,13 +736,22 @@ double sqrt(double In);
//===----------------------------------------------------------------------===//
// trunc builtins
//===----------------------------------------------------------------------===//
+
+/// \fn T trunc(T Val)
+/// \brief Returns the truncated integer value of the input value, \a Val.
+/// \param Val The input value.
+
#ifdef __HLSL_ENABLE_16_BIT
+_HLSL_AVAILABILITY(shadermodel, 6.2)
_HLSL_BUILTIN_ALIAS(__builtin_elementwise_trunc)
half trunc(half);
+_HLSL_AVAILABILITY(shadermodel, 6.2)
_HLSL_BUILTIN_ALIAS(__builtin_elementwise_trunc)
half2 trunc(half2);
+_HLSL_AVAILABILITY(shadermodel, 6.2)
_HLSL_BUILTIN_ALIAS(__builtin_elementwise_trunc)
half3 trunc(half3);
+_HLSL_AVAILABILITY(shadermodel, 6.2)
_HLSL_BUILTIN_ALIAS(__builtin_elementwise_trunc)
half4 trunc(half4);
#endif
@@ -616,9 +777,16 @@ double4 trunc(double4);
//===----------------------------------------------------------------------===//
// Wave* builtins
//===----------------------------------------------------------------------===//
+
+/// \brief Counts the number of boolean variables which evaluate to true across
+/// all active lanes in the current wave.
+///
+/// \param Val The input boolean value.
+/// \return The number of lanes for which the boolean variable evaluates to
+/// true, across all active lanes in the current wave.
_HLSL_AVAILABILITY(shadermodel, 6.0)
_HLSL_BUILTIN_ALIAS(__builtin_hlsl_wave_active_count_bits)
-uint WaveActiveCountBits(bool bBit);
+uint WaveActiveCountBits(bool Val);
} // namespace hlsl
#endif //_HLSL_HLSL_INTRINSICS_H_
diff --git a/clang/lib/InstallAPI/CMakeLists.txt b/clang/lib/InstallAPI/CMakeLists.txt
new file mode 100644
index 0000000..1476b73
--- /dev/null
+++ b/clang/lib/InstallAPI/CMakeLists.txt
@@ -0,0 +1,12 @@
+set(LLVM_LINK_COMPONENTS
+ Support
+ TextAPI
+ )
+
+add_clang_library(clangInstallAPI
+ Context.cpp
+
+ LINK_LIBS
+ clangAST
+ clangBasic
+ )
diff --git a/clang/lib/InstallAPI/Context.cpp b/clang/lib/InstallAPI/Context.cpp
new file mode 100644
index 0000000..d4df52f
--- /dev/null
+++ b/clang/lib/InstallAPI/Context.cpp
@@ -0,0 +1,27 @@
+//===--- InstallAPI/Context.cpp -------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/InstallAPI/Context.h"
+#include "clang/AST/ASTContext.h"
+#include "llvm/TextAPI/TextAPIWriter.h"
+
+using namespace clang;
+using namespace clang::installapi;
+using namespace llvm::MachO;
+
+void InstallAPIConsumer::HandleTranslationUnit(ASTContext &Context) {
+ if (Context.getDiagnostics().hasErrorOccurred())
+ return;
+ InterfaceFile IF;
+ // Set library attributes captured through cc1 args.
+ Target T(Ctx.TargetTriple);
+ IF.addTarget(T);
+ IF.setFromBinaryAttrs(Ctx.BA, T);
+ if (auto Err = TextAPIWriter::writeToStream(*Ctx.OS, IF, Ctx.FT))
+ Ctx.Diags->Report(diag::err_cannot_open_file) << Ctx.OutputLoc;
+}
diff --git a/clang/lib/Lex/LiteralSupport.cpp b/clang/lib/Lex/LiteralSupport.cpp
index 0a78638f..571a984 100644
--- a/clang/lib/Lex/LiteralSupport.cpp
+++ b/clang/lib/Lex/LiteralSupport.cpp
@@ -1358,11 +1358,17 @@ void NumericLiteralParser::ParseNumberStartingWithZero(SourceLocation TokLoc) {
// Handle simple binary numbers 0b01010
if ((c1 == 'b' || c1 == 'B') && (s[1] == '0' || s[1] == '1')) {
- // 0b101010 is a C++1y / GCC extension.
- Diags.Report(TokLoc, LangOpts.CPlusPlus14
- ? diag::warn_cxx11_compat_binary_literal
- : LangOpts.CPlusPlus ? diag::ext_binary_literal_cxx14
- : diag::ext_binary_literal);
+ // 0b101010 is a C++14 and C23 extension.
+ unsigned DiagId;
+ if (LangOpts.CPlusPlus14)
+ DiagId = diag::warn_cxx11_compat_binary_literal;
+ else if (LangOpts.C23)
+ DiagId = diag::warn_c23_compat_binary_literal;
+ else if (LangOpts.CPlusPlus)
+ DiagId = diag::ext_binary_literal_cxx14;
+ else
+ DiagId = diag::ext_binary_literal;
+ Diags.Report(TokLoc, DiagId);
++s;
assert(s < ThisTokEnd && "didn't maximally munch?");
radix = 2;
diff --git a/clang/lib/Parse/ParseTentative.cpp b/clang/lib/Parse/ParseTentative.cpp
index f1737cb..47c8503 100644
--- a/clang/lib/Parse/ParseTentative.cpp
+++ b/clang/lib/Parse/ParseTentative.cpp
@@ -79,9 +79,9 @@ bool Parser::isCXXDeclarationStatement(
getCurScope(), *II, Tok.getLocation(), SS, /*Template=*/nullptr);
if (Actions.isCurrentClassName(*II, getCurScope(), &SS) ||
isDeductionGuide) {
- if (isConstructorDeclarator(/*Unqualified=*/SS.isEmpty(),
- isDeductionGuide,
- DeclSpec::FriendSpecified::No))
+ if (isConstructorDeclarator(
+ /*Unqualified=*/SS.isEmpty(), isDeductionGuide,
+ /*IsFriend=*/DeclSpec::FriendSpecified::No))
return true;
} else if (SS.isNotEmpty()) {
// If the scope is not empty, it could alternatively be something like
diff --git a/clang/lib/Sema/DeclSpec.cpp b/clang/lib/Sema/DeclSpec.cpp
index 313f073..aede602 100644
--- a/clang/lib/Sema/DeclSpec.cpp
+++ b/clang/lib/Sema/DeclSpec.cpp
@@ -1102,18 +1102,13 @@ bool DeclSpec::setFunctionSpecNoreturn(SourceLocation Loc,
bool DeclSpec::SetFriendSpec(SourceLocation Loc, const char *&PrevSpec,
unsigned &DiagID) {
- if (Friend_specified) {
+ if (isFriendSpecified()) {
PrevSpec = "friend";
- // Keep the later location, so that we can later diagnose ill-formed
- // declarations like 'friend class X friend;'. Per [class.friend]p3,
- // 'friend' must be the first token in a friend declaration that is
- // not a function declaration.
- FriendLoc = Loc;
DiagID = diag::warn_duplicate_declspec;
return true;
}
- Friend_specified = true;
+ FriendSpecifiedFirst = isEmpty();
FriendLoc = Loc;
return false;
}
diff --git a/clang/lib/Sema/SemaDecl.cpp b/clang/lib/Sema/SemaDecl.cpp
index be23c0f..09a35fd 100644
--- a/clang/lib/Sema/SemaDecl.cpp
+++ b/clang/lib/Sema/SemaDecl.cpp
@@ -2960,6 +2960,9 @@ static bool mergeDeclAttribute(Sema &S, NamedDecl *D,
S.mergeHLSLNumThreadsAttr(D, *NT, NT->getX(), NT->getY(), NT->getZ());
else if (const auto *SA = dyn_cast<HLSLShaderAttr>(Attr))
NewAttr = S.mergeHLSLShaderAttr(D, *SA, SA->getType());
+ else if (const auto *SupA = dyn_cast<SuppressAttr>(Attr))
+ // Do nothing. Each redeclaration should be suppressed separately.
+ NewAttr = nullptr;
else if (Attr->shouldInheritEvenIfAlreadyPresent() || !DeclHasAttr(D, Attr))
NewAttr = cast<InheritableAttr>(Attr->clone(S.Context));
@@ -6839,21 +6842,21 @@ Sema::ActOnTypedefNameDecl(Scope *S, DeclContext *DC, TypedefNameDecl *NewTD,
if (IdentifierInfo *II = NewTD->getIdentifier())
if (!NewTD->isInvalidDecl() &&
NewTD->getDeclContext()->getRedeclContext()->isTranslationUnit()) {
- switch (II->getInterestingIdentifierID()) {
- case tok::InterestingIdentifierKind::FILE:
+ switch (II->getNotableIdentifierID()) {
+ case tok::NotableIdentifierKind::FILE:
Context.setFILEDecl(NewTD);
break;
- case tok::InterestingIdentifierKind::jmp_buf:
+ case tok::NotableIdentifierKind::jmp_buf:
Context.setjmp_bufDecl(NewTD);
break;
- case tok::InterestingIdentifierKind::sigjmp_buf:
+ case tok::NotableIdentifierKind::sigjmp_buf:
Context.setsigjmp_bufDecl(NewTD);
break;
- case tok::InterestingIdentifierKind::ucontext_t:
+ case tok::NotableIdentifierKind::ucontext_t:
Context.setucontext_tDecl(NewTD);
break;
- case tok::InterestingIdentifierKind::float_t:
- case tok::InterestingIdentifierKind::double_t:
+ case tok::NotableIdentifierKind::float_t:
+ case tok::NotableIdentifierKind::double_t:
NewTD->addAttr(AvailableOnlyInDefaultEvalMethodAttr::Create(Context));
break;
default:
@@ -17264,6 +17267,26 @@ Sema::ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc,
return true;
}
+ if (TUK == TUK_Friend && Kind == TagTypeKind::Enum) {
+ // C++23 [dcl.type.elab]p4:
+ // If an elaborated-type-specifier appears with the friend specifier as
+ // an entire member-declaration, the member-declaration shall have one
+ // of the following forms:
+ // friend class-key nested-name-specifier(opt) identifier ;
+ // friend class-key simple-template-id ;
+ // friend class-key nested-name-specifier template(opt)
+ // simple-template-id ;
+ //
+ // Since enum is not a class-key, so declarations like "friend enum E;"
+ // are ill-formed. Although CWG2363 reaffirms that such declarations are
+ // invalid, most implementations accept so we issue a pedantic warning.
+ Diag(KWLoc, diag::ext_enum_friend) << FixItHint::CreateRemoval(
+ ScopedEnum ? SourceRange(KWLoc, ScopedEnumKWLoc) : KWLoc);
+ assert(ScopedEnum || !ScopedEnumUsesClassTag);
+ Diag(KWLoc, diag::note_enum_friend)
+ << (ScopedEnum + ScopedEnumUsesClassTag);
+ }
+
// Figure out the underlying type if this a enum declaration. We need to do
// this early, because it's needed to detect if this is an incompatible
// redeclaration.
diff --git a/clang/lib/Sema/SemaDeclAttr.cpp b/clang/lib/Sema/SemaDeclAttr.cpp
index d552695..8a204b1 100644
--- a/clang/lib/Sema/SemaDeclAttr.cpp
+++ b/clang/lib/Sema/SemaDeclAttr.cpp
@@ -5260,11 +5260,6 @@ static void handleSuppressAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
// Suppression attribute with GSL spelling requires at least 1 argument.
if (!AL.checkAtLeastNumArgs(S, 1))
return;
- } else if (!isa<VarDecl>(D)) {
- // Analyzer suppression applies only to variables and statements.
- S.Diag(AL.getLoc(), diag::err_attribute_wrong_decl_type_str)
- << AL << 0 << "variables and statements";
- return;
}
std::vector<StringRef> DiagnosticIdentifiers;
diff --git a/clang/lib/Sema/SemaDeclCXX.cpp b/clang/lib/Sema/SemaDeclCXX.cpp
index ba233c9..79263bc 100644
--- a/clang/lib/Sema/SemaDeclCXX.cpp
+++ b/clang/lib/Sema/SemaDeclCXX.cpp
@@ -17545,79 +17545,6 @@ Decl *Sema::BuildStaticAssertDeclaration(SourceLocation StaticAssertLoc,
return Decl;
}
-/// Perform semantic analysis of the given friend type declaration.
-///
-/// \returns A friend declaration that.
-FriendDecl *Sema::CheckFriendTypeDecl(SourceLocation LocStart,
- SourceLocation FriendLoc,
- TypeSourceInfo *TSInfo) {
- assert(TSInfo && "NULL TypeSourceInfo for friend type declaration");
-
- QualType T = TSInfo->getType();
- SourceRange TypeRange = TSInfo->getTypeLoc().getSourceRange();
-
- // C++03 [class.friend]p2:
- // An elaborated-type-specifier shall be used in a friend declaration
- // for a class.*
- //
- // * The class-key of the elaborated-type-specifier is required.
- if (!CodeSynthesisContexts.empty()) {
- // Do not complain about the form of friend template types during any kind
- // of code synthesis. For template instantiation, we will have complained
- // when the template was defined.
- } else {
- if (!T->isElaboratedTypeSpecifier()) {
- // If we evaluated the type to a record type, suggest putting
- // a tag in front.
- if (const RecordType *RT = T->getAs<RecordType>()) {
- RecordDecl *RD = RT->getDecl();
-
- SmallString<16> InsertionText(" ");
- InsertionText += RD->getKindName();
-
- Diag(TypeRange.getBegin(),
- getLangOpts().CPlusPlus11 ?
- diag::warn_cxx98_compat_unelaborated_friend_type :
- diag::ext_unelaborated_friend_type)
- << (unsigned) RD->getTagKind()
- << T
- << FixItHint::CreateInsertion(getLocForEndOfToken(FriendLoc),
- InsertionText);
- } else {
- Diag(FriendLoc,
- getLangOpts().CPlusPlus11 ?
- diag::warn_cxx98_compat_nonclass_type_friend :
- diag::ext_nonclass_type_friend)
- << T
- << TypeRange;
- }
- } else if (T->getAs<EnumType>()) {
- Diag(FriendLoc,
- getLangOpts().CPlusPlus11 ?
- diag::warn_cxx98_compat_enum_friend :
- diag::ext_enum_friend)
- << T
- << TypeRange;
- }
-
- // C++11 [class.friend]p3:
- // A friend declaration that does not declare a function shall have one
- // of the following forms:
- // friend elaborated-type-specifier ;
- // friend simple-type-specifier ;
- // friend typename-specifier ;
- if (getLangOpts().CPlusPlus11 && LocStart != FriendLoc)
- Diag(FriendLoc, diag::err_friend_not_first_in_declaration) << T;
- }
-
- // If the type specifier in a friend declaration designates a (possibly
- // cv-qualified) class type, that class is declared as a friend; otherwise,
- // the friend declaration is ignored.
- return FriendDecl::Create(Context, CurContext,
- TSInfo->getTypeLoc().getBeginLoc(), TSInfo,
- FriendLoc);
-}
-
/// Handle a friend tag declaration where the scope specifier was
/// templated.
DeclResult Sema::ActOnTemplatedFriendTag(
@@ -17755,6 +17682,7 @@ DeclResult Sema::ActOnTemplatedFriendTag(
Decl *Sema::ActOnFriendTypeDecl(Scope *S, const DeclSpec &DS,
MultiTemplateParamsArg TempParams) {
SourceLocation Loc = DS.getBeginLoc();
+ SourceLocation FriendLoc = DS.getFriendSpecLoc();
assert(DS.isFriendSpecified());
assert(DS.getStorageClassSpec() == DeclSpec::SCS_unspecified);
@@ -17766,9 +17694,10 @@ Decl *Sema::ActOnFriendTypeDecl(Scope *S, const DeclSpec &DS,
// friend simple-type-specifier ;
// friend typename-specifier ;
//
- // Any declaration with a type qualifier does not have that form. (It's
- // legal to specify a qualified type as a friend, you just can't write the
- // keywords.)
+ // If the friend keyword isn't first, or if the declarations has any type
+ // qualifiers, then the declaration doesn't have that form.
+ if (getLangOpts().CPlusPlus11 && !DS.isFriendSpecifiedFirst())
+ Diag(FriendLoc, diag::err_friend_not_first_in_declaration);
if (DS.getTypeQualifiers()) {
if (DS.getTypeQualifiers() & DeclSpec::TQ_const)
Diag(DS.getConstSpecLoc(), diag::err_friend_decl_spec) << "const";
@@ -17795,24 +17724,35 @@ Decl *Sema::ActOnFriendTypeDecl(Scope *S, const DeclSpec &DS,
if (DiagnoseUnexpandedParameterPack(Loc, TSI, UPPC_FriendDeclaration))
return nullptr;
- // This is definitely an error in C++98. It's probably meant to
- // be forbidden in C++0x, too, but the specification is just
- // poorly written.
- //
- // The problem is with declarations like the following:
- // template <T> friend A<T>::foo;
- // where deciding whether a class C is a friend or not now hinges
- // on whether there exists an instantiation of A that causes
- // 'foo' to equal C. There are restrictions on class-heads
- // (which we declare (by fiat) elaborated friend declarations to
- // be) that makes this tractable.
- //
- // FIXME: handle "template <> friend class A<T>;", which
- // is possibly well-formed? Who even knows?
- if (TempParams.size() && !T->isElaboratedTypeSpecifier()) {
- Diag(Loc, diag::err_tagless_friend_type_template)
- << DS.getSourceRange();
- return nullptr;
+ if (!T->isElaboratedTypeSpecifier()) {
+ if (TempParams.size()) {
+ // C++23 [dcl.pre]p5:
+ // In a simple-declaration, the optional init-declarator-list can be
+ // omitted only when declaring a class or enumeration, that is, when
+ // the decl-specifier-seq contains either a class-specifier, an
+ // elaborated-type-specifier with a class-key, or an enum-specifier.
+ //
+ // The declaration of a template-declaration or explicit-specialization
+ // is never a member-declaration, so this must be a simple-declaration
+ // with no init-declarator-list. Therefore, this is ill-formed.
+ Diag(Loc, diag::err_tagless_friend_type_template) << DS.getSourceRange();
+ return nullptr;
+ } else if (const RecordDecl *RD = T->getAsRecordDecl()) {
+ SmallString<16> InsertionText(" ");
+ InsertionText += RD->getKindName();
+
+ Diag(Loc, getLangOpts().CPlusPlus11
+ ? diag::warn_cxx98_compat_unelaborated_friend_type
+ : diag::ext_unelaborated_friend_type)
+ << (unsigned)RD->getTagKind() << T
+ << FixItHint::CreateInsertion(getLocForEndOfToken(FriendLoc),
+ InsertionText);
+ } else {
+ Diag(FriendLoc, getLangOpts().CPlusPlus11
+ ? diag::warn_cxx98_compat_nonclass_type_friend
+ : diag::ext_nonclass_type_friend)
+ << T << DS.getSourceRange();
+ }
}
// C++98 [class.friend]p1: A friend of a class is a function
@@ -17828,12 +17768,11 @@ Decl *Sema::ActOnFriendTypeDecl(Scope *S, const DeclSpec &DS,
Decl *D;
if (!TempParams.empty())
- D = FriendTemplateDecl::Create(Context, CurContext, Loc,
- TempParams,
- TSI,
- DS.getFriendSpecLoc());
+ D = FriendTemplateDecl::Create(Context, CurContext, Loc, TempParams, TSI,
+ FriendLoc);
else
- D = CheckFriendTypeDecl(Loc, DS.getFriendSpecLoc(), TSI);
+ D = FriendDecl::Create(Context, CurContext, TSI->getTypeLoc().getBeginLoc(),
+ TSI, FriendLoc);
if (!D)
return nullptr;
diff --git a/clang/lib/Sema/SemaExceptionSpec.cpp b/clang/lib/Sema/SemaExceptionSpec.cpp
index 8d58ef5..3563b4f 100644
--- a/clang/lib/Sema/SemaExceptionSpec.cpp
+++ b/clang/lib/Sema/SemaExceptionSpec.cpp
@@ -1423,6 +1423,7 @@ CanThrowResult Sema::canThrow(const Stmt *S) {
llvm_unreachable("Invalid class for expression");
// Most statements can throw if any substatement can throw.
+ case Stmt::OpenACCComputeConstructClass:
case Stmt::AttributedStmtClass:
case Stmt::BreakStmtClass:
case Stmt::CapturedStmtClass:
diff --git a/clang/lib/Sema/SemaTemplateDeduction.cpp b/clang/lib/Sema/SemaTemplateDeduction.cpp
index 994c997..47cc223 100644
--- a/clang/lib/Sema/SemaTemplateDeduction.cpp
+++ b/clang/lib/Sema/SemaTemplateDeduction.cpp
@@ -5642,9 +5642,12 @@ FunctionTemplateDecl *Sema::getMoreSpecializedTemplate(
Sema::TPL_TemplateParamsEquivalent))
return nullptr;
+ // [dcl.fct]p5:
+ // Any top-level cv-qualifiers modifying a parameter type are deleted when
+ // forming the function type.
for (unsigned i = 0; i < NumParams1; ++i)
- if (!Context.hasSameType(FD1->getParamDecl(i)->getType(),
- FD2->getParamDecl(i)->getType()))
+ if (!Context.hasSameUnqualifiedType(FD1->getParamDecl(i)->getType(),
+ FD2->getParamDecl(i)->getType()))
return nullptr;
// C++20 [temp.func.order]p6.3:
diff --git a/clang/lib/Sema/SemaTemplateInstantiateDecl.cpp b/clang/lib/Sema/SemaTemplateInstantiateDecl.cpp
index d67b21b..9c696e0 100644
--- a/clang/lib/Sema/SemaTemplateInstantiateDecl.cpp
+++ b/clang/lib/Sema/SemaTemplateInstantiateDecl.cpp
@@ -1407,11 +1407,8 @@ Decl *TemplateDeclInstantiator::VisitFriendDecl(FriendDecl *D) {
if (!InstTy)
return nullptr;
- FriendDecl *FD = SemaRef.CheckFriendTypeDecl(D->getBeginLoc(),
- D->getFriendLoc(), InstTy);
- if (!FD)
- return nullptr;
-
+ FriendDecl *FD = FriendDecl::Create(
+ SemaRef.Context, Owner, D->getLocation(), InstTy, D->getFriendLoc());
FD->setAccess(AS_public);
FD->setUnsupportedFriend(D->isUnsupportedFriend());
Owner->addDecl(FD);
diff --git a/clang/lib/Sema/TreeTransform.h b/clang/lib/Sema/TreeTransform.h
index 3ed17c3..6e5ae12 100644
--- a/clang/lib/Sema/TreeTransform.h
+++ b/clang/lib/Sema/TreeTransform.h
@@ -27,6 +27,7 @@
#include "clang/AST/Stmt.h"
#include "clang/AST/StmtCXX.h"
#include "clang/AST/StmtObjC.h"
+#include "clang/AST/StmtOpenACC.h"
#include "clang/AST/StmtOpenMP.h"
#include "clang/Basic/DiagnosticParse.h"
#include "clang/Basic/OpenMPKinds.h"
@@ -3995,6 +3996,13 @@ public:
return getSema().CreateRecoveryExpr(BeginLoc, EndLoc, SubExprs, Type);
}
+ StmtResult RebuildOpenACCComputeConstruct(OpenACCDirectiveKind K,
+ SourceLocation BeginLoc,
+ SourceLocation EndLoc,
+ StmtResult StrBlock) {
+ llvm_unreachable("Not yet implemented!");
+ }
+
private:
TypeLoc TransformTypeInObjectScope(TypeLoc TL,
QualType ObjectType,
@@ -10994,6 +11002,21 @@ OMPClause *TreeTransform<Derived>::TransformOMPXBareClause(OMPXBareClause *C) {
}
//===----------------------------------------------------------------------===//
+// OpenACC transformation
+//===----------------------------------------------------------------------===//
+template <typename Derived>
+StmtResult TreeTransform<Derived>::TransformOpenACCComputeConstruct(
+ OpenACCComputeConstruct *C) {
+ // TODO OpenACC: Transform clauses.
+
+ // Transform Structured Block.
+ StmtResult StrBlock = getDerived().TransformStmt(C->getStructuredBlock());
+
+ return getDerived().RebuildOpenACCComputeConstruct(
+ C->getDirectiveKind(), C->getBeginLoc(), C->getEndLoc(), StrBlock);
+}
+
+//===----------------------------------------------------------------------===//
// Expression transformation
//===----------------------------------------------------------------------===//
template<typename Derived>
diff --git a/clang/lib/Serialization/ASTReader.cpp b/clang/lib/Serialization/ASTReader.cpp
index eea14a6..683a076 100644
--- a/clang/lib/Serialization/ASTReader.cpp
+++ b/clang/lib/Serialization/ASTReader.cpp
@@ -988,8 +988,7 @@ ASTIdentifierLookupTraitBase::ReadKey(const unsigned char* d, unsigned n) {
static bool isInterestingIdentifier(ASTReader &Reader, IdentifierInfo &II,
bool IsModule) {
bool IsInteresting =
- II.getInterestingIdentifierID() !=
- tok::InterestingIdentifierKind::not_interesting ||
+ II.getNotableIdentifierID() != tok::NotableIdentifierKind::not_notable ||
II.getBuiltinID() != Builtin::ID::NotBuiltin ||
II.getObjCKeywordID() != tok::ObjCKeywordKind::objc_not_keyword;
return II.hadMacroDefinition() || II.isPoisoned() ||
diff --git a/clang/lib/Serialization/ASTReaderStmt.cpp b/clang/lib/Serialization/ASTReaderStmt.cpp
index d79f194..3da44ff 100644
--- a/clang/lib/Serialization/ASTReaderStmt.cpp
+++ b/clang/lib/Serialization/ASTReaderStmt.cpp
@@ -2789,6 +2789,26 @@ void ASTStmtReader::VisitOMPTargetParallelGenericLoopDirective(
}
//===----------------------------------------------------------------------===//
+// OpenACC Constructs/Directives.
+//===----------------------------------------------------------------------===//
+void ASTStmtReader::VisitOpenACCConstructStmt(OpenACCConstructStmt *S) {
+ S->Kind = Record.readEnum<OpenACCDirectiveKind>();
+ S->Range = Record.readSourceRange();
+ // TODO OpenACC: Deserialize Clauses.
+}
+
+void ASTStmtReader::VisitOpenACCAssociatedStmtConstruct(
+ OpenACCAssociatedStmtConstruct *S) {
+ VisitOpenACCConstructStmt(S);
+ S->setAssociatedStmt(Record.readSubStmt());
+}
+
+void ASTStmtReader::VisitOpenACCComputeConstruct(OpenACCComputeConstruct *S) {
+ VisitStmt(S);
+ VisitOpenACCConstructStmt(S);
+}
+
+//===----------------------------------------------------------------------===//
// ASTReader Implementation
//===----------------------------------------------------------------------===//
@@ -4206,6 +4226,9 @@ Stmt *ASTReader::ReadStmtFromStream(ModuleFile &F) {
S = new (Context) ConceptSpecializationExpr(Empty);
break;
}
+ case STMT_OPENACC_COMPUTE_CONSTRUCT:
+ S = OpenACCComputeConstruct::CreateEmpty(Context, Empty);
+ break;
case EXPR_REQUIRES:
unsigned numLocalParameters = Record[ASTStmtReader::NumExprFields];
diff --git a/clang/lib/Serialization/ASTWriter.cpp b/clang/lib/Serialization/ASTWriter.cpp
index 7966b31..740bec5 100644
--- a/clang/lib/Serialization/ASTWriter.cpp
+++ b/clang/lib/Serialization/ASTWriter.cpp
@@ -3599,8 +3599,8 @@ class ASTIdentifierTableTrait {
bool isInterestingIdentifier(const IdentifierInfo *II, uint64_t MacroOffset) {
II->getObjCOrBuiltinID();
bool IsInteresting =
- II->getInterestingIdentifierID() !=
- tok::InterestingIdentifierKind::not_interesting ||
+ II->getNotableIdentifierID() !=
+ tok::NotableIdentifierKind::not_notable ||
II->getBuiltinID() != Builtin::ID::NotBuiltin ||
II->getObjCKeywordID() != tok::ObjCKeywordKind::objc_not_keyword;
if (MacroOffset || II->isPoisoned() || (!IsModule && IsInteresting) ||
diff --git a/clang/lib/Serialization/ASTWriterStmt.cpp b/clang/lib/Serialization/ASTWriterStmt.cpp
index 5b0b902..484621a 100644
--- a/clang/lib/Serialization/ASTWriterStmt.cpp
+++ b/clang/lib/Serialization/ASTWriterStmt.cpp
@@ -2839,6 +2839,27 @@ void ASTStmtWriter::VisitOMPTargetParallelGenericLoopDirective(
}
//===----------------------------------------------------------------------===//
+// OpenACC Constructs/Directives.
+//===----------------------------------------------------------------------===//
+void ASTStmtWriter::VisitOpenACCConstructStmt(OpenACCConstructStmt *S) {
+ Record.writeEnum(S->Kind);
+ Record.AddSourceRange(S->Range);
+ // TODO OpenACC: Serialize Clauses.
+}
+
+void ASTStmtWriter::VisitOpenACCAssociatedStmtConstruct(
+ OpenACCAssociatedStmtConstruct *S) {
+ VisitOpenACCConstructStmt(S);
+ Record.AddStmt(S->getAssociatedStmt());
+}
+
+void ASTStmtWriter::VisitOpenACCComputeConstruct(OpenACCComputeConstruct *S) {
+ VisitStmt(S);
+ VisitOpenACCConstructStmt(S);
+ Code = serialization::STMT_OPENACC_COMPUTE_CONSTRUCT;
+}
+
+//===----------------------------------------------------------------------===//
// ASTWriter Implementation
//===----------------------------------------------------------------------===//
diff --git a/clang/lib/StaticAnalyzer/Checkers/BoolAssignmentChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/BoolAssignmentChecker.cpp
index a09db6d..837cbbc 100644
--- a/clang/lib/StaticAnalyzer/Checkers/BoolAssignmentChecker.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/BoolAssignmentChecker.cpp
@@ -23,19 +23,19 @@ using namespace clang;
using namespace ento;
namespace {
- class BoolAssignmentChecker : public Checker< check::Bind > {
- const BugType BT{this, "Assignment of a non-Boolean value"};
- void emitReport(ProgramStateRef state, CheckerContext &C,
- bool IsTainted = false) const;
-
- public:
- void checkBind(SVal loc, SVal val, const Stmt *S, CheckerContext &C) const;
- };
+class BoolAssignmentChecker : public Checker<check::Bind> {
+ const BugType BT{this, "Assignment of a non-Boolean value"};
+ void emitReport(ProgramStateRef State, CheckerContext &C,
+ bool IsTainted = false) const;
+
+public:
+ void checkBind(SVal Loc, SVal Val, const Stmt *S, CheckerContext &C) const;
+};
} // end anonymous namespace
-void BoolAssignmentChecker::emitReport(ProgramStateRef state, CheckerContext &C,
+void BoolAssignmentChecker::emitReport(ProgramStateRef State, CheckerContext &C,
bool IsTainted) const {
- if (ExplodedNode *N = C.generateNonFatalErrorNode(state)) {
+ if (ExplodedNode *N = C.generateNonFatalErrorNode(State)) {
StringRef Msg = IsTainted ? "Might assign a tainted non-Boolean value"
: "Assignment of a non-Boolean value";
C.emitReport(std::make_unique<PathSensitiveBugReport>(BT, Msg, N));
@@ -47,59 +47,58 @@ static bool isBooleanType(QualType Ty) {
return true;
if (const TypedefType *TT = Ty->getAs<TypedefType>())
- return TT->getDecl()->getName() == "BOOL" || // Objective-C
- TT->getDecl()->getName() == "_Bool" || // stdbool.h < C99
- TT->getDecl()->getName() == "Boolean"; // MacTypes.h
+ return TT->getDecl()->getName() == "BOOL" || // Objective-C
+ TT->getDecl()->getName() == "_Bool" || // stdbool.h < C99
+ TT->getDecl()->getName() == "Boolean"; // MacTypes.h
return false;
}
-void BoolAssignmentChecker::checkBind(SVal loc, SVal val, const Stmt *S,
+void BoolAssignmentChecker::checkBind(SVal Loc, SVal Val, const Stmt *S,
CheckerContext &C) const {
// We are only interested in stores into Booleans.
const TypedValueRegion *TR =
- dyn_cast_or_null<TypedValueRegion>(loc.getAsRegion());
+ dyn_cast_or_null<TypedValueRegion>(Loc.getAsRegion());
if (!TR)
return;
- QualType valTy = TR->getValueType();
+ QualType RegTy = TR->getValueType();
- if (!isBooleanType(valTy))
+ if (!isBooleanType(RegTy))
return;
// Get the value of the right-hand side. We only care about values
// that are defined (UnknownVals and UndefinedVals are handled by other
// checkers).
- std::optional<NonLoc> NV = val.getAs<NonLoc>();
+ std::optional<NonLoc> NV = Val.getAs<NonLoc>();
if (!NV)
return;
// Check if the assigned value meets our criteria for correctness. It must
// be a value that is either 0 or 1. One way to check this is to see if
// the value is possibly < 0 (for a negative value) or greater than 1.
- ProgramStateRef state = C.getState();
- SValBuilder &svalBuilder = C.getSValBuilder();
- BasicValueFactory &BVF = svalBuilder.getBasicValueFactory();
+ ProgramStateRef State = C.getState();
+ BasicValueFactory &BVF = C.getSValBuilder().getBasicValueFactory();
ConstraintManager &CM = C.getConstraintManager();
- llvm::APSInt Zero = BVF.getValue(0, valTy);
- llvm::APSInt One = BVF.getValue(1, valTy);
+ llvm::APSInt Zero = BVF.getValue(0, RegTy);
+ llvm::APSInt One = BVF.getValue(1, RegTy);
ProgramStateRef StIn, StOut;
- std::tie(StIn, StOut) = CM.assumeInclusiveRangeDual(state, *NV, Zero, One);
+ std::tie(StIn, StOut) = CM.assumeInclusiveRangeDual(State, *NV, Zero, One);
if (!StIn)
emitReport(StOut, C);
- if (StIn && StOut && taint::isTainted(state, *NV))
+ if (StIn && StOut && taint::isTainted(State, *NV))
emitReport(StOut, C, /*IsTainted=*/true);
}
-void ento::registerBoolAssignmentChecker(CheckerManager &mgr) {
- mgr.registerChecker<BoolAssignmentChecker>();
+void ento::registerBoolAssignmentChecker(CheckerManager &Mgr) {
+ Mgr.registerChecker<BoolAssignmentChecker>();
}
-bool ento::shouldRegisterBoolAssignmentChecker(const CheckerManager &mgr) {
+bool ento::shouldRegisterBoolAssignmentChecker(const CheckerManager &Mgr) {
return true;
}
diff --git a/clang/lib/StaticAnalyzer/Checkers/ObjCUnusedIVarsChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/ObjCUnusedIVarsChecker.cpp
index 1c2d842..2f2df63 100644
--- a/clang/lib/StaticAnalyzer/Checkers/ObjCUnusedIVarsChecker.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/ObjCUnusedIVarsChecker.cpp
@@ -161,8 +161,8 @@ static void checkObjCUnusedIvar(const ObjCImplementationDecl *D,
PathDiagnosticLocation L =
PathDiagnosticLocation::create(Ivar, BR.getSourceManager());
- BR.EmitBasicReport(D, Checker, "Unused instance variable", "Optimization",
- os.str(), L);
+ BR.EmitBasicReport(ID, Checker, "Unused instance variable",
+ "Optimization", os.str(), L);
}
}
diff --git a/clang/lib/StaticAnalyzer/Checkers/WebKit/ASTUtils.cpp b/clang/lib/StaticAnalyzer/Checkers/WebKit/ASTUtils.cpp
index 4526fac..94eaa81 100644
--- a/clang/lib/StaticAnalyzer/Checkers/WebKit/ASTUtils.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/WebKit/ASTUtils.cpp
@@ -19,6 +19,10 @@ namespace clang {
std::pair<const Expr *, bool>
tryToFindPtrOrigin(const Expr *E, bool StopAtFirstRefCountedObj) {
while (E) {
+ if (auto *tempExpr = dyn_cast<MaterializeTemporaryExpr>(E)) {
+ E = tempExpr->getSubExpr();
+ continue;
+ }
if (auto *cast = dyn_cast<CastExpr>(E)) {
if (StopAtFirstRefCountedObj) {
if (auto *ConversionFunc =
@@ -63,6 +67,12 @@ tryToFindPtrOrigin(const Expr *E, bool StopAtFirstRefCountedObj) {
continue;
}
+ if (isReturnValueRefCounted(callee))
+ return {E, true};
+
+ if (isSingleton(callee))
+ return {E, true};
+
if (isPtrConversion(callee)) {
E = call->getArg(0);
continue;
diff --git a/clang/lib/StaticAnalyzer/Checkers/WebKit/PtrTypesSemantics.cpp b/clang/lib/StaticAnalyzer/Checkers/WebKit/PtrTypesSemantics.cpp
index 96784d4..bf6f9a6 100644
--- a/clang/lib/StaticAnalyzer/Checkers/WebKit/PtrTypesSemantics.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/WebKit/PtrTypesSemantics.cpp
@@ -12,6 +12,7 @@
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/ExprCXX.h"
+#include "clang/AST/StmtVisitor.h"
#include <optional>
using namespace clang;
@@ -119,6 +120,26 @@ bool isCtorOfRefCounted(const clang::FunctionDecl *F) {
|| FunctionName == "Identifier";
}
+bool isReturnValueRefCounted(const clang::FunctionDecl *F) {
+ assert(F);
+ QualType type = F->getReturnType();
+ while (!type.isNull()) {
+ if (auto *elaboratedT = type->getAs<ElaboratedType>()) {
+ type = elaboratedT->desugar();
+ continue;
+ }
+ if (auto *specialT = type->getAs<TemplateSpecializationType>()) {
+ if (auto *decl = specialT->getTemplateName().getAsTemplateDecl()) {
+ auto name = decl->getNameAsString();
+ return name == "Ref" || name == "RefPtr";
+ }
+ return false;
+ }
+ return false;
+ }
+ return false;
+}
+
std::optional<bool> isUncounted(const CXXRecordDecl* Class)
{
// Keep isRefCounted first as it's cheaper.
@@ -194,11 +215,224 @@ bool isPtrConversion(const FunctionDecl *F) {
// FIXME: check # of params == 1
const auto FunctionName = safeGetName(F);
if (FunctionName == "getPtr" || FunctionName == "WeakPtr" ||
- FunctionName == "dynamicDowncast"
- || FunctionName == "downcast" || FunctionName == "bitwise_cast")
+ FunctionName == "dynamicDowncast" || FunctionName == "downcast" ||
+ FunctionName == "checkedDowncast" ||
+ FunctionName == "uncheckedDowncast" || FunctionName == "bitwise_cast")
return true;
return false;
}
+bool isSingleton(const FunctionDecl *F) {
+ assert(F);
+ // FIXME: check # of params == 1
+ if (auto *MethodDecl = dyn_cast<CXXMethodDecl>(F)) {
+ if (!MethodDecl->isStatic())
+ return false;
+ }
+ const auto &Name = safeGetName(F);
+ std::string SingletonStr = "singleton";
+ auto index = Name.find(SingletonStr);
+ return index != std::string::npos &&
+ index == Name.size() - SingletonStr.size();
+}
+
+// We only care about statements so let's use the simple
+// (non-recursive) visitor.
+class TrivialFunctionAnalysisVisitor
+ : public ConstStmtVisitor<TrivialFunctionAnalysisVisitor, bool> {
+
+ // Returns false if at least one child is non-trivial.
+ bool VisitChildren(const Stmt *S) {
+ for (const Stmt *Child : S->children()) {
+ if (Child && !Visit(Child))
+ return false;
+ }
+
+ return true;
+ }
+
+public:
+ using CacheTy = TrivialFunctionAnalysis::CacheTy;
+
+ TrivialFunctionAnalysisVisitor(CacheTy &Cache) : Cache(Cache) {}
+
+ bool VisitStmt(const Stmt *S) {
+ // All statements are non-trivial unless overriden later.
+ // Don't even recurse into children by default.
+ return false;
+ }
+
+ bool VisitCompoundStmt(const CompoundStmt *CS) {
+ // A compound statement is allowed as long each individual sub-statement
+ // is trivial.
+ return VisitChildren(CS);
+ }
+
+ bool VisitReturnStmt(const ReturnStmt *RS) {
+ // A return statement is allowed as long as the return value is trivial.
+ if (auto *RV = RS->getRetValue())
+ return Visit(RV);
+ return true;
+ }
+
+ bool VisitDeclStmt(const DeclStmt *DS) { return VisitChildren(DS); }
+ bool VisitDoStmt(const DoStmt *DS) { return VisitChildren(DS); }
+ bool VisitIfStmt(const IfStmt *IS) { return VisitChildren(IS); }
+ bool VisitSwitchStmt(const SwitchStmt *SS) { return VisitChildren(SS); }
+ bool VisitCaseStmt(const CaseStmt *CS) { return VisitChildren(CS); }
+ bool VisitDefaultStmt(const DefaultStmt *DS) { return VisitChildren(DS); }
+
+ bool VisitUnaryOperator(const UnaryOperator *UO) {
+ // Operator '*' and '!' are allowed as long as the operand is trivial.
+ if (UO->getOpcode() == UO_Deref || UO->getOpcode() == UO_LNot)
+ return Visit(UO->getSubExpr());
+
+ // Other operators are non-trivial.
+ return false;
+ }
+
+ bool VisitBinaryOperator(const BinaryOperator *BO) {
+ // Binary operators are trivial if their operands are trivial.
+ return Visit(BO->getLHS()) && Visit(BO->getRHS());
+ }
+
+ bool VisitConditionalOperator(const ConditionalOperator *CO) {
+ // Ternary operators are trivial if their conditions & values are trivial.
+ return VisitChildren(CO);
+ }
+
+ bool VisitDeclRefExpr(const DeclRefExpr *DRE) {
+ if (auto *decl = DRE->getDecl()) {
+ if (isa<ParmVarDecl>(decl))
+ return true;
+ }
+ return false;
+ }
+
+ bool VisitStaticAssertDecl(const StaticAssertDecl *SAD) {
+ // Any static_assert is considered trivial.
+ return true;
+ }
+
+ bool VisitCallExpr(const CallExpr *CE) {
+ if (!checkArguments(CE))
+ return false;
+
+ auto *Callee = CE->getDirectCallee();
+ if (!Callee)
+ return false;
+ const auto &Name = safeGetName(Callee);
+
+ if (Name == "WTFCrashWithInfo" || Name == "WTFBreakpointTrap" ||
+ Name == "compilerFenceForCrash" || Name == "__builtin_unreachable")
+ return true;
+
+ return TrivialFunctionAnalysis::isTrivialImpl(Callee, Cache);
+ }
+
+ bool VisitCXXMemberCallExpr(const CXXMemberCallExpr *MCE) {
+ if (!checkArguments(MCE))
+ return false;
+
+ bool TrivialThis = Visit(MCE->getImplicitObjectArgument());
+ if (!TrivialThis)
+ return false;
+
+ auto *Callee = MCE->getMethodDecl();
+ if (!Callee)
+ return false;
+
+ std::optional<bool> IsGetterOfRefCounted = isGetterOfRefCounted(Callee);
+ if (IsGetterOfRefCounted && *IsGetterOfRefCounted)
+ return true;
+
+ // Recursively descend into the callee to confirm that it's trivial as well.
+ return TrivialFunctionAnalysis::isTrivialImpl(Callee, Cache);
+ }
+
+ bool checkArguments(const CallExpr *CE) {
+ for (const Expr *Arg : CE->arguments()) {
+ if (Arg && !Visit(Arg))
+ return false;
+ }
+ return true;
+ }
+
+ bool VisitCXXConstructExpr(const CXXConstructExpr *CE) {
+ for (const Expr *Arg : CE->arguments()) {
+ if (Arg && !Visit(Arg))
+ return false;
+ }
+
+ // Recursively descend into the callee to confirm that it's trivial.
+ return TrivialFunctionAnalysis::isTrivialImpl(CE->getConstructor(), Cache);
+ }
+
+ bool VisitImplicitCastExpr(const ImplicitCastExpr *ICE) {
+ return Visit(ICE->getSubExpr());
+ }
+
+ bool VisitExplicitCastExpr(const ExplicitCastExpr *ECE) {
+ return Visit(ECE->getSubExpr());
+ }
+
+ bool VisitParenExpr(const ParenExpr *PE) { return Visit(PE->getSubExpr()); }
+
+ bool VisitInitListExpr(const InitListExpr *ILE) {
+ for (const Expr *Child : ILE->inits()) {
+ if (Child && !Visit(Child))
+ return false;
+ }
+ return true;
+ }
+
+ bool VisitMemberExpr(const MemberExpr *ME) {
+ // Field access is allowed but the base pointer may itself be non-trivial.
+ return Visit(ME->getBase());
+ }
+
+ bool VisitCXXThisExpr(const CXXThisExpr *CTE) {
+ // The expression 'this' is always trivial, be it explicit or implicit.
+ return true;
+ }
+
+ // Constant literal expressions are always trivial
+ bool VisitIntegerLiteral(const IntegerLiteral *E) { return true; }
+ bool VisitFloatingLiteral(const FloatingLiteral *E) { return true; }
+ bool VisitFixedPointLiteral(const FixedPointLiteral *E) { return true; }
+ bool VisitCharacterLiteral(const CharacterLiteral *E) { return true; }
+ bool VisitStringLiteral(const StringLiteral *E) { return true; }
+
+ bool VisitConstantExpr(const ConstantExpr *CE) {
+ // Constant expressions are trivial.
+ return true;
+ }
+
+private:
+ CacheTy Cache;
+};
+
+bool TrivialFunctionAnalysis::isTrivialImpl(
+ const Decl *D, TrivialFunctionAnalysis::CacheTy &Cache) {
+ // If the function isn't in the cache, conservatively assume that
+ // it's not trivial until analysis completes. This makes every recursive
+ // function non-trivial. This also guarantees that each function
+ // will be scanned at most once.
+ auto [It, IsNew] = Cache.insert(std::make_pair(D, false));
+ if (!IsNew)
+ return It->second;
+
+ const Stmt *Body = D->getBody();
+ if (!Body)
+ return false;
+
+ TrivialFunctionAnalysisVisitor V(Cache);
+ bool Result = V.Visit(Body);
+ if (Result)
+ Cache[D] = true;
+
+ return Result;
+}
+
} // namespace clang
diff --git a/clang/lib/StaticAnalyzer/Checkers/WebKit/PtrTypesSemantics.h b/clang/lib/StaticAnalyzer/Checkers/WebKit/PtrTypesSemantics.h
index 45b21cc..e07cd31 100644
--- a/clang/lib/StaticAnalyzer/Checkers/WebKit/PtrTypesSemantics.h
+++ b/clang/lib/StaticAnalyzer/Checkers/WebKit/PtrTypesSemantics.h
@@ -10,12 +10,14 @@
#define LLVM_CLANG_ANALYZER_WEBKIT_PTRTYPESEMANTICS_H
#include "llvm/ADT/APInt.h"
+#include "llvm/ADT/DenseMap.h"
#include <optional>
namespace clang {
class CXXBaseSpecifier;
class CXXMethodDecl;
class CXXRecordDecl;
+class Decl;
class FunctionDecl;
class Type;
@@ -50,6 +52,9 @@ std::optional<bool> isUncountedPtr(const clang::Type* T);
/// false if not.
bool isCtorOfRefCounted(const clang::FunctionDecl *F);
+/// \returns true if \p F returns a ref-counted object, false if not.
+bool isReturnValueRefCounted(const clang::FunctionDecl *F);
+
/// \returns true if \p M is getter of a ref-counted class, false if not.
std::optional<bool> isGetterOfRefCounted(const clang::CXXMethodDecl* Method);
@@ -57,6 +62,25 @@ std::optional<bool> isGetterOfRefCounted(const clang::CXXMethodDecl* Method);
/// pointer types.
bool isPtrConversion(const FunctionDecl *F);
+/// \returns true if \p F is a static singleton function.
+bool isSingleton(const FunctionDecl *F);
+
+/// An inter-procedural analysis facility that detects functions with "trivial"
+/// behavior with respect to reference counting, such as simple field getters.
+class TrivialFunctionAnalysis {
+public:
+ /// \returns true if \p D is a "trivial" function.
+ bool isTrivial(const Decl *D) const { return isTrivialImpl(D, TheCache); }
+
+private:
+ friend class TrivialFunctionAnalysisVisitor;
+
+ using CacheTy = llvm::DenseMap<const Decl *, bool>;
+ mutable CacheTy TheCache{};
+
+ static bool isTrivialImpl(const Decl *D, CacheTy &Cache);
+};
+
} // namespace clang
#endif
diff --git a/clang/lib/StaticAnalyzer/Checkers/WebKit/UncountedCallArgsChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/WebKit/UncountedCallArgsChecker.cpp
index f4e6191..17a64e1 100644
--- a/clang/lib/StaticAnalyzer/Checkers/WebKit/UncountedCallArgsChecker.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/WebKit/UncountedCallArgsChecker.cpp
@@ -32,6 +32,8 @@ class UncountedCallArgsChecker
"WebKit coding guidelines"};
mutable BugReporter *BR;
+ TrivialFunctionAnalysis TFA;
+
public:
void checkASTDecl(const TranslationUnitDecl *TUD, AnalysisManager &MGR,
@@ -70,6 +72,15 @@ public:
// or std::function call operator).
unsigned ArgIdx = isa<CXXOperatorCallExpr>(CE) && isa_and_nonnull<CXXMethodDecl>(F);
+ if (auto *MemberCallExpr = dyn_cast<CXXMemberCallExpr>(CE)) {
+ auto *E = MemberCallExpr->getImplicitObjectArgument();
+ QualType ArgType = MemberCallExpr->getObjectType();
+ std::optional<bool> IsUncounted =
+ isUncounted(ArgType->getAsCXXRecordDecl());
+ if (IsUncounted && *IsUncounted && !isPtrOriginSafe(E))
+ reportBugOnThis(E);
+ }
+
for (auto P = F->param_begin();
// FIXME: Also check variadic function parameters.
// FIXME: Also check default function arguments. Probably a different
@@ -94,25 +105,7 @@ public:
if (auto *defaultArg = dyn_cast<CXXDefaultArgExpr>(Arg))
Arg = defaultArg->getExpr();
- std::pair<const clang::Expr *, bool> ArgOrigin =
- tryToFindPtrOrigin(Arg, true);
-
- // Temporary ref-counted object created as part of the call argument
- // would outlive the call.
- if (ArgOrigin.second)
- continue;
-
- if (isa<CXXNullPtrLiteralExpr>(ArgOrigin.first)) {
- // foo(nullptr)
- continue;
- }
- if (isa<IntegerLiteral>(ArgOrigin.first)) {
- // FIXME: Check the value.
- // foo(NULL)
- continue;
- }
-
- if (isASafeCallArg(ArgOrigin.first))
+ if (isPtrOriginSafe(Arg))
continue;
reportBug(Arg, *P);
@@ -120,7 +113,34 @@ public:
}
}
+ bool isPtrOriginSafe(const Expr *Arg) const {
+ std::pair<const clang::Expr *, bool> ArgOrigin =
+ tryToFindPtrOrigin(Arg, true);
+
+ // Temporary ref-counted object created as part of the call argument
+ // would outlive the call.
+ if (ArgOrigin.second)
+ return true;
+
+ if (isa<CXXNullPtrLiteralExpr>(ArgOrigin.first)) {
+ // foo(nullptr)
+ return true;
+ }
+ if (isa<IntegerLiteral>(ArgOrigin.first)) {
+ // FIXME: Check the value.
+ // foo(NULL)
+ return true;
+ }
+
+ return isASafeCallArg(ArgOrigin.first);
+ }
+
bool shouldSkipCall(const CallExpr *CE) const {
+ const auto *Callee = CE->getDirectCallee();
+
+ if (Callee && TFA.isTrivial(Callee))
+ return true;
+
if (CE->getNumArgs() == 0)
return false;
@@ -142,7 +162,6 @@ public:
return false;
}
- const auto *Callee = CE->getDirectCallee();
if (!Callee)
return false;
@@ -161,13 +180,14 @@ public:
auto name = safeGetName(Callee);
if (name == "adoptRef" || name == "getPtr" || name == "WeakPtr" ||
- name == "dynamicDowncast" || name == "downcast" || name == "bitwise_cast" ||
- name == "is" || name == "equal" || name == "hash" ||
- name == "isType"
+ name == "dynamicDowncast" || name == "downcast" ||
+ name == "checkedDowncast" || name == "uncheckedDowncast" ||
+ name == "bitwise_cast" || name == "is" || name == "equal" ||
+ name == "hash" || name == "isType" ||
// FIXME: Most/all of these should be implemented via attributes.
- || name == "equalIgnoringASCIICase" ||
+ name == "equalIgnoringASCIICase" ||
name == "equalIgnoringASCIICaseCommon" ||
- name == "equalIgnoringNullity")
+ name == "equalIgnoringNullity" || name == "toString")
return true;
return false;
@@ -196,6 +216,19 @@ public:
Report->addRange(CallArg->getSourceRange());
BR->emitReport(std::move(Report));
}
+
+ void reportBugOnThis(const Expr *CallArg) const {
+ assert(CallArg);
+
+ const SourceLocation SrcLocToReport = CallArg->getSourceRange().getBegin();
+
+ PathDiagnosticLocation BSLoc(SrcLocToReport, BR->getSourceManager());
+ auto Report = std::make_unique<BasicBugReport>(
+ Bug, "Call argument for 'this' parameter is uncounted and unsafe.",
+ BSLoc);
+ Report->addRange(CallArg->getSourceRange());
+ BR->emitReport(std::move(Report));
+ }
};
} // namespace
diff --git a/clang/lib/StaticAnalyzer/Core/BugSuppression.cpp b/clang/lib/StaticAnalyzer/Core/BugSuppression.cpp
index fded071..84004b8 100644
--- a/clang/lib/StaticAnalyzer/Core/BugSuppression.cpp
+++ b/clang/lib/StaticAnalyzer/Core/BugSuppression.cpp
@@ -82,12 +82,12 @@ public:
CacheInitializer(ToInit).TraverseDecl(const_cast<Decl *>(D));
}
- bool VisitVarDecl(VarDecl *VD) {
+ bool VisitDecl(Decl *D) {
// Bug location could be somewhere in the init value of
// a freshly declared variable. Even though it looks like the
// user applied attribute to a statement, it will apply to a
// variable declaration, and this is where we check for it.
- return VisitAttributedNode(VD);
+ return VisitAttributedNode(D);
}
bool VisitAttributedStmt(AttributedStmt *AS) {
@@ -147,6 +147,20 @@ bool BugSuppression::isSuppressed(const PathDiagnosticLocation &Location,
// done as well as perform a lot of work we'll never need.
// Gladly, none of our on-by-default checkers currently need it.
DeclWithIssue = ACtx.getTranslationUnitDecl();
+ } else {
+ // This is the fast path. However, we should still consider the topmost
+ // declaration that isn't TranslationUnitDecl, because we should respect
+ // attributes on the entire declaration chain.
+ while (true) {
+ // Use the "lexical" parent. Eg., if the attribute is on a class, suppress
+ // warnings in inline methods but not in out-of-line methods.
+ const Decl *Parent =
+ dyn_cast_or_null<Decl>(DeclWithIssue->getLexicalDeclContext());
+ if (Parent == nullptr || isa<TranslationUnitDecl>(Parent))
+ break;
+
+ DeclWithIssue = Parent;
+ }
}
// While some warnings are attached to AST nodes (mostly path-sensitive
diff --git a/clang/lib/StaticAnalyzer/Core/ExprEngine.cpp b/clang/lib/StaticAnalyzer/Core/ExprEngine.cpp
index ccc3c0f..09c69f9 100644
--- a/clang/lib/StaticAnalyzer/Core/ExprEngine.cpp
+++ b/clang/lib/StaticAnalyzer/Core/ExprEngine.cpp
@@ -1821,6 +1821,7 @@ void ExprEngine::Visit(const Stmt *S, ExplodedNode *Pred,
case Stmt::OMPParallelGenericLoopDirectiveClass:
case Stmt::OMPTargetParallelGenericLoopDirectiveClass:
case Stmt::CapturedStmtClass:
+ case Stmt::OpenACCComputeConstructClass:
case Stmt::OMPUnrollDirectiveClass:
case Stmt::OMPMetaDirectiveClass: {
const ExplodedNode *node = Bldr.generateSink(S, Pred, Pred->getState());
diff --git a/clang/test/AST/Interp/c.c b/clang/test/AST/Interp/c.c
index 392b682..85c195d 100644
--- a/clang/test/AST/Interp/c.c
+++ b/clang/test/AST/Interp/c.c
@@ -24,6 +24,7 @@ _Static_assert(!!1, "");
int a = (1 == 1 ? 5 : 3);
_Static_assert(a == 5, ""); // all-error {{not an integral constant expression}}
+const int DiscardedPtrToIntCast = ((intptr_t)((void*)0), 0); // all-warning {{left operand of comma operator has no effect}}
const int b = 3;
_Static_assert(b == 3, ""); // pedantic-ref-warning {{not an integer constant expression}} \
diff --git a/clang/test/AST/Interp/functions.cpp b/clang/test/AST/Interp/functions.cpp
index 6e995ce..3206913 100644
--- a/clang/test/AST/Interp/functions.cpp
+++ b/clang/test/AST/Interp/functions.cpp
@@ -1,9 +1,9 @@
-// RUN: %clang_cc1 -fexperimental-new-constant-interpreter -verify %s
-// RUN: %clang_cc1 -std=c++14 -fexperimental-new-constant-interpreter -verify %s
-// RUN: %clang_cc1 -std=c++20 -fexperimental-new-constant-interpreter -verify %s
-// RUN: %clang_cc1 -verify=ref %s
-// RUN: %clang_cc1 -std=c++14 -verify=ref %s
-// RUN: %clang_cc1 -std=c++20 -verify=ref %s
+// RUN: %clang_cc1 -fexperimental-new-constant-interpreter -verify=expected,both %s
+// RUN: %clang_cc1 -std=c++14 -fexperimental-new-constant-interpreter -verify=expected,both %s
+// RUN: %clang_cc1 -std=c++20 -fexperimental-new-constant-interpreter -verify=expected,both %s
+// RUN: %clang_cc1 -verify=ref,both %s
+// RUN: %clang_cc1 -std=c++14 -verify=ref,both %s
+// RUN: %clang_cc1 -std=c++20 -verify=ref,both %s
constexpr void doNothing() {}
constexpr int gimme5() {
@@ -23,16 +23,13 @@ static_assert(!identity(false), "");
template<typename A, typename B>
constexpr bool sameSize() {
- static_assert(sizeof(A) == sizeof(B), ""); // expected-error {{static assertion failed}} \
- // ref-error {{static assertion failed}} \
- // expected-note {{evaluates to}} \
- // ref-note {{evaluates to}}
+ static_assert(sizeof(A) == sizeof(B), ""); // both-error {{static assertion failed}} \
+ // both-note {{evaluates to}}
return true;
}
static_assert(sameSize<int, int>(), "");
static_assert(sameSize<unsigned int, int>(), "");
-static_assert(sameSize<char, long>(), ""); // expected-note {{in instantiation of function template specialization}} \
- // ref-note {{in instantiation of function template specialization}}
+static_assert(sameSize<char, long>(), ""); // both-note {{in instantiation of function template specialization}}
constexpr auto add(int a, int b) -> int {
@@ -92,12 +89,9 @@ static_assert(getNum<-2>() == -2, "");
static_assert(getNum<10>() == 10, "");
static_assert(getNum() == 5, "");
-constexpr int f(); // expected-note {{declared here}} \
- // ref-note {{declared here}}
-static_assert(f() == 5, ""); // expected-error {{not an integral constant expression}} \
- // expected-note {{undefined function 'f'}} \
- // ref-error {{not an integral constant expression}} \
- // ref-note {{undefined function 'f'}}
+constexpr int f(); // both-note {{declared here}}
+static_assert(f() == 5, ""); // both-error {{not an integral constant expression}} \
+ // both-note {{undefined function 'f'}}
constexpr int a() {
return f();
}
@@ -108,17 +102,14 @@ static_assert(a() == 5, "");
constexpr int invalid() {
// Invalid expression in visit().
- while(huh) {} // expected-error {{use of undeclared identifier}} \
- // ref-error {{use of undeclared identifier}}
-
+ while(huh) {} // both-error {{use of undeclared identifier}}
return 0;
}
constexpr void invalid2() {
int i = 0;
// Invalid expression in discard().
- huh(); // expected-error {{use of undeclared identifier}} \
- // ref-error {{use of undeclared identifier}}
+ huh(); // both-error {{use of undeclared identifier}}
}
namespace FunctionPointers {
@@ -160,8 +151,7 @@ namespace FunctionReturnType {
constexpr ptr fun() {
return &fun1;
}
- static_assert(fun() == nullptr, ""); // expected-error {{static assertion failed}} \
- // ref-error {{static assertion failed}}
+ static_assert(fun() == nullptr, ""); // both-error {{static assertion failed}}
constexpr int foo() {
int (*f)(int *) = fun();
@@ -187,25 +177,24 @@ namespace FunctionReturnType {
static_assert(!!op, "");
constexpr int (*op2)(int, int) = nullptr;
static_assert(!op2, "");
+
+ int m() { return 5;} // both-note {{declared here}}
+ constexpr int (*invalidFnPtr)() = m;
+ static_assert(invalidFnPtr() == 5, ""); // both-error {{not an integral constant expression}} \
+ // both-note {{non-constexpr function 'm'}}
}
namespace Comparison {
void f(), g();
constexpr void (*pf)() = &f, (*pg)() = &g;
- constexpr bool u13 = pf < pg; // ref-warning {{ordered comparison of function pointers}} \
- // ref-error {{must be initialized by a constant expression}} \
- // ref-note {{comparison between '&f' and '&g' has unspecified value}} \
- // expected-warning {{ordered comparison of function pointers}} \
- // expected-error {{must be initialized by a constant expression}} \
- // expected-note {{comparison between '&f' and '&g' has unspecified value}}
+ constexpr bool u13 = pf < pg; // both-warning {{ordered comparison of function pointers}} \
+ // both-error {{must be initialized by a constant expression}} \
+ // both-note {{comparison between '&f' and '&g' has unspecified value}}
- constexpr bool u14 = pf < (void(*)())nullptr; // ref-warning {{ordered comparison of function pointers}} \
- // ref-error {{must be initialized by a constant expression}} \
- // ref-note {{comparison between '&f' and 'nullptr' has unspecified value}} \
- // expected-warning {{ordered comparison of function pointers}} \
- // expected-error {{must be initialized by a constant expression}} \
- // expected-note {{comparison between '&f' and 'nullptr' has unspecified value}}
+ constexpr bool u14 = pf < (void(*)())nullptr; // both-warning {{ordered comparison of function pointers}} \
+ // both-error {{must be initialized by a constant expression}} \
+ // both-note {{comparison between '&f' and 'nullptr' has unspecified value}}
@@ -241,31 +230,22 @@ static_assert(doit() == 10, "");
namespace InvalidCall {
struct S {
- constexpr int a() const { // expected-error {{never produces a constant expression}} \
- // ref-error {{never produces a constant expression}}
- return 1 / 0; // expected-note 2{{division by zero}} \
- // expected-warning {{is undefined}} \
- // ref-note 2{{division by zero}} \
- // ref-warning {{is undefined}}
+ constexpr int a() const { // both-error {{never produces a constant expression}}
+ return 1 / 0; // both-note 2{{division by zero}} \
+ // both-warning {{is undefined}}
}
};
constexpr S s;
- static_assert(s.a() == 1, ""); // expected-error {{not an integral constant expression}} \
- // expected-note {{in call to}} \
- // ref-error {{not an integral constant expression}} \
- // ref-note {{in call to}}
+ static_assert(s.a() == 1, ""); // both-error {{not an integral constant expression}} \
+ // both-note {{in call to}}
/// This used to cause an assertion failure in the new constant interpreter.
- constexpr void func(); // expected-note {{declared here}} \
- // ref-note {{declared here}}
+ constexpr void func(); // both-note {{declared here}}
struct SS {
- constexpr SS() { func(); } // expected-note {{undefined function }} \
- // ref-note {{undefined function}}
+ constexpr SS() { func(); } // both-note {{undefined function }}
};
- constexpr SS ss; // expected-error {{must be initialized by a constant expression}} \
- // expected-note {{in call to 'SS()'}} \
- // ref-error {{must be initialized by a constant expression}} \
- // ref-note {{in call to 'SS()'}}
+ constexpr SS ss; // both-error {{must be initialized by a constant expression}} \
+ // both-note {{in call to 'SS()'}}
/// This should not emit a diagnostic.
@@ -291,8 +271,7 @@ namespace CallWithArgs {
namespace ReturnLocalPtr {
constexpr int *p() {
int a = 12;
- return &a; // ref-warning {{address of stack memory}} \
- // expected-warning {{address of stack memory}}
+ return &a; // both-warning {{address of stack memory}}
}
/// GCC rejects the expression below, just like the new interpreter. The current interpreter
@@ -305,13 +284,11 @@ namespace ReturnLocalPtr {
/// new one does not.
constexpr const int &p2() {
int a = 12; // ref-note {{declared here}}
- return a; // ref-warning {{reference to stack memory associated with local variable}} \
- // expected-warning {{reference to stack memory associated with local variable}}
+ return a; // both-warning {{reference to stack memory associated with local variable}}
}
- static_assert(p2() == 12, ""); // ref-error {{not an integral constant expression}} \
- // ref-note {{read of variable whose lifetime has ended}} \
- // expected-error {{not an integral constant expression}}
+ static_assert(p2() == 12, ""); // both-error {{not an integral constant expression}} \
+ // ref-note {{read of variable whose lifetime has ended}}
}
namespace VoidReturn {
@@ -324,22 +301,16 @@ namespace VoidReturn {
}
namespace InvalidReclRefs {
- void param(bool b) { // ref-note {{declared here}} \
- // expected-note {{declared here}}
- static_assert(b, ""); // ref-error {{not an integral constant expression}} \
- // ref-note {{function parameter 'b' with unknown value}} \
- // expected-error {{not an integral constant expression}} \
- // expected-note {{function parameter 'b' with unknown value}}
+ void param(bool b) { // both-note {{declared here}}
+ static_assert(b, ""); // both-error {{not an integral constant expression}} \
+ // both-note {{function parameter 'b' with unknown value}}
static_assert(true ? true : b, "");
}
#if __cplusplus >= 202002L
- consteval void param2(bool b) { // ref-note {{declared here}} \
- // expected-note {{declared here}}
- static_assert(b, ""); // ref-error {{not an integral constant expression}} \
- // ref-note {{function parameter 'b' with unknown value}} \
- // expected-error {{not an integral constant expression}} \
- // expected-note {{function parameter 'b' with unknown value}}
+ consteval void param2(bool b) { // both-note {{declared here}}
+ static_assert(b, ""); // both-error {{not an integral constant expression}} \
+ // both-note {{function parameter 'b' with unknown value}}
}
#endif
}
@@ -381,6 +352,81 @@ namespace Variadic {
constexpr int (*VFP)(...) = variadic_function2;
static_assert(VFP() == 12, "");
+
+ /// Member functions
+ struct Foo {
+ int a = 0;
+ constexpr void bla(...) {}
+ constexpr S bla2(...) {
+ return S{12, true};
+ }
+ constexpr Foo(...) : a(1337) {}
+ constexpr Foo(void *c, bool b, void*p, ...) : a('a' + b) {}
+ constexpr Foo(int a, const S* s, ...) : a(a) {}
+ };
+
+ constexpr int foo2() {
+ Foo f(1, nullptr);
+ auto s = f.bla2(1, 2, S{1, false});
+ return s.a + s.b;
+ }
+ static_assert(foo2() == 13, "");
+
+ constexpr Foo _f = 123;
+ static_assert(_f.a == 1337, "");
+
+ constexpr Foo __f(nullptr, false, nullptr, nullptr, 'a', Foo());
+ static_assert(__f.a == 'a', "");
+
+
+#if __cplusplus >= 202002L
+namespace VariadicVirtual {
+ class A {
+ public:
+ constexpr virtual void foo(int &a, ...) {
+ a = 1;
+ }
+ };
+
+ class B : public A {
+ public:
+ constexpr void foo(int &a, ...) override {
+ a = 2;
+ }
+ };
+
+ constexpr int foo() {
+ B b;
+ int a;
+ b.foo(a, 1,2,nullptr);
+ return a;
+ }
+ static_assert(foo() == 2, "");
+} // VariadicVirtual
+
+namespace VariadicQualified {
+ class A {
+ public:
+ constexpr virtual int foo(...) const {
+ return 5;
+ }
+ };
+ class B : public A {};
+ class C : public B {
+ public:
+ constexpr int foo(...) const override {
+ return B::foo(1,2,3); // B doesn't have a foo(), so this should call A::foo().
+ }
+ constexpr int foo2() const {
+ return this->A::foo(1,2,3,this);
+ }
+ };
+ constexpr C c;
+ static_assert(c.foo() == 5);
+ static_assert(c.foo2() == 5);
+} // VariadicQualified
+#endif
+
}
namespace Packs {
@@ -399,13 +445,10 @@ namespace AddressOf {
static_assert(&pt->n == &t.n, "");
struct U { int n : 5; } u;
- int *pbf = __builtin_addressof(u.n); // expected-error {{address of bit-field requested}} \
- // ref-error {{address of bit-field requested}}
+ int *pbf = __builtin_addressof(u.n); // both-error {{address of bit-field requested}}
- S *ptmp = __builtin_addressof(S{}); // expected-error {{taking the address of a temporary}} \
- // expected-warning {{temporary whose address is used as value of local variable 'ptmp' will be destroyed at the end of the full-expression}} \
- // ref-error {{taking the address of a temporary}} \
- // ref-warning {{temporary whose address is used as value of local variable 'ptmp' will be destroyed at the end of the full-expression}}
+ S *ptmp = __builtin_addressof(S{}); // both-error {{taking the address of a temporary}} \
+ // both-warning {{temporary whose address is used as value of local variable 'ptmp' will be destroyed at the end of the full-expression}}
constexpr int foo() {return 1;}
static_assert(__builtin_addressof(foo) == foo, "");
@@ -426,8 +469,7 @@ constexpr typename std::remove_reference<T>::type&& move(T &&t) noexcept {
/// The std::move declaration above gets translated to a builtin function.
namespace Move {
#if __cplusplus >= 202002L
- consteval int f_eval() { // expected-note 12{{declared here}} \
- // ref-note 12{{declared here}}
+ consteval int f_eval() { // both-note 12{{declared here}}
return 0;
}
@@ -447,56 +489,35 @@ namespace Move {
// there is no the copy constructor call when its argument is a prvalue because of garanteed copy elision.
// so we need to test with both prvalue and xvalues.
{ Copy c(C); }
- { Copy c((Copy(&f_eval))); } // expected-error {{cannot take address of consteval}} \
- // ref-error {{cannot take address of consteval}}
+ { Copy c((Copy(&f_eval))); } // both-error {{cannot take address of consteval}}
{ Copy c(std::move(C)); }
- { Copy c(std::move(Copy(&f_eval))); } // expected-error {{is not a constant expression}} \
- // expected-note {{to a consteval}} \
- // ref-error {{is not a constant expression}} \
- // ref-note {{to a consteval}}
- { Copy c(to_lvalue_ref((Copy(&f_eval)))); } // expected-error {{is not a constant expression}} \
- // expected-note {{to a consteval}} \
- // ref-error {{is not a constant expression}} \
- // ref-note {{to a consteval}}
+ { Copy c(std::move(Copy(&f_eval))); } // both-error {{is not a constant expression}} \
+ // both-note {{to a consteval}}
+ { Copy c(to_lvalue_ref((Copy(&f_eval)))); } // both-error {{is not a constant expression}} \
+ // both-note {{to a consteval}}
{ Copy c(to_lvalue_ref(std::move(C))); }
- { Copy c(to_lvalue_ref(std::move(Copy(&f_eval)))); } // expected-error {{is not a constant expression}} \
- // expected-note {{to a consteval}} \
- // ref-error {{is not a constant expression}} \
- // ref-note {{to a consteval}}
+ { Copy c(to_lvalue_ref(std::move(Copy(&f_eval)))); } // both-error {{is not a constant expression}} \
+ // both-note {{to a consteval}}
{ Copy c = Copy(C); }
- { Copy c = Copy(Copy(&f_eval)); } // expected-error {{cannot take address of consteval}} \
- // ref-error {{cannot take address of consteval}}
+ { Copy c = Copy(Copy(&f_eval)); } // both-error {{cannot take address of consteval}}
{ Copy c = Copy(std::move(C)); }
- { Copy c = Copy(std::move(Copy(&f_eval))); } // expected-error {{is not a constant expression}} \
- // expected-note {{to a consteval}} \
- // ref-error {{is not a constant expression}} \
- // ref-note {{to a consteval}}
- { Copy c = Copy(to_lvalue_ref(Copy(&f_eval))); } // expected-error {{is not a constant expression}} \
- // expected-note {{to a consteval}} \
- // ref-error {{is not a constant expression}} \
- // ref-note {{to a consteval}}
+ { Copy c = Copy(std::move(Copy(&f_eval))); } // both-error {{is not a constant expression}} \
+ // both-note {{to a consteval}}
+ { Copy c = Copy(to_lvalue_ref(Copy(&f_eval))); } // both-error {{is not a constant expression}} \
+ // both-note {{to a consteval}}
{ Copy c = Copy(to_lvalue_ref(std::move(C))); }
- { Copy c = Copy(to_lvalue_ref(std::move(Copy(&f_eval)))); } // expected-error {{is not a constant expression}} \
- // expected-note {{to a consteval}} \
- // ref-error {{is not a constant expression}} \
- // ref-note {{to a consteval}}
+ { Copy c = Copy(to_lvalue_ref(std::move(Copy(&f_eval)))); } // both-error {{is not a constant expression}} \
+ // both-note {{to a consteval}}
{ Copy c; c = Copy(C); }
- { Copy c; c = Copy(Copy(&f_eval)); } // expected-error {{cannot take address of consteval}} \
- // ref-error {{cannot take address of consteval}}
+ { Copy c; c = Copy(Copy(&f_eval)); } // both-error {{cannot take address of consteval}}
{ Copy c; c = Copy(std::move(C)); }
- { Copy c; c = Copy(std::move(Copy(&f_eval))); } // expected-error {{is not a constant expression}} \
- // expected-note {{to a consteval}} \
- // ref-error {{is not a constant expression}} \
- // ref-note {{to a consteval}}
- { Copy c; c = Copy(to_lvalue_ref(Copy(&f_eval))); } // expected-error {{is not a constant expression}} \
- // expected-note {{to a consteval}} \
- // ref-error {{is not a constant expression}} \
- // ref-note {{to a consteval}}
+ { Copy c; c = Copy(std::move(Copy(&f_eval))); } // both-error {{is not a constant expression}} \
+ // both-note {{to a consteval}}
+ { Copy c; c = Copy(to_lvalue_ref(Copy(&f_eval))); } // both-error {{is not a constant expression}} \
+ // both-note {{to a consteval}}
{ Copy c; c = Copy(to_lvalue_ref(std::move(C))); }
- { Copy c; c = Copy(to_lvalue_ref(std::move(Copy(&f_eval)))); } // expected-error {{is not a constant expression}} \
- // expected-note {{to a consteval}} \
- // ref-error {{is not a constant expression}} \
- // ref-note {{to a consteval}}
+ { Copy c; c = Copy(to_lvalue_ref(std::move(Copy(&f_eval)))); } // both-error {{is not a constant expression}} \
+ // both-note {{to a consteval}}
}
#endif
constexpr int A = std::move(5);
diff --git a/clang/test/AST/Interp/literals.cpp b/clang/test/AST/Interp/literals.cpp
index f5b5f77..bc994c3 100644
--- a/clang/test/AST/Interp/literals.cpp
+++ b/clang/test/AST/Interp/literals.cpp
@@ -915,6 +915,13 @@ static_assert(ignoredDecls() == 12, "");
namespace DiscardExprs {
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wunused-value"
+ typedef struct _GUID {
+ __UINT32_TYPE__ Data1;
+ __UINT16_TYPE__ Data2;
+ __UINT16_TYPE__ Data3;
+ __UINT8_TYPE__ Data4[8];
+ } GUID;
+ class __declspec(uuid("000000A0-0000-0000-C000-000000000049")) GuidType;
struct A{ int a; };
constexpr int ignoredExprs() {
@@ -951,6 +958,8 @@ namespace DiscardExprs {
(float)1;
(double)1.0f;
(signed)4u;
+ __uuidof(GuidType);
+ __uuidof(number); // both-error {{cannot call operator __uuidof on a type with no GUID}}
return 0;
}
@@ -1105,3 +1114,13 @@ namespace NonConstReads {
static_assert(z == 0, ""); // both-error {{not an integral constant expression}} \
// both-note {{read of non-const variable 'z'}}
}
+
+/// This test passes a MaterializedTemporaryExpr to evaluateAsRValue.
+/// That needs to return a null pointer after the lvalue-to-rvalue conversion.
+/// We used to fail to do that.
+namespace rdar8769025 {
+ __attribute__((nonnull)) void f1(int * const &p);
+ void test_f1() {
+ f1(0); // both-warning{{null passed to a callee that requires a non-null argument}}
+ }
+}
diff --git a/clang/test/Analysis/Checkers/WebKit/call-args-protected-return-value.cpp b/clang/test/Analysis/Checkers/WebKit/call-args-protected-return-value.cpp
new file mode 100644
index 0000000..1c4b3df
--- /dev/null
+++ b/clang/test/Analysis/Checkers/WebKit/call-args-protected-return-value.cpp
@@ -0,0 +1,23 @@
+// RUN: %clang_analyze_cc1 -analyzer-checker=alpha.webkit.UncountedCallArgsChecker -verify %s
+// expected-no-diagnostics
+
+#include "mock-types.h"
+
+class RefCounted {
+public:
+ void ref();
+ void deref();
+};
+
+class Object {
+public:
+ void someFunction(RefCounted&);
+};
+
+RefPtr<Object> object();
+RefPtr<RefCounted> protectedTargetObject();
+
+void testFunction() {
+ if (RefPtr obj = object())
+ obj->someFunction(*protectedTargetObject());
+}
diff --git a/clang/test/Analysis/Checkers/WebKit/call-args-dynamic-downcast.cpp b/clang/test/Analysis/Checkers/WebKit/call-args-safe-functions.cpp
index 2815662..a874465 100644
--- a/clang/test/Analysis/Checkers/WebKit/call-args-dynamic-downcast.cpp
+++ b/clang/test/Analysis/Checkers/WebKit/call-args-safe-functions.cpp
@@ -23,13 +23,34 @@ public:
Derived* obj();
};
+class String {
+};
+
template<typename Target, typename Source>
inline Target* dynamicDowncast(Source* source)
{
return static_cast<Target*>(source);
}
+template<typename Target, typename Source>
+inline Target* checkedDowncast(Source* source)
+{
+ return static_cast<Target*>(source);
+}
+
+template<typename Target, typename Source>
+inline Target* uncheckedDowncast(Source* source)
+{
+ return static_cast<Target*>(source);
+}
+
+template<typename... Types>
+String toString(const Types&... values);
+
void foo(OtherObject* other)
{
dynamicDowncast<SubDerived>(other->obj());
+ checkedDowncast<SubDerived>(other->obj());
+ uncheckedDowncast<SubDerived>(other->obj());
+ toString(other->obj());
}
diff --git a/clang/test/Analysis/Checkers/WebKit/call-args.cpp b/clang/test/Analysis/Checkers/WebKit/call-args.cpp
index e5c4988..f2e1f9b 100644
--- a/clang/test/Analysis/Checkers/WebKit/call-args.cpp
+++ b/clang/test/Analysis/Checkers/WebKit/call-args.cpp
@@ -2,8 +2,9 @@
#include "mock-types.h"
-RefCountable* provide() { return nullptr; }
-void consume_refcntbl(RefCountable*) {}
+RefCountable* provide();
+void consume_refcntbl(RefCountable*);
+void some_function();
namespace simple {
void foo() {
@@ -19,7 +20,7 @@ namespace simple {
}
namespace multi_arg {
- void consume_refcntbl(int, RefCountable* foo, bool) {}
+ void consume_refcntbl(int, RefCountable* foo, bool);
void foo() {
consume_refcntbl(42, provide(), true);
// expected-warning@-1{{Call argument for parameter 'foo' is uncounted and unsafe}}
@@ -38,8 +39,8 @@ namespace ref_counted {
namespace methods {
struct Consumer {
- void consume_ptr(RefCountable* ptr) {}
- void consume_ref(const RefCountable& ref) {}
+ void consume_ptr(RefCountable* ptr);
+ void consume_ref(const RefCountable& ref);
};
void foo() {
@@ -53,7 +54,7 @@ namespace methods {
void foo2() {
struct Consumer {
- void consume(RefCountable*) { }
+ void consume(RefCountable*) { some_function(); }
void whatever() {
consume(provide());
// expected-warning@-1{{Call argument is uncounted and unsafe}}
@@ -63,7 +64,7 @@ namespace methods {
void foo3() {
struct Consumer {
- void consume(RefCountable*) { }
+ void consume(RefCountable*) { some_function(); }
void whatever() {
this->consume(provide());
// expected-warning@-1{{Call argument is uncounted and unsafe}}
@@ -73,7 +74,7 @@ namespace methods {
}
namespace casts {
- RefCountable* downcast(RefCountable*) { return nullptr; }
+ RefCountable* downcast(RefCountable*);
void foo() {
consume_refcntbl(provide());
@@ -145,8 +146,8 @@ namespace Ref_to_reference_conversion_operator {
}
namespace param_formarding_function {
- void consume_ref_countable_ref(RefCountable&) {}
- void consume_ref_countable_ptr(RefCountable*) {}
+ void consume_ref_countable_ref(RefCountable&);
+ void consume_ref_countable_ptr(RefCountable*);
namespace ptr {
void foo(RefCountable* param) {
@@ -185,8 +186,8 @@ namespace param_formarding_function {
}
namespace param_formarding_lambda {
- auto consume_ref_countable_ref = [](RefCountable&) {};
- auto consume_ref_countable_ptr = [](RefCountable*) {};
+ auto consume_ref_countable_ref = [](RefCountable&) { some_function(); };
+ auto consume_ref_countable_ptr = [](RefCountable*) { some_function(); };
namespace ptr {
void foo(RefCountable* param) {
@@ -304,7 +305,7 @@ namespace string_impl {
namespace default_arg {
RefCountable* global;
- void function_with_default_arg(RefCountable* param = global) {}
+ void function_with_default_arg(RefCountable* param = global);
// expected-warning@-1{{Call argument for parameter 'param' is uncounted and unsafe}}
void foo() {
@@ -315,9 +316,9 @@ namespace default_arg {
namespace cxx_member_operator_call {
// The hidden this-pointer argument without a corresponding parameter caused couple bugs in parameter <-> argument attribution.
struct Foo {
- Foo& operator+(RefCountable* bad) { return *this; }
- friend Foo& operator-(Foo& lhs, RefCountable* bad) { return lhs; }
- void operator()(RefCountable* bad) { }
+ Foo& operator+(RefCountable* bad);
+ friend Foo& operator-(Foo& lhs, RefCountable* bad);
+ void operator()(RefCountable* bad);
};
RefCountable* global;
diff --git a/clang/test/Analysis/Checkers/WebKit/ref-cntbl-base-virtual-dtor.cpp b/clang/test/Analysis/Checkers/WebKit/ref-cntbl-base-virtual-dtor.cpp
index 1fc59c1..5cf7e76 100644
--- a/clang/test/Analysis/Checkers/WebKit/ref-cntbl-base-virtual-dtor.cpp
+++ b/clang/test/Analysis/Checkers/WebKit/ref-cntbl-base-virtual-dtor.cpp
@@ -13,7 +13,17 @@ struct DerivedWithVirtualDtor : RefCntblBase {
virtual ~DerivedWithVirtualDtor() {}
};
+// Confirm that the checker respects [[clang::suppress]]
+struct [[clang::suppress]] SuppressedDerived : RefCntblBase { };
+struct [[clang::suppress]] SuppressedDerivedWithVirtualDtor : RefCntblBase {
+ virtual ~SuppressedDerivedWithVirtualDtor() {}
+};
+// FIXME: Support attributes on base specifiers? Currently clang
+// doesn't support such attributes at all, even though it knows
+// how to parse them.
+//
+// struct SuppressedBaseSpecDerived : [[clang::suppress]] RefCntblBase { };
template<class T>
struct DerivedClassTmpl : T { };
diff --git a/clang/test/Analysis/Checkers/WebKit/uncounted-lambda-captures.cpp b/clang/test/Analysis/Checkers/WebKit/uncounted-lambda-captures.cpp
index 3079879..27e0a74 100644
--- a/clang/test/Analysis/Checkers/WebKit/uncounted-lambda-captures.cpp
+++ b/clang/test/Analysis/Checkers/WebKit/uncounted-lambda-captures.cpp
@@ -15,6 +15,11 @@ void raw_ptr() {
// CHECK-NEXT:{{^ | }} ^
auto foo4 = [=](){ (void) ref_countable; };
// CHECK: warning: Implicitly captured raw-pointer 'ref_countable' to uncounted type is unsafe [webkit.UncountedLambdaCapturesChecker]
+
+ // Confirm that the checker respects [[clang::suppress]].
+ RefCountable* suppressed_ref_countable = nullptr;
+ [[clang::suppress]] auto foo5 = [suppressed_ref_countable](){};
+ // CHECK-NOT: warning: Captured raw-pointer 'suppressed_ref_countable' to uncounted type is unsafe [webkit.UncountedLambdaCapturesChecker]
}
void references() {
diff --git a/clang/test/Analysis/Checkers/WebKit/uncounted-local-vars.cpp b/clang/test/Analysis/Checkers/WebKit/uncounted-local-vars.cpp
index 8694d5f..0fcd3b2 100644
--- a/clang/test/Analysis/Checkers/WebKit/uncounted-local-vars.cpp
+++ b/clang/test/Analysis/Checkers/WebKit/uncounted-local-vars.cpp
@@ -60,6 +60,7 @@ class Foo {
// expected-warning@-1{{Local variable 'baz' is uncounted and unsafe [alpha.webkit.UncountedLocalVarsChecker]}}
auto *baz2 = this->provide_ref_ctnbl();
// expected-warning@-1{{Local variable 'baz2' is uncounted and unsafe [alpha.webkit.UncountedLocalVarsChecker]}}
+ [[clang::suppress]] auto *baz_suppressed = provide_ref_ctnbl(); // no-warning
}
};
} // namespace auto_keyword
diff --git a/clang/test/Analysis/Checkers/WebKit/uncounted-members.cpp b/clang/test/Analysis/Checkers/WebKit/uncounted-members.cpp
index a0ea61e..108d5ef 100644
--- a/clang/test/Analysis/Checkers/WebKit/uncounted-members.cpp
+++ b/clang/test/Analysis/Checkers/WebKit/uncounted-members.cpp
@@ -8,6 +8,9 @@ namespace members {
RefCountable* a = nullptr;
// expected-warning@-1{{Member variable 'a' in 'members::Foo' is a raw pointer to ref-countable type 'RefCountable'}}
+ [[clang::suppress]]
+ RefCountable* a_suppressed = nullptr;
+
protected:
RefPtr<RefCountable> b;
@@ -25,8 +28,14 @@ namespace members {
};
void forceTmplToInstantiate(FooTmpl<RefCountable>) {}
+
+ struct [[clang::suppress]] FooSuppressed {
+ private:
+ RefCountable* a = nullptr;
+ };
}
+
namespace ignore_unions {
union Foo {
RefCountable* a;
diff --git a/clang/test/Analysis/Checkers/WebKit/uncounted-obj-arg.cpp b/clang/test/Analysis/Checkers/WebKit/uncounted-obj-arg.cpp
new file mode 100644
index 0000000..156a248
--- /dev/null
+++ b/clang/test/Analysis/Checkers/WebKit/uncounted-obj-arg.cpp
@@ -0,0 +1,251 @@
+// RUN: %clang_analyze_cc1 -analyzer-checker=alpha.webkit.UncountedCallArgsChecker -verify %s
+
+#include "mock-types.h"
+//#include <type_traits>
+
+void WTFBreakpointTrap();
+void WTFCrashWithInfo(int, const char*, const char*, int);
+
+inline void compilerFenceForCrash()
+{
+ asm volatile("" ::: "memory");
+}
+
+inline void isIntegralOrPointerType() { }
+
+template<typename T, typename... Types>
+void isIntegralOrPointerType(T, Types... types)
+{
+ static_assert(sizeof(char) < sizeof(short), "All types need to be bitwise_cast-able to integral type for logging");
+ isIntegralOrPointerType(types...);
+}
+
+#define CRASH_WITH_INFO(...) do { \
+ isIntegralOrPointerType(__VA_ARGS__); \
+ compilerFenceForCrash(); \
+ WTFBreakpointTrap(); \
+ __builtin_unreachable(); \
+} while (0)
+
+#define RELEASE_ASSERT(assertion, ...) do { \
+ if (!(assertion)) \
+ CRASH_WITH_INFO(__VA_ARGS__); \
+} while (0)
+
+#if !defined(NOT_TAIL_CALLED)
+#if __has_attribute(not_tail_called)
+#define NOT_TAIL_CALLED __attribute__((not_tail_called))
+#else
+#define NOT_TAIL_CALLED
+#endif
+#endif
+#define NO_RETURN_DUE_TO_CRASH
+
+#if !defined(ALWAYS_INLINE)
+#define ALWAYS_INLINE inline
+#endif
+
+NO_RETURN_DUE_TO_CRASH NOT_TAIL_CALLED void WTFCrashWithInfoImpl(int line, const char* file, const char* function, int counter, unsigned long reason);
+NO_RETURN_DUE_TO_CRASH NOT_TAIL_CALLED void WTFCrashWithInfo(int line, const char* file, const char* function, int counter);
+
+template<typename T>
+ALWAYS_INLINE unsigned long wtfCrashArg(T* arg) { return reinterpret_cast<unsigned long>(arg); }
+
+template<typename T>
+ALWAYS_INLINE unsigned long wtfCrashArg(T arg) { return arg; }
+
+template<typename T>
+NO_RETURN_DUE_TO_CRASH ALWAYS_INLINE void WTFCrashWithInfo(int line, const char* file, const char* function, int counter, T reason)
+{
+ WTFCrashWithInfoImpl(line, file, function, counter, wtfCrashArg(reason));
+}
+
+class Number {
+public:
+ Number(int v) : v(v) { }
+ Number(double);
+ Number operator+(const Number&);
+private:
+ int v;
+};
+
+class RefCounted {
+public:
+ void ref() const;
+ void deref() const;
+
+ void someFunction();
+ int otherFunction();
+
+ int trivial1() { return 123; }
+ float trivial2() { return 0.3; }
+ float trivial3() { return (float)0.4; }
+ float trivial4() { return 0.5f; }
+ char trivial5() { return 'a'; }
+ const char *trivial6() { return "abc"; }
+ int trivial7() { return (1); }
+ Number trivial8() { return Number { 5 }; }
+ int trivial9() { return 3 + 4; }
+ int trivial10() { return 0x1010 | 0x1; }
+ int trivial11(int v) { return v + 1; }
+ const char *trivial12(char *p) { return p ? "str" : "null"; }
+ int trivial13(int v) {
+ if (v)
+ return 123;
+ else
+ return 0;
+ }
+ int trivial14(int v) {
+ switch (v) {
+ case 1:
+ return 100;
+ case 2:
+ return 200;
+ default:
+ return 300;
+ }
+ return 0;
+ }
+ void *trivial15() { return static_cast<void*>(this); }
+ unsigned long trivial16() { return reinterpret_cast<unsigned long>(this); }
+ RefCounted& trivial17() const { return const_cast<RefCounted&>(*this); }
+ RefCounted& trivial18() const { RELEASE_ASSERT(this, "this must be not null"); return const_cast<RefCounted&>(*this); }
+ void trivial19() const { return; }
+
+ static RefCounted& singleton() {
+ static RefCounted s_RefCounted;
+ s_RefCounted.ref();
+ return s_RefCounted;
+ }
+
+ Number nonTrivial1() { return Number(3) + Number(4); }
+ Number nonTrivial2() { return Number { 0.3 }; }
+ int nonTrivial3() { return v ? otherFunction() : 0; }
+ int nonTrivial4() {
+ if (v)
+ return 8;
+ else
+ return otherFunction();
+ }
+
+ int nonTrivial5() {
+ if (v)
+ return otherFunction();
+ else
+ return 9;
+ }
+
+ int nonTrivial6() {
+ if (otherFunction())
+ return 1;
+ else
+ return 0;
+ }
+
+ int nonTrivial7() {
+ switch (v) {
+ case 1:
+ return otherFunction();
+ default:
+ return 7;
+ }
+ }
+
+ int nonTrivial8() {
+ switch (v) {
+ case 1:
+ return 9;
+ default:
+ return otherFunction();
+ }
+ }
+
+ int nonTrivial9() {
+ switch (otherFunction()) {
+ case 0:
+ return -1;
+ default:
+ return 12;
+ }
+ }
+
+ unsigned v { 0 };
+};
+
+RefCounted* refCountedObj();
+
+void test()
+{
+ refCountedObj()->someFunction();
+ // expected-warning@-1{{Call argument for 'this' parameter is uncounted and unsafe}}
+}
+
+class UnrelatedClass {
+ RefPtr<RefCounted> Field;
+ bool value;
+
+public:
+ RefCounted &getFieldTrivial() { return *Field.get(); }
+ RefCounted *getFieldTernary() { return value ? Field.get() : nullptr; }
+
+ void test() {
+ getFieldTrivial().trivial1(); // no-warning
+ getFieldTrivial().trivial2(); // no-warning
+ getFieldTrivial().trivial3(); // no-warning
+ getFieldTrivial().trivial4(); // no-warning
+ getFieldTrivial().trivial5(); // no-warning
+ getFieldTrivial().trivial6(); // no-warning
+ getFieldTrivial().trivial7(); // no-warning
+ getFieldTrivial().trivial8(); // no-warning
+ getFieldTrivial().trivial9(); // no-warning
+ getFieldTrivial().trivial10(); // no-warning
+ getFieldTrivial().trivial11(1); // no-warning
+ getFieldTrivial().trivial12(nullptr); // no-warning
+ getFieldTrivial().trivial13(0); // no-warning
+ getFieldTrivial().trivial14(3); // no-warning
+ getFieldTrivial().trivial15(); // no-warning
+ getFieldTrivial().trivial16(); // no-warning
+ getFieldTrivial().trivial17(); // no-warning
+ getFieldTrivial().trivial18(); // no-warning
+ getFieldTrivial().trivial19(); // no-warning
+ RefCounted::singleton().trivial18(); // no-warning
+ RefCounted::singleton().someFunction(); // no-warning
+
+ getFieldTrivial().someFunction();
+ // expected-warning@-1{{Call argument for 'this' parameter is uncounted and unsafe}}
+ getFieldTrivial().nonTrivial1();
+ // expected-warning@-1{{Call argument for 'this' parameter is uncounted and unsafe}}
+ getFieldTrivial().nonTrivial2();
+ // expected-warning@-1{{Call argument for 'this' parameter is uncounted and unsafe}}
+ getFieldTrivial().nonTrivial3();
+ // expected-warning@-1{{Call argument for 'this' parameter is uncounted and unsafe}}
+ getFieldTrivial().nonTrivial4();
+ // expected-warning@-1{{Call argument for 'this' parameter is uncounted and unsafe}}
+ getFieldTrivial().nonTrivial5();
+ // expected-warning@-1{{Call argument for 'this' parameter is uncounted and unsafe}}
+ getFieldTrivial().nonTrivial6();
+ // expected-warning@-1{{Call argument for 'this' parameter is uncounted and unsafe}}
+ getFieldTrivial().nonTrivial7();
+ // expected-warning@-1{{Call argument for 'this' parameter is uncounted and unsafe}}
+ getFieldTrivial().nonTrivial8();
+ // expected-warning@-1{{Call argument for 'this' parameter is uncounted and unsafe}}
+ getFieldTrivial().nonTrivial9();
+ // expected-warning@-1{{Call argument for 'this' parameter is uncounted and unsafe}}
+ }
+};
+
+class UnrelatedClass2 {
+ RefPtr<UnrelatedClass> Field;
+
+public:
+ UnrelatedClass &getFieldTrivial() { return *Field.get(); }
+ RefCounted &getFieldTrivialRecursively() { return getFieldTrivial().getFieldTrivial(); }
+ RefCounted *getFieldTrivialTernary() { return Field ? Field->getFieldTernary() : nullptr; }
+
+ void test() {
+ getFieldTrivialRecursively().trivial1(); // no-warning
+ getFieldTrivialTernary()->trivial2(); // no-warning
+ getFieldTrivialRecursively().someFunction();
+ // expected-warning@-1{{Call argument for 'this' parameter is uncounted and unsafe}}
+ }
+};
diff --git a/clang/test/Analysis/ObjCRetSigs.m b/clang/test/Analysis/ObjCRetSigs.m
index 97d33f9..f92506a 100644
--- a/clang/test/Analysis/ObjCRetSigs.m
+++ b/clang/test/Analysis/ObjCRetSigs.m
@@ -4,10 +4,12 @@ int printf(const char *, ...);
@interface MyBase
-(long long)length;
+-(long long)suppressedLength;
@end
@interface MySub : MyBase{}
-(double)length;
+-(double)suppressedLength;
@end
@implementation MyBase
@@ -15,6 +17,10 @@ int printf(const char *, ...);
printf("Called MyBase -length;\n");
return 3;
}
+-(long long)suppressedLength{
+ printf("Called MyBase -length;\n");
+ return 3;
+}
@end
@implementation MySub
@@ -22,4 +28,8 @@ int printf(const char *, ...);
printf("Called MySub -length;\n");
return 3.3;
}
+-(double)suppressedLength [[clang::suppress]]{ // no-warning
+ printf("Called MySub -length;\n");
+ return 3.3;
+}
@end
diff --git a/clang/test/Analysis/objc_invalidation.m b/clang/test/Analysis/objc_invalidation.m
index 52a79d8..e61b089 100644
--- a/clang/test/Analysis/objc_invalidation.m
+++ b/clang/test/Analysis/objc_invalidation.m
@@ -257,6 +257,17 @@ extern void NSLog(NSString *format, ...) __attribute__((format(__NSString__, 1,
@implementation MissingInvalidationMethod
@end
+@interface SuppressedMissingInvalidationMethod : Foo <FooBar_Protocol>
+@property (assign) [[clang::suppress]] SuppressedMissingInvalidationMethod *foobar16_warn;
+// FIXME: Suppression should have worked but decl-with-issue is the ivar, not the property.
+#if RUN_IVAR_INVALIDATION
+// expected-warning@-3 {{Property foobar16_warn needs to be invalidated; no invalidation method is defined in the @implementation for SuppressedMissingInvalidationMethod}}
+#endif
+
+@end
+@implementation SuppressedMissingInvalidationMethod
+@end
+
@interface MissingInvalidationMethod2 : Foo <FooBar_Protocol> {
Foo *Ivar1;
#if RUN_IVAR_INVALIDATION
@@ -290,8 +301,10 @@ extern void NSLog(NSString *format, ...) __attribute__((format(__NSString__, 1,
@end
@interface InvalidatedInPartial : SomeInvalidationImplementingObject {
- SomeInvalidationImplementingObject *Ivar1;
- SomeInvalidationImplementingObject *Ivar2;
+ SomeInvalidationImplementingObject *Ivar1;
+ SomeInvalidationImplementingObject *Ivar2;
+ [[clang::suppress]]
+ SomeInvalidationImplementingObject *Ivar3; // no-warning
}
-(void)partialInvalidator __attribute__((annotate("objc_instance_variable_invalidator_partial")));
@end
diff --git a/clang/test/Analysis/scan-build/html_output.test b/clang/test/Analysis/scan-build/html_output.test
index eed2051..add35d8 100644
--- a/clang/test/Analysis/scan-build/html_output.test
+++ b/clang/test/Analysis/scan-build/html_output.test
@@ -19,13 +19,17 @@ CHECK-FILENAMES: report-{{.*}}.html
CHECK-FILENAMES: scanview.css
CHECK-FILENAMES: sorttable.js
-
-// The index should have a link to the report for the single issue.
+// Tests for the front page.
RUN: cat %t.output_dir/*/index.html \
RUN: | FileCheck %s -check-prefix CHECK-INDEX-HTML
+// Let's confirm that the new filtering facility is present.
+CHECK-INDEX-HTML: Filter Results by File
+
+// The index should have a link to the report for the single issue.
CHECK-INDEX-HTML: <!-- REPORTBUG id="report-{{.*}}.html" -->
+
// The report should describe the issue.
RUN: cat %t.output_dir/*/report-*.html \
RUN: | FileCheck %s -check-prefix CHECK-REPORT-HTML
diff --git a/clang/test/Analysis/suppression-attr-doc.cpp b/clang/test/Analysis/suppression-attr-doc.cpp
index 1208842..ca4e665 100644
--- a/clang/test/Analysis/suppression-attr-doc.cpp
+++ b/clang/test/Analysis/suppression-attr-doc.cpp
@@ -52,3 +52,17 @@ int bar2(bool coin_flip) {
__attribute__((suppress))
return *result; // leak warning is suppressed only on this path
}
+
+class [[clang::suppress]] C {
+ int foo() {
+ int *x = nullptr;
+ return *x; // warnings suppressed in the entire class
+ }
+
+ int bar();
+};
+
+int C::bar() {
+ int *x = nullptr;
+ return *x; // expected-warning{{Dereference of null pointer (loaded from variable 'x')}}
+}
diff --git a/clang/test/Analysis/suppression-attr.cpp b/clang/test/Analysis/suppression-attr.cpp
new file mode 100644
index 0000000..89bc3c4
--- /dev/null
+++ b/clang/test/Analysis/suppression-attr.cpp
@@ -0,0 +1,68 @@
+// RUN: %clang_analyze_cc1 -analyzer-checker=core -verify %s
+
+namespace [[clang::suppress]]
+suppressed_namespace {
+ int foo() {
+ int *x = 0;
+ return *x;
+ }
+
+ int foo_forward();
+}
+
+int suppressed_namespace::foo_forward() {
+ int *x = 0;
+ return *x; // expected-warning{{Dereference of null pointer (loaded from variable 'x')}}
+}
+
+// Another instance of the same namespace.
+namespace suppressed_namespace {
+ int bar() {
+ int *x = 0;
+ return *x; // expected-warning{{Dereference of null pointer (loaded from variable 'x')}}
+ }
+}
+
+void lambda() {
+ [[clang::suppress]] {
+ auto lam = []() {
+ int *x = 0;
+ return *x;
+ };
+ }
+}
+
+class [[clang::suppress]] SuppressedClass {
+ int foo() {
+ int *x = 0;
+ return *x;
+ }
+
+ int bar();
+};
+
+int SuppressedClass::bar() {
+ int *x = 0;
+ return *x; // expected-warning{{Dereference of null pointer (loaded from variable 'x')}}
+}
+
+class SuppressedMethodClass {
+ [[clang::suppress]] int foo() {
+ int *x = 0;
+ return *x;
+ }
+
+ [[clang::suppress]] int bar1();
+ int bar2();
+};
+
+int SuppressedMethodClass::bar1() {
+ int *x = 0;
+ return *x; // expected-warning{{Dereference of null pointer (loaded from variable 'x')}}
+}
+
+[[clang::suppress]]
+int SuppressedMethodClass::bar2() {
+ int *x = 0;
+ return *x; // no-warning
+}
diff --git a/clang/test/Analysis/suppression-attr.m b/clang/test/Analysis/suppression-attr.m
index 8ba8dda..acef4b3 100644
--- a/clang/test/Analysis/suppression-attr.m
+++ b/clang/test/Analysis/suppression-attr.m
@@ -168,17 +168,15 @@ void malloc_leak_suppression_2_1() {
*x = 42;
}
-// TODO: reassess when we decide what to do with declaration annotations
-void malloc_leak_suppression_2_2() /* SUPPRESS */ {
+void malloc_leak_suppression_2_2() SUPPRESS {
int *x = (int *)malloc(sizeof(int));
*x = 42;
-} // expected-warning{{Potential leak of memory pointed to by 'x'}}
+} // no-warning
-// TODO: reassess when we decide what to do with declaration annotations
-/* SUPPRESS */ void malloc_leak_suppression_2_3() {
+SUPPRESS void malloc_leak_suppression_2_3() {
int *x = (int *)malloc(sizeof(int));
*x = 42;
-} // expected-warning{{Potential leak of memory pointed to by 'x'}}
+} // no-warning
void malloc_leak_suppression_2_4(int cond) {
int *x = (int *)malloc(sizeof(int));
@@ -233,20 +231,15 @@ void retain_release_leak__suppression_2(int cond) {
@interface TestSuppress : UIResponder {
}
-// TODO: reassess when we decide what to do with declaration annotations
-@property(copy) /* SUPPRESS */ NSMutableString *mutableStr;
-// expected-warning@-1 {{Property of mutable type 'NSMutableString' has 'copy' attribute; an immutable object will be stored instead}}
+@property(copy) SUPPRESS NSMutableString *mutableStr; // no-warning
@end
@implementation TestSuppress
-// TODO: reassess when we decide what to do with declaration annotations
-- (BOOL)resignFirstResponder /* SUPPRESS */ {
+- (BOOL)resignFirstResponder SUPPRESS { // no-warning
return 0;
-} // expected-warning {{The 'resignFirstResponder' instance method in UIResponder subclass 'TestSuppress' is missing a [super resignFirstResponder] call}}
+}
-// TODO: reassess when we decide what to do with declaration annotations
-- (void)methodWhichMayFail:(NSError **)error /* SUPPRESS */ {
- // expected-warning@-1 {{Method accepting NSError** should have a non-void return value to indicate whether or not an error occurred}}
+- (void)methodWhichMayFail:(NSError **)error SUPPRESS { // no-warning
}
@end
@@ -269,3 +262,40 @@ void ast_checker_suppress_1() {
struct ABC *Abc;
SUPPRESS { Abc = (struct ABC *)&Ab; }
}
+
+SUPPRESS int suppressed_function() {
+ int *x = 0;
+ return *x; // no-warning
+}
+
+SUPPRESS int suppressed_function_forward();
+int suppressed_function_forward() {
+ int *x = 0;
+ return *x; // expected-warning{{Dereference of null pointer (loaded from variable 'x')}}
+}
+
+int suppressed_function_backward();
+SUPPRESS int suppressed_function_backward() {
+ int *x = 0;
+ return *x; // no-warning
+}
+
+SUPPRESS
+@interface SuppressedInterface
+-(int)suppressedMethod;
+-(int)regularMethod SUPPRESS;
+@end
+
+@implementation SuppressedInterface
+-(int)suppressedMethod SUPPRESS {
+ int *x = 0;
+ return *x; // no-warning
+}
+
+// This one is NOT suppressed by the attribute on the forward declaration,
+// and it's also NOT suppressed by the attribute on the entire interface.
+-(int)regularMethod {
+ int *x = 0;
+ return *x; // expected-warning{{Dereference of null pointer (loaded from variable 'x')}}
+}
+@end
diff --git a/clang/test/Analysis/unused-ivars.m b/clang/test/Analysis/unused-ivars.m
index 32e7e80..8788804 100644
--- a/clang/test/Analysis/unused-ivars.m
+++ b/clang/test/Analysis/unused-ivars.m
@@ -44,6 +44,15 @@
}
@end
+// Confirm that the checker respects [[clang::suppress]].
+@interface TestC {
+@private
+ [[clang::suppress]] int x; // no-warning
+}
+@end
+@implementation TestC @end
+
+
//===----------------------------------------------------------------------===//
// Detect that ivar is in use, if used in category in the same file as the
// implementation.
@@ -125,4 +134,4 @@ Radar11059352_1 *_workspacePath;
- (void)useWorkspace {
NSString *workspacePathString = _workspacePath.pathString;
}
-@end \ No newline at end of file
+@end
diff --git a/clang/test/C/C2x/n2549.c b/clang/test/C/C2x/n2549.c
new file mode 100644
index 0000000..817338b
--- /dev/null
+++ b/clang/test/C/C2x/n2549.c
@@ -0,0 +1,14 @@
+// RUN: %clang_cc1 -verify -std=c23 %s
+// RUN: %clang_cc1 -verify=pedantic -std=c17 -pedantic %s
+// RUN: %clang_cc1 -verify=compat -std=c23 -Wpre-c23-compat %s
+
+// expected-no-diagnostics
+
+/* WG14 N2549: Clang 9
+ * Binary literals
+ */
+
+int i = 0b01; /* pedantic-warning {{binary integer literals are a C23 extension}}
+ compat-warning {{binary integer literals are incompatible with C standards before C23}}
+ */
+
diff --git a/clang/test/CMakeLists.txt b/clang/test/CMakeLists.txt
index f17ded4..841317c 100644
--- a/clang/test/CMakeLists.txt
+++ b/clang/test/CMakeLists.txt
@@ -131,6 +131,7 @@ if( NOT CLANG_BUILT_STANDALONE )
llvm-rc
llvm-readelf
llvm-readobj
+ llvm-readtapi
llvm-strip
llvm-symbolizer
llvm-windres
diff --git a/clang/test/CXX/dcl.dcl/dcl.spec/dcl.type/dcl.type.elab/p3.cpp b/clang/test/CXX/dcl.dcl/dcl.spec/dcl.type/dcl.type.elab/p3.cpp
index 1940651..8bdd4905 100644
--- a/clang/test/CXX/dcl.dcl/dcl.spec/dcl.type/dcl.type.elab/p3.cpp
+++ b/clang/test/CXX/dcl.dcl/dcl.spec/dcl.type/dcl.type.elab/p3.cpp
@@ -16,10 +16,10 @@ class A1 {
friend union A; // expected-error {{use of 'A' with tag type that does not match previous declaration}}
friend enum A; // expected-error {{use of 'A' with tag type that does not match previous declaration}}
- friend enum E;
-#if __cplusplus <= 199711L // C++03 or earlier modes
- // expected-warning@-2 {{befriending enumeration type 'enum E' is a C++11 extension}}
-#endif
+ // expected-warning@-1 {{cannot be declared as a friend}}
+ // expected-note@-2 {{remove 'enum' to befriend an enum}}
+ friend enum E; // expected-warning {{cannot be declared as a friend}}
+ // expected-note@-1 {{remove 'enum' to befriend an enum}}
};
template <class T> struct B { // expected-note {{previous use is here}}
diff --git a/clang/test/CXX/dcl.dcl/dcl.spec/dcl.type/dcl.type.elab/p4.cpp b/clang/test/CXX/dcl.dcl/dcl.spec/dcl.type/dcl.type.elab/p4.cpp
new file mode 100644
index 0000000..b516b1f
--- /dev/null
+++ b/clang/test/CXX/dcl.dcl/dcl.spec/dcl.type/dcl.type.elab/p4.cpp
@@ -0,0 +1,40 @@
+// RUN: %clang_cc1 -verify %s -std=c++11 -pedantic-errors
+
+enum class E;
+
+template<typename T>
+struct A {
+ enum class F;
+};
+
+struct B {
+ template<typename T>
+ friend enum A<T>::F; // expected-error {{elaborated enum specifier cannot be declared as a friend}}
+ // expected-note@-1 {{remove 'enum' to befriend an enum}}
+
+ // FIXME: Per [temp.expl.spec]p19, a friend declaration cannot be an explicit specialization
+ template<>
+ friend enum A<int>::F; // expected-error {{elaborated enum specifier cannot be declared as a friend}}
+ // expected-note@-1 {{remove 'enum' to befriend an enum}}
+
+ enum class G;
+
+ friend enum E; // expected-error {{elaborated enum specifier cannot be declared as a friend}}
+ // expected-note@-1 {{remove 'enum' to befriend an enum}}
+};
+
+template<typename T>
+struct C {
+ friend enum T::G; // expected-error {{elaborated enum specifier cannot be declared as a friend}}
+ // expected-note@-1 {{remove 'enum' to befriend an enum}}
+ friend enum A<T>::G; // expected-error {{elaborated enum specifier cannot be declared as a friend}}
+ // expected-note@-1 {{remove 'enum' to befriend an enum}}
+};
+
+struct D {
+ friend enum B::G; // expected-error {{elaborated enum specifier cannot be declared as a friend}}
+ // expected-note@-1 {{remove 'enum' to befriend an enum}}
+ friend enum class B::G; // expected-error {{elaborated enum specifier cannot be declared as a friend}}
+ // expected-note@-1 {{remove 'enum class' to befriend an enum}}
+ // expected-error@-2 {{reference to enumeration must use 'enum' not 'enum class'}}
+};
diff --git a/clang/test/CXX/drs/dr16xx.cpp b/clang/test/CXX/drs/dr16xx.cpp
index 6ce77fb..2dd7d15 100644
--- a/clang/test/CXX/drs/dr16xx.cpp
+++ b/clang/test/CXX/drs/dr16xx.cpp
@@ -61,7 +61,7 @@ namespace dr1631 { // dr1631: 3.7
void f(B, int); // TODO: expected- note {{candidate function}}
void f(int, A); // #dr1631-f
void f(int, A, int = 0); // #dr1631-f-int
-
+
void test() {
f({0}, {{1}});
// since-cxx11-error@-1 {{call to 'f' is ambiguous}}
@@ -107,6 +107,8 @@ namespace dr1638 { // dr1638: 3.1
struct B {
friend enum class A<unsigned>::E;
// since-cxx11-error@-1 {{reference to enumeration must use 'enum' not 'enum class'}}
+ // since-cxx11-error@-2 {{elaborated enum specifier cannot be declared as a friend}}
+ // since-cxx11-note@-3 {{remove 'enum class' to befriend an enum}}
};
#endif
}
@@ -179,7 +181,7 @@ namespace dr1658 { // dr1658: 5
// In all other cases, we are not so lucky.
struct E : A { E(); virtual void foo() = 0; }; // #dr1658-E1
E::E() = default; // #dr1658-E1-ctor
- // cxx98-error@-1 {{defaulted function definitions are a C++11 extension}}
+ // cxx98-error@-1 {{defaulted function definitions are a C++11 extension}}
// cxx98-error@-2 {{base class 'A' has private default constructor}}
// cxx98-note@-3 {{in defaulted default constructor for 'dr1658::DefCtor::E' first required here}}
// cxx98-note@#dr1658-A1 {{implicitly declared private here}}
@@ -188,7 +190,7 @@ namespace dr1658 { // dr1658: 5
struct F : virtual A { F(); }; // #dr1658-F1
F::F() = default; // #dr1658-F1-ctor
// cxx98-error@-1 {{defaulted function definitions are a C++11 extension}}
- // cxx98-error@-2 {{inherited virtual base class 'A' has private default constructor}}
+ // cxx98-error@-2 {{inherited virtual base class 'A' has private default constructor}}
// cxx98-note@-3 {{in defaulted default constructor for 'dr1658::DefCtor::F' first required here}}
// cxx98-note@#dr1658-A1 {{implicitly declared private here}}
// since-cxx11-error@#dr1658-F1-ctor {{defaulting this default constructor would delete it after its first declaration}}
diff --git a/clang/test/CXX/drs/dr201.cpp b/clang/test/CXX/drs/dr201.cpp
new file mode 100644
index 0000000..7e86498
--- /dev/null
+++ b/clang/test/CXX/drs/dr201.cpp
@@ -0,0 +1,42 @@
+// RUN: %clang_cc1 -std=c++98 %s -triple x86_64-linux-gnu -emit-llvm -disable-llvm-passes -o - -fexceptions -fcxx-exceptions -pedantic-errors | llvm-cxxfilt -n | FileCheck %s --check-prefixes CHECK
+// RUN: %clang_cc1 -std=c++11 %s -triple x86_64-linux-gnu -emit-llvm -disable-llvm-passes -o - -fexceptions -fcxx-exceptions -pedantic-errors | llvm-cxxfilt -n | FileCheck %s --check-prefixes CHECK
+// RUN: %clang_cc1 -std=c++14 %s -triple x86_64-linux-gnu -emit-llvm -disable-llvm-passes -o - -fexceptions -fcxx-exceptions -pedantic-errors | llvm-cxxfilt -n | FileCheck %s --check-prefixes CHECK
+// RUN: %clang_cc1 -std=c++17 %s -triple x86_64-linux-gnu -emit-llvm -disable-llvm-passes -o - -fexceptions -fcxx-exceptions -pedantic-errors | llvm-cxxfilt -n | FileCheck %s --check-prefixes CHECK
+// RUN: %clang_cc1 -std=c++20 %s -triple x86_64-linux-gnu -emit-llvm -disable-llvm-passes -o - -fexceptions -fcxx-exceptions -pedantic-errors | llvm-cxxfilt -n | FileCheck %s --check-prefixes CHECK
+// RUN: %clang_cc1 -std=c++23 %s -triple x86_64-linux-gnu -emit-llvm -disable-llvm-passes -o - -fexceptions -fcxx-exceptions -pedantic-errors | llvm-cxxfilt -n | FileCheck %s --check-prefixes CHECK
+// RUN: %clang_cc1 -std=c++2c %s -triple x86_64-linux-gnu -emit-llvm -disable-llvm-passes -o - -fexceptions -fcxx-exceptions -pedantic-errors | llvm-cxxfilt -n | FileCheck %s --check-prefixes CHECK
+
+#if __cplusplus == 199711L
+#define NOTHROW throw()
+#else
+#define NOTHROW noexcept(true)
+#endif
+
+namespace dr201 { // dr201: 2.8
+
+extern void full_expr_fence() NOTHROW;
+
+struct A {
+ ~A() NOTHROW {}
+};
+
+struct B {
+ B(A) NOTHROW {}
+ ~B() NOTHROW {}
+};
+
+void foo() {
+ full_expr_fence();
+ B b = A();
+ full_expr_fence();
+}
+
+// CHECK-LABEL: define {{.*}} void @dr201::foo()
+// CHECK: call void @dr201::full_expr_fence()
+// CHECK: call void @dr201::B::B(dr201::A)
+// CHECK: call void @dr201::A::~A()
+// CHECK: call void @dr201::full_expr_fence()
+// CHECK: call void @dr201::B::~B()
+// CHECK-LABEL: }
+
+} // namespace dr201
diff --git a/clang/test/CXX/drs/dr210.cpp b/clang/test/CXX/drs/dr210.cpp
new file mode 100644
index 0000000..156ee81
--- /dev/null
+++ b/clang/test/CXX/drs/dr210.cpp
@@ -0,0 +1,41 @@
+// RUN: %clang_cc1 -std=c++98 %s -triple x86_64-linux-gnu -emit-llvm -disable-llvm-passes -o - -fexceptions -fcxx-exceptions -pedantic-errors | llvm-cxxfilt -n | FileCheck %s --check-prefixes CHECK
+// RUN: %clang_cc1 -std=c++11 %s -triple x86_64-linux-gnu -emit-llvm -disable-llvm-passes -o - -fexceptions -fcxx-exceptions -pedantic-errors | llvm-cxxfilt -n | FileCheck %s --check-prefixes CHECK
+// RUN: %clang_cc1 -std=c++14 %s -triple x86_64-linux-gnu -emit-llvm -disable-llvm-passes -o - -fexceptions -fcxx-exceptions -pedantic-errors | llvm-cxxfilt -n | FileCheck %s --check-prefixes CHECK
+// RUN: %clang_cc1 -std=c++17 %s -triple x86_64-linux-gnu -emit-llvm -disable-llvm-passes -o - -fexceptions -fcxx-exceptions -pedantic-errors | llvm-cxxfilt -n | FileCheck %s --check-prefixes CHECK
+// RUN: %clang_cc1 -std=c++20 %s -triple x86_64-linux-gnu -emit-llvm -disable-llvm-passes -o - -fexceptions -fcxx-exceptions -pedantic-errors | llvm-cxxfilt -n | FileCheck %s --check-prefixes CHECK
+// RUN: %clang_cc1 -std=c++23 %s -triple x86_64-linux-gnu -emit-llvm -disable-llvm-passes -o - -fexceptions -fcxx-exceptions -pedantic-errors | llvm-cxxfilt -n | FileCheck %s --check-prefixes CHECK
+// RUN: %clang_cc1 -std=c++2c %s -triple x86_64-linux-gnu -emit-llvm -disable-llvm-passes -o - -fexceptions -fcxx-exceptions -pedantic-errors | llvm-cxxfilt -n | FileCheck %s --check-prefixes CHECK
+
+#if __cplusplus == 199711L
+#pragma clang diagnostic push
+#pragma clang diagnostic ignored "-Wvariadic-macros"
+#define static_assert(...) __extension__ _Static_assert(__VA_ARGS__)
+#pragma clang diagnostic pop
+#endif
+
+namespace dr210 { // dr210: 2.7
+struct B {
+ long i;
+ B();
+ virtual ~B();
+};
+
+static_assert(sizeof(B) == 16, "");
+
+struct D : B {
+ long j;
+ D();
+};
+
+static_assert(sizeof(D) == 24, "");
+
+void toss(const B* b) {
+ throw *b;
+}
+
+// CHECK-LABEL: define {{.*}} void @dr210::toss(dr210::B const*)
+// CHECK: %[[EXCEPTION:.*]] = call ptr @__cxa_allocate_exception(i64 16)
+// CHECK: call void @__cxa_throw(ptr %[[EXCEPTION]], ptr @typeinfo for dr210::B, ptr @dr210::B::~B())
+// CHECK-LABEL: }
+
+} // namespace dr210
diff --git a/clang/test/CXX/drs/dr23xx.cpp b/clang/test/CXX/drs/dr23xx.cpp
index c046373..38c6f8a 100644
--- a/clang/test/CXX/drs/dr23xx.cpp
+++ b/clang/test/CXX/drs/dr23xx.cpp
@@ -261,9 +261,9 @@ namespace dr2396 { // dr2396: no
// FIXME: per P1787 "Calling a conversion function" example, all of the
// examples below are well-formed, with B resolving to A::B, but currently
- // it's been resolved to dr2396::B.
+ // it's been resolved to dr2396::B.
- // void f(A a) { a.operator B B::*(); }
+ // void f(A a) { a.operator B B::*(); }
// void g(A a) { a.operator decltype(B()) B::*(); }
// void g2(A a) { a.operator B decltype(B())::*(); }
}
@@ -277,4 +277,38 @@ namespace dr2397 { // dr2397: 17
auto (*c)[5] = &a;
}
} // namespace dr2397
+
+// CWG2363 was closed as NAD, but its resolution does affirm that
+// a friend declaration cannot have an opaque-enumm-specifier.
+namespace dr2363 { // dr2363: yes
+
+enum class E0;
+enum E1 : int;
+
+struct A {
+ friend enum class E0;
+ // since-cxx11-error@-1 {{reference to enumeration must use 'enum' not 'enum class'}}
+ // expected-error@-2 {{elaborated enum specifier cannot be declared as a friend}}
+ // expected-note@-3 {{remove 'enum class' to befriend an enum}}
+
+ friend enum E0;
+ // expected-error@-1 {{elaborated enum specifier cannot be declared as a friend}}
+ // expected-note@-2 {{remove 'enum' to befriend an enum}}
+
+ friend enum class E1;
+ // since-cxx11-error@-1 {{reference to enumeration must use 'enum' not 'enum class'}}
+ // expected-error@-2 {{elaborated enum specifier cannot be declared as a friend}}
+ // expected-note@-3 {{remove 'enum class' to befriend an enum}}
+
+ friend enum E1;
+ // expected-error@-1 {{elaborated enum specifier cannot be declared as a friend}}
+ // expected-note@-2 {{remove 'enum' to befriend an enum}}
+
+ friend enum class E2;
+ // since-cxx11-error@-1 {{reference to enumeration must use 'enum' not 'enum class'}}
+ // expected-error@-2 {{elaborated enum specifier cannot be declared as a friend}}
+ // expected-note@-3 {{remove 'enum class' to befriend an enum}}
+};
+} // namespace dr2363
+
#endif
diff --git a/clang/test/CXX/drs/dr292.cpp b/clang/test/CXX/drs/dr292.cpp
new file mode 100644
index 0000000..19caeef
--- /dev/null
+++ b/clang/test/CXX/drs/dr292.cpp
@@ -0,0 +1,30 @@
+// RUN: %clang_cc1 -std=c++98 %s -triple x86_64-linux-gnu -emit-llvm -disable-llvm-passes -o - -fexceptions -fcxx-exceptions -pedantic-errors | llvm-cxxfilt -n | FileCheck %s --check-prefixes CHECK
+// RUN: %clang_cc1 -std=c++11 %s -triple x86_64-linux-gnu -emit-llvm -disable-llvm-passes -o - -fexceptions -fcxx-exceptions -pedantic-errors | llvm-cxxfilt -n | FileCheck %s --check-prefixes CHECK
+// RUN: %clang_cc1 -std=c++14 %s -triple x86_64-linux-gnu -emit-llvm -disable-llvm-passes -o - -fexceptions -fcxx-exceptions -pedantic-errors | llvm-cxxfilt -n | FileCheck %s --check-prefixes CHECK
+// RUN: %clang_cc1 -std=c++17 %s -triple x86_64-linux-gnu -emit-llvm -disable-llvm-passes -o - -fexceptions -fcxx-exceptions -pedantic-errors | llvm-cxxfilt -n | FileCheck %s --check-prefixes CHECK
+// RUN: %clang_cc1 -std=c++20 %s -triple x86_64-linux-gnu -emit-llvm -disable-llvm-passes -o - -fexceptions -fcxx-exceptions -pedantic-errors | llvm-cxxfilt -n | FileCheck %s --check-prefixes CHECK
+// RUN: %clang_cc1 -std=c++23 %s -triple x86_64-linux-gnu -emit-llvm -disable-llvm-passes -o - -fexceptions -fcxx-exceptions -pedantic-errors | llvm-cxxfilt -n | FileCheck %s --check-prefixes CHECK
+// RUN: %clang_cc1 -std=c++2c %s -triple x86_64-linux-gnu -emit-llvm -disable-llvm-passes -o - -fexceptions -fcxx-exceptions -pedantic-errors | llvm-cxxfilt -n | FileCheck %s --check-prefixes CHECK
+
+namespace dr292 { // dr292: 2.9
+
+extern int g();
+
+struct A {
+ A(int) throw() {}
+};
+
+void f() {
+ new A(g());
+}
+
+// CHECK-LABEL: define {{.*}} void @dr292::f()()
+// CHECK: %[[CALL:.+]] = call {{.*}} @operator new(unsigned long)({{.*}})
+// CHECK: invoke {{.*}} i32 @dr292::g()()
+// CHECK-NEXT: to {{.*}} unwind label %lpad
+// CHECK-LABEL: lpad:
+// CHECK: call void @operator delete(void*)(ptr {{.*}} %[[CALL]])
+// CHECK-LABEL: eh.resume:
+// CHECK-LABEL: }
+
+} // namespace dr292
diff --git a/clang/test/CXX/drs/dr2xx.cpp b/clang/test/CXX/drs/dr2xx.cpp
index 1a3ac53..cbb8734 100644
--- a/clang/test/CXX/drs/dr2xx.cpp
+++ b/clang/test/CXX/drs/dr2xx.cpp
@@ -26,7 +26,7 @@ namespace dr200 { // dr200: dup 214
}
}
-// dr201 FIXME: write codegen test
+// dr201 is in dr201.cpp
namespace dr202 { // dr202: 3.1
template<typename T> T f();
@@ -76,7 +76,7 @@ namespace dr209 { // dr209: 3.2
};
}
-// dr210 FIXME: write codegen test
+// dr210 is in dr210.cpp
namespace dr211 { // dr211: yes
struct A {
@@ -1188,7 +1188,7 @@ namespace dr289 { // dr289: yes
// dr290: na
// dr291: dup 391
-// dr292 FIXME: write a codegen test
+// dr292 is in dr292.cpp
namespace dr294 { // dr294: no
void f() throw(int);
diff --git a/clang/test/CXX/over/over.match/over.match.best/p1-2a.cpp b/clang/test/CXX/over/over.match/over.match.best/p1-2a.cpp
index dae1ba76..db3e3e3 100644
--- a/clang/test/CXX/over/over.match/over.match.best/p1-2a.cpp
+++ b/clang/test/CXX/over/over.match/over.match.best/p1-2a.cpp
@@ -97,13 +97,16 @@ namespace non_template
static_assert(is_same_v<decltype(baz<int>()), int>); // expected-error {{call to 'baz' is ambiguous}}
static_assert(is_same_v<decltype(bar<int>()), void>); // expected-error {{call to 'bar' is ambiguous}}
+ // Top-level cv-qualifiers are ignored in template partial ordering per [dcl.fct]/p5.
+ // After producing the list of parameter types, any top-level cv-qualifiers modifying
+ // a parameter type are deleted when forming the function type.
template<typename T>
- constexpr int goo(int a) requires AtLeast2<int> && true { // expected-note {{candidate function}}
+ constexpr int goo(T a) requires AtLeast2<T> && true {
return 1;
}
template<typename T>
- constexpr int goo(const int b) requires AtLeast2<int> { // expected-note {{candidate function}}
+ constexpr int goo(const T b) requires AtLeast2<T> {
return 2;
}
@@ -122,7 +125,6 @@ namespace non_template
return 2;
}
- // By temp.func.order-6.2.2, this is ambiguous because parameter a and b have different types.
- static_assert(goo<int>(1) == 1); // expected-error {{call to 'goo' is ambiguous}}
+ static_assert(goo<int>(1) == 1);
static_assert(doo<int>(2) == 1);
}
diff --git a/clang/test/CXX/temp/temp.decls/temp.class/temp.mem.enum/p1.cpp b/clang/test/CXX/temp/temp.decls/temp.class/temp.mem.enum/p1.cpp
index 2884be1..e580799 100644
--- a/clang/test/CXX/temp/temp.decls/temp.class/temp.mem.enum/p1.cpp
+++ b/clang/test/CXX/temp/temp.decls/temp.class/temp.mem.enum/p1.cpp
@@ -101,10 +101,14 @@ template<> enum class D<short>::E;
struct F {
// Per C++11 [class.friend]p3, these friend declarations have no effect.
// Only classes and functions can be friends.
- template<typename T> friend enum D<T>::E;
- template<> friend enum D<short>::E;
+ template<typename T> friend enum D<T>::E; // expected-warning {{elaborated enum specifier cannot be declared as a friend}}
+ // expected-note@-1 {{remove 'enum' to befriend an enum}}
+ template<> friend enum D<short>::E; // expected-warning {{elaborated enum specifier cannot be declared as a friend}}
+ // expected-note@-1 {{remove 'enum' to befriend an enum}}
template<> friend enum D<double>::E { e3 }; // expected-error {{cannot define a type in a friend declaration}}
+ // expected-warning@-1 {{elaborated enum specifier cannot be declared as a friend}}
+ // expected-note@-2 {{remove 'enum' to befriend an enum}}
private:
static const int n = 1; // expected-note {{private here}}
diff --git a/clang/test/CodeGen/RISCV/ntlh-intrinsics/riscv32-zihintntl.c b/clang/test/CodeGen/RISCV/ntlh-intrinsics/riscv32-zihintntl.c
index 897edbc..b11c2ca 100644
--- a/clang/test/CodeGen/RISCV/ntlh-intrinsics/riscv32-zihintntl.c
+++ b/clang/test/CodeGen/RISCV/ntlh-intrinsics/riscv32-zihintntl.c
@@ -28,190 +28,190 @@ vint8m1_t *scvc1, *scvc2;
// clang-format off
void ntl_all_sizes() { // CHECK-LABEL: ntl_all_sizes
- uc = __riscv_ntl_load(&sc, __RISCV_NTLH_INNERMOST_PRIVATE); // CHECK: load i8{{.*}}align 1, !nontemporal !4, !riscv-nontemporal-domain !5
- sc = __riscv_ntl_load(&uc, __RISCV_NTLH_INNERMOST_PRIVATE); // CHECK: load i8{{.*}}align 1, !nontemporal !4, !riscv-nontemporal-domain !5
- us = __riscv_ntl_load(&ss, __RISCV_NTLH_INNERMOST_PRIVATE); // CHECK: load i16{{.*}}align 2, !nontemporal !4, !riscv-nontemporal-domain !5
- ss = __riscv_ntl_load(&us, __RISCV_NTLH_INNERMOST_PRIVATE); // CHECK: load i16{{.*}}align 2, !nontemporal !4, !riscv-nontemporal-domain !5
- ui = __riscv_ntl_load(&si, __RISCV_NTLH_INNERMOST_PRIVATE); // CHECK: load i32{{.*}}align 4, !nontemporal !4, !riscv-nontemporal-domain !5
- si = __riscv_ntl_load(&ui, __RISCV_NTLH_INNERMOST_PRIVATE); // CHECK: load i32{{.*}}align 4, !nontemporal !4, !riscv-nontemporal-domain !5
- ull = __riscv_ntl_load(&sll, __RISCV_NTLH_INNERMOST_PRIVATE); // CHECK: load i64{{.*}}align 8, !nontemporal !4, !riscv-nontemporal-domain !5
- sll = __riscv_ntl_load(&ull, __RISCV_NTLH_INNERMOST_PRIVATE); // CHECK: load i64{{.*}}align 8, !nontemporal !4, !riscv-nontemporal-domain !5
- h1 = __riscv_ntl_load(&h2, __RISCV_NTLH_INNERMOST_PRIVATE); // CHECK: load half{{.*}}align 2, !nontemporal !4, !riscv-nontemporal-domain !5
- f1 = __riscv_ntl_load(&f2, __RISCV_NTLH_INNERMOST_PRIVATE); // CHECK: load float{{.*}}align 4, !nontemporal !4, !riscv-nontemporal-domain !5
- d1 = __riscv_ntl_load(&d2, __RISCV_NTLH_INNERMOST_PRIVATE); // CHECK: load double{{.*}}align 8, !nontemporal !4, !riscv-nontemporal-domain !5
- v4si1 = __riscv_ntl_load(&v4si2, __RISCV_NTLH_INNERMOST_PRIVATE); // CHECK: load <4 x i32>{{.*}}align 16, !nontemporal !4, !riscv-nontemporal-domain !5
- v8ss1 = __riscv_ntl_load(&v8ss2, __RISCV_NTLH_INNERMOST_PRIVATE); // CHECK: load <8 x i16>{{.*}}align 16, !nontemporal !4, !riscv-nontemporal-domain !5
- v16sc1 = __riscv_ntl_load(&v16sc2, __RISCV_NTLH_INNERMOST_PRIVATE); // CHECK: load <16 x i8>{{.*}}align 16, !nontemporal !4, !riscv-nontemporal-domain !5
- *scvi1 = __riscv_ntl_load(scvi2, __RISCV_NTLH_INNERMOST_PRIVATE); // CHECK: load <vscale x 2 x i32>{{.*}}align 8, !nontemporal !4, !riscv-nontemporal-domain !5
- *scvs1 = __riscv_ntl_load(scvs2, __RISCV_NTLH_INNERMOST_PRIVATE); // CHECK: load <vscale x 4 x i16>{{.*}}align 8, !nontemporal !4, !riscv-nontemporal-domain !5
- *scvc1 = __riscv_ntl_load(scvc2, __RISCV_NTLH_INNERMOST_PRIVATE); // CHECK: load <vscale x 8 x i8>{{.*}}align 8, !nontemporal !4, !riscv-nontemporal-domain !5
+ uc = __riscv_ntl_load(&sc, __RISCV_NTLH_INNERMOST_PRIVATE); // CHECK: load i8{{.*}}align 1, !nontemporal !6, !riscv-nontemporal-domain !7
+ sc = __riscv_ntl_load(&uc, __RISCV_NTLH_INNERMOST_PRIVATE); // CHECK: load i8{{.*}}align 1, !nontemporal !6, !riscv-nontemporal-domain !7
+ us = __riscv_ntl_load(&ss, __RISCV_NTLH_INNERMOST_PRIVATE); // CHECK: load i16{{.*}}align 2, !nontemporal !6, !riscv-nontemporal-domain !7
+ ss = __riscv_ntl_load(&us, __RISCV_NTLH_INNERMOST_PRIVATE); // CHECK: load i16{{.*}}align 2, !nontemporal !6, !riscv-nontemporal-domain !7
+ ui = __riscv_ntl_load(&si, __RISCV_NTLH_INNERMOST_PRIVATE); // CHECK: load i32{{.*}}align 4, !nontemporal !6, !riscv-nontemporal-domain !7
+ si = __riscv_ntl_load(&ui, __RISCV_NTLH_INNERMOST_PRIVATE); // CHECK: load i32{{.*}}align 4, !nontemporal !6, !riscv-nontemporal-domain !7
+ ull = __riscv_ntl_load(&sll, __RISCV_NTLH_INNERMOST_PRIVATE); // CHECK: load i64{{.*}}align 8, !nontemporal !6, !riscv-nontemporal-domain !7
+ sll = __riscv_ntl_load(&ull, __RISCV_NTLH_INNERMOST_PRIVATE); // CHECK: load i64{{.*}}align 8, !nontemporal !6, !riscv-nontemporal-domain !7
+ h1 = __riscv_ntl_load(&h2, __RISCV_NTLH_INNERMOST_PRIVATE); // CHECK: load half{{.*}}align 2, !nontemporal !6, !riscv-nontemporal-domain !7
+ f1 = __riscv_ntl_load(&f2, __RISCV_NTLH_INNERMOST_PRIVATE); // CHECK: load float{{.*}}align 4, !nontemporal !6, !riscv-nontemporal-domain !7
+ d1 = __riscv_ntl_load(&d2, __RISCV_NTLH_INNERMOST_PRIVATE); // CHECK: load double{{.*}}align 8, !nontemporal !6, !riscv-nontemporal-domain !7
+ v4si1 = __riscv_ntl_load(&v4si2, __RISCV_NTLH_INNERMOST_PRIVATE); // CHECK: load <4 x i32>{{.*}}align 16, !nontemporal !6, !riscv-nontemporal-domain !7
+ v8ss1 = __riscv_ntl_load(&v8ss2, __RISCV_NTLH_INNERMOST_PRIVATE); // CHECK: load <8 x i16>{{.*}}align 16, !nontemporal !6, !riscv-nontemporal-domain !7
+ v16sc1 = __riscv_ntl_load(&v16sc2, __RISCV_NTLH_INNERMOST_PRIVATE); // CHECK: load <16 x i8>{{.*}}align 16, !nontemporal !6, !riscv-nontemporal-domain !7
+ *scvi1 = __riscv_ntl_load(scvi2, __RISCV_NTLH_INNERMOST_PRIVATE); // CHECK: load <vscale x 2 x i32>{{.*}}align 8, !nontemporal !6, !riscv-nontemporal-domain !7
+ *scvs1 = __riscv_ntl_load(scvs2, __RISCV_NTLH_INNERMOST_PRIVATE); // CHECK: load <vscale x 4 x i16>{{.*}}align 8, !nontemporal !6, !riscv-nontemporal-domain !7
+ *scvc1 = __riscv_ntl_load(scvc2, __RISCV_NTLH_INNERMOST_PRIVATE); // CHECK: load <vscale x 8 x i8>{{.*}}align 8, !nontemporal !6, !riscv-nontemporal-domain !7
- uc = __riscv_ntl_load(&sc, __RISCV_NTLH_ALL_PRIVATE); // CHECK: load i8{{.*}}align 1, !nontemporal !4, !riscv-nontemporal-domain !6
- sc = __riscv_ntl_load(&uc, __RISCV_NTLH_ALL_PRIVATE); // CHECK: load i8{{.*}}align 1, !nontemporal !4, !riscv-nontemporal-domain !6
- us = __riscv_ntl_load(&ss, __RISCV_NTLH_ALL_PRIVATE); // CHECK: load i16{{.*}}align 2, !nontemporal !4, !riscv-nontemporal-domain !6
- ss = __riscv_ntl_load(&us, __RISCV_NTLH_ALL_PRIVATE); // CHECK: load i16{{.*}}align 2, !nontemporal !4, !riscv-nontemporal-domain !6
- ui = __riscv_ntl_load(&si, __RISCV_NTLH_ALL_PRIVATE); // CHECK: load i32{{.*}}align 4, !nontemporal !4, !riscv-nontemporal-domain !6
- si = __riscv_ntl_load(&ui, __RISCV_NTLH_ALL_PRIVATE); // CHECK: load i32{{.*}}align 4, !nontemporal !4, !riscv-nontemporal-domain !6
- ull = __riscv_ntl_load(&sll, __RISCV_NTLH_ALL_PRIVATE); // CHECK: load i64{{.*}}align 8, !nontemporal !4, !riscv-nontemporal-domain !6
- sll = __riscv_ntl_load(&ull, __RISCV_NTLH_ALL_PRIVATE); // CHECK: load i64{{.*}}align 8, !nontemporal !4, !riscv-nontemporal-domain !6
- h1 = __riscv_ntl_load(&h2, __RISCV_NTLH_ALL_PRIVATE); // CHECK: load half{{.*}}align 2, !nontemporal !4, !riscv-nontemporal-domain !6
- f1 = __riscv_ntl_load(&f2, __RISCV_NTLH_ALL_PRIVATE); // CHECK: load float{{.*}}align 4, !nontemporal !4, !riscv-nontemporal-domain !6
- d1 = __riscv_ntl_load(&d2, __RISCV_NTLH_ALL_PRIVATE); // CHECK: load double{{.*}}align 8, !nontemporal !4, !riscv-nontemporal-domain !6
- v4si1 = __riscv_ntl_load(&v4si2, __RISCV_NTLH_ALL_PRIVATE); // CHECK: load <4 x i32>{{.*}}align 16, !nontemporal !4, !riscv-nontemporal-domain !6
- v8ss1 = __riscv_ntl_load(&v8ss2, __RISCV_NTLH_ALL_PRIVATE); // CHECK: load <8 x i16>{{.*}}align 16, !nontemporal !4, !riscv-nontemporal-domain !6
- v16sc1 = __riscv_ntl_load(&v16sc2, __RISCV_NTLH_ALL_PRIVATE); // CHECK: load <16 x i8>{{.*}}align 16, !nontemporal !4, !riscv-nontemporal-domain !6
- *scvi1 = __riscv_ntl_load(scvi2, __RISCV_NTLH_ALL_PRIVATE); // CHECK: load <vscale x 2 x i32>{{.*}}align 8, !nontemporal !4, !riscv-nontemporal-domain !6
- *scvs1 = __riscv_ntl_load(scvs2, __RISCV_NTLH_ALL_PRIVATE); // CHECK: load <vscale x 4 x i16>{{.*}}align 8, !nontemporal !4, !riscv-nontemporal-domain !6
- *scvc1 = __riscv_ntl_load(scvc2, __RISCV_NTLH_ALL_PRIVATE); // CHECK: load <vscale x 8 x i8>{{.*}}align 8, !nontemporal !4, !riscv-nontemporal-domain !6
+ uc = __riscv_ntl_load(&sc, __RISCV_NTLH_ALL_PRIVATE); // CHECK: load i8{{.*}}align 1, !nontemporal !6, !riscv-nontemporal-domain !8
+ sc = __riscv_ntl_load(&uc, __RISCV_NTLH_ALL_PRIVATE); // CHECK: load i8{{.*}}align 1, !nontemporal !6, !riscv-nontemporal-domain !8
+ us = __riscv_ntl_load(&ss, __RISCV_NTLH_ALL_PRIVATE); // CHECK: load i16{{.*}}align 2, !nontemporal !6, !riscv-nontemporal-domain !8
+ ss = __riscv_ntl_load(&us, __RISCV_NTLH_ALL_PRIVATE); // CHECK: load i16{{.*}}align 2, !nontemporal !6, !riscv-nontemporal-domain !8
+ ui = __riscv_ntl_load(&si, __RISCV_NTLH_ALL_PRIVATE); // CHECK: load i32{{.*}}align 4, !nontemporal !6, !riscv-nontemporal-domain !8
+ si = __riscv_ntl_load(&ui, __RISCV_NTLH_ALL_PRIVATE); // CHECK: load i32{{.*}}align 4, !nontemporal !6, !riscv-nontemporal-domain !8
+ ull = __riscv_ntl_load(&sll, __RISCV_NTLH_ALL_PRIVATE); // CHECK: load i64{{.*}}align 8, !nontemporal !6, !riscv-nontemporal-domain !8
+ sll = __riscv_ntl_load(&ull, __RISCV_NTLH_ALL_PRIVATE); // CHECK: load i64{{.*}}align 8, !nontemporal !6, !riscv-nontemporal-domain !8
+ h1 = __riscv_ntl_load(&h2, __RISCV_NTLH_ALL_PRIVATE); // CHECK: load half{{.*}}align 2, !nontemporal !6, !riscv-nontemporal-domain !8
+ f1 = __riscv_ntl_load(&f2, __RISCV_NTLH_ALL_PRIVATE); // CHECK: load float{{.*}}align 4, !nontemporal !6, !riscv-nontemporal-domain !8
+ d1 = __riscv_ntl_load(&d2, __RISCV_NTLH_ALL_PRIVATE); // CHECK: load double{{.*}}align 8, !nontemporal !6, !riscv-nontemporal-domain !8
+ v4si1 = __riscv_ntl_load(&v4si2, __RISCV_NTLH_ALL_PRIVATE); // CHECK: load <4 x i32>{{.*}}align 16, !nontemporal !6, !riscv-nontemporal-domain !8
+ v8ss1 = __riscv_ntl_load(&v8ss2, __RISCV_NTLH_ALL_PRIVATE); // CHECK: load <8 x i16>{{.*}}align 16, !nontemporal !6, !riscv-nontemporal-domain !8
+ v16sc1 = __riscv_ntl_load(&v16sc2, __RISCV_NTLH_ALL_PRIVATE); // CHECK: load <16 x i8>{{.*}}align 16, !nontemporal !6, !riscv-nontemporal-domain !8
+ *scvi1 = __riscv_ntl_load(scvi2, __RISCV_NTLH_ALL_PRIVATE); // CHECK: load <vscale x 2 x i32>{{.*}}align 8, !nontemporal !6, !riscv-nontemporal-domain !8
+ *scvs1 = __riscv_ntl_load(scvs2, __RISCV_NTLH_ALL_PRIVATE); // CHECK: load <vscale x 4 x i16>{{.*}}align 8, !nontemporal !6, !riscv-nontemporal-domain !8
+ *scvc1 = __riscv_ntl_load(scvc2, __RISCV_NTLH_ALL_PRIVATE); // CHECK: load <vscale x 8 x i8>{{.*}}align 8, !nontemporal !6, !riscv-nontemporal-domain !8
- uc = __riscv_ntl_load(&sc, __RISCV_NTLH_INNERMOST_SHARED); // CHECK: load i8{{.*}}align 1, !nontemporal !4, !riscv-nontemporal-domain !7
- sc = __riscv_ntl_load(&uc, __RISCV_NTLH_INNERMOST_SHARED); // CHECK: load i8{{.*}}align 1, !nontemporal !4, !riscv-nontemporal-domain !7
- us = __riscv_ntl_load(&ss, __RISCV_NTLH_INNERMOST_SHARED); // CHECK: load i16{{.*}}align 2, !nontemporal !4, !riscv-nontemporal-domain !7
- ss = __riscv_ntl_load(&us, __RISCV_NTLH_INNERMOST_SHARED); // CHECK: load i16{{.*}}align 2, !nontemporal !4, !riscv-nontemporal-domain !7
- ui = __riscv_ntl_load(&si, __RISCV_NTLH_INNERMOST_SHARED); // CHECK: load i32{{.*}}align 4, !nontemporal !4, !riscv-nontemporal-domain !7
- si = __riscv_ntl_load(&ui, __RISCV_NTLH_INNERMOST_SHARED); // CHECK: load i32{{.*}}align 4, !nontemporal !4, !riscv-nontemporal-domain !7
- ull = __riscv_ntl_load(&sll, __RISCV_NTLH_INNERMOST_SHARED); // CHECK: load i64{{.*}}align 8, !nontemporal !4, !riscv-nontemporal-domain !7
- sll = __riscv_ntl_load(&ull, __RISCV_NTLH_INNERMOST_SHARED); // CHECK: load i64{{.*}}align 8, !nontemporal !4, !riscv-nontemporal-domain !7
- h1 = __riscv_ntl_load(&h2, __RISCV_NTLH_INNERMOST_SHARED); // CHECK: load half{{.*}}align 2, !nontemporal !4, !riscv-nontemporal-domain !7
- f1 = __riscv_ntl_load(&f2, __RISCV_NTLH_INNERMOST_SHARED); // CHECK: load float{{.*}}align 4, !nontemporal !4, !riscv-nontemporal-domain !7
- d1 = __riscv_ntl_load(&d2, __RISCV_NTLH_INNERMOST_SHARED); // CHECK: load double{{.*}}align 8, !nontemporal !4, !riscv-nontemporal-domain !7
- v4si1 = __riscv_ntl_load(&v4si2, __RISCV_NTLH_INNERMOST_SHARED); // CHECK: load <4 x i32>{{.*}}align 16, !nontemporal !4, !riscv-nontemporal-domain !7
- v8ss1 = __riscv_ntl_load(&v8ss2, __RISCV_NTLH_INNERMOST_SHARED); // CHECK: load <8 x i16>{{.*}}align 16, !nontemporal !4, !riscv-nontemporal-domain !7
- v16sc1 = __riscv_ntl_load(&v16sc2, __RISCV_NTLH_INNERMOST_SHARED); // CHECK: load <16 x i8>{{.*}}align 16, !nontemporal !4, !riscv-nontemporal-domain !7
- *scvi1 = __riscv_ntl_load(scvi2, __RISCV_NTLH_INNERMOST_SHARED); // CHECK: load <vscale x 2 x i32>{{.*}}align 8, !nontemporal !4, !riscv-nontemporal-domain !7
- *scvs1 = __riscv_ntl_load(scvs2, __RISCV_NTLH_INNERMOST_SHARED); // CHECK: load <vscale x 4 x i16>{{.*}}align 8, !nontemporal !4, !riscv-nontemporal-domain !7
- *scvc1 = __riscv_ntl_load(scvc2, __RISCV_NTLH_INNERMOST_SHARED); // CHECK: load <vscale x 8 x i8>{{.*}}align 8, !nontemporal !4, !riscv-nontemporal-domain !7
+ uc = __riscv_ntl_load(&sc, __RISCV_NTLH_INNERMOST_SHARED); // CHECK: load i8{{.*}}align 1, !nontemporal !6, !riscv-nontemporal-domain !9
+ sc = __riscv_ntl_load(&uc, __RISCV_NTLH_INNERMOST_SHARED); // CHECK: load i8{{.*}}align 1, !nontemporal !6, !riscv-nontemporal-domain !9
+ us = __riscv_ntl_load(&ss, __RISCV_NTLH_INNERMOST_SHARED); // CHECK: load i16{{.*}}align 2, !nontemporal !6, !riscv-nontemporal-domain !9
+ ss = __riscv_ntl_load(&us, __RISCV_NTLH_INNERMOST_SHARED); // CHECK: load i16{{.*}}align 2, !nontemporal !6, !riscv-nontemporal-domain !9
+ ui = __riscv_ntl_load(&si, __RISCV_NTLH_INNERMOST_SHARED); // CHECK: load i32{{.*}}align 4, !nontemporal !6, !riscv-nontemporal-domain !9
+ si = __riscv_ntl_load(&ui, __RISCV_NTLH_INNERMOST_SHARED); // CHECK: load i32{{.*}}align 4, !nontemporal !6, !riscv-nontemporal-domain !9
+ ull = __riscv_ntl_load(&sll, __RISCV_NTLH_INNERMOST_SHARED); // CHECK: load i64{{.*}}align 8, !nontemporal !6, !riscv-nontemporal-domain !9
+ sll = __riscv_ntl_load(&ull, __RISCV_NTLH_INNERMOST_SHARED); // CHECK: load i64{{.*}}align 8, !nontemporal !6, !riscv-nontemporal-domain !9
+ h1 = __riscv_ntl_load(&h2, __RISCV_NTLH_INNERMOST_SHARED); // CHECK: load half{{.*}}align 2, !nontemporal !6, !riscv-nontemporal-domain !9
+ f1 = __riscv_ntl_load(&f2, __RISCV_NTLH_INNERMOST_SHARED); // CHECK: load float{{.*}}align 4, !nontemporal !6, !riscv-nontemporal-domain !9
+ d1 = __riscv_ntl_load(&d2, __RISCV_NTLH_INNERMOST_SHARED); // CHECK: load double{{.*}}align 8, !nontemporal !6, !riscv-nontemporal-domain !9
+ v4si1 = __riscv_ntl_load(&v4si2, __RISCV_NTLH_INNERMOST_SHARED); // CHECK: load <4 x i32>{{.*}}align 16, !nontemporal !6, !riscv-nontemporal-domain !9
+ v8ss1 = __riscv_ntl_load(&v8ss2, __RISCV_NTLH_INNERMOST_SHARED); // CHECK: load <8 x i16>{{.*}}align 16, !nontemporal !6, !riscv-nontemporal-domain !9
+ v16sc1 = __riscv_ntl_load(&v16sc2, __RISCV_NTLH_INNERMOST_SHARED); // CHECK: load <16 x i8>{{.*}}align 16, !nontemporal !6, !riscv-nontemporal-domain !9
+ *scvi1 = __riscv_ntl_load(scvi2, __RISCV_NTLH_INNERMOST_SHARED); // CHECK: load <vscale x 2 x i32>{{.*}}align 8, !nontemporal !6, !riscv-nontemporal-domain !9
+ *scvs1 = __riscv_ntl_load(scvs2, __RISCV_NTLH_INNERMOST_SHARED); // CHECK: load <vscale x 4 x i16>{{.*}}align 8, !nontemporal !6, !riscv-nontemporal-domain !9
+ *scvc1 = __riscv_ntl_load(scvc2, __RISCV_NTLH_INNERMOST_SHARED); // CHECK: load <vscale x 8 x i8>{{.*}}align 8, !nontemporal !6, !riscv-nontemporal-domain !9
- uc = __riscv_ntl_load(&sc, __RISCV_NTLH_ALL); // CHECK: load i8{{.*}}align 1, !nontemporal !4, !riscv-nontemporal-domain !8
- sc = __riscv_ntl_load(&uc, __RISCV_NTLH_ALL); // CHECK: load i8{{.*}}align 1, !nontemporal !4, !riscv-nontemporal-domain !8
- us = __riscv_ntl_load(&ss, __RISCV_NTLH_ALL); // CHECK: load i16{{.*}}align 2, !nontemporal !4, !riscv-nontemporal-domain !8
- ss = __riscv_ntl_load(&us, __RISCV_NTLH_ALL); // CHECK: load i16{{.*}}align 2, !nontemporal !4, !riscv-nontemporal-domain !8
- ui = __riscv_ntl_load(&si, __RISCV_NTLH_ALL); // CHECK: load i32{{.*}}align 4, !nontemporal !4, !riscv-nontemporal-domain !8
- si = __riscv_ntl_load(&ui, __RISCV_NTLH_ALL); // CHECK: load i32{{.*}}align 4, !nontemporal !4, !riscv-nontemporal-domain !8
- ull = __riscv_ntl_load(&sll, __RISCV_NTLH_ALL); // CHECK: load i64{{.*}}align 8, !nontemporal !4, !riscv-nontemporal-domain !8
- sll = __riscv_ntl_load(&ull, __RISCV_NTLH_ALL); // CHECK: load i64{{.*}}align 8, !nontemporal !4, !riscv-nontemporal-domain !8
- h1 = __riscv_ntl_load(&h2, __RISCV_NTLH_ALL); // CHECK: load half{{.*}}align 2, !nontemporal !4, !riscv-nontemporal-domain !8
- f1 = __riscv_ntl_load(&f2, __RISCV_NTLH_ALL); // CHECK: load float{{.*}}align 4, !nontemporal !4, !riscv-nontemporal-domain !8
- d1 = __riscv_ntl_load(&d2, __RISCV_NTLH_ALL); // CHECK: load double{{.*}}align 8, !nontemporal !4, !riscv-nontemporal-domain !8
- v4si1 = __riscv_ntl_load(&v4si2, __RISCV_NTLH_ALL); // CHECK: load <4 x i32>{{.*}}align 16, !nontemporal !4, !riscv-nontemporal-domain !8
- v8ss1 = __riscv_ntl_load(&v8ss2, __RISCV_NTLH_ALL); // CHECK: load <8 x i16>{{.*}}align 16, !nontemporal !4, !riscv-nontemporal-domain !8
- v16sc1 = __riscv_ntl_load(&v16sc2, __RISCV_NTLH_ALL); // CHECK: load <16 x i8>{{.*}}align 16, !nontemporal !4, !riscv-nontemporal-domain !8
- *scvi1 = __riscv_ntl_load(scvi2, __RISCV_NTLH_ALL); // CHECK: load <vscale x 2 x i32>{{.*}}align 8, !nontemporal !4, !riscv-nontemporal-domain !8
- *scvs1 = __riscv_ntl_load(scvs2, __RISCV_NTLH_ALL); // CHECK: load <vscale x 4 x i16>{{.*}}align 8, !nontemporal !4, !riscv-nontemporal-domain !8
- *scvc1 = __riscv_ntl_load(scvc2, __RISCV_NTLH_ALL); // CHECK: load <vscale x 8 x i8>{{.*}}align 8, !nontemporal !4, !riscv-nontemporal-domain !8
+ uc = __riscv_ntl_load(&sc, __RISCV_NTLH_ALL); // CHECK: load i8{{.*}}align 1, !nontemporal !6, !riscv-nontemporal-domain !10
+ sc = __riscv_ntl_load(&uc, __RISCV_NTLH_ALL); // CHECK: load i8{{.*}}align 1, !nontemporal !6, !riscv-nontemporal-domain !10
+ us = __riscv_ntl_load(&ss, __RISCV_NTLH_ALL); // CHECK: load i16{{.*}}align 2, !nontemporal !6, !riscv-nontemporal-domain !10
+ ss = __riscv_ntl_load(&us, __RISCV_NTLH_ALL); // CHECK: load i16{{.*}}align 2, !nontemporal !6, !riscv-nontemporal-domain !10
+ ui = __riscv_ntl_load(&si, __RISCV_NTLH_ALL); // CHECK: load i32{{.*}}align 4, !nontemporal !6, !riscv-nontemporal-domain !10
+ si = __riscv_ntl_load(&ui, __RISCV_NTLH_ALL); // CHECK: load i32{{.*}}align 4, !nontemporal !6, !riscv-nontemporal-domain !10
+ ull = __riscv_ntl_load(&sll, __RISCV_NTLH_ALL); // CHECK: load i64{{.*}}align 8, !nontemporal !6, !riscv-nontemporal-domain !10
+ sll = __riscv_ntl_load(&ull, __RISCV_NTLH_ALL); // CHECK: load i64{{.*}}align 8, !nontemporal !6, !riscv-nontemporal-domain !10
+ h1 = __riscv_ntl_load(&h2, __RISCV_NTLH_ALL); // CHECK: load half{{.*}}align 2, !nontemporal !6, !riscv-nontemporal-domain !10
+ f1 = __riscv_ntl_load(&f2, __RISCV_NTLH_ALL); // CHECK: load float{{.*}}align 4, !nontemporal !6, !riscv-nontemporal-domain !10
+ d1 = __riscv_ntl_load(&d2, __RISCV_NTLH_ALL); // CHECK: load double{{.*}}align 8, !nontemporal !6, !riscv-nontemporal-domain !10
+ v4si1 = __riscv_ntl_load(&v4si2, __RISCV_NTLH_ALL); // CHECK: load <4 x i32>{{.*}}align 16, !nontemporal !6, !riscv-nontemporal-domain !10
+ v8ss1 = __riscv_ntl_load(&v8ss2, __RISCV_NTLH_ALL); // CHECK: load <8 x i16>{{.*}}align 16, !nontemporal !6, !riscv-nontemporal-domain !10
+ v16sc1 = __riscv_ntl_load(&v16sc2, __RISCV_NTLH_ALL); // CHECK: load <16 x i8>{{.*}}align 16, !nontemporal !6, !riscv-nontemporal-domain !10
+ *scvi1 = __riscv_ntl_load(scvi2, __RISCV_NTLH_ALL); // CHECK: load <vscale x 2 x i32>{{.*}}align 8, !nontemporal !6, !riscv-nontemporal-domain !10
+ *scvs1 = __riscv_ntl_load(scvs2, __RISCV_NTLH_ALL); // CHECK: load <vscale x 4 x i16>{{.*}}align 8, !nontemporal !6, !riscv-nontemporal-domain !10
+ *scvc1 = __riscv_ntl_load(scvc2, __RISCV_NTLH_ALL); // CHECK: load <vscale x 8 x i8>{{.*}}align 8, !nontemporal !6, !riscv-nontemporal-domain !10
- uc = __riscv_ntl_load(&sc); // CHECK: load i8{{.*}}align 1, !nontemporal !4, !riscv-nontemporal-domain !8
- sc = __riscv_ntl_load(&uc); // CHECK: load i8{{.*}}align 1, !nontemporal !4, !riscv-nontemporal-domain !8
- us = __riscv_ntl_load(&ss); // CHECK: load i16{{.*}}align 2, !nontemporal !4, !riscv-nontemporal-domain !8
- ss = __riscv_ntl_load(&us); // CHECK: load i16{{.*}}align 2, !nontemporal !4, !riscv-nontemporal-domain !8
- ui = __riscv_ntl_load(&si); // CHECK: load i32{{.*}}align 4, !nontemporal !4, !riscv-nontemporal-domain !8
- si = __riscv_ntl_load(&ui); // CHECK: load i32{{.*}}align 4, !nontemporal !4, !riscv-nontemporal-domain !8
- ull = __riscv_ntl_load(&sll); // CHECK: load i64{{.*}}align 8, !nontemporal !4, !riscv-nontemporal-domain !8
- sll = __riscv_ntl_load(&ull); // CHECK: load i64{{.*}}align 8, !nontemporal !4, !riscv-nontemporal-domain !8
- h1 = __riscv_ntl_load(&h2); // CHECK: load half{{.*}}align 2, !nontemporal !4, !riscv-nontemporal-domain !8
- f1 = __riscv_ntl_load(&f2); // CHECK: load float{{.*}}align 4, !nontemporal !4, !riscv-nontemporal-domain !8
- d1 = __riscv_ntl_load(&d2); // CHECK: load double{{.*}}align 8, !nontemporal !4, !riscv-nontemporal-domain !8
- v4si1 = __riscv_ntl_load(&v4si2); // CHECK: load <4 x i32>{{.*}}align 16, !nontemporal !4, !riscv-nontemporal-domain !8
- v8ss1 = __riscv_ntl_load(&v8ss2); // CHECK: load <8 x i16>{{.*}}align 16, !nontemporal !4, !riscv-nontemporal-domain !8
- v16sc1 = __riscv_ntl_load(&v16sc2); // CHECK: load <16 x i8>{{.*}}align 16, !nontemporal !4, !riscv-nontemporal-domain !8
- *scvi1 = __riscv_ntl_load(scvi2); // CHECK: load <vscale x 2 x i32>{{.*}}align 8, !nontemporal !4, !riscv-nontemporal-domain !8
- *scvs1 = __riscv_ntl_load(scvs2); // CHECK: load <vscale x 4 x i16>{{.*}}align 8, !nontemporal !4, !riscv-nontemporal-domain !8
- *scvc1 = __riscv_ntl_load(scvc2); // CHECK: load <vscale x 8 x i8>{{.*}}align 8, !nontemporal !4, !riscv-nontemporal-domain !8
+ uc = __riscv_ntl_load(&sc); // CHECK: load i8{{.*}}align 1, !nontemporal !6, !riscv-nontemporal-domain !10
+ sc = __riscv_ntl_load(&uc); // CHECK: load i8{{.*}}align 1, !nontemporal !6, !riscv-nontemporal-domain !10
+ us = __riscv_ntl_load(&ss); // CHECK: load i16{{.*}}align 2, !nontemporal !6, !riscv-nontemporal-domain !10
+ ss = __riscv_ntl_load(&us); // CHECK: load i16{{.*}}align 2, !nontemporal !6, !riscv-nontemporal-domain !10
+ ui = __riscv_ntl_load(&si); // CHECK: load i32{{.*}}align 4, !nontemporal !6, !riscv-nontemporal-domain !10
+ si = __riscv_ntl_load(&ui); // CHECK: load i32{{.*}}align 4, !nontemporal !6, !riscv-nontemporal-domain !10
+ ull = __riscv_ntl_load(&sll); // CHECK: load i64{{.*}}align 8, !nontemporal !6, !riscv-nontemporal-domain !10
+ sll = __riscv_ntl_load(&ull); // CHECK: load i64{{.*}}align 8, !nontemporal !6, !riscv-nontemporal-domain !10
+ h1 = __riscv_ntl_load(&h2); // CHECK: load half{{.*}}align 2, !nontemporal !6, !riscv-nontemporal-domain !10
+ f1 = __riscv_ntl_load(&f2); // CHECK: load float{{.*}}align 4, !nontemporal !6, !riscv-nontemporal-domain !10
+ d1 = __riscv_ntl_load(&d2); // CHECK: load double{{.*}}align 8, !nontemporal !6, !riscv-nontemporal-domain !10
+ v4si1 = __riscv_ntl_load(&v4si2); // CHECK: load <4 x i32>{{.*}}align 16, !nontemporal !6, !riscv-nontemporal-domain !10
+ v8ss1 = __riscv_ntl_load(&v8ss2); // CHECK: load <8 x i16>{{.*}}align 16, !nontemporal !6, !riscv-nontemporal-domain !10
+ v16sc1 = __riscv_ntl_load(&v16sc2); // CHECK: load <16 x i8>{{.*}}align 16, !nontemporal !6, !riscv-nontemporal-domain !10
+ *scvi1 = __riscv_ntl_load(scvi2); // CHECK: load <vscale x 2 x i32>{{.*}}align 8, !nontemporal !6, !riscv-nontemporal-domain !10
+ *scvs1 = __riscv_ntl_load(scvs2); // CHECK: load <vscale x 4 x i16>{{.*}}align 8, !nontemporal !6, !riscv-nontemporal-domain !10
+ *scvc1 = __riscv_ntl_load(scvc2); // CHECK: load <vscale x 8 x i8>{{.*}}align 8, !nontemporal !6, !riscv-nontemporal-domain !10
- __riscv_ntl_store(&uc, 1, __RISCV_NTLH_INNERMOST_PRIVATE); // CHECK: store i8{{.*}}align 1, !nontemporal !4, !riscv-nontemporal-domain !5
- __riscv_ntl_store(&sc, 1, __RISCV_NTLH_INNERMOST_PRIVATE); // CHECK: store i8{{.*}}align 1, !nontemporal !4, !riscv-nontemporal-domain !5
- __riscv_ntl_store(&us, 1, __RISCV_NTLH_INNERMOST_PRIVATE); // CHECK: store i16{{.*}}align 2, !nontemporal !4, !riscv-nontemporal-domain !5
- __riscv_ntl_store(&ss, 1, __RISCV_NTLH_INNERMOST_PRIVATE); // CHECK: store i16{{.*}}align 2, !nontemporal !4, !riscv-nontemporal-domain !5
- __riscv_ntl_store(&ui, 1, __RISCV_NTLH_INNERMOST_PRIVATE); // CHECK: store i32{{.*}}align 4, !nontemporal !4, !riscv-nontemporal-domain !5
- __riscv_ntl_store(&si, 1, __RISCV_NTLH_INNERMOST_PRIVATE); // CHECK: store i32{{.*}}align 4, !nontemporal !4, !riscv-nontemporal-domain !5
- __riscv_ntl_store(&ull, 1, __RISCV_NTLH_INNERMOST_PRIVATE); // CHECK: store i64{{.*}}align 8, !nontemporal !4, !riscv-nontemporal-domain !5
- __riscv_ntl_store(&sll, 1, __RISCV_NTLH_INNERMOST_PRIVATE); // CHECK: store i64{{.*}}align 8, !nontemporal !4, !riscv-nontemporal-domain !5
- __riscv_ntl_store(&h1, 1.0, __RISCV_NTLH_INNERMOST_PRIVATE); // CHECK: store half{{.*}}align 2, !nontemporal !4, !riscv-nontemporal-domain !5
- __riscv_ntl_store(&f1, 1.0, __RISCV_NTLH_INNERMOST_PRIVATE); // CHECK: store float{{.*}}align 4, !nontemporal !4, !riscv-nontemporal-domain !5
- __riscv_ntl_store(&d1, 1.0, __RISCV_NTLH_INNERMOST_PRIVATE); // CHECK: store double{{.*}}align 8, !nontemporal !4, !riscv-nontemporal-domain !5
- __riscv_ntl_store(&v4si1, v4si2, __RISCV_NTLH_INNERMOST_PRIVATE); // CHECK: store <4 x i32>{{.*}}align 16, !nontemporal !4, !riscv-nontemporal-domain !5
- __riscv_ntl_store(&v8ss1, v8ss2, __RISCV_NTLH_INNERMOST_PRIVATE); // CHECK: store <8 x i16>{{.*}}align 16, !nontemporal !4, !riscv-nontemporal-domain !5
- __riscv_ntl_store(&v16sc1, v16sc2, __RISCV_NTLH_INNERMOST_PRIVATE); // CHECK: store <16 x i8>{{.*}}align 16, !nontemporal !4, !riscv-nontemporal-domain !5
- __riscv_ntl_store(scvi2, *scvi1, __RISCV_NTLH_INNERMOST_PRIVATE); // CHECK: store <vscale x 2 x i32>{{.*}}align 8, !nontemporal !4, !riscv-nontemporal-domain !5
- __riscv_ntl_store(scvs2, *scvs1, __RISCV_NTLH_INNERMOST_PRIVATE); // CHECK: store <vscale x 4 x i16>{{.*}}align 8, !nontemporal !4, !riscv-nontemporal-domain !5
- __riscv_ntl_store(scvc2, *scvc1, __RISCV_NTLH_INNERMOST_PRIVATE); // CHECK: store <vscale x 8 x i8>{{.*}}align 8, !nontemporal !4, !riscv-nontemporal-domain !5
+ __riscv_ntl_store(&uc, 1, __RISCV_NTLH_INNERMOST_PRIVATE); // CHECK: store i8{{.*}}align 1, !nontemporal !6, !riscv-nontemporal-domain !7
+ __riscv_ntl_store(&sc, 1, __RISCV_NTLH_INNERMOST_PRIVATE); // CHECK: store i8{{.*}}align 1, !nontemporal !6, !riscv-nontemporal-domain !7
+ __riscv_ntl_store(&us, 1, __RISCV_NTLH_INNERMOST_PRIVATE); // CHECK: store i16{{.*}}align 2, !nontemporal !6, !riscv-nontemporal-domain !7
+ __riscv_ntl_store(&ss, 1, __RISCV_NTLH_INNERMOST_PRIVATE); // CHECK: store i16{{.*}}align 2, !nontemporal !6, !riscv-nontemporal-domain !7
+ __riscv_ntl_store(&ui, 1, __RISCV_NTLH_INNERMOST_PRIVATE); // CHECK: store i32{{.*}}align 4, !nontemporal !6, !riscv-nontemporal-domain !7
+ __riscv_ntl_store(&si, 1, __RISCV_NTLH_INNERMOST_PRIVATE); // CHECK: store i32{{.*}}align 4, !nontemporal !6, !riscv-nontemporal-domain !7
+ __riscv_ntl_store(&ull, 1, __RISCV_NTLH_INNERMOST_PRIVATE); // CHECK: store i64{{.*}}align 8, !nontemporal !6, !riscv-nontemporal-domain !7
+ __riscv_ntl_store(&sll, 1, __RISCV_NTLH_INNERMOST_PRIVATE); // CHECK: store i64{{.*}}align 8, !nontemporal !6, !riscv-nontemporal-domain !7
+ __riscv_ntl_store(&h1, 1.0, __RISCV_NTLH_INNERMOST_PRIVATE); // CHECK: store half{{.*}}align 2, !nontemporal !6, !riscv-nontemporal-domain !7
+ __riscv_ntl_store(&f1, 1.0, __RISCV_NTLH_INNERMOST_PRIVATE); // CHECK: store float{{.*}}align 4, !nontemporal !6, !riscv-nontemporal-domain !7
+ __riscv_ntl_store(&d1, 1.0, __RISCV_NTLH_INNERMOST_PRIVATE); // CHECK: store double{{.*}}align 8, !nontemporal !6, !riscv-nontemporal-domain !7
+ __riscv_ntl_store(&v4si1, v4si2, __RISCV_NTLH_INNERMOST_PRIVATE); // CHECK: store <4 x i32>{{.*}}align 16, !nontemporal !6, !riscv-nontemporal-domain !7
+ __riscv_ntl_store(&v8ss1, v8ss2, __RISCV_NTLH_INNERMOST_PRIVATE); // CHECK: store <8 x i16>{{.*}}align 16, !nontemporal !6, !riscv-nontemporal-domain !7
+ __riscv_ntl_store(&v16sc1, v16sc2, __RISCV_NTLH_INNERMOST_PRIVATE); // CHECK: store <16 x i8>{{.*}}align 16, !nontemporal !6, !riscv-nontemporal-domain !7
+ __riscv_ntl_store(scvi2, *scvi1, __RISCV_NTLH_INNERMOST_PRIVATE); // CHECK: store <vscale x 2 x i32>{{.*}}align 8, !nontemporal !6, !riscv-nontemporal-domain !7
+ __riscv_ntl_store(scvs2, *scvs1, __RISCV_NTLH_INNERMOST_PRIVATE); // CHECK: store <vscale x 4 x i16>{{.*}}align 8, !nontemporal !6, !riscv-nontemporal-domain !7
+ __riscv_ntl_store(scvc2, *scvc1, __RISCV_NTLH_INNERMOST_PRIVATE); // CHECK: store <vscale x 8 x i8>{{.*}}align 8, !nontemporal !6, !riscv-nontemporal-domain !7
- __riscv_ntl_store(&uc, 1, __RISCV_NTLH_ALL_PRIVATE); // CHECK: store i8{{.*}}align 1, !nontemporal !4, !riscv-nontemporal-domain !6
- __riscv_ntl_store(&sc, 1, __RISCV_NTLH_ALL_PRIVATE); // CHECK: store i8{{.*}}align 1, !nontemporal !4, !riscv-nontemporal-domain !6
- __riscv_ntl_store(&us, 1, __RISCV_NTLH_ALL_PRIVATE); // CHECK: store i16{{.*}}align 2, !nontemporal !4, !riscv-nontemporal-domain !6
- __riscv_ntl_store(&ss, 1, __RISCV_NTLH_ALL_PRIVATE); // CHECK: store i16{{.*}}align 2, !nontemporal !4, !riscv-nontemporal-domain !6
- __riscv_ntl_store(&ui, 1, __RISCV_NTLH_ALL_PRIVATE); // CHECK: store i32{{.*}}align 4, !nontemporal !4, !riscv-nontemporal-domain !6
- __riscv_ntl_store(&si, 1, __RISCV_NTLH_ALL_PRIVATE); // CHECK: store i32{{.*}}align 4, !nontemporal !4, !riscv-nontemporal-domain !6
- __riscv_ntl_store(&ull, 1, __RISCV_NTLH_ALL_PRIVATE); // CHECK: store i64{{.*}}align 8, !nontemporal !4, !riscv-nontemporal-domain !6
- __riscv_ntl_store(&sll, 1, __RISCV_NTLH_ALL_PRIVATE); // CHECK: store i64{{.*}}align 8, !nontemporal !4, !riscv-nontemporal-domain !6
- __riscv_ntl_store(&h1, 1.0, __RISCV_NTLH_ALL_PRIVATE); // CHECK: store half{{.*}}align 2, !nontemporal !4, !riscv-nontemporal-domain !6
- __riscv_ntl_store(&f1, 1.0, __RISCV_NTLH_ALL_PRIVATE); // CHECK: store float{{.*}}align 4, !nontemporal !4, !riscv-nontemporal-domain !6
- __riscv_ntl_store(&d1, 1.0, __RISCV_NTLH_ALL_PRIVATE); // CHECK: store double{{.*}}align 8, !nontemporal !4, !riscv-nontemporal-domain !6
- __riscv_ntl_store(&v4si1, v4si2, __RISCV_NTLH_ALL_PRIVATE); // CHECK: store <4 x i32>{{.*}}align 16, !nontemporal !4, !riscv-nontemporal-domain !6
- __riscv_ntl_store(&v8ss1, v8ss2, __RISCV_NTLH_ALL_PRIVATE); // CHECK: store <8 x i16>{{.*}}align 16, !nontemporal !4, !riscv-nontemporal-domain !6
- __riscv_ntl_store(&v16sc1, v16sc2, __RISCV_NTLH_ALL_PRIVATE); // CHECK: store <16 x i8>{{.*}}align 16, !nontemporal !4, !riscv-nontemporal-domain !6
- __riscv_ntl_store(scvi2, *scvi1, __RISCV_NTLH_ALL_PRIVATE); // CHECK: store <vscale x 2 x i32>{{.*}}align 8, !nontemporal !4, !riscv-nontemporal-domain !6
- __riscv_ntl_store(scvs2, *scvs1, __RISCV_NTLH_ALL_PRIVATE); // CHECK: store <vscale x 4 x i16>{{.*}}align 8, !nontemporal !4, !riscv-nontemporal-domain !6
- __riscv_ntl_store(scvc2, *scvc1, __RISCV_NTLH_ALL_PRIVATE); // CHECK: store <vscale x 8 x i8>{{.*}}align 8, !nontemporal !4, !riscv-nontemporal-domain !6
+ __riscv_ntl_store(&uc, 1, __RISCV_NTLH_ALL_PRIVATE); // CHECK: store i8{{.*}}align 1, !nontemporal !6, !riscv-nontemporal-domain !8
+ __riscv_ntl_store(&sc, 1, __RISCV_NTLH_ALL_PRIVATE); // CHECK: store i8{{.*}}align 1, !nontemporal !6, !riscv-nontemporal-domain !8
+ __riscv_ntl_store(&us, 1, __RISCV_NTLH_ALL_PRIVATE); // CHECK: store i16{{.*}}align 2, !nontemporal !6, !riscv-nontemporal-domain !8
+ __riscv_ntl_store(&ss, 1, __RISCV_NTLH_ALL_PRIVATE); // CHECK: store i16{{.*}}align 2, !nontemporal !6, !riscv-nontemporal-domain !8
+ __riscv_ntl_store(&ui, 1, __RISCV_NTLH_ALL_PRIVATE); // CHECK: store i32{{.*}}align 4, !nontemporal !6, !riscv-nontemporal-domain !8
+ __riscv_ntl_store(&si, 1, __RISCV_NTLH_ALL_PRIVATE); // CHECK: store i32{{.*}}align 4, !nontemporal !6, !riscv-nontemporal-domain !8
+ __riscv_ntl_store(&ull, 1, __RISCV_NTLH_ALL_PRIVATE); // CHECK: store i64{{.*}}align 8, !nontemporal !6, !riscv-nontemporal-domain !8
+ __riscv_ntl_store(&sll, 1, __RISCV_NTLH_ALL_PRIVATE); // CHECK: store i64{{.*}}align 8, !nontemporal !6, !riscv-nontemporal-domain !8
+ __riscv_ntl_store(&h1, 1.0, __RISCV_NTLH_ALL_PRIVATE); // CHECK: store half{{.*}}align 2, !nontemporal !6, !riscv-nontemporal-domain !8
+ __riscv_ntl_store(&f1, 1.0, __RISCV_NTLH_ALL_PRIVATE); // CHECK: store float{{.*}}align 4, !nontemporal !6, !riscv-nontemporal-domain !8
+ __riscv_ntl_store(&d1, 1.0, __RISCV_NTLH_ALL_PRIVATE); // CHECK: store double{{.*}}align 8, !nontemporal !6, !riscv-nontemporal-domain !8
+ __riscv_ntl_store(&v4si1, v4si2, __RISCV_NTLH_ALL_PRIVATE); // CHECK: store <4 x i32>{{.*}}align 16, !nontemporal !6, !riscv-nontemporal-domain !8
+ __riscv_ntl_store(&v8ss1, v8ss2, __RISCV_NTLH_ALL_PRIVATE); // CHECK: store <8 x i16>{{.*}}align 16, !nontemporal !6, !riscv-nontemporal-domain !8
+ __riscv_ntl_store(&v16sc1, v16sc2, __RISCV_NTLH_ALL_PRIVATE); // CHECK: store <16 x i8>{{.*}}align 16, !nontemporal !6, !riscv-nontemporal-domain !8
+ __riscv_ntl_store(scvi2, *scvi1, __RISCV_NTLH_ALL_PRIVATE); // CHECK: store <vscale x 2 x i32>{{.*}}align 8, !nontemporal !6, !riscv-nontemporal-domain !8
+ __riscv_ntl_store(scvs2, *scvs1, __RISCV_NTLH_ALL_PRIVATE); // CHECK: store <vscale x 4 x i16>{{.*}}align 8, !nontemporal !6, !riscv-nontemporal-domain !8
+ __riscv_ntl_store(scvc2, *scvc1, __RISCV_NTLH_ALL_PRIVATE); // CHECK: store <vscale x 8 x i8>{{.*}}align 8, !nontemporal !6, !riscv-nontemporal-domain !8
- __riscv_ntl_store(&uc, 1, __RISCV_NTLH_INNERMOST_SHARED); // CHECK: store i8{{.*}}align 1, !nontemporal !4, !riscv-nontemporal-domain !7
- __riscv_ntl_store(&sc, 1, __RISCV_NTLH_INNERMOST_SHARED); // CHECK: store i8{{.*}}align 1, !nontemporal !4, !riscv-nontemporal-domain !7
- __riscv_ntl_store(&us, 1, __RISCV_NTLH_INNERMOST_SHARED); // CHECK: store i16{{.*}}align 2, !nontemporal !4, !riscv-nontemporal-domain !7
- __riscv_ntl_store(&ss, 1, __RISCV_NTLH_INNERMOST_SHARED); // CHECK: store i16{{.*}}align 2, !nontemporal !4, !riscv-nontemporal-domain !7
- __riscv_ntl_store(&ui, 1, __RISCV_NTLH_INNERMOST_SHARED); // CHECK: store i32{{.*}}align 4, !nontemporal !4, !riscv-nontemporal-domain !7
- __riscv_ntl_store(&si, 1, __RISCV_NTLH_INNERMOST_SHARED); // CHECK: store i32{{.*}}align 4, !nontemporal !4, !riscv-nontemporal-domain !7
- __riscv_ntl_store(&ull, 1, __RISCV_NTLH_INNERMOST_SHARED); // CHECK: store i64{{.*}}align 8, !nontemporal !4, !riscv-nontemporal-domain !7
- __riscv_ntl_store(&sll, 1, __RISCV_NTLH_INNERMOST_SHARED); // CHECK: store i64{{.*}}align 8, !nontemporal !4, !riscv-nontemporal-domain !7
- __riscv_ntl_store(&h1, 1.0, __RISCV_NTLH_INNERMOST_SHARED); // CHECK: store half{{.*}}align 2, !nontemporal !4, !riscv-nontemporal-domain !7
- __riscv_ntl_store(&f1, 1.0, __RISCV_NTLH_INNERMOST_SHARED); // CHECK: store float{{.*}}align 4, !nontemporal !4, !riscv-nontemporal-domain !7
- __riscv_ntl_store(&d1, 1.0, __RISCV_NTLH_INNERMOST_SHARED); // CHECK: store double{{.*}}align 8, !nontemporal !4, !riscv-nontemporal-domain !7
- __riscv_ntl_store(&v4si1, v4si2, __RISCV_NTLH_INNERMOST_SHARED); // CHECK: store <4 x i32>{{.*}}align 16, !nontemporal !4, !riscv-nontemporal-domain !7
- __riscv_ntl_store(&v8ss1, v8ss2, __RISCV_NTLH_INNERMOST_SHARED); // CHECK: store <8 x i16>{{.*}}align 16, !nontemporal !4, !riscv-nontemporal-domain !7
- __riscv_ntl_store(&v16sc1, v16sc2, __RISCV_NTLH_INNERMOST_SHARED); // CHECK: store <16 x i8>{{.*}}align 16, !nontemporal !4, !riscv-nontemporal-domain !7
- __riscv_ntl_store(scvi2, *scvi1, __RISCV_NTLH_INNERMOST_SHARED); // CHECK: store <vscale x 2 x i32>{{.*}}align 8, !nontemporal !4, !riscv-nontemporal-domain !7
- __riscv_ntl_store(scvs2, *scvs1, __RISCV_NTLH_INNERMOST_SHARED); // CHECK: store <vscale x 4 x i16>{{.*}}align 8, !nontemporal !4, !riscv-nontemporal-domain !7
- __riscv_ntl_store(scvc2, *scvc1, __RISCV_NTLH_INNERMOST_SHARED); // CHECK: store <vscale x 8 x i8>{{.*}}align 8, !nontemporal !4, !riscv-nontemporal-domain !7
+ __riscv_ntl_store(&uc, 1, __RISCV_NTLH_INNERMOST_SHARED); // CHECK: store i8{{.*}}align 1, !nontemporal !6, !riscv-nontemporal-domain !9
+ __riscv_ntl_store(&sc, 1, __RISCV_NTLH_INNERMOST_SHARED); // CHECK: store i8{{.*}}align 1, !nontemporal !6, !riscv-nontemporal-domain !9
+ __riscv_ntl_store(&us, 1, __RISCV_NTLH_INNERMOST_SHARED); // CHECK: store i16{{.*}}align 2, !nontemporal !6, !riscv-nontemporal-domain !9
+ __riscv_ntl_store(&ss, 1, __RISCV_NTLH_INNERMOST_SHARED); // CHECK: store i16{{.*}}align 2, !nontemporal !6, !riscv-nontemporal-domain !9
+ __riscv_ntl_store(&ui, 1, __RISCV_NTLH_INNERMOST_SHARED); // CHECK: store i32{{.*}}align 4, !nontemporal !6, !riscv-nontemporal-domain !9
+ __riscv_ntl_store(&si, 1, __RISCV_NTLH_INNERMOST_SHARED); // CHECK: store i32{{.*}}align 4, !nontemporal !6, !riscv-nontemporal-domain !9
+ __riscv_ntl_store(&ull, 1, __RISCV_NTLH_INNERMOST_SHARED); // CHECK: store i64{{.*}}align 8, !nontemporal !6, !riscv-nontemporal-domain !9
+ __riscv_ntl_store(&sll, 1, __RISCV_NTLH_INNERMOST_SHARED); // CHECK: store i64{{.*}}align 8, !nontemporal !6, !riscv-nontemporal-domain !9
+ __riscv_ntl_store(&h1, 1.0, __RISCV_NTLH_INNERMOST_SHARED); // CHECK: store half{{.*}}align 2, !nontemporal !6, !riscv-nontemporal-domain !9
+ __riscv_ntl_store(&f1, 1.0, __RISCV_NTLH_INNERMOST_SHARED); // CHECK: store float{{.*}}align 4, !nontemporal !6, !riscv-nontemporal-domain !9
+ __riscv_ntl_store(&d1, 1.0, __RISCV_NTLH_INNERMOST_SHARED); // CHECK: store double{{.*}}align 8, !nontemporal !6, !riscv-nontemporal-domain !9
+ __riscv_ntl_store(&v4si1, v4si2, __RISCV_NTLH_INNERMOST_SHARED); // CHECK: store <4 x i32>{{.*}}align 16, !nontemporal !6, !riscv-nontemporal-domain !9
+ __riscv_ntl_store(&v8ss1, v8ss2, __RISCV_NTLH_INNERMOST_SHARED); // CHECK: store <8 x i16>{{.*}}align 16, !nontemporal !6, !riscv-nontemporal-domain !9
+ __riscv_ntl_store(&v16sc1, v16sc2, __RISCV_NTLH_INNERMOST_SHARED); // CHECK: store <16 x i8>{{.*}}align 16, !nontemporal !6, !riscv-nontemporal-domain !9
+ __riscv_ntl_store(scvi2, *scvi1, __RISCV_NTLH_INNERMOST_SHARED); // CHECK: store <vscale x 2 x i32>{{.*}}align 8, !nontemporal !6, !riscv-nontemporal-domain !9
+ __riscv_ntl_store(scvs2, *scvs1, __RISCV_NTLH_INNERMOST_SHARED); // CHECK: store <vscale x 4 x i16>{{.*}}align 8, !nontemporal !6, !riscv-nontemporal-domain !9
+ __riscv_ntl_store(scvc2, *scvc1, __RISCV_NTLH_INNERMOST_SHARED); // CHECK: store <vscale x 8 x i8>{{.*}}align 8, !nontemporal !6, !riscv-nontemporal-domain !9
- __riscv_ntl_store(&uc, 1, __RISCV_NTLH_ALL); // CHECK: store i8{{.*}}align 1, !nontemporal !4, !riscv-nontemporal-domain !8
- __riscv_ntl_store(&sc, 1, __RISCV_NTLH_ALL); // CHECK: store i8{{.*}}align 1, !nontemporal !4, !riscv-nontemporal-domain !8
- __riscv_ntl_store(&us, 1, __RISCV_NTLH_ALL); // CHECK: store i16{{.*}}align 2, !nontemporal !4, !riscv-nontemporal-domain !8
- __riscv_ntl_store(&ss, 1, __RISCV_NTLH_ALL); // CHECK: store i16{{.*}}align 2, !nontemporal !4, !riscv-nontemporal-domain !8
- __riscv_ntl_store(&ui, 1, __RISCV_NTLH_ALL); // CHECK: store i32{{.*}}align 4, !nontemporal !4, !riscv-nontemporal-domain !8
- __riscv_ntl_store(&si, 1, __RISCV_NTLH_ALL); // CHECK: store i32{{.*}}align 4, !nontemporal !4, !riscv-nontemporal-domain !8
- __riscv_ntl_store(&ull, 1, __RISCV_NTLH_ALL); // CHECK: store i64{{.*}}align 8, !nontemporal !4, !riscv-nontemporal-domain !8
- __riscv_ntl_store(&sll, 1, __RISCV_NTLH_ALL); // CHECK: store i64{{.*}}align 8, !nontemporal !4, !riscv-nontemporal-domain !8
- __riscv_ntl_store(&h1, 1.0, __RISCV_NTLH_ALL); // CHECK: store half{{.*}}align 2, !nontemporal !4, !riscv-nontemporal-domain !8
- __riscv_ntl_store(&f1, 1.0, __RISCV_NTLH_ALL); // CHECK: store float{{.*}}align 4, !nontemporal !4, !riscv-nontemporal-domain !8
- __riscv_ntl_store(&d1, 1.0, __RISCV_NTLH_ALL); // CHECK: store double{{.*}}align 8, !nontemporal !4, !riscv-nontemporal-domain !8
- __riscv_ntl_store(&v4si1, v4si2, __RISCV_NTLH_ALL); // CHECK: store <4 x i32>{{.*}}align 16, !nontemporal !4, !riscv-nontemporal-domain !8
- __riscv_ntl_store(&v8ss1, v8ss2, __RISCV_NTLH_ALL); // CHECK: store <8 x i16>{{.*}}align 16, !nontemporal !4, !riscv-nontemporal-domain !8
- __riscv_ntl_store(&v16sc1, v16sc2, __RISCV_NTLH_ALL); // CHECK: store <16 x i8>{{.*}}align 16, !nontemporal !4, !riscv-nontemporal-domain !8
- __riscv_ntl_store(scvi2, *scvi1, __RISCV_NTLH_ALL); // CHECK: store <vscale x 2 x i32>{{.*}}align 8, !nontemporal !4, !riscv-nontemporal-domain !8
- __riscv_ntl_store(scvs2, *scvs1, __RISCV_NTLH_ALL); // CHECK: store <vscale x 4 x i16>{{.*}}align 8, !nontemporal !4, !riscv-nontemporal-domain !8
- __riscv_ntl_store(scvc2, *scvc1, __RISCV_NTLH_ALL); // CHECK: store <vscale x 8 x i8>{{.*}}align 8, !nontemporal !4, !riscv-nontemporal-domain !8
+ __riscv_ntl_store(&uc, 1, __RISCV_NTLH_ALL); // CHECK: store i8{{.*}}align 1, !nontemporal !6, !riscv-nontemporal-domain !10
+ __riscv_ntl_store(&sc, 1, __RISCV_NTLH_ALL); // CHECK: store i8{{.*}}align 1, !nontemporal !6, !riscv-nontemporal-domain !10
+ __riscv_ntl_store(&us, 1, __RISCV_NTLH_ALL); // CHECK: store i16{{.*}}align 2, !nontemporal !6, !riscv-nontemporal-domain !10
+ __riscv_ntl_store(&ss, 1, __RISCV_NTLH_ALL); // CHECK: store i16{{.*}}align 2, !nontemporal !6, !riscv-nontemporal-domain !10
+ __riscv_ntl_store(&ui, 1, __RISCV_NTLH_ALL); // CHECK: store i32{{.*}}align 4, !nontemporal !6, !riscv-nontemporal-domain !10
+ __riscv_ntl_store(&si, 1, __RISCV_NTLH_ALL); // CHECK: store i32{{.*}}align 4, !nontemporal !6, !riscv-nontemporal-domain !10
+ __riscv_ntl_store(&ull, 1, __RISCV_NTLH_ALL); // CHECK: store i64{{.*}}align 8, !nontemporal !6, !riscv-nontemporal-domain !10
+ __riscv_ntl_store(&sll, 1, __RISCV_NTLH_ALL); // CHECK: store i64{{.*}}align 8, !nontemporal !6, !riscv-nontemporal-domain !10
+ __riscv_ntl_store(&h1, 1.0, __RISCV_NTLH_ALL); // CHECK: store half{{.*}}align 2, !nontemporal !6, !riscv-nontemporal-domain !10
+ __riscv_ntl_store(&f1, 1.0, __RISCV_NTLH_ALL); // CHECK: store float{{.*}}align 4, !nontemporal !6, !riscv-nontemporal-domain !10
+ __riscv_ntl_store(&d1, 1.0, __RISCV_NTLH_ALL); // CHECK: store double{{.*}}align 8, !nontemporal !6, !riscv-nontemporal-domain !10
+ __riscv_ntl_store(&v4si1, v4si2, __RISCV_NTLH_ALL); // CHECK: store <4 x i32>{{.*}}align 16, !nontemporal !6, !riscv-nontemporal-domain !10
+ __riscv_ntl_store(&v8ss1, v8ss2, __RISCV_NTLH_ALL); // CHECK: store <8 x i16>{{.*}}align 16, !nontemporal !6, !riscv-nontemporal-domain !10
+ __riscv_ntl_store(&v16sc1, v16sc2, __RISCV_NTLH_ALL); // CHECK: store <16 x i8>{{.*}}align 16, !nontemporal !6, !riscv-nontemporal-domain !10
+ __riscv_ntl_store(scvi2, *scvi1, __RISCV_NTLH_ALL); // CHECK: store <vscale x 2 x i32>{{.*}}align 8, !nontemporal !6, !riscv-nontemporal-domain !10
+ __riscv_ntl_store(scvs2, *scvs1, __RISCV_NTLH_ALL); // CHECK: store <vscale x 4 x i16>{{.*}}align 8, !nontemporal !6, !riscv-nontemporal-domain !10
+ __riscv_ntl_store(scvc2, *scvc1, __RISCV_NTLH_ALL); // CHECK: store <vscale x 8 x i8>{{.*}}align 8, !nontemporal !6, !riscv-nontemporal-domain !10
- __riscv_ntl_store(&uc, 1); // CHECK: store i8{{.*}}align 1, !nontemporal !4, !riscv-nontemporal-domain !8
- __riscv_ntl_store(&sc, 1); // CHECK: store i8{{.*}}align 1, !nontemporal !4, !riscv-nontemporal-domain !8
- __riscv_ntl_store(&us, 1); // CHECK: store i16{{.*}}align 2, !nontemporal !4, !riscv-nontemporal-domain !8
- __riscv_ntl_store(&ss, 1); // CHECK: store i16{{.*}}align 2, !nontemporal !4, !riscv-nontemporal-domain !8
- __riscv_ntl_store(&ui, 1); // CHECK: store i32{{.*}}align 4, !nontemporal !4, !riscv-nontemporal-domain !8
- __riscv_ntl_store(&si, 1); // CHECK: store i32{{.*}}align 4, !nontemporal !4, !riscv-nontemporal-domain !8
- __riscv_ntl_store(&ull, 1); // CHECK: store i64{{.*}}align 8, !nontemporal !4, !riscv-nontemporal-domain !8
- __riscv_ntl_store(&sll, 1); // CHECK: store i64{{.*}}align 8, !nontemporal !4, !riscv-nontemporal-domain !8
- __riscv_ntl_store(&h1, 1.0); // CHECK: store half{{.*}}align 2, !nontemporal !4, !riscv-nontemporal-domain !8
- __riscv_ntl_store(&f1, 1.0); // CHECK: store float{{.*}}align 4, !nontemporal !4, !riscv-nontemporal-domain !8
- __riscv_ntl_store(&d1, 1.0); // CHECK: store double{{.*}}align 8, !nontemporal !4, !riscv-nontemporal-domain !8
- __riscv_ntl_store(&v4si1, v4si2); // CHECK: store <4 x i32>{{.*}}align 16, !nontemporal !4, !riscv-nontemporal-domain !8
- __riscv_ntl_store(&v8ss1, v8ss2); // CHECK: store <8 x i16>{{.*}}align 16, !nontemporal !4, !riscv-nontemporal-domain !8
- __riscv_ntl_store(&v16sc1, v16sc2); // CHECK: store <16 x i8>{{.*}}align 16, !nontemporal !4, !riscv-nontemporal-domain !8
- __riscv_ntl_store(scvi2, *scvi1); // CHECK: store <vscale x 2 x i32>{{.*}}align 8, !nontemporal !4, !riscv-nontemporal-domain !8
- __riscv_ntl_store(scvs2, *scvs1); // CHECK: store <vscale x 4 x i16>{{.*}}align 8, !nontemporal !4, !riscv-nontemporal-domain !8
- __riscv_ntl_store(scvc2, *scvc1); // CHECK: store <vscale x 8 x i8>{{.*}}align 8, !nontemporal !4, !riscv-nontemporal-domain !8
+ __riscv_ntl_store(&uc, 1); // CHECK: store i8{{.*}}align 1, !nontemporal !6, !riscv-nontemporal-domain !10
+ __riscv_ntl_store(&sc, 1); // CHECK: store i8{{.*}}align 1, !nontemporal !6, !riscv-nontemporal-domain !10
+ __riscv_ntl_store(&us, 1); // CHECK: store i16{{.*}}align 2, !nontemporal !6, !riscv-nontemporal-domain !10
+ __riscv_ntl_store(&ss, 1); // CHECK: store i16{{.*}}align 2, !nontemporal !6, !riscv-nontemporal-domain !10
+ __riscv_ntl_store(&ui, 1); // CHECK: store i32{{.*}}align 4, !nontemporal !6, !riscv-nontemporal-domain !10
+ __riscv_ntl_store(&si, 1); // CHECK: store i32{{.*}}align 4, !nontemporal !6, !riscv-nontemporal-domain !10
+ __riscv_ntl_store(&ull, 1); // CHECK: store i64{{.*}}align 8, !nontemporal !6, !riscv-nontemporal-domain !10
+ __riscv_ntl_store(&sll, 1); // CHECK: store i64{{.*}}align 8, !nontemporal !6, !riscv-nontemporal-domain !10
+ __riscv_ntl_store(&h1, 1.0); // CHECK: store half{{.*}}align 2, !nontemporal !6, !riscv-nontemporal-domain !10
+ __riscv_ntl_store(&f1, 1.0); // CHECK: store float{{.*}}align 4, !nontemporal !6, !riscv-nontemporal-domain !10
+ __riscv_ntl_store(&d1, 1.0); // CHECK: store double{{.*}}align 8, !nontemporal !6, !riscv-nontemporal-domain !10
+ __riscv_ntl_store(&v4si1, v4si2); // CHECK: store <4 x i32>{{.*}}align 16, !nontemporal !6, !riscv-nontemporal-domain !10
+ __riscv_ntl_store(&v8ss1, v8ss2); // CHECK: store <8 x i16>{{.*}}align 16, !nontemporal !6, !riscv-nontemporal-domain !10
+ __riscv_ntl_store(&v16sc1, v16sc2); // CHECK: store <16 x i8>{{.*}}align 16, !nontemporal !6, !riscv-nontemporal-domain !10
+ __riscv_ntl_store(scvi2, *scvi1); // CHECK: store <vscale x 2 x i32>{{.*}}align 8, !nontemporal !6, !riscv-nontemporal-domain !10
+ __riscv_ntl_store(scvs2, *scvs1); // CHECK: store <vscale x 4 x i16>{{.*}}align 8, !nontemporal !6, !riscv-nontemporal-domain !10
+ __riscv_ntl_store(scvc2, *scvc1); // CHECK: store <vscale x 8 x i8>{{.*}}align 8, !nontemporal !6, !riscv-nontemporal-domain !10
}
// clang-format on
-// CHECK: !4 = !{i32 1}
-// CHECK: !5 = !{i32 2}
-// CHECK: !6 = !{i32 3}
-// CHECK: !7 = !{i32 4}
-// CHECK: !8 = !{i32 5}
+// CHECK: !6 = !{i32 1}
+// CHECK: !7 = !{i32 2}
+// CHECK: !8 = !{i32 3}
+// CHECK: !9 = !{i32 4}
+// CHECK: !10 = !{i32 5}
diff --git a/clang/test/CodeGen/RISCV/riscv-metadata-arch.c b/clang/test/CodeGen/RISCV/riscv-metadata-arch.c
new file mode 100644
index 0000000..060eda1
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/riscv-metadata-arch.c
@@ -0,0 +1,20 @@
+// RUN: %clang_cc1 -triple riscv32 -emit-llvm -o - %s \
+// RUN: | FileCheck -check-prefix=RV32I %s
+// RUN: %clang_cc1 -triple riscv32 -target-feature +v -emit-llvm -o - %s \
+// RUN: | FileCheck -check-prefix=RV32IV %s
+// RUN: %clang_cc1 -triple riscv64 -emit-llvm -o - %s \
+// RUN: | FileCheck -check-prefix=RV64I %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -emit-llvm -o - %s \
+// RUN: | FileCheck -check-prefix=RV64IV %s
+
+// RV32I:!{{[0-9]+}} = !{i32 6, !"riscv-isa", ![[ID:[0-9]+]]}
+// RV32I:![[ID]] = !{!"rv32i2p1"}
+
+// RV32IV:!{{[0-9]+}} = !{i32 6, !"riscv-isa", ![[ID:[0-9]+]]}
+// RV32IV:![[ID]] = !{!"rv32i2p1_f2p2_d2p2_v1p0_zicsr2p0_zve32f1p0_zve32x1p0_zve64d1p0_zve64f1p0_zve64x1p0_zvl128b1p0_zvl32b1p0_zvl64b1p0"}
+
+// RV64I:!{{[0-9]+}} = !{i32 6, !"riscv-isa", ![[ID:[0-9]+]]}
+// RV64I:![[ID]] = !{!"rv64i2p1"}
+
+// RV64IV:!{{[0-9]+}} = !{i32 6, !"riscv-isa", ![[ID:[0-9]+]]}
+// RV64IV:![[ID]] = !{!"rv64i2p1_f2p2_d2p2_v1p0_zicsr2p0_zve32f1p0_zve32x1p0_zve64d1p0_zve64f1p0_zve64x1p0_zvl128b1p0_zvl32b1p0_zvl64b1p0"}
diff --git a/clang/test/CodeGen/attr-riscv-rvv-vector-bits-bitcast.c b/clang/test/CodeGen/attr-riscv-rvv-vector-bits-bitcast.c
index a7b3123..20fb4a0 100644
--- a/clang/test/CodeGen/attr-riscv-rvv-vector-bits-bitcast.c
+++ b/clang/test/CodeGen/attr-riscv-rvv-vector-bits-bitcast.c
@@ -177,29 +177,26 @@ void write_float64m1(struct struct_float64m1 *s, vfloat64m1_t x) {
// CHECK-64-LABEL: @read_bool1(
// CHECK-64-NEXT: entry:
-// CHECK-64-NEXT: [[SAVED_VALUE:%.*]] = alloca <8 x i8>, align 8
// CHECK-64-NEXT: [[Y:%.*]] = getelementptr inbounds i8, ptr [[S:%.*]], i64 8
// CHECK-64-NEXT: [[TMP0:%.*]] = load <8 x i8>, ptr [[Y]], align 8, !tbaa [[TBAA4]]
-// CHECK-64-NEXT: store <8 x i8> [[TMP0]], ptr [[SAVED_VALUE]], align 8, !tbaa [[TBAA4]]
-// CHECK-64-NEXT: [[TMP1:%.*]] = load <vscale x 64 x i1>, ptr [[SAVED_VALUE]], align 8, !tbaa [[TBAA4]]
+// CHECK-64-NEXT: [[CAST_SCALABLE:%.*]] = tail call <vscale x 8 x i8> @llvm.vector.insert.nxv8i8.v8i8(<vscale x 8 x i8> undef, <8 x i8> [[TMP0]], i64 0)
+// CHECK-64-NEXT: [[TMP1:%.*]] = bitcast <vscale x 8 x i8> [[CAST_SCALABLE]] to <vscale x 64 x i1>
// CHECK-64-NEXT: ret <vscale x 64 x i1> [[TMP1]]
//
// CHECK-128-LABEL: @read_bool1(
// CHECK-128-NEXT: entry:
-// CHECK-128-NEXT: [[SAVED_VALUE:%.*]] = alloca <16 x i8>, align 16
// CHECK-128-NEXT: [[Y:%.*]] = getelementptr inbounds i8, ptr [[S:%.*]], i64 16
// CHECK-128-NEXT: [[TMP0:%.*]] = load <16 x i8>, ptr [[Y]], align 8, !tbaa [[TBAA4]]
-// CHECK-128-NEXT: store <16 x i8> [[TMP0]], ptr [[SAVED_VALUE]], align 16, !tbaa [[TBAA4]]
-// CHECK-128-NEXT: [[TMP1:%.*]] = load <vscale x 64 x i1>, ptr [[SAVED_VALUE]], align 16, !tbaa [[TBAA4]]
+// CHECK-128-NEXT: [[CAST_SCALABLE:%.*]] = tail call <vscale x 8 x i8> @llvm.vector.insert.nxv8i8.v16i8(<vscale x 8 x i8> undef, <16 x i8> [[TMP0]], i64 0)
+// CHECK-128-NEXT: [[TMP1:%.*]] = bitcast <vscale x 8 x i8> [[CAST_SCALABLE]] to <vscale x 64 x i1>
// CHECK-128-NEXT: ret <vscale x 64 x i1> [[TMP1]]
//
// CHECK-256-LABEL: @read_bool1(
// CHECK-256-NEXT: entry:
-// CHECK-256-NEXT: [[SAVED_VALUE:%.*]] = alloca <32 x i8>, align 32
// CHECK-256-NEXT: [[Y:%.*]] = getelementptr inbounds i8, ptr [[S:%.*]], i64 32
// CHECK-256-NEXT: [[TMP0:%.*]] = load <32 x i8>, ptr [[Y]], align 8, !tbaa [[TBAA4]]
-// CHECK-256-NEXT: store <32 x i8> [[TMP0]], ptr [[SAVED_VALUE]], align 32, !tbaa [[TBAA4]]
-// CHECK-256-NEXT: [[TMP1:%.*]] = load <vscale x 64 x i1>, ptr [[SAVED_VALUE]], align 32, !tbaa [[TBAA4]]
+// CHECK-256-NEXT: [[CAST_SCALABLE:%.*]] = tail call <vscale x 8 x i8> @llvm.vector.insert.nxv8i8.v32i8(<vscale x 8 x i8> undef, <32 x i8> [[TMP0]], i64 0)
+// CHECK-256-NEXT: [[TMP1:%.*]] = bitcast <vscale x 8 x i8> [[CAST_SCALABLE]] to <vscale x 64 x i1>
// CHECK-256-NEXT: ret <vscale x 64 x i1> [[TMP1]]
//
vbool1_t read_bool1(struct struct_bool1 *s) {
@@ -208,29 +205,26 @@ vbool1_t read_bool1(struct struct_bool1 *s) {
// CHECK-64-LABEL: @write_bool1(
// CHECK-64-NEXT: entry:
-// CHECK-64-NEXT: [[SAVED_VALUE:%.*]] = alloca <vscale x 64 x i1>, align 8
-// CHECK-64-NEXT: store <vscale x 64 x i1> [[X:%.*]], ptr [[SAVED_VALUE]], align 8, !tbaa [[TBAA7:![0-9]+]]
-// CHECK-64-NEXT: [[TMP0:%.*]] = load <8 x i8>, ptr [[SAVED_VALUE]], align 8, !tbaa [[TBAA4]]
+// CHECK-64-NEXT: [[TMP0:%.*]] = bitcast <vscale x 64 x i1> [[X:%.*]] to <vscale x 8 x i8>
+// CHECK-64-NEXT: [[CAST_FIXED:%.*]] = tail call <8 x i8> @llvm.vector.extract.v8i8.nxv8i8(<vscale x 8 x i8> [[TMP0]], i64 0)
// CHECK-64-NEXT: [[Y:%.*]] = getelementptr inbounds i8, ptr [[S:%.*]], i64 8
-// CHECK-64-NEXT: store <8 x i8> [[TMP0]], ptr [[Y]], align 8, !tbaa [[TBAA4]]
+// CHECK-64-NEXT: store <8 x i8> [[CAST_FIXED]], ptr [[Y]], align 8, !tbaa [[TBAA4]]
// CHECK-64-NEXT: ret void
//
// CHECK-128-LABEL: @write_bool1(
// CHECK-128-NEXT: entry:
-// CHECK-128-NEXT: [[SAVED_VALUE:%.*]] = alloca <vscale x 64 x i1>, align 16
-// CHECK-128-NEXT: store <vscale x 64 x i1> [[X:%.*]], ptr [[SAVED_VALUE]], align 16, !tbaa [[TBAA7:![0-9]+]]
-// CHECK-128-NEXT: [[TMP0:%.*]] = load <16 x i8>, ptr [[SAVED_VALUE]], align 16, !tbaa [[TBAA4]]
+// CHECK-128-NEXT: [[TMP0:%.*]] = bitcast <vscale x 64 x i1> [[X:%.*]] to <vscale x 8 x i8>
+// CHECK-128-NEXT: [[CAST_FIXED:%.*]] = tail call <16 x i8> @llvm.vector.extract.v16i8.nxv8i8(<vscale x 8 x i8> [[TMP0]], i64 0)
// CHECK-128-NEXT: [[Y:%.*]] = getelementptr inbounds i8, ptr [[S:%.*]], i64 16
-// CHECK-128-NEXT: store <16 x i8> [[TMP0]], ptr [[Y]], align 8, !tbaa [[TBAA4]]
+// CHECK-128-NEXT: store <16 x i8> [[CAST_FIXED]], ptr [[Y]], align 8, !tbaa [[TBAA4]]
// CHECK-128-NEXT: ret void
//
// CHECK-256-LABEL: @write_bool1(
// CHECK-256-NEXT: entry:
-// CHECK-256-NEXT: [[SAVED_VALUE:%.*]] = alloca <vscale x 64 x i1>, align 8
-// CHECK-256-NEXT: store <vscale x 64 x i1> [[X:%.*]], ptr [[SAVED_VALUE]], align 8, !tbaa [[TBAA7:![0-9]+]]
-// CHECK-256-NEXT: [[TMP0:%.*]] = load <32 x i8>, ptr [[SAVED_VALUE]], align 8, !tbaa [[TBAA4]]
+// CHECK-256-NEXT: [[TMP0:%.*]] = bitcast <vscale x 64 x i1> [[X:%.*]] to <vscale x 8 x i8>
+// CHECK-256-NEXT: [[CAST_FIXED:%.*]] = tail call <32 x i8> @llvm.vector.extract.v32i8.nxv8i8(<vscale x 8 x i8> [[TMP0]], i64 0)
// CHECK-256-NEXT: [[Y:%.*]] = getelementptr inbounds i8, ptr [[S:%.*]], i64 32
-// CHECK-256-NEXT: store <32 x i8> [[TMP0]], ptr [[Y]], align 8, !tbaa [[TBAA4]]
+// CHECK-256-NEXT: store <32 x i8> [[CAST_FIXED]], ptr [[Y]], align 8, !tbaa [[TBAA4]]
// CHECK-256-NEXT: ret void
//
void write_bool1(struct struct_bool1 *s, vbool1_t x) {
diff --git a/clang/test/CodeGen/attr-riscv-rvv-vector-bits-call.c b/clang/test/CodeGen/attr-riscv-rvv-vector-bits-call.c
index 888abe1..1824d97 100644
--- a/clang/test/CodeGen/attr-riscv-rvv-vector-bits-call.c
+++ b/clang/test/CodeGen/attr-riscv-rvv-vector-bits-call.c
@@ -70,13 +70,7 @@ fixed_float64m1_t call_float64_ff(fixed_float64m1_t op1, fixed_float64m1_t op2)
// CHECK-LABEL: @call_bool1_ff(
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[SAVED_VALUE4:%.*]] = alloca <vscale x 64 x i1>, align 8
-// CHECK-NEXT: [[RETVAL_COERCE:%.*]] = alloca <vscale x 64 x i1>, align 8
-// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 64 x i1> @llvm.riscv.vmand.nxv64i1.i64(<vscale x 64 x i1> [[OP1_COERCE:%.*]], <vscale x 64 x i1> [[OP2_COERCE:%.*]], i64 256)
-// CHECK-NEXT: store <vscale x 64 x i1> [[TMP0]], ptr [[SAVED_VALUE4]], align 8, !tbaa [[TBAA4:![0-9]+]]
-// CHECK-NEXT: [[TMP1:%.*]] = load <32 x i8>, ptr [[SAVED_VALUE4]], align 8, !tbaa [[TBAA8:![0-9]+]]
-// CHECK-NEXT: store <32 x i8> [[TMP1]], ptr [[RETVAL_COERCE]], align 8
-// CHECK-NEXT: [[TMP2:%.*]] = load <vscale x 64 x i1>, ptr [[RETVAL_COERCE]], align 8
+// CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 64 x i1> @llvm.riscv.vmand.nxv64i1.i64(<vscale x 64 x i1> [[TMP0:%.*]], <vscale x 64 x i1> [[TMP1:%.*]], i64 256)
// CHECK-NEXT: ret <vscale x 64 x i1> [[TMP2]]
//
fixed_bool1_t call_bool1_ff(fixed_bool1_t op1, fixed_bool1_t op2) {
@@ -116,14 +110,8 @@ fixed_float64m1_t call_float64_fs(fixed_float64m1_t op1, vfloat64m1_t op2) {
// CHECK-LABEL: @call_bool1_fs(
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[SAVED_VALUE2:%.*]] = alloca <vscale x 64 x i1>, align 8
-// CHECK-NEXT: [[RETVAL_COERCE:%.*]] = alloca <vscale x 64 x i1>, align 8
-// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 64 x i1> @llvm.riscv.vmand.nxv64i1.i64(<vscale x 64 x i1> [[OP1_COERCE:%.*]], <vscale x 64 x i1> [[OP2:%.*]], i64 256)
-// CHECK-NEXT: store <vscale x 64 x i1> [[TMP0]], ptr [[SAVED_VALUE2]], align 8, !tbaa [[TBAA4]]
-// CHECK-NEXT: [[TMP1:%.*]] = load <32 x i8>, ptr [[SAVED_VALUE2]], align 8, !tbaa [[TBAA8]]
-// CHECK-NEXT: store <32 x i8> [[TMP1]], ptr [[RETVAL_COERCE]], align 8
-// CHECK-NEXT: [[TMP2:%.*]] = load <vscale x 64 x i1>, ptr [[RETVAL_COERCE]], align 8
-// CHECK-NEXT: ret <vscale x 64 x i1> [[TMP2]]
+// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 64 x i1> @llvm.riscv.vmand.nxv64i1.i64(<vscale x 64 x i1> [[TMP0:%.*]], <vscale x 64 x i1> [[OP2:%.*]], i64 256)
+// CHECK-NEXT: ret <vscale x 64 x i1> [[TMP1]]
//
fixed_bool1_t call_bool1_fs(fixed_bool1_t op1, vbool1_t op2) {
return __riscv_vmand(op1, op2, __riscv_v_fixed_vlen);
@@ -162,14 +150,8 @@ fixed_float64m1_t call_float64_ss(vfloat64m1_t op1, vfloat64m1_t op2) {
// CHECK-LABEL: @call_bool1_ss(
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[SAVED_VALUE:%.*]] = alloca <vscale x 64 x i1>, align 8
-// CHECK-NEXT: [[RETVAL_COERCE:%.*]] = alloca <vscale x 64 x i1>, align 8
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 64 x i1> @llvm.riscv.vmand.nxv64i1.i64(<vscale x 64 x i1> [[OP1:%.*]], <vscale x 64 x i1> [[OP2:%.*]], i64 256)
-// CHECK-NEXT: store <vscale x 64 x i1> [[TMP0]], ptr [[SAVED_VALUE]], align 8, !tbaa [[TBAA4]]
-// CHECK-NEXT: [[TMP1:%.*]] = load <32 x i8>, ptr [[SAVED_VALUE]], align 8, !tbaa [[TBAA8]]
-// CHECK-NEXT: store <32 x i8> [[TMP1]], ptr [[RETVAL_COERCE]], align 8
-// CHECK-NEXT: [[TMP2:%.*]] = load <vscale x 64 x i1>, ptr [[RETVAL_COERCE]], align 8
-// CHECK-NEXT: ret <vscale x 64 x i1> [[TMP2]]
+// CHECK-NEXT: ret <vscale x 64 x i1> [[TMP0]]
//
fixed_bool1_t call_bool1_ss(vbool1_t op1, vbool1_t op2) {
return __riscv_vmand(op1, op2, __riscv_v_fixed_vlen);
diff --git a/clang/test/CodeGen/attr-riscv-rvv-vector-bits-cast.c b/clang/test/CodeGen/attr-riscv-rvv-vector-bits-cast.c
index fe27817..3806c3e 100644
--- a/clang/test/CodeGen/attr-riscv-rvv-vector-bits-cast.c
+++ b/clang/test/CodeGen/attr-riscv-rvv-vector-bits-cast.c
@@ -65,13 +65,7 @@ fixed_float64m1_t from_vfloat64m1_t(vfloat64m1_t type) {
// CHECK-LABEL: @from_vbool1_t(
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[SAVED_VALUE:%.*]] = alloca <vscale x 64 x i1>, align 8
-// CHECK-NEXT: [[RETVAL_COERCE:%.*]] = alloca <vscale x 64 x i1>, align 8
-// CHECK-NEXT: store <vscale x 64 x i1> [[TYPE:%.*]], ptr [[SAVED_VALUE]], align 8, !tbaa [[TBAA4:![0-9]+]]
-// CHECK-NEXT: [[TMP0:%.*]] = load <32 x i8>, ptr [[SAVED_VALUE]], align 8, !tbaa [[TBAA8:![0-9]+]]
-// CHECK-NEXT: store <32 x i8> [[TMP0]], ptr [[RETVAL_COERCE]], align 8
-// CHECK-NEXT: [[TMP1:%.*]] = load <vscale x 64 x i1>, ptr [[RETVAL_COERCE]], align 8
-// CHECK-NEXT: ret <vscale x 64 x i1> [[TMP1]]
+// CHECK-NEXT: ret <vscale x 64 x i1> [[TYPE:%.*]]
//
fixed_bool1_t from_vbool1_t(vbool1_t type) {
return type;
@@ -79,7 +73,7 @@ fixed_bool1_t from_vbool1_t(vbool1_t type) {
// CHECK-LABEL: @to_vbool1_t(
// CHECK-NEXT: entry:
-// CHECK-NEXT: ret <vscale x 64 x i1> [[TYPE_COERCE:%.*]]
+// CHECK-NEXT: ret <vscale x 64 x i1> [[TMP0:%.*]]
//
vbool1_t to_vbool1_t(fixed_bool1_t type) {
return type;
@@ -105,8 +99,8 @@ vbool4_t to_vbool4_t(fixed_bool4_t type) {
// CHECK-NEXT: entry:
// CHECK-NEXT: [[SAVED_VALUE:%.*]] = alloca <vscale x 2 x i1>, align 1
// CHECK-NEXT: [[RETVAL_COERCE:%.*]] = alloca <vscale x 2 x i1>, align 1
-// CHECK-NEXT: store <vscale x 2 x i1> [[TYPE:%.*]], ptr [[SAVED_VALUE]], align 1, !tbaa [[TBAA9:![0-9]+]]
-// CHECK-NEXT: [[TMP0:%.*]] = load <1 x i8>, ptr [[SAVED_VALUE]], align 1, !tbaa [[TBAA8]]
+// CHECK-NEXT: store <vscale x 2 x i1> [[TYPE:%.*]], ptr [[SAVED_VALUE]], align 1, !tbaa [[TBAA4:![0-9]+]]
+// CHECK-NEXT: [[TMP0:%.*]] = load <1 x i8>, ptr [[SAVED_VALUE]], align 1, !tbaa [[TBAA8:![0-9]+]]
// CHECK-NEXT: store <1 x i8> [[TMP0]], ptr [[RETVAL_COERCE]], align 1
// CHECK-NEXT: [[TMP1:%.*]] = load <vscale x 2 x i1>, ptr [[RETVAL_COERCE]], align 1
// CHECK-NEXT: ret <vscale x 2 x i1> [[TMP1]]
diff --git a/clang/test/CodeGen/attr-riscv-rvv-vector-bits-codegen.c b/clang/test/CodeGen/attr-riscv-rvv-vector-bits-codegen.c
index ac22bdc..eb769fa 100644
--- a/clang/test/CodeGen/attr-riscv-rvv-vector-bits-codegen.c
+++ b/clang/test/CodeGen/attr-riscv-rvv-vector-bits-codegen.c
@@ -53,25 +53,24 @@ fixed_bool32_t global_bool32;
// CHECK-NEXT: [[M_ADDR:%.*]] = alloca <vscale x 64 x i1>, align 1
// CHECK-NEXT: [[VEC_ADDR:%.*]] = alloca <vscale x 64 x i8>, align 1
// CHECK-NEXT: [[MASK:%.*]] = alloca <vscale x 64 x i1>, align 1
-// CHECK-NEXT: [[SAVED_VALUE:%.*]] = alloca <32 x i8>, align 32
// CHECK-NEXT: store <vscale x 64 x i1> [[M:%.*]], ptr [[M_ADDR]], align 1
// CHECK-NEXT: store <vscale x 64 x i8> [[VEC:%.*]], ptr [[VEC_ADDR]], align 1
// CHECK-NEXT: [[TMP0:%.*]] = load <vscale x 64 x i1>, ptr [[M_ADDR]], align 1
// CHECK-NEXT: [[TMP1:%.*]] = load <32 x i8>, ptr @global_bool1, align 8
-// CHECK-NEXT: store <32 x i8> [[TMP1]], ptr [[SAVED_VALUE]], align 32
-// CHECK-NEXT: [[TMP2:%.*]] = load <vscale x 64 x i1>, ptr [[SAVED_VALUE]], align 32
+// CHECK-NEXT: [[CAST_SCALABLE:%.*]] = call <vscale x 8 x i8> @llvm.vector.insert.nxv8i8.v32i8(<vscale x 8 x i8> undef, <32 x i8> [[TMP1]], i64 0)
+// CHECK-NEXT: [[TMP2:%.*]] = bitcast <vscale x 8 x i8> [[CAST_SCALABLE]] to <vscale x 64 x i1>
// CHECK-NEXT: [[TMP3:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmand.nxv64i1.i64(<vscale x 64 x i1> [[TMP0]], <vscale x 64 x i1> [[TMP2]], i64 256)
// CHECK-NEXT: store <vscale x 64 x i1> [[TMP3]], ptr [[MASK]], align 1
// CHECK-NEXT: [[TMP4:%.*]] = load <vscale x 64 x i1>, ptr [[MASK]], align 1
// CHECK-NEXT: [[TMP5:%.*]] = load <vscale x 64 x i8>, ptr [[VEC_ADDR]], align 1
// CHECK-NEXT: [[TMP6:%.*]] = load <256 x i8>, ptr @global_vec_int8m8, align 8
-// CHECK-NEXT: [[CAST_SCALABLE:%.*]] = call <vscale x 64 x i8> @llvm.vector.insert.nxv64i8.v256i8(<vscale x 64 x i8> undef, <256 x i8> [[TMP6]], i64 0)
-// CHECK-NEXT: [[TMP7:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vadd.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[TMP5]], <vscale x 64 x i8> [[CAST_SCALABLE]], <vscale x 64 x i1> [[TMP4]], i64 256, i64 3)
+// CHECK-NEXT: [[CAST_SCALABLE1:%.*]] = call <vscale x 64 x i8> @llvm.vector.insert.nxv64i8.v256i8(<vscale x 64 x i8> undef, <256 x i8> [[TMP6]], i64 0)
+// CHECK-NEXT: [[TMP7:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vadd.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[TMP5]], <vscale x 64 x i8> [[CAST_SCALABLE1]], <vscale x 64 x i1> [[TMP4]], i64 256, i64 3)
// CHECK-NEXT: [[CAST_FIXED:%.*]] = call <256 x i8> @llvm.vector.extract.v256i8.nxv64i8(<vscale x 64 x i8> [[TMP7]], i64 0)
// CHECK-NEXT: store <256 x i8> [[CAST_FIXED]], ptr [[RETVAL]], align 8
// CHECK-NEXT: [[TMP8:%.*]] = load <256 x i8>, ptr [[RETVAL]], align 8
-// CHECK-NEXT: [[CAST_SCALABLE1:%.*]] = call <vscale x 64 x i8> @llvm.vector.insert.nxv64i8.v256i8(<vscale x 64 x i8> undef, <256 x i8> [[TMP8]], i64 0)
-// CHECK-NEXT: ret <vscale x 64 x i8> [[CAST_SCALABLE1]]
+// CHECK-NEXT: [[CAST_SCALABLE2:%.*]] = call <vscale x 64 x i8> @llvm.vector.insert.nxv64i8.v256i8(<vscale x 64 x i8> undef, <256 x i8> [[TMP8]], i64 0)
+// CHECK-NEXT: ret <vscale x 64 x i8> [[CAST_SCALABLE2]]
//
fixed_int8m8_t test_bool1(vbool1_t m, vint8m8_t vec) {
vbool1_t mask = __riscv_vmand(m, global_bool1, __riscv_v_fixed_vlen);
@@ -181,15 +180,15 @@ fixed_int32m1_t array_arg(fixed_int32m1_t arr[]) {
// CHECK-NEXT: [[RETVAL:%.*]] = alloca <32 x i8>, align 8
// CHECK-NEXT: [[ARR:%.*]] = alloca [3 x <32 x i8>], align 8
// CHECK-NEXT: [[PARR:%.*]] = alloca ptr, align 8
-// CHECK-NEXT: [[RETVAL_COERCE:%.*]] = alloca <vscale x 64 x i1>, align 8
// CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [3 x <32 x i8>], ptr [[ARR]], i64 0, i64 0
// CHECK-NEXT: store ptr [[ARRAYIDX]], ptr [[PARR]], align 8
// CHECK-NEXT: [[TMP0:%.*]] = load ptr, ptr [[PARR]], align 8
// CHECK-NEXT: [[TMP1:%.*]] = load <32 x i8>, ptr [[TMP0]], align 8
// CHECK-NEXT: store <32 x i8> [[TMP1]], ptr [[RETVAL]], align 8
-// CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[RETVAL_COERCE]], ptr align 8 [[RETVAL]], i64 32, i1 false)
-// CHECK-NEXT: [[TMP2:%.*]] = load <vscale x 64 x i1>, ptr [[RETVAL_COERCE]], align 8
-// CHECK-NEXT: ret <vscale x 64 x i1> [[TMP2]]
+// CHECK-NEXT: [[TMP2:%.*]] = load <32 x i8>, ptr [[RETVAL]], align 8
+// CHECK-NEXT: [[CAST_SCALABLE:%.*]] = call <vscale x 8 x i8> @llvm.vector.insert.nxv8i8.v32i8(<vscale x 8 x i8> undef, <32 x i8> [[TMP2]], i64 0)
+// CHECK-NEXT: [[TMP3:%.*]] = bitcast <vscale x 8 x i8> [[CAST_SCALABLE]] to <vscale x 64 x i1>
+// CHECK-NEXT: ret <vscale x 64 x i1> [[TMP3]]
//
fixed_bool1_t address_of_array_idx_bool1() {
fixed_bool1_t arr[3];
diff --git a/clang/test/CodeGen/attr-riscv-rvv-vector-bits-globals.c b/clang/test/CodeGen/attr-riscv-rvv-vector-bits-globals.c
index d7df1a2..31a245d 100644
--- a/clang/test/CodeGen/attr-riscv-rvv-vector-bits-globals.c
+++ b/clang/test/CodeGen/attr-riscv-rvv-vector-bits-globals.c
@@ -56,18 +56,16 @@ void write_global_i64(vint64m1_t v) { global_i64 = v; }
// CHECK-64-LABEL: @write_global_bool1(
// CHECK-64-NEXT: entry:
-// CHECK-64-NEXT: [[SAVED_VALUE:%.*]] = alloca <vscale x 64 x i1>, align 8
-// CHECK-64-NEXT: store <vscale x 64 x i1> [[V:%.*]], ptr [[SAVED_VALUE]], align 8, !tbaa [[TBAA7:![0-9]+]]
-// CHECK-64-NEXT: [[TMP0:%.*]] = load <8 x i8>, ptr [[SAVED_VALUE]], align 8, !tbaa [[TBAA4]]
-// CHECK-64-NEXT: store <8 x i8> [[TMP0]], ptr @global_bool1, align 8, !tbaa [[TBAA4]]
+// CHECK-64-NEXT: [[TMP0:%.*]] = bitcast <vscale x 64 x i1> [[V:%.*]] to <vscale x 8 x i8>
+// CHECK-64-NEXT: [[CAST_FIXED:%.*]] = tail call <8 x i8> @llvm.vector.extract.v8i8.nxv8i8(<vscale x 8 x i8> [[TMP0]], i64 0)
+// CHECK-64-NEXT: store <8 x i8> [[CAST_FIXED]], ptr @global_bool1, align 8, !tbaa [[TBAA4]]
// CHECK-64-NEXT: ret void
//
// CHECK-256-LABEL: @write_global_bool1(
// CHECK-256-NEXT: entry:
-// CHECK-256-NEXT: [[SAVED_VALUE:%.*]] = alloca <vscale x 64 x i1>, align 8
-// CHECK-256-NEXT: store <vscale x 64 x i1> [[V:%.*]], ptr [[SAVED_VALUE]], align 8, !tbaa [[TBAA7:![0-9]+]]
-// CHECK-256-NEXT: [[TMP0:%.*]] = load <32 x i8>, ptr [[SAVED_VALUE]], align 8, !tbaa [[TBAA4]]
-// CHECK-256-NEXT: store <32 x i8> [[TMP0]], ptr @global_bool1, align 8, !tbaa [[TBAA4]]
+// CHECK-256-NEXT: [[TMP0:%.*]] = bitcast <vscale x 64 x i1> [[V:%.*]] to <vscale x 8 x i8>
+// CHECK-256-NEXT: [[CAST_FIXED:%.*]] = tail call <32 x i8> @llvm.vector.extract.v32i8.nxv8i8(<vscale x 8 x i8> [[TMP0]], i64 0)
+// CHECK-256-NEXT: store <32 x i8> [[CAST_FIXED]], ptr @global_bool1, align 8, !tbaa [[TBAA4]]
// CHECK-256-NEXT: ret void
//
void write_global_bool1(vbool1_t v) { global_bool1 = v; }
@@ -92,7 +90,7 @@ void write_global_bool4(vbool4_t v) { global_bool4 = v; }
// CHECK-256-LABEL: @write_global_bool32(
// CHECK-256-NEXT: entry:
// CHECK-256-NEXT: [[SAVED_VALUE:%.*]] = alloca <vscale x 2 x i1>, align 1
-// CHECK-256-NEXT: store <vscale x 2 x i1> [[V:%.*]], ptr [[SAVED_VALUE]], align 1, !tbaa [[TBAA9:![0-9]+]]
+// CHECK-256-NEXT: store <vscale x 2 x i1> [[V:%.*]], ptr [[SAVED_VALUE]], align 1, !tbaa [[TBAA7:![0-9]+]]
// CHECK-256-NEXT: [[TMP0:%.*]] = load <1 x i8>, ptr [[SAVED_VALUE]], align 1, !tbaa [[TBAA4]]
// CHECK-256-NEXT: store <1 x i8> [[TMP0]], ptr @global_bool32, align 1, !tbaa [[TBAA4]]
// CHECK-256-NEXT: ret void
@@ -120,18 +118,16 @@ vint64m1_t read_global_i64() { return global_i64; }
// CHECK-64-LABEL: @read_global_bool1(
// CHECK-64-NEXT: entry:
-// CHECK-64-NEXT: [[SAVED_VALUE:%.*]] = alloca <8 x i8>, align 8
// CHECK-64-NEXT: [[TMP0:%.*]] = load <8 x i8>, ptr @global_bool1, align 8, !tbaa [[TBAA4]]
-// CHECK-64-NEXT: store <8 x i8> [[TMP0]], ptr [[SAVED_VALUE]], align 8, !tbaa [[TBAA4]]
-// CHECK-64-NEXT: [[TMP1:%.*]] = load <vscale x 64 x i1>, ptr [[SAVED_VALUE]], align 8, !tbaa [[TBAA4]]
+// CHECK-64-NEXT: [[CAST_SCALABLE:%.*]] = tail call <vscale x 8 x i8> @llvm.vector.insert.nxv8i8.v8i8(<vscale x 8 x i8> undef, <8 x i8> [[TMP0]], i64 0)
+// CHECK-64-NEXT: [[TMP1:%.*]] = bitcast <vscale x 8 x i8> [[CAST_SCALABLE]] to <vscale x 64 x i1>
// CHECK-64-NEXT: ret <vscale x 64 x i1> [[TMP1]]
//
// CHECK-256-LABEL: @read_global_bool1(
// CHECK-256-NEXT: entry:
-// CHECK-256-NEXT: [[SAVED_VALUE:%.*]] = alloca <32 x i8>, align 32
// CHECK-256-NEXT: [[TMP0:%.*]] = load <32 x i8>, ptr @global_bool1, align 8, !tbaa [[TBAA4]]
-// CHECK-256-NEXT: store <32 x i8> [[TMP0]], ptr [[SAVED_VALUE]], align 32, !tbaa [[TBAA4]]
-// CHECK-256-NEXT: [[TMP1:%.*]] = load <vscale x 64 x i1>, ptr [[SAVED_VALUE]], align 32, !tbaa [[TBAA4]]
+// CHECK-256-NEXT: [[CAST_SCALABLE:%.*]] = tail call <vscale x 8 x i8> @llvm.vector.insert.nxv8i8.v32i8(<vscale x 8 x i8> undef, <32 x i8> [[TMP0]], i64 0)
+// CHECK-256-NEXT: [[TMP1:%.*]] = bitcast <vscale x 8 x i8> [[CAST_SCALABLE]] to <vscale x 64 x i1>
// CHECK-256-NEXT: ret <vscale x 64 x i1> [[TMP1]]
//
vbool1_t read_global_bool1() { return global_bool1; }
diff --git a/clang/test/CodeGen/builtins.c b/clang/test/CodeGen/builtins.c
index ed03233..8828212 100644
--- a/clang/test/CodeGen/builtins.c
+++ b/clang/test/CodeGen/builtins.c
@@ -496,6 +496,12 @@ long long test_builtin_readcyclecounter(void) {
return __builtin_readcyclecounter();
}
+// CHECK-LABEL: define{{.*}} i64 @test_builtin_readsteadycounter
+long long test_builtin_readsteadycounter(void) {
+ // CHECK: call i64 @llvm.readsteadycounter()
+ return __builtin_readsteadycounter();
+}
+
/// __builtin_launder should be a NOP in C since there are no vtables.
// CHECK-LABEL: define{{.*}} void @test_builtin_launder
void test_builtin_launder(int *p) {
diff --git a/clang/test/Driver/amdgpu-macros.cl b/clang/test/Driver/amdgpu-macros.cl
index 3b10444..0046193 100644
--- a/clang/test/Driver/amdgpu-macros.cl
+++ b/clang/test/Driver/amdgpu-macros.cl
@@ -132,8 +132,8 @@
// RUN: %clang -E -dM -target amdgcn -mcpu=gfx1201 %s 2>&1 | FileCheck --check-prefixes=ARCH-GCN,FAST_FMAF %s -DWAVEFRONT_SIZE=32 -DCPU=gfx1201 -DFAMILY=GFX12
// RUN: %clang -E -dM -target amdgcn -mcpu=gfx9-generic %s 2>&1 | FileCheck --check-prefixes=ARCH-GCN,FAST_FMAF %s -DWAVEFRONT_SIZE=64 -DCPU=gfx9_generic -DFAMILY=GFX9
-// RUN: %clang -E -dM -target amdgcn -mcpu=gfx10.1-generic %s 2>&1 | FileCheck --check-prefixes=ARCH-GCN,FAST_FMAF %s -DWAVEFRONT_SIZE=32 -DCPU=gfx10_1_generic -DFAMILY=GFX10
-// RUN: %clang -E -dM -target amdgcn -mcpu=gfx10.3-generic %s 2>&1 | FileCheck --check-prefixes=ARCH-GCN,FAST_FMAF %s -DWAVEFRONT_SIZE=32 -DCPU=gfx10_3_generic -DFAMILY=GFX10
+// RUN: %clang -E -dM -target amdgcn -mcpu=gfx10-1-generic %s 2>&1 | FileCheck --check-prefixes=ARCH-GCN,FAST_FMAF %s -DWAVEFRONT_SIZE=32 -DCPU=gfx10_1_generic -DFAMILY=GFX10
+// RUN: %clang -E -dM -target amdgcn -mcpu=gfx10-3-generic %s 2>&1 | FileCheck --check-prefixes=ARCH-GCN,FAST_FMAF %s -DWAVEFRONT_SIZE=32 -DCPU=gfx10_3_generic -DFAMILY=GFX10
// RUN: %clang -E -dM -target amdgcn -mcpu=gfx11-generic %s 2>&1 | FileCheck --check-prefixes=ARCH-GCN,FAST_FMAF %s -DWAVEFRONT_SIZE=32 -DCPU=gfx11_generic -DFAMILY=GFX11
// ARCH-GCN-DAG: #define FP_FAST_FMA 1
diff --git a/clang/test/Driver/amdgpu-mcpu.cl b/clang/test/Driver/amdgpu-mcpu.cl
index 6f18ea0..915fa64 100644
--- a/clang/test/Driver/amdgpu-mcpu.cl
+++ b/clang/test/Driver/amdgpu-mcpu.cl
@@ -116,8 +116,8 @@
// RUN: %clang -### -target amdgcn -mcpu=gfx1201 %s 2>&1 | FileCheck --check-prefix=GFX1201 %s
// RUN: %clang -### -target amdgcn -mcpu=gfx9-generic %s 2>&1 | FileCheck --check-prefix=GFX9_GENERIC %s
-// RUN: %clang -### -target amdgcn -mcpu=gfx10.1-generic %s 2>&1 | FileCheck --check-prefix=GFX10_1_GENERIC %s
-// RUN: %clang -### -target amdgcn -mcpu=gfx10.3-generic %s 2>&1 | FileCheck --check-prefix=GFX10_3_GENERIC %s
+// RUN: %clang -### -target amdgcn -mcpu=gfx10-1-generic %s 2>&1 | FileCheck --check-prefix=GFX10_1_GENERIC %s
+// RUN: %clang -### -target amdgcn -mcpu=gfx10-3-generic %s 2>&1 | FileCheck --check-prefix=GFX10_3_GENERIC %s
// RUN: %clang -### -target amdgcn -mcpu=gfx11-generic %s 2>&1 | FileCheck --check-prefix=GFX11_GENERIC %s
// GCNDEFAULT-NOT: -target-cpu
@@ -167,6 +167,6 @@
// GFX1201: "-target-cpu" "gfx1201"
// GFX9_GENERIC: "-target-cpu" "gfx9-generic"
-// GFX10_1_GENERIC: "-target-cpu" "gfx10.1-generic"
-// GFX10_3_GENERIC: "-target-cpu" "gfx10.3-generic"
+// GFX10_1_GENERIC: "-target-cpu" "gfx10-1-generic"
+// GFX10_3_GENERIC: "-target-cpu" "gfx10-3-generic"
// GFX11_GENERIC: "-target-cpu" "gfx11-generic"
diff --git a/clang/test/Driver/autocomplete.c b/clang/test/Driver/autocomplete.c
index d6f5770..c8ceaaf 100644
--- a/clang/test/Driver/autocomplete.c
+++ b/clang/test/Driver/autocomplete.c
@@ -80,6 +80,7 @@
// FLTOALL-NEXT: thin
// RUN: %clang --autocomplete=-fveclib= | FileCheck %s -check-prefix=FVECLIBALL
// FVECLIBALL: Accelerate
+// FVECLIBALL-NEXT: AMDLIBM
// FVECLIBALL-NEXT: ArmPL
// FVECLIBALL-NEXT: Darwin_libsystem_m
// FVECLIBALL-NEXT: libmvec
diff --git a/clang/test/Driver/installapi.h b/clang/test/Driver/installapi.h
new file mode 100644
index 0000000..99379b44d
--- /dev/null
+++ b/clang/test/Driver/installapi.h
@@ -0,0 +1,13 @@
+// Check non-darwin triple is rejected.
+// RUN: not %clang -target x86_64-unknown-unknown -installapi %s 2> %t
+// RUN: FileCheck --check-prefix INVALID_INSTALLAPI -input-file %t %s
+
+// INVALID_INSTALLAPI: error: InstallAPI is not supported for 'x86_64-unknown-unknown'
+
+// Check installapi phases.
+// RUN: %clang -target x86_64-apple-macos11 -ccc-print-phases -installapi %s 2> %t
+// RUN: FileCheck --check-prefix INSTALLAPI_PHASES -input-file %t %s
+
+// INSTALLAPI_PHASES: 0: input,
+// INSTALLAPI_PHASES: installapi,
+// INSTALLAPI_PHASES-SAME: tbd
diff --git a/clang/test/FixIt/fixit-c++11.cpp b/clang/test/FixIt/fixit-c++11.cpp
index a5a47b7..10f4a9d 100644
--- a/clang/test/FixIt/fixit-c++11.cpp
+++ b/clang/test/FixIt/fixit-c++11.cpp
@@ -44,11 +44,13 @@ namespace ScopedEnum {
enum class E b = E::a; // expected-error {{must use 'enum' not 'enum class'}}
struct S {
friend enum class E; // expected-error {{must use 'enum' not 'enum class'}}
+ // expected-warning@-1 {{elaborated enum specifier cannot be declared as a friend}}
+ // expected-note@-2 {{remove 'enum class' to befriend an enum}}
};
}
-struct S2 {
- void f(int i);
+struct S2 {
+ void f(int i);
void g(int i);
};
diff --git a/clang/test/Headers/__clang_hip_math.hip b/clang/test/Headers/__clang_hip_math.hip
index e9a9cb4..37099de 100644
--- a/clang/test/Headers/__clang_hip_math.hip
+++ b/clang/test/Headers/__clang_hip_math.hip
@@ -2557,33 +2557,65 @@ extern "C" __device__ double test_nan(const char *tag) {
return nan(tag);
}
-// CHECK-LABEL: @test_nanf_emptystr(
-// CHECK-NEXT: entry:
-// CHECK-NEXT: ret float 0x7FF8000000000000
+// DEFAULT-LABEL: @test_nanf_emptystr(
+// DEFAULT-NEXT: entry:
+// DEFAULT-NEXT: ret float 0x7FF8000000000000
+//
+// FINITEONLY-LABEL: @test_nanf_emptystr(
+// FINITEONLY-NEXT: entry:
+// FINITEONLY-NEXT: ret float poison
+//
+// APPROX-LABEL: @test_nanf_emptystr(
+// APPROX-NEXT: entry:
+// APPROX-NEXT: ret float 0x7FF8000000000000
//
extern "C" __device__ float test_nanf_emptystr() {
return nanf("");
}
-// CHECK-LABEL: @test_nan_emptystr(
-// CHECK-NEXT: entry:
-// CHECK-NEXT: ret double 0x7FF8000000000000
+// DEFAULT-LABEL: @test_nan_emptystr(
+// DEFAULT-NEXT: entry:
+// DEFAULT-NEXT: ret double 0x7FF8000000000000
+//
+// FINITEONLY-LABEL: @test_nan_emptystr(
+// FINITEONLY-NEXT: entry:
+// FINITEONLY-NEXT: ret double poison
+//
+// APPROX-LABEL: @test_nan_emptystr(
+// APPROX-NEXT: entry:
+// APPROX-NEXT: ret double 0x7FF8000000000000
//
extern "C" __device__ double test_nan_emptystr() {
return nan("");
}
-// CHECK-LABEL: @test_nanf_fill(
-// CHECK-NEXT: entry:
-// CHECK-NEXT: ret float 0x7FF8000000000000
+// DEFAULT-LABEL: @test_nanf_fill(
+// DEFAULT-NEXT: entry:
+// DEFAULT-NEXT: ret float 0x7FF8000000000000
+//
+// FINITEONLY-LABEL: @test_nanf_fill(
+// FINITEONLY-NEXT: entry:
+// FINITEONLY-NEXT: ret float poison
+//
+// APPROX-LABEL: @test_nanf_fill(
+// APPROX-NEXT: entry:
+// APPROX-NEXT: ret float 0x7FF8000000000000
//
extern "C" __device__ float test_nanf_fill() {
return nanf("0x456");
}
-// CHECK-LABEL: @test_nan_fill(
-// CHECK-NEXT: entry:
-// CHECK-NEXT: ret double 0x7FF8000000000000
+// DEFAULT-LABEL: @test_nan_fill(
+// DEFAULT-NEXT: entry:
+// DEFAULT-NEXT: ret double 0x7FF8000000000000
+//
+// FINITEONLY-LABEL: @test_nan_fill(
+// FINITEONLY-NEXT: entry:
+// FINITEONLY-NEXT: ret double poison
+//
+// APPROX-LABEL: @test_nan_fill(
+// APPROX-NEXT: entry:
+// APPROX-NEXT: ret double 0x7FF8000000000000
//
extern "C" __device__ double test_nan_fill() {
return nan("0x123");
diff --git a/clang/test/InstallAPI/installapi-basic.test b/clang/test/InstallAPI/installapi-basic.test
new file mode 100644
index 0000000..8035166
--- /dev/null
+++ b/clang/test/InstallAPI/installapi-basic.test
@@ -0,0 +1,34 @@
+// RUN: rm -rf %t
+// RUN: split-file %s %t
+// RUN: %clang_cc1 -x objective-c -triple arm64-apple-ios13.0.0 -installapi \
+// RUN: -fapplication-extension -current_version 1 -install_name /usr/lib/basic.dylib \
+// RUN: %t/basic_inputs.json -o %t/basic.tbd 2>&1 | FileCheck %s --allow-empty
+// RUN: llvm-readtapi -compare %t/basic.tbd %t/expected.tbd 2>&1 | FileCheck %s --allow-empty
+
+// CHECK-NOT: error:
+// CHECK-NOT: warning:
+
+//--- basic_inputs.json
+
+//--- expected.tbd
+{
+ "main_library": {
+ "compatibility_versions": [
+ {
+ "version": "0"
+ }
+ ],
+ "install_names": [
+ {
+ "name": "/usr/lib/basic.dylib"
+ }
+ ],
+ "target_info": [
+ {
+ "min_deployment": "13.0.0",
+ "target": "arm64-ios"
+ }
+ ]
+ },
+ "tapi_tbd_version": 5
+}
diff --git a/clang/test/Lexer/gnu-flags.c b/clang/test/Lexer/gnu-flags.c
index 6e47547..384339f 100644
--- a/clang/test/Lexer/gnu-flags.c
+++ b/clang/test/Lexer/gnu-flags.c
@@ -1,15 +1,14 @@
// RUN: %clang_cc1 -fsyntax-only -verify %s -DNONE
-// RUN: %clang_cc1 -fsyntax-only -verify %s -DALL -Wgnu
+// RUN: %clang_cc1 -fsyntax-only -verify %s -DALL -Wgnu
// RUN: %clang_cc1 -fsyntax-only -verify %s -DALL \
// RUN: -Wgnu-zero-variadic-macro-arguments \
-// RUN: -Wgnu-imaginary-constant -Wgnu-binary-literal -Wgnu-zero-line-directive
+// RUN: -Wgnu-imaginary-constant -Wgnu-zero-line-directive
// RUN: %clang_cc1 -fsyntax-only -verify %s -DNONE -Wgnu \
// RUN: -Wno-gnu-zero-variadic-macro-arguments \
-// RUN: -Wno-gnu-imaginary-constant -Wno-gnu-binary-literal -Wno-gnu-zero-line-directive
+// RUN: -Wno-gnu-imaginary-constant -Wno-gnu-zero-line-directive
// Additional disabled tests:
// %clang_cc1 -fsyntax-only -verify %s -DZEROARGS -Wgnu-zero-variadic-macro-arguments
// %clang_cc1 -fsyntax-only -verify %s -DIMAGINARYCONST -Wgnu-imaginary-constant
-// %clang_cc1 -fsyntax-only -verify %s -DBINARYLITERAL -Wgnu-binary-literal
// %clang_cc1 -fsyntax-only -verify %s -DLINE0 -Wgnu-zero-line-directive
#if NONE
@@ -38,13 +37,6 @@ void foo( const char* c )
float _Complex c = 1.if;
-#if ALL || BINARYLITERAL
-// expected-warning@+3 {{binary integer literals are a GNU extension}}
-#endif
-
-int b = 0b0101;
-
-
// This case is handled differently because lit has a bug whereby #line 0 is reported to be on line 4294967295
// http://llvm.org/bugs/show_bug.cgi?id=16952
#if ALL || LINE0
diff --git a/clang/test/Misc/target-invalid-cpu-note.c b/clang/test/Misc/target-invalid-cpu-note.c
index 123b203..ef2c1c0 100644
--- a/clang/test/Misc/target-invalid-cpu-note.c
+++ b/clang/test/Misc/target-invalid-cpu-note.c
@@ -37,7 +37,7 @@
// RUN: not %clang_cc1 -triple amdgcn--- -target-cpu not-a-cpu -fsyntax-only %s 2>&1 | FileCheck %s --check-prefix AMDGCN
// AMDGCN: error: unknown target CPU 'not-a-cpu'
-// AMDGCN-NEXT: note: valid target CPU values are: gfx600, tahiti, gfx601, pitcairn, verde, gfx602, hainan, oland, gfx700, kaveri, gfx701, hawaii, gfx702, gfx703, kabini, mullins, gfx704, bonaire, gfx705, gfx801, carrizo, gfx802, iceland, tonga, gfx803, fiji, polaris10, polaris11, gfx805, tongapro, gfx810, stoney, gfx900, gfx902, gfx904, gfx906, gfx908, gfx909, gfx90a, gfx90c, gfx940, gfx941, gfx942, gfx1010, gfx1011, gfx1012, gfx1013, gfx1030, gfx1031, gfx1032, gfx1033, gfx1034, gfx1035, gfx1036, gfx1100, gfx1101, gfx1102, gfx1103, gfx1150, gfx1151, gfx1200, gfx1201, gfx9-generic, gfx10.1-generic, gfx10.3-generic, gfx11-generic{{$}}
+// AMDGCN-NEXT: note: valid target CPU values are: gfx600, tahiti, gfx601, pitcairn, verde, gfx602, hainan, oland, gfx700, kaveri, gfx701, hawaii, gfx702, gfx703, kabini, mullins, gfx704, bonaire, gfx705, gfx801, carrizo, gfx802, iceland, tonga, gfx803, fiji, polaris10, polaris11, gfx805, tongapro, gfx810, stoney, gfx900, gfx902, gfx904, gfx906, gfx908, gfx909, gfx90a, gfx90c, gfx940, gfx941, gfx942, gfx1010, gfx1011, gfx1012, gfx1013, gfx1030, gfx1031, gfx1032, gfx1033, gfx1034, gfx1035, gfx1036, gfx1100, gfx1101, gfx1102, gfx1103, gfx1150, gfx1151, gfx1200, gfx1201, gfx9-generic, gfx10-1-generic, gfx10-3-generic, gfx11-generic{{$}}
// RUN: not %clang_cc1 -triple wasm64--- -target-cpu not-a-cpu -fsyntax-only %s 2>&1 | FileCheck %s --check-prefix WEBASM
// WEBASM: error: unknown target CPU 'not-a-cpu'
diff --git a/clang/test/Parser/cxx-decl.cpp b/clang/test/Parser/cxx-decl.cpp
index 8a6e654..4c4bb87 100644
--- a/clang/test/Parser/cxx-decl.cpp
+++ b/clang/test/Parser/cxx-decl.cpp
@@ -252,9 +252,6 @@ namespace DuplicateFriend {
struct A {
friend void friend f(); // expected-warning {{duplicate 'friend' declaration specifier}}
friend struct B friend; // expected-warning {{duplicate 'friend' declaration specifier}}
-#if __cplusplus >= 201103L
- // expected-error@-2 {{'friend' must appear first in a non-function declaration}}
-#endif
};
}
diff --git a/clang/test/Parser/cxx0x-decl.cpp b/clang/test/Parser/cxx0x-decl.cpp
index 18095a4..a0b3266 100644
--- a/clang/test/Parser/cxx0x-decl.cpp
+++ b/clang/test/Parser/cxx0x-decl.cpp
@@ -157,7 +157,7 @@ namespace DuplicateSpecifier {
struct A {
friend constexpr int constexpr friend f(); // expected-warning {{duplicate 'friend' declaration specifier}} \
// expected-error {{duplicate 'constexpr' declaration specifier}}
- friend struct A friend; // expected-warning {{duplicate 'friend'}} expected-error {{'friend' must appear first}}
+ friend struct A friend; // expected-warning {{duplicate 'friend'}}
};
constinit constexpr int n1 = 0; // expected-error {{cannot combine with previous 'constinit'}}
diff --git a/clang/test/Preprocessor/arm-target-features.c b/clang/test/Preprocessor/arm-target-features.c
index 236c9f2..733d068 100644
--- a/clang/test/Preprocessor/arm-target-features.c
+++ b/clang/test/Preprocessor/arm-target-features.c
@@ -737,7 +737,7 @@
// Test whether predefines are as expected when targeting cortex-m55 (softfp FP ABI as default).
// RUN: %clang -target arm-eabi -mcpu=cortex-m55 -x c -E -dM %s -o - | FileCheck -match-full-lines --check-prefix=M55 %s
-// M55: #define __ARM_ARCH 8
+// M55: #define __ARM_ARCH 801
// M55: #define __ARM_ARCH_8_1M_MAIN__ 1
// M55: #define __ARM_ARCH_EXT_IDIV__ 1
// M55-NOT: __ARM_ARCH_ISA_ARM
@@ -764,7 +764,7 @@
// KRAIT-ALLOW-FP-INSTR:#define __ARM_VFPV4__ 1
// RUN: %clang -target arm-arm-none-eabi -march=armv8.1-m.main -x c -E -dM %s -o - | FileCheck -match-full-lines --check-prefix=CHECK-V81M %s
-// CHECK-V81M: #define __ARM_ARCH 8
+// CHECK-V81M: #define __ARM_ARCH 801
// CHECK-V81M: #define __ARM_ARCH_8_1M_MAIN__ 1
// CHECK-V81M: #define __ARM_ARCH_ISA_THUMB 2
// CHECK-V81M: #define __ARM_ARCH_PROFILE 'M'
@@ -821,14 +821,14 @@
// CHECK-V8M-CDE-MASK2: #define __ARM_FEATURE_CDE_COPROC 0xff
// RUN: %clang -target armv8.1a-none-none-eabi -x c -E -dM %s -o - | FileCheck -match-full-lines --check-prefix=CHECK-V81A %s
-// CHECK-V81A: #define __ARM_ARCH 8
+// CHECK-V81A: #define __ARM_ARCH 801
// CHECK-V81A: #define __ARM_ARCH_8_1A__ 1
// CHECK-V81A: #define __ARM_ARCH_PROFILE 'A'
// CHECK-V81A: #define __ARM_FEATURE_QRDMX 1
// CHECK-V81A: #define __ARM_FP 0xe
// RUN: %clang -target armv8.2a-none-none-eabi -x c -E -dM %s -o - | FileCheck -match-full-lines --check-prefix=CHECK-V82A %s
-// CHECK-V82A: #define __ARM_ARCH 8
+// CHECK-V82A: #define __ARM_ARCH 802
// CHECK-V82A: #define __ARM_ARCH_8_2A__ 1
// CHECK-V82A: #define __ARM_ARCH_PROFILE 'A'
// CHECK-V82A: #define __ARM_FEATURE_QRDMX 1
@@ -838,67 +838,67 @@
// CHECK-DRIVERKIT-NOT: #define __ARM_PCS_VFP 1
// RUN: %clang -target armv8.3a-none-none-eabi -x c -E -dM %s -o - | FileCheck -match-full-lines --check-prefix=CHECK-V83A %s
-// CHECK-V83A: #define __ARM_ARCH 8
+// CHECK-V83A: #define __ARM_ARCH 803
// CHECK-V83A: #define __ARM_ARCH_8_3A__ 1
// CHECK-V83A: #define __ARM_ARCH_PROFILE 'A'
// RUN: %clang -target armv8.4a-none-none-eabi -x c -E -dM %s -o - | FileCheck -match-full-lines --check-prefix=CHECK-V84A %s
-// CHECK-V84A: #define __ARM_ARCH 8
+// CHECK-V84A: #define __ARM_ARCH 804
// CHECK-V84A: #define __ARM_ARCH_8_4A__ 1
// CHECK-V84A: #define __ARM_ARCH_PROFILE 'A'
// RUN: %clang -target armv8.5a-none-none-eabi -x c -E -dM %s -o - | FileCheck -match-full-lines --check-prefix=CHECK-V85A %s
-// CHECK-V85A: #define __ARM_ARCH 8
+// CHECK-V85A: #define __ARM_ARCH 805
// CHECK-V85A: #define __ARM_ARCH_8_5A__ 1
// CHECK-V85A: #define __ARM_ARCH_PROFILE 'A'
// RUN: %clang -target armv8.6a-none-none-eabi -x c -E -dM %s -o - | FileCheck -match-full-lines --check-prefix=CHECK-V86A %s
-// CHECK-V86A: #define __ARM_ARCH 8
+// CHECK-V86A: #define __ARM_ARCH 806
// CHECK-V86A: #define __ARM_ARCH_8_6A__ 1
// CHECK-V86A: #define __ARM_ARCH_PROFILE 'A'
// RUN: %clang -target armv8.7a-none-none-eabi -x c -E -dM %s -o - | FileCheck -match-full-lines --check-prefix=CHECK-V87A %s
-// CHECK-V87A: #define __ARM_ARCH 8
+// CHECK-V87A: #define __ARM_ARCH 807
// CHECK-V87A: #define __ARM_ARCH_8_7A__ 1
// CHECK-V87A: #define __ARM_ARCH_PROFILE 'A'
// RUN: %clang -target armv8.8a-none-none-eabi -x c -E -dM %s -o - | FileCheck -match-full-lines --check-prefix=CHECK-V88A %s
-// CHECK-V88A: #define __ARM_ARCH 8
+// CHECK-V88A: #define __ARM_ARCH 808
// CHECK-V88A: #define __ARM_ARCH_8_8A__ 1
// CHECK-V88A: #define __ARM_ARCH_PROFILE 'A'
// RUN: %clang -target armv8.9a-none-none-eabi -x c -E -dM %s -o - | FileCheck -match-full-lines --check-prefix=CHECK-V89A %s
-// CHECK-V89A: #define __ARM_ARCH 8
+// CHECK-V89A: #define __ARM_ARCH 809
// CHECK-V89A: #define __ARM_ARCH_8_9A__ 1
// CHECK-V89A: #define __ARM_ARCH_PROFILE 'A'
// RUN: %clang -target armv9a-none-none-eabi -x c -E -dM %s -o - | FileCheck -match-full-lines --check-prefix=CHECK-V9A %s
-// CHECK-V9A: #define __ARM_ARCH 9
+// CHECK-V9A: #define __ARM_ARCH 900
// CHECK-V9A: #define __ARM_ARCH_9A__ 1
// CHECK-V9A: #define __ARM_ARCH_PROFILE 'A'
// RUN: %clang -target armv9.1a-none-none-eabi -x c -E -dM %s -o - | FileCheck -match-full-lines --check-prefix=CHECK-V91A %s
-// CHECK-V91A: #define __ARM_ARCH 9
+// CHECK-V91A: #define __ARM_ARCH 901
// CHECK-V91A: #define __ARM_ARCH_9_1A__ 1
// CHECK-V91A: #define __ARM_ARCH_PROFILE 'A'
// RUN: %clang -target armv9.2a-none-none-eabi -x c -E -dM %s -o - | FileCheck -match-full-lines --check-prefix=CHECK-V92A %s
-// CHECK-V92A: #define __ARM_ARCH 9
+// CHECK-V92A: #define __ARM_ARCH 902
// CHECK-V92A: #define __ARM_ARCH_9_2A__ 1
// CHECK-V92A: #define __ARM_ARCH_PROFILE 'A'
// RUN: %clang -target armv9.3a-none-none-eabi -x c -E -dM %s -o - | FileCheck -match-full-lines --check-prefix=CHECK-V93A %s
-// CHECK-V93A: #define __ARM_ARCH 9
+// CHECK-V93A: #define __ARM_ARCH 903
// CHECK-V93A: #define __ARM_ARCH_9_3A__ 1
// CHECK-V93A: #define __ARM_ARCH_PROFILE 'A'
// RUN: %clang -target armv9.4a-none-none-eabi -x c -E -dM %s -o - | FileCheck -match-full-lines --check-prefix=CHECK-V94A %s
-// CHECK-V94A: #define __ARM_ARCH 9
+// CHECK-V94A: #define __ARM_ARCH 904
// CHECK-V94A: #define __ARM_ARCH_9_4A__ 1
// CHECK-V94A: #define __ARM_ARCH_PROFILE 'A'
// RUN: %clang -target armv9.5a-none-none-eabi -x c -E -dM %s -o - | FileCheck -match-full-lines --check-prefix=CHECK-V95A %s
-// CHECK-V95A: #define __ARM_ARCH 9
+// CHECK-V95A: #define __ARM_ARCH 905
// CHECK-V95A: #define __ARM_ARCH_9_5A__ 1
// CHECK-V95A: #define __ARM_ARCH_PROFILE 'A'
diff --git a/clang/test/Preprocessor/fixed-point.c b/clang/test/Preprocessor/fixed-point.c
new file mode 100644
index 0000000..3adf36d
--- /dev/null
+++ b/clang/test/Preprocessor/fixed-point.c
@@ -0,0 +1,67 @@
+/// Assert the fixed point precision macros according to ISO/IEC TR 18037:2008 7.18a.3 are
+/// defined when -ffixed-point is provided.
+
+// RUN: %clang_cc1 -triple=x86_64 -E -dM -ffixed-point -x c < /dev/null | FileCheck -match-full-lines %s
+// RUN: %clang_cc1 -triple=x86_64 -E -dM -ffixed-point -x c++ < /dev/null | FileCheck -match-full-lines %s
+
+/// These are the implementation-defined values for x86_64.
+// CHECK-DAG:#define __SFRACT_EPSILON__ 0.0078125HR
+// CHECK-DAG:#define __SFRACT_FBIT__ 7
+// CHECK-DAG:#define __SFRACT_MAX__ 0.9921875HR
+// CHECK-DAG:#define __SFRACT_MIN__ (-0.5HR-0.5HR)
+
+// CHECK-DAG:#define __USFRACT_EPSILON__ 0.00390625UHR
+// CHECK-DAG:#define __USFRACT_FBIT__ 8
+// CHECK-DAG:#define __USFRACT_MAX__ 0.99609375UHR
+
+// CHECK-DAG:#define __FRACT_EPSILON__ 0.000030517578125R
+// CHECK-DAG:#define __FRACT_FBIT__ 15
+// CHECK-DAG:#define __FRACT_MAX__ 0.999969482421875R
+// CHECK-DAG:#define __FRACT_MIN__ (-0.5R-0.5R)
+
+// CHECK-DAG:#define __UFRACT_EPSILON__ 0.0000152587890625UR
+// CHECK-DAG:#define __UFRACT_FBIT__ 16
+// CHECK-DAG:#define __UFRACT_MAX__ 0.9999847412109375UR
+
+// CHECK-DAG:#define __LFRACT_EPSILON__ 0.0000000004656612873077392578125LR
+// CHECK-DAG:#define __LFRACT_FBIT__ 31
+// CHECK-DAG:#define __LFRACT_MAX__ 0.9999999995343387126922607421875LR
+// CHECK-DAG:#define __LFRACT_MIN__ (-0.5LR-0.5LR)
+
+// CHECK-DAG:#define __ULFRACT_EPSILON__ 0.00000000023283064365386962890625ULR
+// CHECK-DAG:#define __ULFRACT_FBIT__ 32
+// CHECK-DAG:#define __ULFRACT_MAX__ 0.99999999976716935634613037109375ULR
+
+// CHECK-DAG:#define __SACCUM_EPSILON__ 0.0078125HK
+// CHECK-DAG:#define __SACCUM_FBIT__ 7
+// CHECK-DAG:#define __SACCUM_MAX__ 255.9921875HK
+// CHECK-DAG:#define __SACCUM_MIN__ (-128.0HK-128.0HK)
+
+// CHECK-DAG:#define __USACCUM_EPSILON__ 0.00390625UHK
+// CHECK-DAG:#define __USACCUM_FBIT__ 8
+// CHECK-DAG:#define __USACCUM_MAX__ 255.99609375UHK
+
+// CHECK-DAG:#define __ACCUM_EPSILON__ 0.000030517578125K
+// CHECK-DAG:#define __ACCUM_FBIT__ 15
+// CHECK-DAG:#define __ACCUM_MAX__ 65535.999969482421875K
+// CHECK-DAG:#define __ACCUM_MIN__ (-32768.0K-32768.0K)
+
+// CHECK-DAG:#define __UACCUM_EPSILON__ 0.0000152587890625UK
+// CHECK-DAG:#define __UACCUM_FBIT__ 16
+// CHECK-DAG:#define __UACCUM_MAX__ 65535.9999847412109375UK
+
+// CHECK-DAG:#define __LACCUM_EPSILON__ 0.0000000004656612873077392578125LK
+// CHECK-DAG:#define __LACCUM_FBIT__ 31
+// CHECK-DAG:#define __LACCUM_MAX__ 4294967295.9999999995343387126922607421875LK
+// CHECK-DAG:#define __LACCUM_MIN__ (-2147483648.0LK-2147483648.0LK)
+
+// CHECK-DAG:#define __ULACCUM_EPSILON__ 0.00000000023283064365386962890625ULK
+// CHECK-DAG:#define __ULACCUM_FBIT__ 32
+// CHECK-DAG:#define __ULACCUM_MAX__ 4294967295.99999999976716935634613037109375ULK
+
+// CHECK-DAG:#define __SACCUM_IBIT__ 8
+// CHECK-DAG:#define __USACCUM_IBIT__ 8
+// CHECK-DAG:#define __ACCUM_IBIT__ 16
+// CHECK-DAG:#define __UACCUM_IBIT__ 16
+// CHECK-DAG:#define __LACCUM_IBIT__ 32
+// CHECK-DAG:#define __ULACCUM_IBIT__ 32
diff --git a/clang/test/Preprocessor/no-fixed-point.c b/clang/test/Preprocessor/no-fixed-point.c
new file mode 100644
index 0000000..fe88ca2
--- /dev/null
+++ b/clang/test/Preprocessor/no-fixed-point.c
@@ -0,0 +1,7 @@
+/// Assert the fixed point precision macros according to ISO/IEC TR 18037:2008 7.18a.3 are not
+/// defined when -ffixed-point is not provided.
+
+// RUN: %clang_cc1 -triple=x86_64 -E -dM -x c < /dev/null | FileCheck -match-full-lines %s
+// RUN: %clang_cc1 -triple=x86_64 -E -dM -x c++ < /dev/null | FileCheck -match-full-lines %s
+
+// CHECK-NOT:#define __SFRACT_FBIT__ 7
diff --git a/clang/test/SemaCXX/PR40395.cpp b/clang/test/SemaCXX/PR40395.cpp
index 469c86d..ea0fad2 100644
--- a/clang/test/SemaCXX/PR40395.cpp
+++ b/clang/test/SemaCXX/PR40395.cpp
@@ -1,4 +1,5 @@
// RUN: %clang_cc1 -std=c++17 -fms-extensions -triple=x86_64-pc-win32 -verify %s
+// RUN: %clang_cc1 -std=c++17 -fms-extensions -triple=x86_64-pc-win32 -verify %s -fexperimental-new-constant-interpreter
// expected-no-diagnostics
// PR40395 - ConstantExpr shouldn't cause the template object to infinitely
diff --git a/clang/test/SemaCXX/attr-suppress.cpp b/clang/test/SemaCXX/attr-suppress.cpp
index fb5e2ac..e8f6d97 100644
--- a/clang/test/SemaCXX/attr-suppress.cpp
+++ b/clang/test/SemaCXX/attr-suppress.cpp
@@ -23,18 +23,16 @@ union [[gsl::suppress("type.1")]] U {
float f;
};
+// This doesn't really suppress anything but why not?
[[clang::suppress]];
-// expected-error@-1 {{'suppress' attribute only applies to variables and statements}}
namespace N {
[[clang::suppress("in-a-namespace")]];
-// expected-error@-1 {{'suppress' attribute only applies to variables and statements}}
} // namespace N
[[clang::suppress]] int global = 42;
[[clang::suppress]] void foo() {
- // expected-error@-1 {{'suppress' attribute only applies to variables and statements}}
[[clang::suppress]] int *p;
[[clang::suppress]] int a = 0; // no-warning
@@ -56,7 +54,11 @@ namespace N {
}
class [[clang::suppress("type.1")]] V {
- // expected-error@-1 {{'suppress' attribute only applies to variables and statements}}
int i;
float f;
};
+
+// FIXME: There's no good reason why we shouldn't support this case.
+// But it doesn't look like clang generally supports such attributes yet.
+class W : [[clang::suppress]] public V { // expected-error{{'suppress' attribute cannot be applied to a base specifier}}
+};
diff --git a/clang/test/SemaCXX/concept-crash-on-diagnostic.cpp b/clang/test/SemaCXX/concept-crash-on-diagnostic.cpp
index 00a39f9..71e55c8 100644
--- a/clang/test/SemaCXX/concept-crash-on-diagnostic.cpp
+++ b/clang/test/SemaCXX/concept-crash-on-diagnostic.cpp
@@ -1,4 +1,5 @@
// RUN: %clang_cc1 -fsyntax-only -std=c++20 -verify %s
+// RUN: %clang_cc1 -fsyntax-only -std=c++20 -verify %s -fexperimental-new-constant-interpreter
template <typename Iterator> class normal_iterator {};
diff --git a/clang/test/SemaCXX/cxx98-compat.cpp b/clang/test/SemaCXX/cxx98-compat.cpp
index d26e3a1..b31bee6 100644
--- a/clang/test/SemaCXX/cxx98-compat.cpp
+++ b/clang/test/SemaCXX/cxx98-compat.cpp
@@ -220,7 +220,8 @@ struct HasExplicitConversion {
struct Struct {};
enum Enum { enum_val = 0 };
struct BadFriends {
- friend enum ::Enum; // expected-warning {{befriending enumeration type 'enum ::Enum' is incompatible with C++98}}
+ friend enum ::Enum; // expected-warning {{elaborated enum specifier cannot be declared as a friend}}
+ // expected-note@-1 {{remove 'enum' to befriend an enum}}
friend int; // expected-warning {{non-class friend type 'int' is incompatible with C++98}}
friend Struct; // expected-warning {{befriending 'Struct' without 'struct' keyword is incompatible with C++98}}
};
diff --git a/clang/test/SemaCXX/enum-scoped.cpp b/clang/test/SemaCXX/enum-scoped.cpp
index a4da060..b1d9a215 100644
--- a/clang/test/SemaCXX/enum-scoped.cpp
+++ b/clang/test/SemaCXX/enum-scoped.cpp
@@ -174,11 +174,21 @@ namespace N2764 {
struct S {
friend enum class E; // expected-error {{reference to enumeration must use 'enum' not 'enum class'}}
+ // expected-warning@-1 {{elaborated enum specifier cannot be declared as a friend}}
+ // expected-note@-2 {{remove 'enum class' to befriend an enum}}
friend enum class F; // expected-error {{reference to enumeration must use 'enum' not 'enum class'}}
+ // expected-warning@-1 {{elaborated enum specifier cannot be declared as a friend}}
+ // expected-note@-2 {{remove 'enum class' to befriend an enum}}
friend enum G {}; // expected-error {{forward reference}} expected-error {{cannot define a type in a friend declaration}}
+ // expected-warning@-1 {{elaborated enum specifier cannot be declared as a friend}}
+ // expected-note@-2 {{remove 'enum' to befriend an enum}}
friend enum class H {}; // expected-error {{forward reference}} expected-error {{cannot define a type in a friend declaration}}
+ // expected-warning@-1 {{elaborated enum specifier cannot be declared as a friend}}
+ // expected-note@-2 {{remove 'enum' to befriend an enum}}
friend enum I : int {}; // expected-error {{forward reference}} expected-error {{cannot define a type in a friend declaration}}
+ // expected-warning@-1 {{elaborated enum specifier cannot be declared as a friend}}
+ // expected-note@-2 {{remove 'enum' to befriend an enum}}
enum A : int;
A a;
diff --git a/clang/test/SemaCXX/ms-uuid.cpp b/clang/test/SemaCXX/ms-uuid.cpp
index 21f93ec..172e036 100644
--- a/clang/test/SemaCXX/ms-uuid.cpp
+++ b/clang/test/SemaCXX/ms-uuid.cpp
@@ -1,5 +1,7 @@
// RUN: %clang_cc1 -fsyntax-only -verify -fms-extensions %s -Wno-deprecated-declarations
+// RUN: %clang_cc1 -fsyntax-only -verify -fms-extensions %s -Wno-deprecated-declarations -fexperimental-new-constant-interpreter
// RUN: %clang_cc1 -fsyntax-only -std=c++17 -verify -fms-extensions %s -Wno-deprecated-declarations
+// RUN: %clang_cc1 -fsyntax-only -std=c++17 -verify -fms-extensions %s -Wno-deprecated-declarations -fexperimental-new-constant-interpreter
typedef struct _GUID {
__UINT32_TYPE__ Data1;
diff --git a/clang/test/SemaCXX/warn-unsafe-buffer-usage-array.cpp b/clang/test/SemaCXX/warn-unsafe-buffer-usage-array.cpp
index 90c11b1..8b2f103 100644
--- a/clang/test/SemaCXX/warn-unsafe-buffer-usage-array.cpp
+++ b/clang/test/SemaCXX/warn-unsafe-buffer-usage-array.cpp
@@ -1,4 +1,4 @@
-// RUN: %clang_cc1 -std=c++20 -Wno-all -Wunsafe-buffer-usage \
+// RUN: %clang_cc1 -std=c++20 -Wno-everything -Wunsafe-buffer-usage \
// RUN: -fsafe-buffer-usage-suggestions \
// RUN: -verify %s
@@ -22,3 +22,19 @@ struct Foo {
void foo2(Foo& f, unsigned idx) {
f.member_buffer[idx] = 0; // expected-warning{{unsafe buffer access}}
}
+
+void constant_idx_safe(unsigned idx) {
+ int buffer[10];
+ buffer[9] = 0;
+}
+
+void constant_idx_safe0(unsigned idx) {
+ int buffer[10];
+ buffer[0] = 0;
+}
+
+void constant_idx_unsafe(unsigned idx) {
+ int buffer[10]; // expected-warning{{'buffer' is an unsafe buffer that does not perform bounds checks}}
+ // expected-note@-1{{change type of 'buffer' to 'std::array' to label it for hardening}}
+ buffer[10] = 0; // expected-note{{used in buffer access here}}
+}
diff --git a/clang/test/SemaCXX/warn-unsafe-buffer-usage-debug.cpp b/clang/test/SemaCXX/warn-unsafe-buffer-usage-debug.cpp
index a5b578b..4cc1948 100644
--- a/clang/test/SemaCXX/warn-unsafe-buffer-usage-debug.cpp
+++ b/clang/test/SemaCXX/warn-unsafe-buffer-usage-debug.cpp
@@ -53,7 +53,7 @@ void unclaimed_use() {
void implied_unclaimed_var(int *b) { // expected-warning{{'b' is an unsafe pointer used for buffer access}}
int *a = new int[3]; // expected-warning{{'a' is an unsafe pointer used for buffer access}}
a[4] = 7; // expected-note{{used in buffer access here}}
- a = b; // debug-note{{safe buffers debug: gadget 'PointerAssignment' refused to produce a fix}}
+ a = b; // debug-note{{safe buffers debug: gadget 'PtrToPtrAssignment' refused to produce a fix}}
b++; // expected-note{{used in pointer arithmetic here}} \
// debug-note{{safe buffers debug: failed to produce fixit for 'b' : has an unclaimed use}}
}
diff --git a/clang/test/SemaCXX/warn-unsafe-buffer-usage-fixits-array-assign-to-ptr.cpp b/clang/test/SemaCXX/warn-unsafe-buffer-usage-fixits-array-assign-to-ptr.cpp
new file mode 100644
index 0000000..ff91e32
--- /dev/null
+++ b/clang/test/SemaCXX/warn-unsafe-buffer-usage-fixits-array-assign-to-ptr.cpp
@@ -0,0 +1,45 @@
+// RUN: %clang_cc1 -std=c++20 -Wunsafe-buffer-usage \
+// RUN: -fsafe-buffer-usage-suggestions \
+// RUN: -fdiagnostics-parseable-fixits %s 2>&1 | FileCheck %s
+
+void safe_array_assigned_to_safe_ptr(unsigned idx) {
+ int buffer[10];
+ // CHECK-NOT: fix-it:"{{.*}}":{[[@LINE-1]]:
+ int* ptr;
+ // CHECK-NOT: fix-it:"{{.*}}":{[[@LINE-1]]:
+ ptr = buffer;
+ // CHECK-NOT: fix-it:"{{.*}}":{[[@LINE-1]]:
+}
+
+void safe_array_assigned_to_unsafe_ptr(unsigned idx) {
+ int buffer[10];
+ // CHECK-NOT: fix-it:"{{.*}}":{[[@LINE-1]]:
+ int* ptr;
+ // CHECK: fix-it:"{{.*}}":{[[@LINE-1]]:3-[[@LINE-1]]:11}:"std::span<int> ptr"
+ ptr = buffer;
+ // CHECK-NOT: fix-it:"{{.*}}":{[[@LINE-1]]:
+ ptr[idx] = 0;
+}
+
+void unsafe_array_assigned_to_safe_ptr(unsigned idx) {
+ int buffer[10];
+ // CHECK: fix-it:"{{.*}}":{[[@LINE-1]]:3-[[@LINE-1]]:17}:"std::array<int, 10> buffer"
+ int* ptr;
+ // CHECK-NOT: fix-it:"{{.*}}":{[[@LINE-1]]:
+ ptr = buffer;
+ // CHECK: fix-it:"{{.*}}":{[[@LINE-1]]:15-[[@LINE-1]]:15}:".data()"
+ buffer[idx] = 0;
+}
+
+// FIXME: Implement fixit/s for this case.
+// See comment in CArrayToPtrAssignmentGadget::getFixits to learn why this hasn't been implemented.
+void unsafe_array_assigned_to_unsafe_ptr(unsigned idx) {
+ int buffer[10];
+ // CHECK-NOT: fix-it:"{{.*}}":{[[@LINE-1]]:{{.*}}
+ int* ptr;
+ // CHECK-NOT: fix-it:"{{.*}}":{[[@LINE-1]]:{{.*}}
+ ptr = buffer;
+ // CHECK-NOT: fix-it:"{{.*}}":{[[@LINE-1]]:{{.*}}
+ buffer[idx] = 0;
+ ptr[idx] = 0;
+}
diff --git a/clang/test/SemaCXX/warn-unsafe-buffer-usage-fixits-pointer-access.cpp b/clang/test/SemaCXX/warn-unsafe-buffer-usage-fixits-pointer-access.cpp
index ca19702..b3c64f1 100644
--- a/clang/test/SemaCXX/warn-unsafe-buffer-usage-fixits-pointer-access.cpp
+++ b/clang/test/SemaCXX/warn-unsafe-buffer-usage-fixits-pointer-access.cpp
@@ -83,12 +83,27 @@ void unsafe_method_invocation_single_param() {
}
+void unsafe_method_invocation_single_param_array(int idx) {
+ int p[32];
+ // CHECK-DAG: fix-it:"{{.*}}":{[[@LINE-1]]:3-[[@LINE-1]]:12}:"std::array<int, 32> p"
+
+ int tmp = p[idx];
+ foo(p);
+ // CHECK-DAG: fix-it:"{{.*}}":{[[@LINE-1]]:8-[[@LINE-1]]:8}:".data()"
+}
+
void safe_method_invocation_single_param() {
int* p = new int[10];
// CHECK-NOT: fix-it:"{{.*}}":{[[@LINE-1]]:{{.*}}-[[@LINE-1]]:{{.*}}}
foo(p);
}
+void safe_method_invocation_single_param_array() {
+ int p[10];
+ foo(p);
+ // CHECK-NO: fix-it:"{{.*}}":{[[@LINE-1]]:{{.*}}-[[@LINE-1]]:{{.*}}}:".data()"
+}
+
void unsafe_method_invocation_double_param() {
int* p = new int[10];
// CHECK-DAG: fix-it:"{{.*}}":{[[@LINE-1]]:3-[[@LINE-1]]:11}:"std::span<int> p"
@@ -111,6 +126,20 @@ void unsafe_method_invocation_double_param() {
m1(q, q, 8);
}
+void unsafe_method_invocation_double_param_array(int idx) {
+ int p[14];
+ // CHECK-DAG: fix-it:"{{.*}}":{[[@LINE-1]]:3-[[@LINE-1]]:12}:"std::array<int, 14> p"
+
+ int q[40];
+ // CHECK-DAG: fix-it:"{{.*}}":{[[@LINE-1]]:3-[[@LINE-1]]:12}:"std::array<int, 40> q"
+
+ q[idx] = p[idx];
+
+ m1(p, p, 10);
+ // CHECK-DAG: fix-it:"{{.*}}":{[[@LINE-1]]:7-[[@LINE-1]]:7}:".data()"
+ // CHECK-DAG: fix-it:"{{.*}}":{[[@LINE-2]]:10-[[@LINE-2]]:10}:".data()"
+}
+
void unsafe_access_in_lamda() {
int* p = new int[10];
// CHECK-DAG: fix-it:"{{.*}}":{[[@LINE-1]]:3-[[@LINE-1]]:11}:"std::span<int> p"
@@ -177,4 +206,23 @@ void fixits_in_lambda_capture_rename() {
};
p[5] = 10;
-}
+}
+
+bool ptr_comparison(int* ptr, unsigned idx) {
+ int arr[10];
+ // CHECK-DAG: fix-it:"{{.*}}":{[[@LINE-1]]:3-[[@LINE-1]]:14}:"std::array<int, 10> arr"
+ arr[idx] = idx;
+
+ return arr > ptr;
+ // CHECK-DAG: fix-it:"{{.*}}":{[[@LINE-1]]:13-[[@LINE-1]]:13}:".data()"
+}
+
+int long long ptr_distance(int* ptr, unsigned idx) {
+ int arr[10];
+ // CHECK-DAG: fix-it:"{{.*}}":{[[@LINE-1]]:3-[[@LINE-1]]:14}:"std::array<int, 10> arr"
+ arr[idx] = idx;
+
+ int long long dist = arr - ptr;
+ // CHECK-DAG: fix-it:"{{.*}}":{[[@LINE-1]]:27-[[@LINE-1]]:27}:".data()"
+ return dist;
+}
diff --git a/clang/test/SemaCXX/warn-unsafe-buffer-usage-fixits-pointer-arg-to-func-ptr-call.cpp b/clang/test/SemaCXX/warn-unsafe-buffer-usage-fixits-pointer-arg-to-func-ptr-call.cpp
new file mode 100644
index 0000000..216813c
--- /dev/null
+++ b/clang/test/SemaCXX/warn-unsafe-buffer-usage-fixits-pointer-arg-to-func-ptr-call.cpp
@@ -0,0 +1,49 @@
+// RUN: %clang_cc1 -std=c++20 -Wunsafe-buffer-usage \
+// RUN: -fsafe-buffer-usage-suggestions \
+// RUN: -fdiagnostics-parseable-fixits %s 2>&1 | FileCheck %s
+
+void unsafe_array_func_ptr_call(void (*fn_ptr)(int *param)) {
+ int p[32];
+ // CHECK-DAG: fix-it:"{{.*}}":{[[@LINE-1]]:3-[[@LINE-1]]:12}:"std::array<int, 32> p"
+
+ int idx;
+ p[idx] = 10;
+ fn_ptr(p);
+ // CHECK-DAG: fix-it:"{{.*}}":{[[@LINE-1]]:11-[[@LINE-1]]:11}:".data()"
+}
+
+void unsafe_ptr_func_ptr_call(void (*fn_ptr)(int *param)) {
+ int *p;
+ // CHECK-DAG: fix-it:"{{.*}}":{[[@LINE-1]]:3-[[@LINE-1]]:9}:"std::span<int> p"
+
+ p[5] = 10;
+ fn_ptr(p);
+ // CHECK-DAG: fix-it:"{{.*}}":{[[@LINE-1]]:11-[[@LINE-1]]:11}:".data()"
+}
+
+void addr_of_unsafe_ptr_func_ptr_call(void (*fn_ptr)(int *param)) {
+ int *p;
+ // CHECK-DAG: fix-it:"{{.*}}":{[[@LINE-1]]:3-[[@LINE-1]]:9}:"std::span<int> p"
+
+ p[5] = 10;
+ fn_ptr(&p[0]);
+ // CHECK-DAG: fix-it:"{{.*}}":{[[@LINE-1]]:10-[[@LINE-1]]:15}:"p.data()"
+}
+
+void addr_of_unsafe_ptr_w_offset_func_ptr_call(void (*fn_ptr)(int *param)) {
+ int *p;
+ // CHECK-DAG: fix-it:"{{.*}}":{[[@LINE-1]]:3-[[@LINE-1]]:9}:"std::span<int> p"
+
+ p[5] = 10;
+ fn_ptr(&p[3]);
+ // CHECK-DAG: fix-it:"{{.*}}":{[[@LINE-1]]:10-[[@LINE-1]]:15}:"&p.data()[3]"
+}
+
+void preincrement_unsafe_ptr_func_ptr_call(void (*fn_ptr)(int *param)) {
+ int *p;
+ // CHECK-DAG: fix-it:"{{.*}}":{[[@LINE-1]]:3-[[@LINE-1]]:9}:"std::span<int> p"
+
+ p[5] = 10;
+ fn_ptr(++p);
+ // CHECK-DAG: fix-it:"{{.*}}":{[[@LINE-1]]:10-[[@LINE-1]]:13}:"(p = p.subspan(1)).data()"
+}
diff --git a/clang/test/SemaCXX/warn-unsafe-buffer-usage.cpp b/clang/test/SemaCXX/warn-unsafe-buffer-usage.cpp
index 67cdf25..642db0e 100644
--- a/clang/test/SemaCXX/warn-unsafe-buffer-usage.cpp
+++ b/clang/test/SemaCXX/warn-unsafe-buffer-usage.cpp
@@ -36,7 +36,7 @@ void testIncrement(char *p) { // expected-warning{{'p' is an unsafe pointer used
void * voidPtrCall(void);
char * charPtrCall(void);
-void testArraySubscripts(int *p, int **pp) {
+void testArraySubscripts(int idx, int *p, int **pp) {
// expected-warning@-1{{'p' is an unsafe pointer used for buffer access}}
// expected-warning@-2{{'pp' is an unsafe pointer used for buffer access}}
foo(p[1], // expected-note{{used in buffer access here}}
@@ -64,13 +64,14 @@ void testArraySubscripts(int *p, int **pp) {
// expected-note@-1{{change type of 'a' to 'std::array' to label it for hardening}}
int b[10][10]; // expected-warning{{'b' is an unsafe buffer that does not perform bounds checks}}
- foo(a[1], 1[a], // expected-note2{{used in buffer access here}}
- b[3][4], // expected-warning{{unsafe buffer access}}
- // expected-note@-1{{used in buffer access here}}
- 4[b][3], // expected-warning{{unsafe buffer access}}
- // expected-note@-1{{used in buffer access here}}
- 4[3[b]]); // expected-warning{{unsafe buffer access}}
- // expected-note@-1{{used in buffer access here}}
+ foo(a[idx], idx[a], // expected-note2{{used in buffer access here}}
+ b[idx][idx + 1], // expected-warning{{unsafe buffer access}}
+ // expected-note@-1{{used in buffer access here}}
+ (idx + 1)[b][idx],// expected-warning{{unsafe buffer access}}
+ // expected-note@-1{{used in buffer access here}}
+ (idx + 1)[idx[b]]);
+ // expected-warning@-1{{unsafe buffer access}}
+ // expected-note@-2{{used in buffer access here}}
// Not to warn when index is zero
foo(p[0], pp[0][0], 0[0[pp]], 0[pp][0],
@@ -158,9 +159,9 @@ void testLambdaCaptureAndGlobal(int * p) {
// expected-warning@-1{{'p' is an unsafe pointer used for buffer access}}
int a[10]; // expected-warning{{'a' is an unsafe buffer that does not perform bounds checks}}
- auto Lam = [p, a]() {
+ auto Lam = [p, a](int idx) {
return p[1] // expected-note{{used in buffer access here}}
- + a[1] + garray[1] // expected-note2{{used in buffer access here}}
+ + a[idx] + garray[idx]// expected-note2{{used in buffer access here}}
+ gp[1]; // expected-note{{used in buffer access here}}
};
@@ -178,31 +179,31 @@ void testLambdaCapture() {
// expected-note@-1{{change type of 'b' to 'std::array' to label it for hardening}}
int c[10];
- auto Lam1 = [a]() {
- return a[1]; // expected-note{{used in buffer access here}}
+ auto Lam1 = [a](unsigned idx) {
+ return a[idx]; // expected-note{{used in buffer access here}}
};
- auto Lam2 = [x = b[3]]() { // expected-note{{used in buffer access here}}
+ auto Lam2 = [x = b[c[5]]]() { // expected-note{{used in buffer access here}}
return x;
};
- auto Lam = [x = c]() { // expected-warning{{'x' is an unsafe pointer used for buffer access}}
- return x[3]; // expected-note{{used in buffer access here}}
+ auto Lam = [x = c](unsigned idx) { // expected-warning{{'x' is an unsafe pointer used for buffer access}}
+ return x[idx]; // expected-note{{used in buffer access here}}
};
}
-void testLambdaImplicitCapture() {
+void testLambdaImplicitCapture(long idx) {
int a[10]; // expected-warning{{'a' is an unsafe buffer that does not perform bounds checks}}
// expected-note@-1{{change type of 'a' to 'std::array' to label it for hardening}}
int b[10]; // expected-warning{{'b' is an unsafe buffer that does not perform bounds checks}}
// expected-note@-1{{change type of 'b' to 'std::array' to label it for hardening}}
auto Lam1 = [=]() {
- return a[1]; // expected-note{{used in buffer access here}}
+ return a[idx]; // expected-note{{used in buffer access here}}
};
auto Lam2 = [&]() {
- return b[1]; // expected-note{{used in buffer access here}}
+ return b[idx]; // expected-note{{used in buffer access here}}
};
}
@@ -344,38 +345,37 @@ int testVariableDecls(int * p) {
return p[1]; // expected-note{{used in buffer access here}}
}
-template<typename T> void fArr(T t[]) {
+template<typename T> void fArr(T t[], long long idx) {
// expected-warning@-1{{'t' is an unsafe pointer used for buffer access}}
foo(t[1]); // expected-note{{used in buffer access here}}
T ar[8]; // expected-warning{{'ar' is an unsafe buffer that does not perform bounds checks}}
// expected-note@-1{{change type of 'ar' to 'std::array' to label it for hardening}}
- foo(ar[5]); // expected-note{{used in buffer access here}}
+ foo(ar[idx]); // expected-note{{used in buffer access here}}
}
-template void fArr<int>(int t[]); // FIXME: expected note {{in instantiation of}}
+template void fArr<int>(int t[], long long); // FIXME: expected note {{in instantiation of}}
int testReturn(int t[]) {// expected-note{{change type of 't' to 'std::span' to preserve bounds information}}
// expected-warning@-1{{'t' is an unsafe pointer used for buffer access}}
return t[1]; // expected-note{{used in buffer access here}}
}
-int testArrayAccesses(int n) {
+int testArrayAccesses(int n, int idx) {
// auto deduced array type
int cArr[2][3] = {{1, 2, 3}, {4, 5, 6}};
// expected-warning@-1{{'cArr' is an unsafe buffer that does not perform bounds checks}}
int d = cArr[0][0];
foo(cArr[0][0]);
- foo(cArr[1][2]); // expected-note{{used in buffer access here}}
- // expected-warning@-1{{unsafe buffer access}}
- auto cPtr = cArr[1][2]; // expected-note{{used in buffer access here}}
- // expected-warning@-1{{unsafe buffer access}}
+ foo(cArr[idx][idx + 1]); // expected-note{{used in buffer access here}}
+ // expected-warning@-1{{unsafe buffer access}}
+ auto cPtr = cArr[idx][idx * 2]; // expected-note{{used in buffer access here}}
+ // expected-warning@-1{{unsafe buffer access}}
foo(cPtr);
// Typdefs
typedef int A[3];
const A tArr = {4, 5, 6};
- // expected-warning@-1{{'tArr' is an unsafe buffer that does not perform bounds checks}}
- foo(tArr[0], tArr[1]); // expected-note{{used in buffer access here}}
+ foo(tArr[0], tArr[1]);
return cArr[0][1]; // expected-warning{{unsafe buffer access}}
}
diff --git a/clang/test/SemaObjC/attr-suppress.m b/clang/test/SemaObjC/attr-suppress.m
index ade8f94..c12da09 100644
--- a/clang/test/SemaObjC/attr-suppress.m
+++ b/clang/test/SemaObjC/attr-suppress.m
@@ -6,8 +6,7 @@
SUPPRESS1 int global = 42;
SUPPRESS1 void foo() {
- // expected-error@-1 {{'suppress' attribute only applies to variables and statements}}
- SUPPRESS1 int *p;
+ SUPPRESS1 int *p; // no-warning
SUPPRESS1 int a = 0; // no-warning
SUPPRESS2()
@@ -28,23 +27,19 @@ SUPPRESS1 void foo() {
// GNU-style attributes and C++11 attributes apply to different things when
// written like this. GNU attribute gets attached to the declaration, while
// C++11 attribute ends up on the type.
- int SUPPRESS2("r") z;
- SUPPRESS2(foo)
+ int SUPPRESS2("r") z; // no-warning
+ SUPPRESS2(foo) // no-warning
float f;
// expected-error@-2 {{expected string literal as argument of 'suppress' attribute}}
}
-union SUPPRESS2("type.1") U {
- // expected-error@-1 {{'suppress' attribute only applies to variables and statements}}
+union SUPPRESS2("type.1") U { // no-warning
int i;
float f;
};
-SUPPRESS1 @interface Test {
- // expected-error@-1 {{'suppress' attribute only applies to variables and statements}}
+SUPPRESS1 @interface Test { // no-warning
}
-@property SUPPRESS2("prop") int *prop;
-// expected-error@-1 {{'suppress' attribute only applies to variables and statements}}
-- (void)bar:(int)x SUPPRESS1;
-// expected-error@-1 {{'suppress' attribute only applies to variables and statements}}
+@property SUPPRESS2("prop") int *prop; // no-warning
+- (void)bar:(int)x SUPPRESS1; // no-warning
@end
diff --git a/clang/test/lit.cfg.py b/clang/test/lit.cfg.py
index 271372b..f93b5d9 100644
--- a/clang/test/lit.cfg.py
+++ b/clang/test/lit.cfg.py
@@ -97,6 +97,7 @@ tools = [
"llvm-lto",
"llvm-lto2",
"llvm-profdata",
+ "llvm-readtapi",
ToolSubst(
"%clang_extdef_map",
command=FindTool("clang-extdef-mapping"),
diff --git a/clang/tools/clang-offload-packager/ClangOffloadPackager.cpp b/clang/tools/clang-offload-packager/ClangOffloadPackager.cpp
index 08de3f3..c36a5aa 100644
--- a/clang/tools/clang-offload-packager/ClangOffloadPackager.cpp
+++ b/clang/tools/clang-offload-packager/ClangOffloadPackager.cpp
@@ -104,33 +104,36 @@ static Error bundleImages() {
inconvertibleErrorCode(),
"'file' and 'triple' are required image arguments");
- OffloadBinary::OffloadingImage ImageBinary{};
- std::unique_ptr<llvm::MemoryBuffer> DeviceImage;
- for (const auto &[Key, Value] : Args) {
- if (Key == "file") {
- llvm::ErrorOr<std::unique_ptr<llvm::MemoryBuffer>> ObjectOrErr =
- llvm::MemoryBuffer::getFileOrSTDIN(Value);
- if (std::error_code EC = ObjectOrErr.getError())
- return errorCodeToError(EC);
-
- // Clang uses the '.o' suffix for LTO bitcode.
- if (identify_magic((*ObjectOrErr)->getBuffer()) == file_magic::bitcode)
- ImageBinary.TheImageKind = object::IMG_Bitcode;
- else
- ImageBinary.TheImageKind =
- getImageKind(sys::path::extension(Value).drop_front());
- ImageBinary.Image = std::move(*ObjectOrErr);
- } else if (Key == "kind") {
- ImageBinary.TheOffloadKind = getOffloadKind(Value);
- } else {
- ImageBinary.StringData[Key] = Value;
+ // Permit using multiple instances of `file` in a single string.
+ for (auto &File : llvm::split(Args["file"], ",")) {
+ OffloadBinary::OffloadingImage ImageBinary{};
+ std::unique_ptr<llvm::MemoryBuffer> DeviceImage;
+
+ llvm::ErrorOr<std::unique_ptr<llvm::MemoryBuffer>> ObjectOrErr =
+ llvm::MemoryBuffer::getFileOrSTDIN(File);
+ if (std::error_code EC = ObjectOrErr.getError())
+ return errorCodeToError(EC);
+
+ // Clang uses the '.o' suffix for LTO bitcode.
+ if (identify_magic((*ObjectOrErr)->getBuffer()) == file_magic::bitcode)
+ ImageBinary.TheImageKind = object::IMG_Bitcode;
+ else
+ ImageBinary.TheImageKind =
+ getImageKind(sys::path::extension(File).drop_front());
+ ImageBinary.Image = std::move(*ObjectOrErr);
+ for (const auto &[Key, Value] : Args) {
+ if (Key == "kind") {
+ ImageBinary.TheOffloadKind = getOffloadKind(Value);
+ } else if (Key != "file") {
+ ImageBinary.StringData[Key] = Value;
+ }
}
+ llvm::SmallString<0> Buffer = OffloadBinary::write(ImageBinary);
+ if (Buffer.size() % OffloadBinary::getAlignment() != 0)
+ return createStringError(inconvertibleErrorCode(),
+ "Offload binary has invalid size alignment");
+ OS << Buffer;
}
- llvm::SmallString<0> Buffer = OffloadBinary::write(ImageBinary);
- if (Buffer.size() % OffloadBinary::getAlignment() != 0)
- return createStringError(inconvertibleErrorCode(),
- "Offload binary has invalid size alignment");
- OS << Buffer;
}
if (Error E = writeFile(OutputFile,
diff --git a/clang/tools/libclang/CIndex.cpp b/clang/tools/libclang/CIndex.cpp
index e5c0971..4ded92c 100644
--- a/clang/tools/libclang/CIndex.cpp
+++ b/clang/tools/libclang/CIndex.cpp
@@ -6114,6 +6114,8 @@ CXString clang_getCursorKindSpelling(enum CXCursorKind Kind) {
return cxstring::createRef("attribute(aligned)");
case CXCursor_ConceptDecl:
return cxstring::createRef("ConceptDecl");
+ case CXCursor_OpenACCComputeConstruct:
+ return cxstring::createRef("OpenACCComputeConstruct");
}
llvm_unreachable("Unhandled CXCursorKind");
diff --git a/clang/tools/libclang/CXCursor.cpp b/clang/tools/libclang/CXCursor.cpp
index 01b8a23..454bf75 100644
--- a/clang/tools/libclang/CXCursor.cpp
+++ b/clang/tools/libclang/CXCursor.cpp
@@ -870,6 +870,9 @@ CXCursor cxcursor::MakeCXCursor(const Stmt *S, const Decl *Parent,
case Stmt::OMPParallelGenericLoopDirectiveClass:
K = CXCursor_OMPParallelGenericLoopDirective;
break;
+ case Stmt::OpenACCComputeConstructClass:
+ K = CXCursor_OpenACCComputeConstruct;
+ break;
case Stmt::OMPTargetParallelGenericLoopDirectiveClass:
K = CXCursor_OMPTargetParallelGenericLoopDirective;
break;
diff --git a/clang/tools/scan-build/bin/scan-build b/clang/tools/scan-build/bin/scan-build
index 04734d9..37241c6 100755
--- a/clang/tools/scan-build/bin/scan-build
+++ b/clang/tools/scan-build/bin/scan-build
@@ -722,9 +722,18 @@ ENDTEXT
print OUT <<ENDTEXT;
</table>
+
+<h2>Filter Results by File</h2>
+<input
+ type="text"
+ id="file_input"
+ onkeyup="searchFiles()"
+ placeholder="Enter a path or filename"
+ title="Enter a path or filename">
+
<h2>Reports</h2>
-<table class="sortable" style="table-layout:automatic">
+<table id="reports_table" class="sortable" style="table-layout:automatic">
<thead><tr>
<td>Bug Group</td>
<td class="sorttable_sorted">Bug Type<span id="sorttable_sortfwdind">&nbsp;&#x25BE;</span></td>
diff --git a/clang/tools/scan-build/share/scan-build/sorttable.js b/clang/tools/scan-build/share/scan-build/sorttable.js
index 32faa07..e608daa 100644
--- a/clang/tools/scan-build/share/scan-build/sorttable.js
+++ b/clang/tools/scan-build/share/scan-build/sorttable.js
@@ -490,3 +490,23 @@ var forEach = function(object, block, context) {
resolve.forEach(object, block, context);
}
};
+
+// filter results by filename
+const searchFiles = () => {
+ const columns = [
+ { name: 'Filename', index: 2, isFilter: true },
+ ]
+ const filterColumns = columns.filter(c => c.isFilter).map(c => c.index)
+ const trs = document.querySelectorAll(`#reports_table tr:not(.header)`)
+ const filter = document.querySelector('#file_input').value
+ const regex = new RegExp(escape(filter), 'i')
+ const isFoundInTds = td => regex.test(td.innerHTML)
+ const isFound = childrenArr => childrenArr.some(isFoundInTds)
+ const setTrStyleDisplay = ({ style, children }) => {
+ style.display = isFound([
+ ...filterColumns.map(c => children[c])
+ ]) ? '' : 'none'
+ }
+
+ trs.forEach(setTrStyleDisplay)
+}
diff --git a/clang/unittests/Analysis/FlowSensitive/TransferTest.cpp b/clang/unittests/Analysis/FlowSensitive/TransferTest.cpp
index 55af702..4b3b351 100644
--- a/clang/unittests/Analysis/FlowSensitive/TransferTest.cpp
+++ b/clang/unittests/Analysis/FlowSensitive/TransferTest.cpp
@@ -2313,6 +2313,42 @@ TEST(TransferTest, AssignmentOperatorWithInitAndInheritance) {
ASTContext &ASTCtx) {});
}
+TEST(TransferTest, AssignmentOperatorReturnsVoid) {
+ // This is a crash repro.
+ std::string Code = R"(
+ struct S {
+ void operator=(S&& other);
+ };
+ void target() {
+ S s;
+ s = S();
+ // [[p]]
+ }
+ )";
+ runDataflow(
+ Code,
+ [](const llvm::StringMap<DataflowAnalysisState<NoopLattice>> &Results,
+ ASTContext &ASTCtx) {});
+}
+
+TEST(TransferTest, AssignmentOperatorReturnsByValue) {
+ // This is a crash repro.
+ std::string Code = R"(
+ struct S {
+ S operator=(S&& other);
+ };
+ void target() {
+ S s;
+ s = S();
+ // [[p]]
+ }
+ )";
+ runDataflow(
+ Code,
+ [](const llvm::StringMap<DataflowAnalysisState<NoopLattice>> &Results,
+ ASTContext &ASTCtx) {});
+}
+
TEST(TransferTest, CopyConstructor) {
std::string Code = R"(
struct A {
diff --git a/clang/unittests/Format/ConfigParseTest.cpp b/clang/unittests/Format/ConfigParseTest.cpp
index 571e1eb..8c74ed2 100644
--- a/clang/unittests/Format/ConfigParseTest.cpp
+++ b/clang/unittests/Format/ConfigParseTest.cpp
@@ -677,22 +677,36 @@ TEST(ConfigParseTest, ParsesConfiguration) {
" AfterControlStatement: false",
BraceWrapping.AfterControlStatement, FormatStyle::BWACS_Never);
- Style.AlwaysBreakAfterReturnType = FormatStyle::RTBS_All;
- CHECK_PARSE("AlwaysBreakAfterReturnType: None", AlwaysBreakAfterReturnType,
+ Style.BreakAfterReturnType = FormatStyle::RTBS_All;
+ CHECK_PARSE("BreakAfterReturnType: None", BreakAfterReturnType,
FormatStyle::RTBS_None);
- CHECK_PARSE("AlwaysBreakAfterReturnType: Automatic",
- AlwaysBreakAfterReturnType, FormatStyle::RTBS_Automatic);
+ CHECK_PARSE("BreakAfterReturnType: Automatic", BreakAfterReturnType,
+ FormatStyle::RTBS_Automatic);
+ CHECK_PARSE("BreakAfterReturnType: ExceptShortType", BreakAfterReturnType,
+ FormatStyle::RTBS_ExceptShortType);
+ CHECK_PARSE("BreakAfterReturnType: All", BreakAfterReturnType,
+ FormatStyle::RTBS_All);
+ CHECK_PARSE("BreakAfterReturnType: TopLevel", BreakAfterReturnType,
+ FormatStyle::RTBS_TopLevel);
+ CHECK_PARSE("BreakAfterReturnType: AllDefinitions", BreakAfterReturnType,
+ FormatStyle::RTBS_AllDefinitions);
+ CHECK_PARSE("BreakAfterReturnType: TopLevelDefinitions", BreakAfterReturnType,
+ FormatStyle::RTBS_TopLevelDefinitions);
+ // For backward compatibility:
+ CHECK_PARSE("AlwaysBreakAfterReturnType: None", BreakAfterReturnType,
+ FormatStyle::RTBS_None);
+ CHECK_PARSE("AlwaysBreakAfterReturnType: Automatic", BreakAfterReturnType,
+ FormatStyle::RTBS_Automatic);
CHECK_PARSE("AlwaysBreakAfterReturnType: ExceptShortType",
- AlwaysBreakAfterReturnType, FormatStyle::RTBS_ExceptShortType);
- CHECK_PARSE("AlwaysBreakAfterReturnType: All", AlwaysBreakAfterReturnType,
+ BreakAfterReturnType, FormatStyle::RTBS_ExceptShortType);
+ CHECK_PARSE("AlwaysBreakAfterReturnType: All", BreakAfterReturnType,
FormatStyle::RTBS_All);
- CHECK_PARSE("AlwaysBreakAfterReturnType: TopLevel",
- AlwaysBreakAfterReturnType, FormatStyle::RTBS_TopLevel);
+ CHECK_PARSE("AlwaysBreakAfterReturnType: TopLevel", BreakAfterReturnType,
+ FormatStyle::RTBS_TopLevel);
CHECK_PARSE("AlwaysBreakAfterReturnType: AllDefinitions",
- AlwaysBreakAfterReturnType, FormatStyle::RTBS_AllDefinitions);
+ BreakAfterReturnType, FormatStyle::RTBS_AllDefinitions);
CHECK_PARSE("AlwaysBreakAfterReturnType: TopLevelDefinitions",
- AlwaysBreakAfterReturnType,
- FormatStyle::RTBS_TopLevelDefinitions);
+ BreakAfterReturnType, FormatStyle::RTBS_TopLevelDefinitions);
Style.BreakTemplateDeclarations = FormatStyle::BTDS_Yes;
CHECK_PARSE("BreakTemplateDeclarations: Leave", BreakTemplateDeclarations,
diff --git a/clang/unittests/Format/DefinitionBlockSeparatorTest.cpp b/clang/unittests/Format/DefinitionBlockSeparatorTest.cpp
index f548949..7a12093 100644
--- a/clang/unittests/Format/DefinitionBlockSeparatorTest.cpp
+++ b/clang/unittests/Format/DefinitionBlockSeparatorTest.cpp
@@ -144,7 +144,7 @@ TEST_F(DefinitionBlockSeparatorTest, Basic) {
Style);
FormatStyle BreakAfterReturnTypeStyle = Style;
- BreakAfterReturnTypeStyle.AlwaysBreakAfterReturnType = FormatStyle::RTBS_All;
+ BreakAfterReturnTypeStyle.BreakAfterReturnType = FormatStyle::RTBS_All;
// Test uppercased long typename
verifyFormat("class Foo {\n"
" void\n"
diff --git a/clang/unittests/Format/FormatTest.cpp b/clang/unittests/Format/FormatTest.cpp
index 7b65c8d..b0687eae 100644
--- a/clang/unittests/Format/FormatTest.cpp
+++ b/clang/unittests/Format/FormatTest.cpp
@@ -9870,7 +9870,7 @@ TEST_F(FormatTest, ReturnTypeBreakingStyle) {
Style.ColumnLimit = 60;
// No declarations or definitions should be moved to own line.
- Style.AlwaysBreakAfterReturnType = FormatStyle::RTBS_None;
+ Style.BreakAfterReturnType = FormatStyle::RTBS_None;
verifyFormat("class A {\n"
" int f() { return 1; }\n"
" int g();\n"
@@ -9884,7 +9884,7 @@ TEST_F(FormatTest, ReturnTypeBreakingStyle) {
Style);
// It is now allowed to break after a short return type if necessary.
- Style.AlwaysBreakAfterReturnType = FormatStyle::RTBS_Automatic;
+ Style.BreakAfterReturnType = FormatStyle::RTBS_Automatic;
verifyFormat("class A {\n"
" int f() { return 1; }\n"
" int g();\n"
@@ -9898,7 +9898,7 @@ TEST_F(FormatTest, ReturnTypeBreakingStyle) {
Style);
// It now must never break after a short return type.
- Style.AlwaysBreakAfterReturnType = FormatStyle::RTBS_ExceptShortType;
+ Style.BreakAfterReturnType = FormatStyle::RTBS_ExceptShortType;
verifyFormat("class A {\n"
" int f() { return 1; }\n"
" int g();\n"
@@ -9913,7 +9913,7 @@ TEST_F(FormatTest, ReturnTypeBreakingStyle) {
// All declarations and definitions should have the return type moved to its
// own line.
- Style.AlwaysBreakAfterReturnType = FormatStyle::RTBS_All;
+ Style.BreakAfterReturnType = FormatStyle::RTBS_All;
Style.TypenameMacros = {"LIST"};
verifyFormat("SomeType\n"
"funcdecl(LIST(uint64_t));",
@@ -9940,7 +9940,7 @@ TEST_F(FormatTest, ReturnTypeBreakingStyle) {
// Top-level definitions, and no kinds of declarations should have the
// return type moved to its own line.
- Style.AlwaysBreakAfterReturnType = FormatStyle::RTBS_TopLevelDefinitions;
+ Style.BreakAfterReturnType = FormatStyle::RTBS_TopLevelDefinitions;
verifyFormat("class B {\n"
" int f() { return 1; }\n"
" int g();\n"
@@ -9954,7 +9954,7 @@ TEST_F(FormatTest, ReturnTypeBreakingStyle) {
// Top-level definitions and declarations should have the return type moved
// to its own line.
- Style.AlwaysBreakAfterReturnType = FormatStyle::RTBS_TopLevel;
+ Style.BreakAfterReturnType = FormatStyle::RTBS_TopLevel;
verifyFormat("class C {\n"
" int f() { return 1; }\n"
" int g();\n"
@@ -9971,7 +9971,7 @@ TEST_F(FormatTest, ReturnTypeBreakingStyle) {
// All definitions should have the return type moved to its own line, but no
// kinds of declarations.
- Style.AlwaysBreakAfterReturnType = FormatStyle::RTBS_AllDefinitions;
+ Style.BreakAfterReturnType = FormatStyle::RTBS_AllDefinitions;
verifyFormat("class D {\n"
" int\n"
" f() {\n"
@@ -11939,7 +11939,7 @@ TEST_F(FormatTest, UnderstandsAttributes) {
"aaaaaaaaaaaaaaaaaaaaaaa(int i);");
verifyFormat("__attribute__((nodebug)) ::qualified_type f();");
FormatStyle AfterType = getLLVMStyle();
- AfterType.AlwaysBreakAfterReturnType = FormatStyle::RTBS_All;
+ AfterType.BreakAfterReturnType = FormatStyle::RTBS_All;
verifyFormat("__attribute__((nodebug)) void\n"
"foo() {}",
AfterType);
@@ -16491,6 +16491,10 @@ TEST_F(FormatTest, ConfigurableSpaceBeforeParens) {
verifyFormat("T A::operator()();", NoSpace);
verifyFormat("X A::operator++(T);", NoSpace);
verifyFormat("auto lambda = []() { return 0; };", NoSpace);
+ verifyFormat("#if (foo || bar) && baz\n"
+ "#elif ((a || b) && c) || d\n"
+ "#endif",
+ NoSpace);
FormatStyle Space = getLLVMStyle();
Space.SpaceBeforeParens = FormatStyle::SBPO_Always;
@@ -26972,6 +26976,7 @@ TEST_F(FormatTest, RemoveParentheses) {
EXPECT_EQ(Style.RemoveParentheses, FormatStyle::RPS_Leave);
Style.RemoveParentheses = FormatStyle::RPS_MultipleParentheses;
+ verifyFormat("#define Foo(...) foo((__VA_ARGS__))", Style);
verifyFormat("int x __attribute__((aligned(16))) = 0;", Style);
verifyFormat("decltype((foo->bar)) baz;", Style);
verifyFormat("class __declspec(dllimport) X {};",
@@ -27006,6 +27011,7 @@ TEST_F(FormatTest, RemoveParentheses) {
verifyFormat("return (({ 0; }));", "return ((({ 0; })));", Style);
Style.RemoveParentheses = FormatStyle::RPS_ReturnStatement;
+ verifyFormat("#define Return0 return (0);", Style);
verifyFormat("return 0;", "return (0);", Style);
verifyFormat("co_return 0;", "co_return ((0));", Style);
verifyFormat("return 0;", "return (((0)));", Style);
diff --git a/clang/unittests/Format/FormatTestCSharp.cpp b/clang/unittests/Format/FormatTestCSharp.cpp
index 6f5e1e4..de261c0 100644
--- a/clang/unittests/Format/FormatTestCSharp.cpp
+++ b/clang/unittests/Format/FormatTestCSharp.cpp
@@ -505,7 +505,7 @@ TEST_F(FormatTestCSharp, CSharpNullForgiving) {
TEST_F(FormatTestCSharp, AttributesIndentation) {
FormatStyle Style = getMicrosoftStyle(FormatStyle::LK_CSharp);
- Style.AlwaysBreakAfterReturnType = FormatStyle::RTBS_None;
+ Style.BreakAfterReturnType = FormatStyle::RTBS_None;
verifyFormat("[STAThread]\n"
"static void Main(string[] args)\n"
diff --git a/clang/www/cxx_dr_status.html b/clang/www/cxx_dr_status.html
index 8f4ae23..1e527e1 100755
--- a/clang/www/cxx_dr_status.html
+++ b/clang/www/cxx_dr_status.html
@@ -1244,7 +1244,7 @@
<td><a href="https://cplusplus.github.io/CWG/issues/201.html">201</a></td>
<td>CD1</td>
<td>Order of destruction of temporaries in initializers</td>
- <td class="unknown" align="center">Unknown</td>
+ <td class="full" align="center">Clang 2.8</td>
</tr>
<tr id="202">
<td><a href="https://cplusplus.github.io/CWG/issues/202.html">202</a></td>
@@ -1299,7 +1299,7 @@ accessible?</td>
<td><a href="https://cplusplus.github.io/CWG/issues/210.html">210</a></td>
<td>TC1</td>
<td>What is the type matched by an exception handler?</td>
- <td class="unknown" align="center">Unknown</td>
+ <td class="full" align="center">Clang 2.7</td>
</tr>
<tr id="211">
<td><a href="https://cplusplus.github.io/CWG/issues/211.html">211</a></td>
@@ -1792,7 +1792,7 @@ of class templates</td>
<td><a href="https://cplusplus.github.io/CWG/issues/292.html">292</a></td>
<td>CD3</td>
<td>Deallocation on exception in <TT>new</TT> before arguments evaluated</td>
- <td class="unknown" align="center">Unknown</td>
+ <td class="full" align="center">Clang 2.9</td>
</tr>
<tr class="open" id="293">
<td><a href="https://cplusplus.github.io/CWG/issues/293.html">293</a></td>
diff --git a/compiler-rt/lib/asan_abi/asan_abi_shim.cpp b/compiler-rt/lib/asan_abi/asan_abi_shim.cpp
index 35c45df..2512abc 100644
--- a/compiler-rt/lib/asan_abi/asan_abi_shim.cpp
+++ b/compiler-rt/lib/asan_abi/asan_abi_shim.cpp
@@ -54,7 +54,7 @@ void *__asan_memmove(void *dest, const void *src, uptr n) {
// Functions concerning RTL startup and initialization
void __asan_init(void) {
- static_assert(sizeof(uptr) == 8);
+ static_assert(sizeof(uptr) == 8 || sizeof(uptr) == 4);
static_assert(sizeof(u64) == 8);
static_assert(sizeof(u32) == 4);
diff --git a/compiler-rt/lib/ubsan/ubsan_signals_standalone.cpp b/compiler-rt/lib/ubsan/ubsan_signals_standalone.cpp
index 354f847..68edd3a 100644
--- a/compiler-rt/lib/ubsan/ubsan_signals_standalone.cpp
+++ b/compiler-rt/lib/ubsan/ubsan_signals_standalone.cpp
@@ -66,6 +66,11 @@ void InitializeDeadlySignals() {
return;
is_initialized = true;
InitializeSignalInterceptors();
+#if SANITIZER_INTERCEPT_SIGNAL_AND_SIGACTION
+ // REAL(sigaction_symname) is nullptr in a static link. Bail out.
+ if (!REAL(sigaction_symname))
+ return;
+#endif
InstallDeadlySignalHandlers(&UBsanOnDeadlySignal);
}
diff --git a/compiler-rt/test/CMakeLists.txt b/compiler-rt/test/CMakeLists.txt
index ee2ef90..c186be1 100644
--- a/compiler-rt/test/CMakeLists.txt
+++ b/compiler-rt/test/CMakeLists.txt
@@ -116,7 +116,11 @@ endif()
# Now that we've traversed all the directories and know all the lit testsuites,
# introduce a rule to run to run all of them.
-add_custom_target(compiler-rt-test-depends DEPENDS ${LLVM_COMPILER_RT_LIT_DEPENDS})
+get_property(LLVM_COMPILER_RT_LIT_DEPENDS GLOBAL PROPERTY LLVM_COMPILER_RT_LIT_DEPENDS)
+add_custom_target(compiler-rt-test-depends)
+if(LLVM_COMPILER_RT_LIT_DEPENDS)
+ add_dependencies(compiler-rt-test-depends ${LLVM_COMPILER_RT_LIT_DEPENDS})
+endif()
umbrella_lit_testsuite_end(check-compiler-rt)
if(COMPILER_RT_STANDALONE_BUILD)
diff --git a/compiler-rt/test/ubsan/TestCases/Misc/Linux/static-link.cpp b/compiler-rt/test/ubsan/TestCases/Misc/Linux/static-link.cpp
index cd18504..c20231c 100644
--- a/compiler-rt/test/ubsan/TestCases/Misc/Linux/static-link.cpp
+++ b/compiler-rt/test/ubsan/TestCases/Misc/Linux/static-link.cpp
@@ -1,7 +1,8 @@
// REQUIRES: ubsan-standalone
// REQUIRES: target={{x86_64.*}}
-// REQUIRES: librt_has_multf3
+// UNSUPPORTED: i386-target-arch, internal_symbolizer
// RUN: %clangxx -fsanitize=bool -static %s -o %t && UBSAN_OPTIONS=handle_segv=0:handle_sigbus=0:handle_sigfpe=0 %run %t 2>&1 | FileCheck %s
+// RUN: %run %t 2>&1 | FileCheck %s
#include <signal.h>
#include <stdio.h>
diff --git a/flang/docs/FortranLLVMTestSuite.md b/flang/docs/FortranLLVMTestSuite.md
index f07d415..45485ef 100644
--- a/flang/docs/FortranLLVMTestSuite.md
+++ b/flang/docs/FortranLLVMTestSuite.md
@@ -62,7 +62,7 @@ cmake -G "Ninja" -DCMAKE_C_COMPILER=gcc -DCMAKE_CXX_COMPILER=g++ \
Tests from the gfortran test suite have been imported into the LLVM Test Suite.
The tests will be run automatically if the test suite is built following the
-instructions described [above](#running-the-LLVM-test-suite-with-fortran).
+instructions described [above](#running-the-llvm-test-suite-with-fortran).
There are additional configure-time options that can be used with the gfortran
tests. More details about those options and their purpose can be found in
[`Fortran/gfortran/README.md`](https://github.com/llvm/llvm-test-suite/tree/main/Fortran/gfortran/README.md)`.
diff --git a/flang/docs/index.md b/flang/docs/index.md
index ff8f4a2..d974a36 100644
--- a/flang/docs/index.md
+++ b/flang/docs/index.md
@@ -68,6 +68,7 @@ on how to get in touch with us and to learn more about the current status.
OpenACC
OpenACC-descriptor-management.md
OpenMP-4.5-grammar.md
+ OpenMP-descriptor-management
OpenMP-semantics
OptionComparison
Overview
diff --git a/flang/include/flang/Optimizer/Dialect/FIRAttr.td b/flang/include/flang/Optimizer/Dialect/FIRAttr.td
index 00e293e..66d6cd4 100644
--- a/flang/include/flang/Optimizer/Dialect/FIRAttr.td
+++ b/flang/include/flang/Optimizer/Dialect/FIRAttr.td
@@ -113,4 +113,28 @@ def fir_CUDAProcAttributeAttr :
let assemblyFormat = [{ ```<` $value `>` }];
}
+def fir_CUDALaunchBoundsAttr : fir_Attr<"CUDALaunchBounds"> {
+ let mnemonic = "launch_bounds";
+
+ let parameters = (ins
+ "mlir::IntegerAttr":$maxTPB,
+ "mlir::IntegerAttr":$minBPM,
+ OptionalParameter<"mlir::IntegerAttr">:$upperBoundClusterSize
+ );
+
+ let assemblyFormat = "`<` struct(params) `>`";
+}
+
+def fir_CUDAClusterDimsAttr : fir_Attr<"CUDAClusterDims"> {
+ let mnemonic = "cluster_dims";
+
+ let parameters = (ins
+ "mlir::IntegerAttr":$x,
+ "mlir::IntegerAttr":$y,
+ "mlir::IntegerAttr":$z
+ );
+
+ let assemblyFormat = "`<` struct(params) `>`";
+}
+
#endif // FIR_DIALECT_FIR_ATTRS
diff --git a/flang/include/flang/Optimizer/Dialect/FIRDialect.h b/flang/include/flang/Optimizer/Dialect/FIRDialect.h
index 440fe77..2383855 100644
--- a/flang/include/flang/Optimizer/Dialect/FIRDialect.h
+++ b/flang/include/flang/Optimizer/Dialect/FIRDialect.h
@@ -70,6 +70,9 @@ bool canLegallyInline(mlir::Operation *, mlir::Operation *, bool);
// Register the FIRInlinerInterface to FIROpsDialect
void addFIRInlinerExtension(mlir::DialectRegistry &registry);
+// Register implementation of LLVMTranslationDialectInterface.
+void addFIRToLLVMIRExtension(mlir::DialectRegistry &registry);
+
} // namespace fir
#endif // FORTRAN_OPTIMIZER_DIALECT_FIRDIALECT_H
diff --git a/flang/include/flang/Optimizer/Dialect/FIROpsSupport.h b/flang/include/flang/Optimizer/Dialect/FIROpsSupport.h
index 6ac6a31..e8226b6 100644
--- a/flang/include/flang/Optimizer/Dialect/FIROpsSupport.h
+++ b/flang/include/flang/Optimizer/Dialect/FIROpsSupport.h
@@ -75,6 +75,16 @@ static constexpr llvm::StringRef getTargetAttrName() { return "fir.target"; }
/// Attribute to mark Fortran entities with the CUDA attribute.
static constexpr llvm::StringRef getCUDAAttrName() { return "fir.cuda_attr"; }
+/// Attribute to carry CUDA launch_bounds values.
+static constexpr llvm::StringRef getCUDALaunchBoundsAttrName() {
+ return "fir.cuda_launch_bounds";
+}
+
+/// Attribute to carry CUDA cluster_dims values.
+static constexpr llvm::StringRef getCUDAClusterDimsAttrName() {
+ return "fir.cuda_cluster_dims";
+}
+
/// Attribute to mark that a function argument is a character dummy procedure.
/// Character dummy procedure have special ABI constraints.
static constexpr llvm::StringRef getCharacterProcedureDummyAttrName() {
diff --git a/flang/include/flang/Optimizer/Support/InitFIR.h b/flang/include/flang/Optimizer/Support/InitFIR.h
index f376840..9f4c4ed 100644
--- a/flang/include/flang/Optimizer/Support/InitFIR.h
+++ b/flang/include/flang/Optimizer/Support/InitFIR.h
@@ -58,6 +58,7 @@ inline void addFIRExtensions(mlir::DialectRegistry &registry,
bool addFIRInlinerInterface = true) {
if (addFIRInlinerInterface)
addFIRInlinerExtension(registry);
+ addFIRToLLVMIRExtension(registry);
}
inline void loadNonCodegenDialects(mlir::MLIRContext &context) {
diff --git a/flang/include/flang/Optimizer/Support/Utils.h b/flang/include/flang/Optimizer/Support/Utils.h
index 4e06bf8..7a8a34c 100644
--- a/flang/include/flang/Optimizer/Support/Utils.h
+++ b/flang/include/flang/Optimizer/Support/Utils.h
@@ -18,7 +18,6 @@
#include "flang/Optimizer/Builder/Todo.h"
#include "flang/Optimizer/Dialect/FIROps.h"
#include "flang/Optimizer/Dialect/FIRType.h"
-#include "flang/Optimizer/HLFIR/HLFIRDialect.h"
#include "flang/Optimizer/Support/FatalError.h"
#include "mlir/Dialect/Arith/IR/Arith.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
@@ -135,144 +134,6 @@ inline void intrinsicTypeTODO(fir::FirOpBuilder &builder, mlir::Type type,
" in " + intrinsicName);
}
-using MinlocBodyOpGeneratorTy = llvm::function_ref<mlir::Value(
- fir::FirOpBuilder &, mlir::Location, const mlir::Type &, mlir::Value,
- mlir::Value, mlir::Value, const llvm::SmallVectorImpl<mlir::Value> &)>;
-using InitValGeneratorTy = llvm::function_ref<mlir::Value(
- fir::FirOpBuilder &, mlir::Location, const mlir::Type &)>;
-using AddrGeneratorTy = llvm::function_ref<mlir::Value(
- fir::FirOpBuilder &, mlir::Location, const mlir::Type &, mlir::Value,
- mlir::Value)>;
-
-// Produces a loop nest for a Minloc intrinsic.
-inline void genMinMaxlocReductionLoop(
- fir::FirOpBuilder &builder, mlir::Value array,
- fir::InitValGeneratorTy initVal, fir::MinlocBodyOpGeneratorTy genBody,
- fir::AddrGeneratorTy getAddrFn, unsigned rank, mlir::Type elementType,
- mlir::Location loc, mlir::Type maskElemType, mlir::Value resultArr,
- bool maskMayBeLogicalScalar) {
- mlir::IndexType idxTy = builder.getIndexType();
-
- mlir::Value zeroIdx = builder.createIntegerConstant(loc, idxTy, 0);
-
- fir::SequenceType::Shape flatShape(rank,
- fir::SequenceType::getUnknownExtent());
- mlir::Type arrTy = fir::SequenceType::get(flatShape, elementType);
- mlir::Type boxArrTy = fir::BoxType::get(arrTy);
- array = builder.create<fir::ConvertOp>(loc, boxArrTy, array);
-
- mlir::Type resultElemType = hlfir::getFortranElementType(resultArr.getType());
- mlir::Value flagSet = builder.createIntegerConstant(loc, resultElemType, 1);
- mlir::Value zero = builder.createIntegerConstant(loc, resultElemType, 0);
- mlir::Value flagRef = builder.createTemporary(loc, resultElemType);
- builder.create<fir::StoreOp>(loc, zero, flagRef);
-
- mlir::Value init = initVal(builder, loc, elementType);
- llvm::SmallVector<mlir::Value, Fortran::common::maxRank> bounds;
-
- assert(rank > 0 && "rank cannot be zero");
- mlir::Value one = builder.createIntegerConstant(loc, idxTy, 1);
-
- // Compute all the upper bounds before the loop nest.
- // It is not strictly necessary for performance, since the loop nest
- // does not have any store operations and any LICM optimization
- // should be able to optimize the redundancy.
- for (unsigned i = 0; i < rank; ++i) {
- mlir::Value dimIdx = builder.createIntegerConstant(loc, idxTy, i);
- auto dims =
- builder.create<fir::BoxDimsOp>(loc, idxTy, idxTy, idxTy, array, dimIdx);
- mlir::Value len = dims.getResult(1);
- // We use C indexing here, so len-1 as loopcount
- mlir::Value loopCount = builder.create<mlir::arith::SubIOp>(loc, len, one);
- bounds.push_back(loopCount);
- }
- // Create a loop nest consisting of OP operations.
- // Collect the loops' induction variables into indices array,
- // which will be used in the innermost loop to load the input
- // array's element.
- // The loops are generated such that the innermost loop processes
- // the 0 dimension.
- llvm::SmallVector<mlir::Value, Fortran::common::maxRank> indices;
- for (unsigned i = rank; 0 < i; --i) {
- mlir::Value step = one;
- mlir::Value loopCount = bounds[i - 1];
- auto loop =
- builder.create<fir::DoLoopOp>(loc, zeroIdx, loopCount, step, false,
- /*finalCountValue=*/false, init);
- init = loop.getRegionIterArgs()[0];
- indices.push_back(loop.getInductionVar());
- // Set insertion point to the loop body so that the next loop
- // is inserted inside the current one.
- builder.setInsertionPointToStart(loop.getBody());
- }
-
- // Reverse the indices such that they are ordered as:
- // <dim-0-idx, dim-1-idx, ...>
- std::reverse(indices.begin(), indices.end());
- mlir::Value reductionVal =
- genBody(builder, loc, elementType, array, flagRef, init, indices);
-
- // Unwind the loop nest and insert ResultOp on each level
- // to return the updated value of the reduction to the enclosing
- // loops.
- for (unsigned i = 0; i < rank; ++i) {
- auto result = builder.create<fir::ResultOp>(loc, reductionVal);
- // Proceed to the outer loop.
- auto loop = mlir::cast<fir::DoLoopOp>(result->getParentOp());
- reductionVal = loop.getResult(0);
- // Set insertion point after the loop operation that we have
- // just processed.
- builder.setInsertionPointAfter(loop.getOperation());
- }
- // End of loop nest. The insertion point is after the outermost loop.
- if (maskMayBeLogicalScalar) {
- if (fir::IfOp ifOp =
- mlir::dyn_cast<fir::IfOp>(builder.getBlock()->getParentOp())) {
- builder.create<fir::ResultOp>(loc, reductionVal);
- builder.setInsertionPointAfter(ifOp);
- // Redefine flagSet to escape scope of ifOp
- flagSet = builder.createIntegerConstant(loc, resultElemType, 1);
- reductionVal = ifOp.getResult(0);
- }
- }
-
- // Check for case where array was full of max values.
- // flag will be 0 if mask was never true, 1 if mask was true as some point,
- // this is needed to avoid catching cases where we didn't access any elements
- // e.g. mask=.FALSE.
- mlir::Value flagValue =
- builder.create<fir::LoadOp>(loc, resultElemType, flagRef);
- mlir::Value flagCmp = builder.create<mlir::arith::CmpIOp>(
- loc, mlir::arith::CmpIPredicate::eq, flagValue, flagSet);
- fir::IfOp ifMaskTrueOp =
- builder.create<fir::IfOp>(loc, flagCmp, /*withElseRegion=*/false);
- builder.setInsertionPointToStart(&ifMaskTrueOp.getThenRegion().front());
-
- mlir::Value testInit = initVal(builder, loc, elementType);
- fir::IfOp ifMinSetOp;
- if (elementType.isa<mlir::FloatType>()) {
- mlir::Value cmp = builder.create<mlir::arith::CmpFOp>(
- loc, mlir::arith::CmpFPredicate::OEQ, testInit, reductionVal);
- ifMinSetOp = builder.create<fir::IfOp>(loc, cmp,
- /*withElseRegion*/ false);
- } else {
- mlir::Value cmp = builder.create<mlir::arith::CmpIOp>(
- loc, mlir::arith::CmpIPredicate::eq, testInit, reductionVal);
- ifMinSetOp = builder.create<fir::IfOp>(loc, cmp,
- /*withElseRegion*/ false);
- }
- builder.setInsertionPointToStart(&ifMinSetOp.getThenRegion().front());
-
- // Load output array with 1s instead of 0s
- for (unsigned int i = 0; i < rank; ++i) {
- mlir::Value index = builder.createIntegerConstant(loc, idxTy, i);
- mlir::Value resultElemAddr =
- getAddrFn(builder, loc, resultElemType, resultArr, index);
- builder.create<fir::StoreOp>(loc, flagSet, resultElemAddr);
- }
- builder.setInsertionPointAfter(ifMaskTrueOp);
-}
-
inline fir::CUDADataAttributeAttr
getCUDADataAttribute(mlir::MLIRContext *mlirContext,
std::optional<Fortran::common::CUDADataAttr> cudaAttr) {
diff --git a/flang/include/flang/Optimizer/Transforms/Utils.h b/flang/include/flang/Optimizer/Transforms/Utils.h
new file mode 100644
index 0000000..49a616f
--- /dev/null
+++ b/flang/include/flang/Optimizer/Transforms/Utils.h
@@ -0,0 +1,38 @@
+//===-- Optimizer/Transforms/Utils.h ----------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Coding style: https://mlir.llvm.org/getting_started/DeveloperGuide/
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef FORTRAN_OPTIMIZER_TRANSFORMS_UTILS_H
+#define FORTRAN_OPTIMIZER_TRANSFORMS_UTILS_H
+
+namespace fir {
+
+using MinlocBodyOpGeneratorTy = llvm::function_ref<mlir::Value(
+ fir::FirOpBuilder &, mlir::Location, const mlir::Type &, mlir::Value,
+ mlir::Value, mlir::Value, const llvm::SmallVectorImpl<mlir::Value> &)>;
+using InitValGeneratorTy = llvm::function_ref<mlir::Value(
+ fir::FirOpBuilder &, mlir::Location, const mlir::Type &)>;
+using AddrGeneratorTy = llvm::function_ref<mlir::Value(
+ fir::FirOpBuilder &, mlir::Location, const mlir::Type &, mlir::Value,
+ mlir::Value)>;
+
+// Produces a loop nest for a Minloc intrinsic.
+void genMinMaxlocReductionLoop(fir::FirOpBuilder &builder, mlir::Value array,
+ fir::InitValGeneratorTy initVal,
+ fir::MinlocBodyOpGeneratorTy genBody,
+ fir::AddrGeneratorTy getAddrFn, unsigned rank,
+ mlir::Type elementType, mlir::Location loc,
+ mlir::Type maskElemType, mlir::Value resultArr,
+ bool maskMayBeLogicalScalar);
+
+} // namespace fir
+
+#endif // FORTRAN_OPTIMIZER_TRANSFORMS_UTILS_H
diff --git a/flang/lib/Frontend/CompilerInvocation.cpp b/flang/lib/Frontend/CompilerInvocation.cpp
index ffde7f5..4707de0 100644
--- a/flang/lib/Frontend/CompilerInvocation.cpp
+++ b/flang/lib/Frontend/CompilerInvocation.cpp
@@ -1326,10 +1326,23 @@ void CompilerInvocation::setDefaultPredefinitions() {
Fortran::common::setOpenMPMacro(getLangOpts().OpenMPVersion,
fortranOptions.predefinitions);
}
+
llvm::Triple targetTriple{llvm::Triple(this->targetOpts.triple)};
- if (targetTriple.getArch() == llvm::Triple::ArchType::x86_64) {
+ switch (targetTriple.getArch()) {
+ default:
+ break;
+ case llvm::Triple::ArchType::x86_64:
fortranOptions.predefinitions.emplace_back("__x86_64__", "1");
fortranOptions.predefinitions.emplace_back("__x86_64", "1");
+ break;
+ case llvm::Triple::ArchType::ppc:
+ case llvm::Triple::ArchType::ppcle:
+ case llvm::Triple::ArchType::ppc64:
+ case llvm::Triple::ArchType::ppc64le:
+ // '__powerpc__' is a generic macro for any PowerPC cases. e.g. Max integer
+ // size.
+ fortranOptions.predefinitions.emplace_back("__powerpc__", "1");
+ break;
}
}
diff --git a/flang/lib/Lower/CallInterface.cpp b/flang/lib/Lower/CallInterface.cpp
index 41597c1..6b71aab 100644
--- a/flang/lib/Lower/CallInterface.cpp
+++ b/flang/lib/Lower/CallInterface.cpp
@@ -524,6 +524,57 @@ static void addSymbolAttribute(mlir::func::FuncOp func,
mlir::StringAttr::get(&mlirContext, name));
}
+static void
+setCUDAAttributes(mlir::func::FuncOp func,
+ const Fortran::semantics::Symbol *sym,
+ std::optional<Fortran::evaluate::characteristics::Procedure>
+ characteristic) {
+ if (characteristic && characteristic->cudaSubprogramAttrs) {
+ func.getOperation()->setAttr(
+ fir::getCUDAAttrName(),
+ fir::getCUDAProcAttribute(func.getContext(),
+ *characteristic->cudaSubprogramAttrs));
+ }
+
+ if (sym) {
+ if (auto details =
+ sym->GetUltimate()
+ .detailsIf<Fortran::semantics::SubprogramDetails>()) {
+ mlir::Type i64Ty = mlir::IntegerType::get(func.getContext(), 64);
+ if (!details->cudaLaunchBounds().empty()) {
+ assert(details->cudaLaunchBounds().size() >= 2 &&
+ "expect at least 2 values");
+ auto maxTPBAttr =
+ mlir::IntegerAttr::get(i64Ty, details->cudaLaunchBounds()[0]);
+ auto minBPMAttr =
+ mlir::IntegerAttr::get(i64Ty, details->cudaLaunchBounds()[1]);
+ mlir::IntegerAttr ubAttr;
+ if (details->cudaLaunchBounds().size() > 2)
+ ubAttr =
+ mlir::IntegerAttr::get(i64Ty, details->cudaLaunchBounds()[2]);
+ func.getOperation()->setAttr(
+ fir::getCUDALaunchBoundsAttrName(),
+ fir::CUDALaunchBoundsAttr::get(func.getContext(), maxTPBAttr,
+ minBPMAttr, ubAttr));
+ }
+
+ if (!details->cudaClusterDims().empty()) {
+ assert(details->cudaClusterDims().size() == 3 && "expect 3 values");
+ auto xAttr =
+ mlir::IntegerAttr::get(i64Ty, details->cudaClusterDims()[0]);
+ auto yAttr =
+ mlir::IntegerAttr::get(i64Ty, details->cudaClusterDims()[1]);
+ auto zAttr =
+ mlir::IntegerAttr::get(i64Ty, details->cudaClusterDims()[2]);
+ func.getOperation()->setAttr(
+ fir::getCUDAClusterDimsAttrName(),
+ fir::CUDAClusterDimsAttr::get(func.getContext(), xAttr, yAttr,
+ zAttr));
+ }
+ }
+ }
+}
+
/// Declare drives the different actions to be performed while analyzing the
/// signature and building/finding the mlir::func::FuncOp.
template <typename T>
@@ -559,12 +610,8 @@ void Fortran::lower::CallInterface<T>::declare() {
if (!placeHolder.value().attributes.empty())
func.setArgAttrs(placeHolder.index(), placeHolder.value().attributes);
side().setFuncAttrs(func);
- }
- if (characteristic && characteristic->cudaSubprogramAttrs) {
- func.getOperation()->setAttr(
- fir::getCUDAAttrName(),
- fir::getCUDAProcAttribute(func.getContext(),
- *characteristic->cudaSubprogramAttrs));
+
+ setCUDAAttributes(func, side().getProcedureSymbol(), characteristic);
}
}
}
diff --git a/flang/lib/Lower/OpenMP.cpp b/flang/lib/Lower/OpenMP.cpp
index e588762..24f9176 100644
--- a/flang/lib/Lower/OpenMP.cpp
+++ b/flang/lib/Lower/OpenMP.cpp
@@ -2825,7 +2825,8 @@ genEnterExitUpdateDataOp(Fortran::lower::AbstractConverter &converter,
directive);
return firOpBuilder.create<OpTy>(currentLocation, ifClauseOperand,
- deviceOperand, nowaitAttr, mapOperands);
+ deviceOperand, nullptr, mlir::ValueRange(),
+ nowaitAttr, mapOperands);
}
// This functions creates a block for the body of the targetOp's region. It adds
@@ -3090,7 +3091,7 @@ genTargetOp(Fortran::lower::AbstractConverter &converter,
auto targetOp = converter.getFirOpBuilder().create<mlir::omp::TargetOp>(
currentLocation, ifClauseOperand, deviceOperand, threadLimitOperand,
- nowaitAttr, mapOperands);
+ nullptr, mlir::ValueRange(), nowaitAttr, mapOperands);
genBodyOfTargetOp(converter, semaCtx, eval, genNested, targetOp, mapSymTypes,
mapSymLocs, mapSymbols, currentLocation);
@@ -3351,6 +3352,58 @@ genLoopVars(mlir::Operation *op, Fortran::lower::AbstractConverter &converter,
return args;
}
+static llvm::SmallVector<const Fortran::semantics::Symbol *>
+genLoopAndReductionVars(
+ mlir::Operation *op, Fortran::lower::AbstractConverter &converter,
+ mlir::Location &loc,
+ const llvm::SmallVector<const Fortran::semantics::Symbol *> &loopArgs,
+ const llvm::SmallVector<const Fortran::semantics::Symbol *> &reductionArgs,
+ llvm::SmallVector<mlir::Type> &reductionTypes) {
+ fir::FirOpBuilder &firOpBuilder = converter.getFirOpBuilder();
+
+ llvm::SmallVector<mlir::Type> blockArgTypes;
+ llvm::SmallVector<mlir::Location> blockArgLocs;
+ blockArgTypes.reserve(loopArgs.size() + reductionArgs.size());
+ blockArgLocs.reserve(blockArgTypes.size());
+ mlir::Block *entryBlock;
+
+ if (loopArgs.size()) {
+ std::size_t loopVarTypeSize = 0;
+ for (const Fortran::semantics::Symbol *arg : loopArgs)
+ loopVarTypeSize = std::max(loopVarTypeSize, arg->GetUltimate().size());
+ mlir::Type loopVarType = getLoopVarType(converter, loopVarTypeSize);
+ std::fill_n(std::back_inserter(blockArgTypes), loopArgs.size(),
+ loopVarType);
+ std::fill_n(std::back_inserter(blockArgLocs), loopArgs.size(), loc);
+ }
+ if (reductionArgs.size()) {
+ llvm::copy(reductionTypes, std::back_inserter(blockArgTypes));
+ std::fill_n(std::back_inserter(blockArgLocs), reductionArgs.size(), loc);
+ }
+ entryBlock = firOpBuilder.createBlock(&op->getRegion(0), {}, blockArgTypes,
+ blockArgLocs);
+ // The argument is not currently in memory, so make a temporary for the
+ // argument, and store it there, then bind that location to the argument.
+ if (loopArgs.size()) {
+ mlir::Operation *storeOp = nullptr;
+ for (auto [argIndex, argSymbol] : llvm::enumerate(loopArgs)) {
+ mlir::Value indexVal =
+ fir::getBase(op->getRegion(0).front().getArgument(argIndex));
+ storeOp =
+ createAndSetPrivatizedLoopVar(converter, loc, indexVal, argSymbol);
+ }
+ firOpBuilder.setInsertionPointAfter(storeOp);
+ }
+ // Bind the reduction arguments to their block arguments
+ for (auto [arg, prv] : llvm::zip_equal(
+ reductionArgs,
+ llvm::drop_begin(entryBlock->getArguments(), loopArgs.size()))) {
+ converter.bindSymbol(*arg, prv);
+ }
+
+ return loopArgs;
+}
+
static void
createSimdLoop(Fortran::lower::AbstractConverter &converter,
Fortran::semantics::SemanticsContext &semaCtx,
@@ -3428,6 +3481,7 @@ static void createWsLoop(Fortran::lower::AbstractConverter &converter,
llvm::SmallVector<mlir::Value> linearVars, linearStepVars;
llvm::SmallVector<const Fortran::semantics::Symbol *> iv;
llvm::SmallVector<mlir::Attribute> reductionDeclSymbols;
+ llvm::SmallVector<const Fortran::semantics::Symbol *> reductionSymbols;
mlir::omp::ClauseOrderKindAttr orderClauseOperand;
mlir::omp::ClauseScheduleKindAttr scheduleValClauseOperand;
mlir::UnitAttr nowaitClauseOperand, scheduleSimdClauseOperand;
@@ -3439,7 +3493,8 @@ static void createWsLoop(Fortran::lower::AbstractConverter &converter,
cp.processCollapse(loc, eval, lowerBound, upperBound, step, iv,
loopVarTypeSize);
cp.processScheduleChunk(stmtCtx, scheduleChunkClauseOperand);
- cp.processReduction(loc, reductionVars, reductionDeclSymbols);
+ cp.processReduction(loc, reductionVars, reductionDeclSymbols,
+ &reductionSymbols);
cp.processTODO<Fortran::parser::OmpClause::Linear,
Fortran::parser::OmpClause::Order>(loc, ompDirective);
@@ -3483,14 +3538,21 @@ static void createWsLoop(Fortran::lower::AbstractConverter &converter,
auto *nestedEval = getCollapsedLoopEval(
eval, Fortran::lower::getCollapseValue(beginClauseList));
+ llvm::SmallVector<mlir::Type> reductionTypes;
+ reductionTypes.reserve(reductionVars.size());
+ llvm::transform(reductionVars, std::back_inserter(reductionTypes),
+ [](mlir::Value v) { return v.getType(); });
+
auto ivCallback = [&](mlir::Operation *op) {
- return genLoopVars(op, converter, loc, iv);
+ return genLoopAndReductionVars(op, converter, loc, iv, reductionSymbols,
+ reductionTypes);
};
createBodyOfOp<mlir::omp::WsLoopOp>(
wsLoopOp, OpWithBodyGenInfo(converter, semaCtx, loc, *nestedEval)
.setClauses(&beginClauseList)
.setDataSharingProcessor(&dsp)
+ .setReductions(&reductionSymbols, &reductionTypes)
.setGenRegionEntryCb(ivCallback));
}
@@ -3593,12 +3655,11 @@ static void genOMP(Fortran::lower::AbstractConverter &converter,
// 2.9.3.1 SIMD construct
createSimdLoop(converter, semaCtx, eval, ompDirective, loopOpClauseList,
currentLocation);
+ genOpenMPReduction(converter, semaCtx, loopOpClauseList);
} else {
createWsLoop(converter, semaCtx, eval, ompDirective, loopOpClauseList,
endClauseList, currentLocation);
}
-
- genOpenMPReduction(converter, semaCtx, loopOpClauseList);
}
static void
diff --git a/flang/lib/Optimizer/Dialect/CMakeLists.txt b/flang/lib/Optimizer/Dialect/CMakeLists.txt
index 58a4276..745439b 100644
--- a/flang/lib/Optimizer/Dialect/CMakeLists.txt
+++ b/flang/lib/Optimizer/Dialect/CMakeLists.txt
@@ -13,7 +13,6 @@ add_flang_library(FIRDialect
CanonicalizationPatternsIncGen
MLIRIR
FIROpsIncGen
- HLFIROpsIncGen
intrinsics_gen
LINK_LIBS
diff --git a/flang/lib/Optimizer/Dialect/FIRAttr.cpp b/flang/lib/Optimizer/Dialect/FIRAttr.cpp
index 8df7a6c..0cf8dfb 100644
--- a/flang/lib/Optimizer/Dialect/FIRAttr.cpp
+++ b/flang/lib/Optimizer/Dialect/FIRAttr.cpp
@@ -298,5 +298,6 @@ void fir::printFirAttribute(FIROpsDialect *dialect, mlir::Attribute attr,
void FIROpsDialect::registerAttributes() {
addAttributes<ClosedIntervalAttr, ExactTypeAttr, FortranVariableFlagsAttr,
LowerBoundAttr, PointIntervalAttr, RealAttr, SubclassAttr,
- UpperBoundAttr, CUDADataAttributeAttr, CUDAProcAttributeAttr>();
+ UpperBoundAttr, CUDADataAttributeAttr, CUDAProcAttributeAttr,
+ CUDALaunchBoundsAttr, CUDAClusterDimsAttr>();
}
diff --git a/flang/lib/Optimizer/Dialect/FIRDialect.cpp b/flang/lib/Optimizer/Dialect/FIRDialect.cpp
index f4589fd..850b612 100644
--- a/flang/lib/Optimizer/Dialect/FIRDialect.cpp
+++ b/flang/lib/Optimizer/Dialect/FIRDialect.cpp
@@ -15,6 +15,7 @@
#include "flang/Optimizer/Dialect/FIROps.h"
#include "flang/Optimizer/Dialect/FIRType.h"
#include "mlir/Dialect/LLVMIR/LLVMDialect.h"
+#include "mlir/Target/LLVMIR/ModuleTranslation.h"
#include "mlir/Transforms/InliningUtils.h"
using namespace fir;
@@ -77,6 +78,22 @@ void fir::addFIRInlinerExtension(mlir::DialectRegistry &registry) {
});
}
+// We do not provide LLVMTranslationDialectInterface implementation
+// for FIR dialect, since at the point of translation to LLVM IR
+// there should not be any FIR operations (the CodeGen converts
+// them to LLVMIR dialect operations).
+// Here we register the default implementation of
+// LLVMTranslationDialectInterface that will drop all FIR dialect
+// attributes - this helps to avoid warnings about unhandled attributes.
+// We can provide our own implementation of the interface,
+// when more sophisticated translation is required.
+void fir::addFIRToLLVMIRExtension(mlir::DialectRegistry &registry) {
+ registry.addExtension(
+ +[](mlir::MLIRContext *ctx, fir::FIROpsDialect *dialect) {
+ dialect->addInterface<mlir::LLVMTranslationDialectInterface>();
+ });
+}
+
// anchor the class vtable to this compilation unit
fir::FIROpsDialect::~FIROpsDialect() {
// do nothing
diff --git a/flang/lib/Optimizer/HLFIR/Transforms/BufferizeHLFIR.cpp b/flang/lib/Optimizer/HLFIR/Transforms/BufferizeHLFIR.cpp
index bd8d3d9..1c4f82e 100644
--- a/flang/lib/Optimizer/HLFIR/Transforms/BufferizeHLFIR.cpp
+++ b/flang/lib/Optimizer/HLFIR/Transforms/BufferizeHLFIR.cpp
@@ -739,12 +739,12 @@ struct HLFIRListener : public mlir::OpBuilder::Listener {
void notifyOperationInserted(mlir::Operation *op,
mlir::OpBuilder::InsertPoint previous) override {
builder.notifyOperationInserted(op, previous);
- rewriter.notifyOperationInserted(op, previous);
+ rewriter.getListener()->notifyOperationInserted(op, previous);
}
virtual void notifyBlockInserted(mlir::Block *block, mlir::Region *previous,
mlir::Region::iterator previousIt) override {
builder.notifyBlockInserted(block, previous, previousIt);
- rewriter.notifyBlockInserted(block, previous, previousIt);
+ rewriter.getListener()->notifyBlockInserted(block, previous, previousIt);
}
fir::FirOpBuilder &builder;
mlir::ConversionPatternRewriter &rewriter;
diff --git a/flang/lib/Optimizer/HLFIR/Transforms/CMakeLists.txt b/flang/lib/Optimizer/HLFIR/Transforms/CMakeLists.txt
index 603b328..ad569ce 100644
--- a/flang/lib/Optimizer/HLFIR/Transforms/CMakeLists.txt
+++ b/flang/lib/Optimizer/HLFIR/Transforms/CMakeLists.txt
@@ -21,6 +21,7 @@ add_flang_library(HLFIRTransforms
FIRBuilder
FIRDialectSupport
FIRSupport
+ FIRTransforms
HLFIRDialect
MLIRIR
${dialect_libs}
diff --git a/flang/lib/Optimizer/HLFIR/Transforms/OptimizedBufferization.cpp b/flang/lib/Optimizer/HLFIR/Transforms/OptimizedBufferization.cpp
index 523671f..c2512c7 100644
--- a/flang/lib/Optimizer/HLFIR/Transforms/OptimizedBufferization.cpp
+++ b/flang/lib/Optimizer/HLFIR/Transforms/OptimizedBufferization.cpp
@@ -20,7 +20,7 @@
#include "flang/Optimizer/HLFIR/HLFIRDialect.h"
#include "flang/Optimizer/HLFIR/HLFIROps.h"
#include "flang/Optimizer/HLFIR/Passes.h"
-#include "flang/Optimizer/Support/Utils.h"
+#include "flang/Optimizer/Transforms/Utils.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/Dominance.h"
#include "mlir/IR/PatternMatch.h"
diff --git a/flang/lib/Optimizer/Transforms/SimplifyIntrinsics.cpp b/flang/lib/Optimizer/Transforms/SimplifyIntrinsics.cpp
index b415463..86343e2 100644
--- a/flang/lib/Optimizer/Transforms/SimplifyIntrinsics.cpp
+++ b/flang/lib/Optimizer/Transforms/SimplifyIntrinsics.cpp
@@ -31,8 +31,8 @@
#include "flang/Optimizer/Dialect/FIRType.h"
#include "flang/Optimizer/Dialect/Support/FIRContext.h"
#include "flang/Optimizer/HLFIR/HLFIRDialect.h"
-#include "flang/Optimizer/Support/Utils.h"
#include "flang/Optimizer/Transforms/Passes.h"
+#include "flang/Optimizer/Transforms/Utils.h"
#include "flang/Runtime/entry-names.h"
#include "mlir/Dialect/LLVMIR/LLVMDialect.h"
#include "mlir/IR/Matchers.h"
@@ -558,6 +558,135 @@ static mlir::FunctionType genRuntimeMinlocType(fir::FirOpBuilder &builder,
{boxRefType, boxType, boxType}, {});
}
+// Produces a loop nest for a Minloc intrinsic.
+void fir::genMinMaxlocReductionLoop(
+ fir::FirOpBuilder &builder, mlir::Value array,
+ fir::InitValGeneratorTy initVal, fir::MinlocBodyOpGeneratorTy genBody,
+ fir::AddrGeneratorTy getAddrFn, unsigned rank, mlir::Type elementType,
+ mlir::Location loc, mlir::Type maskElemType, mlir::Value resultArr,
+ bool maskMayBeLogicalScalar) {
+ mlir::IndexType idxTy = builder.getIndexType();
+
+ mlir::Value zeroIdx = builder.createIntegerConstant(loc, idxTy, 0);
+
+ fir::SequenceType::Shape flatShape(rank,
+ fir::SequenceType::getUnknownExtent());
+ mlir::Type arrTy = fir::SequenceType::get(flatShape, elementType);
+ mlir::Type boxArrTy = fir::BoxType::get(arrTy);
+ array = builder.create<fir::ConvertOp>(loc, boxArrTy, array);
+
+ mlir::Type resultElemType = hlfir::getFortranElementType(resultArr.getType());
+ mlir::Value flagSet = builder.createIntegerConstant(loc, resultElemType, 1);
+ mlir::Value zero = builder.createIntegerConstant(loc, resultElemType, 0);
+ mlir::Value flagRef = builder.createTemporary(loc, resultElemType);
+ builder.create<fir::StoreOp>(loc, zero, flagRef);
+
+ mlir::Value init = initVal(builder, loc, elementType);
+ llvm::SmallVector<mlir::Value, Fortran::common::maxRank> bounds;
+
+ assert(rank > 0 && "rank cannot be zero");
+ mlir::Value one = builder.createIntegerConstant(loc, idxTy, 1);
+
+ // Compute all the upper bounds before the loop nest.
+ // It is not strictly necessary for performance, since the loop nest
+ // does not have any store operations and any LICM optimization
+ // should be able to optimize the redundancy.
+ for (unsigned i = 0; i < rank; ++i) {
+ mlir::Value dimIdx = builder.createIntegerConstant(loc, idxTy, i);
+ auto dims =
+ builder.create<fir::BoxDimsOp>(loc, idxTy, idxTy, idxTy, array, dimIdx);
+ mlir::Value len = dims.getResult(1);
+ // We use C indexing here, so len-1 as loopcount
+ mlir::Value loopCount = builder.create<mlir::arith::SubIOp>(loc, len, one);
+ bounds.push_back(loopCount);
+ }
+ // Create a loop nest consisting of OP operations.
+ // Collect the loops' induction variables into indices array,
+ // which will be used in the innermost loop to load the input
+ // array's element.
+ // The loops are generated such that the innermost loop processes
+ // the 0 dimension.
+ llvm::SmallVector<mlir::Value, Fortran::common::maxRank> indices;
+ for (unsigned i = rank; 0 < i; --i) {
+ mlir::Value step = one;
+ mlir::Value loopCount = bounds[i - 1];
+ auto loop =
+ builder.create<fir::DoLoopOp>(loc, zeroIdx, loopCount, step, false,
+ /*finalCountValue=*/false, init);
+ init = loop.getRegionIterArgs()[0];
+ indices.push_back(loop.getInductionVar());
+ // Set insertion point to the loop body so that the next loop
+ // is inserted inside the current one.
+ builder.setInsertionPointToStart(loop.getBody());
+ }
+
+ // Reverse the indices such that they are ordered as:
+ // <dim-0-idx, dim-1-idx, ...>
+ std::reverse(indices.begin(), indices.end());
+ mlir::Value reductionVal =
+ genBody(builder, loc, elementType, array, flagRef, init, indices);
+
+ // Unwind the loop nest and insert ResultOp on each level
+ // to return the updated value of the reduction to the enclosing
+ // loops.
+ for (unsigned i = 0; i < rank; ++i) {
+ auto result = builder.create<fir::ResultOp>(loc, reductionVal);
+ // Proceed to the outer loop.
+ auto loop = mlir::cast<fir::DoLoopOp>(result->getParentOp());
+ reductionVal = loop.getResult(0);
+ // Set insertion point after the loop operation that we have
+ // just processed.
+ builder.setInsertionPointAfter(loop.getOperation());
+ }
+ // End of loop nest. The insertion point is after the outermost loop.
+ if (maskMayBeLogicalScalar) {
+ if (fir::IfOp ifOp =
+ mlir::dyn_cast<fir::IfOp>(builder.getBlock()->getParentOp())) {
+ builder.create<fir::ResultOp>(loc, reductionVal);
+ builder.setInsertionPointAfter(ifOp);
+ // Redefine flagSet to escape scope of ifOp
+ flagSet = builder.createIntegerConstant(loc, resultElemType, 1);
+ reductionVal = ifOp.getResult(0);
+ }
+ }
+
+ // Check for case where array was full of max values.
+ // flag will be 0 if mask was never true, 1 if mask was true as some point,
+ // this is needed to avoid catching cases where we didn't access any elements
+ // e.g. mask=.FALSE.
+ mlir::Value flagValue =
+ builder.create<fir::LoadOp>(loc, resultElemType, flagRef);
+ mlir::Value flagCmp = builder.create<mlir::arith::CmpIOp>(
+ loc, mlir::arith::CmpIPredicate::eq, flagValue, flagSet);
+ fir::IfOp ifMaskTrueOp =
+ builder.create<fir::IfOp>(loc, flagCmp, /*withElseRegion=*/false);
+ builder.setInsertionPointToStart(&ifMaskTrueOp.getThenRegion().front());
+
+ mlir::Value testInit = initVal(builder, loc, elementType);
+ fir::IfOp ifMinSetOp;
+ if (elementType.isa<mlir::FloatType>()) {
+ mlir::Value cmp = builder.create<mlir::arith::CmpFOp>(
+ loc, mlir::arith::CmpFPredicate::OEQ, testInit, reductionVal);
+ ifMinSetOp = builder.create<fir::IfOp>(loc, cmp,
+ /*withElseRegion*/ false);
+ } else {
+ mlir::Value cmp = builder.create<mlir::arith::CmpIOp>(
+ loc, mlir::arith::CmpIPredicate::eq, testInit, reductionVal);
+ ifMinSetOp = builder.create<fir::IfOp>(loc, cmp,
+ /*withElseRegion*/ false);
+ }
+ builder.setInsertionPointToStart(&ifMinSetOp.getThenRegion().front());
+
+ // Load output array with 1s instead of 0s
+ for (unsigned int i = 0; i < rank; ++i) {
+ mlir::Value index = builder.createIntegerConstant(loc, idxTy, i);
+ mlir::Value resultElemAddr =
+ getAddrFn(builder, loc, resultElemType, resultArr, index);
+ builder.create<fir::StoreOp>(loc, flagSet, resultElemAddr);
+ }
+ builder.setInsertionPointAfter(ifMaskTrueOp);
+}
+
static void genRuntimeMinMaxlocBody(fir::FirOpBuilder &builder,
mlir::func::FuncOp &funcOp, bool isMax,
unsigned rank, int maskRank,
diff --git a/flang/lib/Semantics/canonicalize-omp.cpp b/flang/lib/Semantics/canonicalize-omp.cpp
index 01adcf5..0481b3d 100644
--- a/flang/lib/Semantics/canonicalize-omp.cpp
+++ b/flang/lib/Semantics/canonicalize-omp.cpp
@@ -92,7 +92,7 @@ private:
nextIt = it;
while (++nextIt != block.end()) {
// Ignore compiler directives.
- if (auto *directive{GetConstructIf<parser::CompilerDirective>(*nextIt)})
+ if (GetConstructIf<parser::CompilerDirective>(*nextIt))
continue;
if (auto *doCons{GetConstructIf<parser::DoConstruct>(*nextIt)}) {
diff --git a/flang/module/iso_c_binding.f90 b/flang/module/iso_c_binding.f90
index 9a7e68f..1661fd5 100644
--- a/flang/module/iso_c_binding.f90
+++ b/flang/module/iso_c_binding.f90
@@ -47,7 +47,11 @@ module iso_c_binding
c_long_long = c_int64_t, &
c_signed_char = c_int8_t, &
c_size_t = kind(c_sizeof(1)), &
+#if __powerpc__
+ c_intmax_t = c_int64_t, &
+#else
c_intmax_t = c_int128_t, &
+#endif
c_intptr_t = c_size_t, &
c_ptrdiff_t = c_size_t
integer, parameter, public :: &
diff --git a/flang/runtime/assign.cpp b/flang/runtime/assign.cpp
index 879b413..25d2ba4 100644
--- a/flang/runtime/assign.cpp
+++ b/flang/runtime/assign.cpp
@@ -320,6 +320,8 @@ RT_API_ATTRS static void Assign(
if ((flags & NeedFinalization) && toDerived) {
Finalize(to, *toDerived, &terminator);
flags &= ~NeedFinalization;
+ } else if (toDerived && !toDerived->noDestructionNeeded()) {
+ Destroy(to, /*finalize=*/false, *toDerived, &terminator);
}
} else {
to.Destroy((flags & NeedFinalization) != 0, /*destroyPointers=*/false,
@@ -389,6 +391,8 @@ RT_API_ATTRS static void Assign(
// The target is first finalized if still necessary (7.5.6.3(1))
if (flags & NeedFinalization) {
Finalize(to, *updatedToDerived, &terminator);
+ } else if (updatedToDerived && !updatedToDerived->noDestructionNeeded()) {
+ Destroy(to, /*finalize=*/false, *updatedToDerived, &terminator);
}
// Copy the data components (incl. the parent) first.
const Descriptor &componentDesc{updatedToDerived->component()};
diff --git a/flang/runtime/derived.cpp b/flang/runtime/derived.cpp
index 8a0d0ab..67eb901 100644
--- a/flang/runtime/derived.cpp
+++ b/flang/runtime/derived.cpp
@@ -17,6 +17,19 @@ namespace Fortran::runtime {
RT_OFFLOAD_API_GROUP_BEGIN
+// Fill "extents" array with the extents of component "comp" from derived type
+// instance "derivedInstance".
+static RT_API_ATTRS void GetComponentExtents(SubscriptValue (&extents)[maxRank],
+ const typeInfo::Component &comp, const Descriptor &derivedInstance) {
+ const typeInfo::Value *bounds{comp.bounds()};
+ for (int dim{0}; dim < comp.rank(); ++dim) {
+ SubscriptValue lb{bounds[2 * dim].GetValue(&derivedInstance).value_or(0)};
+ SubscriptValue ub{
+ bounds[2 * dim + 1].GetValue(&derivedInstance).value_or(0)};
+ extents[dim] = ub >= lb ? ub - lb + 1 : 0;
+ }
+}
+
RT_API_ATTRS int Initialize(const Descriptor &instance,
const typeInfo::DerivedType &derived, Terminator &terminator, bool hasStat,
const Descriptor *errMsg) {
@@ -77,22 +90,15 @@ RT_API_ATTRS int Initialize(const Descriptor &instance,
comp.derivedType() && !comp.derivedType()->noInitializationNeeded()) {
// Default initialization of non-pointer non-allocatable/automatic
// data component. Handles parent component's elements. Recursive.
- SubscriptValue extent[maxRank];
- const typeInfo::Value *bounds{comp.bounds()};
- for (int dim{0}; dim < comp.rank(); ++dim) {
- typeInfo::TypeParameterValue lb{
- bounds[2 * dim].GetValue(&instance).value_or(0)};
- typeInfo::TypeParameterValue ub{
- bounds[2 * dim + 1].GetValue(&instance).value_or(0)};
- extent[dim] = ub >= lb ? ub - lb + 1 : 0;
- }
+ SubscriptValue extents[maxRank];
+ GetComponentExtents(extents, comp, instance);
StaticDescriptor<maxRank, true, 0> staticDescriptor;
Descriptor &compDesc{staticDescriptor.descriptor()};
const typeInfo::DerivedType &compType{*comp.derivedType()};
for (std::size_t j{0}; j++ < elements; instance.IncrementSubscripts(at)) {
compDesc.Establish(compType,
instance.ElementComponent<char>(at, comp.offset()), comp.rank(),
- extent);
+ extents);
stat = Initialize(compDesc, compType, terminator, hasStat, errMsg);
if (stat != StatOk) {
break;
@@ -253,14 +259,8 @@ RT_API_ATTRS void Finalize(const Descriptor &descriptor,
}
} else if (comp.genre() == typeInfo::Component::Genre::Data &&
comp.derivedType() && !comp.derivedType()->noFinalizationNeeded()) {
- SubscriptValue extent[maxRank];
- const typeInfo::Value *bounds{comp.bounds()};
- for (int dim{0}; dim < comp.rank(); ++dim) {
- SubscriptValue lb{bounds[2 * dim].GetValue(&descriptor).value_or(0)};
- SubscriptValue ub{
- bounds[2 * dim + 1].GetValue(&descriptor).value_or(0)};
- extent[dim] = ub >= lb ? ub - lb + 1 : 0;
- }
+ SubscriptValue extents[maxRank];
+ GetComponentExtents(extents, comp, descriptor);
StaticDescriptor<maxRank, true, 0> staticDescriptor;
Descriptor &compDesc{staticDescriptor.descriptor()};
const typeInfo::DerivedType &compType{*comp.derivedType()};
@@ -268,7 +268,7 @@ RT_API_ATTRS void Finalize(const Descriptor &descriptor,
descriptor.IncrementSubscripts(at)) {
compDesc.Establish(compType,
descriptor.ElementComponent<char>(at, comp.offset()), comp.rank(),
- extent);
+ extents);
Finalize(compDesc, compType, terminator);
}
}
@@ -296,6 +296,8 @@ RT_API_ATTRS void Destroy(const Descriptor &descriptor, bool finalize,
if (finalize && !derived.noFinalizationNeeded()) {
Finalize(descriptor, derived, terminator);
}
+ // Deallocate all direct and indirect allocatable and automatic components.
+ // Contrary to finalization, the order of deallocation does not matter.
const Descriptor &componentDesc{derived.component()};
std::size_t myComponents{componentDesc.Elements()};
std::size_t elements{descriptor.Elements()};
@@ -304,14 +306,33 @@ RT_API_ATTRS void Destroy(const Descriptor &descriptor, bool finalize,
for (std::size_t k{0}; k < myComponents; ++k) {
const auto &comp{
*componentDesc.ZeroBasedIndexedElement<typeInfo::Component>(k)};
+ const bool destroyComp{
+ comp.derivedType() && !comp.derivedType()->noDestructionNeeded()};
if (comp.genre() == typeInfo::Component::Genre::Allocatable ||
comp.genre() == typeInfo::Component::Genre::Automatic) {
for (std::size_t j{0}; j < elements; ++j) {
Descriptor *d{
descriptor.ElementComponent<Descriptor>(at, comp.offset())};
+ if (destroyComp) {
+ Destroy(*d, /*finalize=*/false, *comp.derivedType(), terminator);
+ }
d->Deallocate();
descriptor.IncrementSubscripts(at);
}
+ } else if (destroyComp &&
+ comp.genre() == typeInfo::Component::Genre::Data) {
+ SubscriptValue extents[maxRank];
+ GetComponentExtents(extents, comp, descriptor);
+ StaticDescriptor<maxRank, true, 0> staticDescriptor;
+ Descriptor &compDesc{staticDescriptor.descriptor()};
+ const typeInfo::DerivedType &compType{*comp.derivedType()};
+ for (std::size_t j{0}; j++ < elements;
+ descriptor.IncrementSubscripts(at)) {
+ compDesc.Establish(compType,
+ descriptor.ElementComponent<char>(at, comp.offset()), comp.rank(),
+ extents);
+ Destroy(compDesc, /*finalize=*/false, *comp.derivedType(), terminator);
+ }
}
}
}
diff --git a/flang/test/Driver/masm.f90 b/flang/test/Driver/masm.f90
new file mode 100644
index 0000000..c5c44ef
--- /dev/null
+++ b/flang/test/Driver/masm.f90
@@ -0,0 +1,10 @@
+! RUN: %flang --target=x86_64-unknown-linux -masm=intel -S %s -### 2>&1 | FileCheck --check-prefix=CHECK-INTEL %s
+! RUN: %flang --target=x86_64-unknown-linux -masm=att -S %s -### 2>&1 | FileCheck --check-prefix=CHECK-ATT %s
+! RUN: not %flang --target=x86_64-unknown-linux -S -masm=somerequired %s -### 2>&1 | FileCheck --check-prefix=CHECK-SOMEREQUIRED %s
+! RUN: %flang --target=aarch64-unknown-eabi -S -masm=intel %s -### 2>&1 | FileCheck --check-prefix=CHECK-AARCH64 %s
+
+! CHECK-INTEL: "-mllvm" "-x86-asm-syntax=intel"
+! CHECK-ATT: "-mllvm" "-x86-asm-syntax=att"
+! CHECK-SOMEREQUIRED: error: unsupported argument 'somerequired' to option '-masm='
+! CHECK-AARCH64: warning: argument unused during compilation: '-masm=intel'
+! CHECK-AARCH64-NOT: -x86-asm-syntax=intel
diff --git a/flang/test/Driver/predefined-macros-powerpc.f90 b/flang/test/Driver/predefined-macros-powerpc.f90
new file mode 100644
index 0000000..b3d2b61
--- /dev/null
+++ b/flang/test/Driver/predefined-macros-powerpc.f90
@@ -0,0 +1,11 @@
+! Test predefined macro for PowerPC architecture
+
+! RUN: %flang_fc1 -cpp -E %s | FileCheck %s
+! REQUIRES: target=powerpc{{.*}}
+
+! CHECK: integer :: var1 = 1
+
+#if __powerpc__
+ integer :: var1 = __powerpc__
+#endif
+end program
diff --git a/flang/test/Fir/convert-to-llvm-openmp-and-fir.fir b/flang/test/Fir/convert-to-llvm-openmp-and-fir.fir
index beb399e..a1fc614 100644
--- a/flang/test/Fir/convert-to-llvm-openmp-and-fir.fir
+++ b/flang/test/Fir/convert-to-llvm-openmp-and-fir.fir
@@ -701,10 +701,17 @@ func.func @_QPsb() {
// CHECK-SAME: %[[ARRAY_REF:.*]]: !llvm.ptr
// CHECK: %[[RED_ACCUMULATOR:.*]] = llvm.alloca %2 x i32 {bindc_name = "x"} : (i64) -> !llvm.ptr
// CHECK: omp.parallel {
-// CHECK: omp.wsloop reduction(@[[EQV_REDUCTION]] -> %[[RED_ACCUMULATOR]] : !llvm.ptr) for
+// CHECK: omp.wsloop reduction(@[[EQV_REDUCTION]] %[[RED_ACCUMULATOR]] -> %[[PRV:.+]] : !llvm.ptr) for
// CHECK: %[[ARRAY_ELEM_REF:.*]] = llvm.getelementptr %[[ARRAY_REF]][0, %{{.*}}] : (!llvm.ptr, i64) -> !llvm.ptr
// CHECK: %[[ARRAY_ELEM:.*]] = llvm.load %[[ARRAY_ELEM_REF]] : !llvm.ptr -> i32
-// CHECK: omp.reduction %[[ARRAY_ELEM]], %[[RED_ACCUMULATOR]] : i32, !llvm.ptr
+// CHECK: %[[LPRV:.+]] = llvm.load %[[PRV]] : !llvm.ptr -> i32
+// CHECK: %[[ZERO_1:.*]] = llvm.mlir.constant(0 : i64) : i32
+// CHECK: %[[ARGVAL_1:.*]] = llvm.icmp "ne" %[[LPRV]], %[[ZERO_1]] : i32
+// CHECK: %[[ZERO_2:.*]] = llvm.mlir.constant(0 : i64) : i32
+// CHECK: %[[ARGVAL_2:.*]] = llvm.icmp "ne" %[[ARRAY_ELEM]], %[[ZERO_2]] : i32
+// CHECK: %[[RES:.*]] = llvm.icmp "eq" %[[ARGVAL_2]], %[[ARGVAL_1]] : i1
+// CHECK: %[[RES_EXT:.*]] = llvm.zext %[[RES]] : i1 to i32
+// CHECK: llvm.store %[[RES_EXT]], %[[PRV]] : i32, !llvm.ptr
// CHECK: omp.yield
// CHECK: omp.terminator
// CHECK: llvm.return
@@ -733,7 +740,7 @@ func.func @_QPsimple_reduction(%arg0: !fir.ref<!fir.array<100x!fir.logical<4>>>
%c1_i32 = arith.constant 1 : i32
%c100_i32 = arith.constant 100 : i32
%c1_i32_0 = arith.constant 1 : i32
- omp.wsloop reduction(@eqv_reduction -> %1 : !fir.ref<!fir.logical<4>>) for (%arg1) : i32 = (%c1_i32) to (%c100_i32) inclusive step (%c1_i32_0) {
+ omp.wsloop reduction(@eqv_reduction %1 -> %prv : !fir.ref<!fir.logical<4>>) for (%arg1) : i32 = (%c1_i32) to (%c100_i32) inclusive step (%c1_i32_0) {
fir.store %arg1 to %3 : !fir.ref<i32>
%4 = fir.load %3 : !fir.ref<i32>
%5 = fir.convert %4 : (i32) -> i64
@@ -741,7 +748,12 @@ func.func @_QPsimple_reduction(%arg0: !fir.ref<!fir.array<100x!fir.logical<4>>>
%6 = arith.subi %5, %c1_i64 : i64
%7 = fir.coordinate_of %arg0, %6 : (!fir.ref<!fir.array<100x!fir.logical<4>>>, i64) -> !fir.ref<!fir.logical<4>>
%8 = fir.load %7 : !fir.ref<!fir.logical<4>>
- omp.reduction %8, %1 : !fir.logical<4>, !fir.ref<!fir.logical<4>>
+ %lprv = fir.load %prv : !fir.ref<!fir.logical<4>>
+ %lprv1 = fir.convert %lprv : (!fir.logical<4>) -> i1
+ %9 = fir.convert %8 : (!fir.logical<4>) -> i1
+ %10 = arith.cmpi eq, %9, %lprv1 : i1
+ %11 = fir.convert %10 : (i1) -> !fir.logical<4>
+ fir.store %11 to %prv : !fir.ref<!fir.logical<4>>
omp.yield
}
omp.terminator
diff --git a/flang/test/Lower/CUDA/cuda-proc-attribute.cuf b/flang/test/Lower/CUDA/cuda-proc-attribute.cuf
index 0507310..d9765f6 100644
--- a/flang/test/Lower/CUDA/cuda-proc-attribute.cuf
+++ b/flang/test/Lower/CUDA/cuda-proc-attribute.cuf
@@ -32,3 +32,12 @@ attributes(host) attributes(device) integer function fct_host_device; end
attributes(device) attributes(host) integer function fct_device_host; end
! CHECK: func.func @_QPfct_device_host() -> i32 attributes {fir.cuda_attr = #fir.cuda_proc<host_device>}
+
+attributes(global) launch_bounds(1, 2) subroutine sub_lbounds1(); end
+! CHECK: func.func @_QPsub_lbounds1() attributes {fir.cuda_attr = #fir.cuda_proc<global>, fir.cuda_launch_bounds = #fir.launch_bounds<maxTPB = 1 : i64, minBPM = 2 : i64>}
+
+attributes(global) launch_bounds(1, 2, 3) subroutine sub_lbounds2(); end
+! CHECK: func.func @_QPsub_lbounds2() attributes {fir.cuda_attr = #fir.cuda_proc<global>, fir.cuda_launch_bounds = #fir.launch_bounds<maxTPB = 1 : i64, minBPM = 2 : i64, upperBoundClusterSize = 3 : i64>}
+
+attributes(global) cluster_dims(1, 2, 3) subroutine sub_clusterdims1(); end
+! CHECK: func.func @_QPsub_clusterdims1() attributes {fir.cuda_attr = #fir.cuda_proc<global>, fir.cuda_cluster_dims = #fir.cluster_dims<x = 1 : i64, y = 2 : i64, z = 3 : i64>}
diff --git a/flang/test/Lower/OpenMP/FIR/wsloop-reduction-add.f90 b/flang/test/Lower/OpenMP/FIR/wsloop-reduction-add.f90
index 62d9af3..56645294 100644
--- a/flang/test/Lower/OpenMP/FIR/wsloop-reduction-add.f90
+++ b/flang/test/Lower/OpenMP/FIR/wsloop-reduction-add.f90
@@ -1,66 +1,79 @@
! RUN: bbc -emit-fir -hlfir=false -fopenmp %s -o - | FileCheck %s
! RUN: %flang_fc1 -emit-fir -flang-deprecated-no-hlfir -fopenmp %s -o - | FileCheck %s
+! NOTE: Assertions have been autogenerated by utils/generate-test-checks.py
-!CHECK-LABEL: omp.reduction.declare
-!CHECK-SAME: @[[RED_F64_NAME:.*]] : f64 init {
-!CHECK: ^bb0(%{{.*}}: f64):
-!CHECK: %[[C0_1:.*]] = arith.constant 0.000000e+00 : f64
-!CHECK: omp.yield(%[[C0_1]] : f64)
-!CHECK: } combiner {
-!CHECK: ^bb0(%[[ARG0:.*]]: f64, %[[ARG1:.*]]: f64):
-!CHECK: %[[RES:.*]] = arith.addf %[[ARG0]], %[[ARG1]] {{.*}}: f64
-!CHECK: omp.yield(%[[RES]] : f64)
-!CHECK: }
+! The script is designed to make adding checks to
+! a test case fast, it is *not* designed to be authoritative
+! about what constitutes a good test! The CHECK should be
+! minimized and named to reflect the test intent.
-!CHECK-LABEL: omp.reduction.declare
-!CHECK-SAME: @[[RED_I64_NAME:.*]] : i64 init {
-!CHECK: ^bb0(%{{.*}}: i64):
-!CHECK: %[[C0_1:.*]] = arith.constant 0 : i64
-!CHECK: omp.yield(%[[C0_1]] : i64)
-!CHECK: } combiner {
-!CHECK: ^bb0(%[[ARG0:.*]]: i64, %[[ARG1:.*]]: i64):
-!CHECK: %[[RES:.*]] = arith.addi %[[ARG0]], %[[ARG1]] : i64
-!CHECK: omp.yield(%[[RES]] : i64)
-!CHECK: }
+! CHECK-LABEL: omp.reduction.declare @add_reduction_f_64 : f64 init {
+! CHECK: ^bb0(%[[VAL_0:.*]]: f64):
+! CHECK: %[[VAL_1:.*]] = arith.constant 0.000000e+00 : f64
+! CHECK: omp.yield(%[[VAL_1]] : f64)
-!CHECK-LABEL: omp.reduction.declare
-!CHECK-SAME: @[[RED_F32_NAME:.*]] : f32 init {
-!CHECK: ^bb0(%{{.*}}: f32):
-!CHECK: %[[C0_1:.*]] = arith.constant 0.000000e+00 : f32
-!CHECK: omp.yield(%[[C0_1]] : f32)
-!CHECK: } combiner {
-!CHECK: ^bb0(%[[ARG0:.*]]: f32, %[[ARG1:.*]]: f32):
-!CHECK: %[[RES:.*]] = arith.addf %[[ARG0]], %[[ARG1]] {{.*}}: f32
-!CHECK: omp.yield(%[[RES]] : f32)
-!CHECK: }
+! CHECK-LABEL: } combiner {
+! CHECK: ^bb0(%[[VAL_0:.*]]: f64, %[[VAL_1:.*]]: f64):
+! CHECK: %[[VAL_2:.*]] = arith.addf %[[VAL_0]], %[[VAL_1]] fastmath<contract> : f64
+! CHECK: omp.yield(%[[VAL_2]] : f64)
+! CHECK: }
-!CHECK-LABEL: omp.reduction.declare
-!CHECK-SAME: @[[RED_I32_NAME:.*]] : i32 init {
-!CHECK: ^bb0(%{{.*}}: i32):
-!CHECK: %[[C0_1:.*]] = arith.constant 0 : i32
-!CHECK: omp.yield(%[[C0_1]] : i32)
-!CHECK: } combiner {
-!CHECK: ^bb0(%[[ARG0:.*]]: i32, %[[ARG1:.*]]: i32):
-!CHECK: %[[RES:.*]] = arith.addi %[[ARG0]], %[[ARG1]] : i32
-!CHECK: omp.yield(%[[RES]] : i32)
-!CHECK: }
+! CHECK-LABEL: omp.reduction.declare @add_reduction_i_64 : i64 init {
+! CHECK: ^bb0(%[[VAL_0:.*]]: i64):
+! CHECK: %[[VAL_1:.*]] = arith.constant 0 : i64
+! CHECK: omp.yield(%[[VAL_1]] : i64)
+
+! CHECK-LABEL: } combiner {
+! CHECK: ^bb0(%[[VAL_0:.*]]: i64, %[[VAL_1:.*]]: i64):
+! CHECK: %[[VAL_2:.*]] = arith.addi %[[VAL_0]], %[[VAL_1]] : i64
+! CHECK: omp.yield(%[[VAL_2]] : i64)
+! CHECK: }
+
+! CHECK-LABEL: omp.reduction.declare @add_reduction_f_32 : f32 init {
+! CHECK: ^bb0(%[[VAL_0:.*]]: f32):
+! CHECK: %[[VAL_1:.*]] = arith.constant 0.000000e+00 : f32
+! CHECK: omp.yield(%[[VAL_1]] : f32)
+
+! CHECK-LABEL: } combiner {
+! CHECK: ^bb0(%[[VAL_0:.*]]: f32, %[[VAL_1:.*]]: f32):
+! CHECK: %[[VAL_2:.*]] = arith.addf %[[VAL_0]], %[[VAL_1]] fastmath<contract> : f32
+! CHECK: omp.yield(%[[VAL_2]] : f32)
+! CHECK: }
+
+! CHECK-LABEL: omp.reduction.declare @add_reduction_i_32 : i32 init {
+! CHECK: ^bb0(%[[VAL_0:.*]]: i32):
+! CHECK: %[[VAL_1:.*]] = arith.constant 0 : i32
+! CHECK: omp.yield(%[[VAL_1]] : i32)
+
+! CHECK-LABEL: } combiner {
+! CHECK: ^bb0(%[[VAL_0:.*]]: i32, %[[VAL_1:.*]]: i32):
+! CHECK: %[[VAL_2:.*]] = arith.addi %[[VAL_0]], %[[VAL_1]] : i32
+! CHECK: omp.yield(%[[VAL_2]] : i32)
+! CHECK: }
+
+! CHECK-LABEL: func.func @_QPsimple_int_reduction() {
+! CHECK: %[[VAL_0:.*]] = fir.alloca i32 {bindc_name = "i", uniq_name = "_QFsimple_int_reductionEi"}
+! CHECK: %[[VAL_1:.*]] = fir.alloca i32 {bindc_name = "x", uniq_name = "_QFsimple_int_reductionEx"}
+! CHECK: %[[VAL_2:.*]] = arith.constant 0 : i32
+! CHECK: fir.store %[[VAL_2]] to %[[VAL_1]] : !fir.ref<i32>
+! CHECK: omp.parallel {
+! CHECK: %[[VAL_3:.*]] = fir.alloca i32 {adapt.valuebyref, pinned}
+! CHECK: %[[VAL_4:.*]] = arith.constant 1 : i32
+! CHECK: %[[VAL_5:.*]] = arith.constant 100 : i32
+! CHECK: %[[VAL_6:.*]] = arith.constant 1 : i32
+! CHECK: omp.wsloop reduction(@add_reduction_i_32 %[[VAL_1]] -> %[[VAL_7:.*]] : !fir.ref<i32>) for (%[[VAL_8:.*]]) : i32 = (%[[VAL_4]]) to (%[[VAL_5]]) inclusive step (%[[VAL_6]]) {
+! CHECK: fir.store %[[VAL_8]] to %[[VAL_3]] : !fir.ref<i32>
+! CHECK: %[[VAL_9:.*]] = fir.load %[[VAL_7]] : !fir.ref<i32>
+! CHECK: %[[VAL_10:.*]] = fir.load %[[VAL_3]] : !fir.ref<i32>
+! CHECK: %[[VAL_11:.*]] = arith.addi %[[VAL_9]], %[[VAL_10]] : i32
+! CHECK: fir.store %[[VAL_11]] to %[[VAL_7]] : !fir.ref<i32>
+! CHECK: omp.yield
+! CHECK: }
+! CHECK: omp.terminator
+! CHECK: }
+! CHECK: return
+! CHECK: }
-!CHECK-LABEL: func.func @_QPsimple_int_reduction
-!CHECK: %[[XREF:.*]] = fir.alloca i32 {bindc_name = "x", uniq_name = "_QFsimple_int_reductionEx"}
-!CHECK: %[[C0_2:.*]] = arith.constant 0 : i32
-!CHECK: fir.store %[[C0_2]] to %[[XREF]] : !fir.ref<i32>
-!CHECK: omp.parallel
-!CHECK: %[[I_PVT_REF:.*]] = fir.alloca i32 {adapt.valuebyref, pinned}
-!CHECK: %[[C1_1:.*]] = arith.constant 1 : i32
-!CHECK: %[[C100:.*]] = arith.constant 100 : i32
-!CHECK: %[[C1_2:.*]] = arith.constant 1 : i32
-!CHECK: omp.wsloop reduction(@[[RED_I32_NAME]] -> %[[XREF]] : !fir.ref<i32>) for (%[[IVAL:.*]]) : i32 = (%[[C1_1]]) to (%[[C100]]) inclusive step (%[[C1_2]])
-!CHECK: fir.store %[[IVAL]] to %[[I_PVT_REF]] : !fir.ref<i32>
-!CHECK: %[[I_PVT_VAL:.*]] = fir.load %[[I_PVT_REF]] : !fir.ref<i32>
-!CHECK: omp.reduction %[[I_PVT_VAL]], %[[XREF]] : i32, !fir.ref<i32>
-!CHECK: omp.yield
-!CHECK: omp.terminator
-!CHECK: return
subroutine simple_int_reduction
integer :: x
x = 0
@@ -73,23 +86,31 @@ subroutine simple_int_reduction
!$omp end parallel
end subroutine
-!CHECK-LABEL: func.func @_QPsimple_real_reduction
-!CHECK: %[[XREF:.*]] = fir.alloca f32 {bindc_name = "x", uniq_name = "_QFsimple_real_reductionEx"}
-!CHECK: %[[C0_2:.*]] = arith.constant 0.000000e+00 : f32
-!CHECK: fir.store %[[C0_2]] to %[[XREF]] : !fir.ref<f32>
-!CHECK: omp.parallel
-!CHECK: %[[I_PVT_REF:.*]] = fir.alloca i32 {adapt.valuebyref, pinned}
-!CHECK: %[[C1_1:.*]] = arith.constant 1 : i32
-!CHECK: %[[C100:.*]] = arith.constant 100 : i32
-!CHECK: %[[C1_2:.*]] = arith.constant 1 : i32
-!CHECK: omp.wsloop reduction(@[[RED_F32_NAME]] -> %[[XREF]] : !fir.ref<f32>) for (%[[IVAL:.*]]) : i32 = (%[[C1_1]]) to (%[[C100]]) inclusive step (%[[C1_2]])
-!CHECK: fir.store %[[IVAL]] to %[[I_PVT_REF]] : !fir.ref<i32>
-!CHECK: %[[I_PVT_VAL_i32:.*]] = fir.load %[[I_PVT_REF]] : !fir.ref<i32>
-!CHECK: %[[I_PVT_VAL_f32:.*]] = fir.convert %[[I_PVT_VAL_i32]] : (i32) -> f32
-!CHECK: omp.reduction %[[I_PVT_VAL_f32]], %[[XREF]] : f32, !fir.ref<f32>
-!CHECK: omp.yield
-!CHECK: omp.terminator
-!CHECK: return
+
+! CHECK-LABEL: func.func @_QPsimple_real_reduction() {
+! CHECK: %[[VAL_0:.*]] = fir.alloca i32 {bindc_name = "i", uniq_name = "_QFsimple_real_reductionEi"}
+! CHECK: %[[VAL_1:.*]] = fir.alloca f32 {bindc_name = "x", uniq_name = "_QFsimple_real_reductionEx"}
+! CHECK: %[[VAL_2:.*]] = arith.constant 0.000000e+00 : f32
+! CHECK: fir.store %[[VAL_2]] to %[[VAL_1]] : !fir.ref<f32>
+! CHECK: omp.parallel {
+! CHECK: %[[VAL_3:.*]] = fir.alloca i32 {adapt.valuebyref, pinned}
+! CHECK: %[[VAL_4:.*]] = arith.constant 1 : i32
+! CHECK: %[[VAL_5:.*]] = arith.constant 100 : i32
+! CHECK: %[[VAL_6:.*]] = arith.constant 1 : i32
+! CHECK: omp.wsloop reduction(@add_reduction_f_32 %[[VAL_1]] -> %[[VAL_7:.*]] : !fir.ref<f32>) for (%[[VAL_8:.*]]) : i32 = (%[[VAL_4]]) to (%[[VAL_5]]) inclusive step (%[[VAL_6]]) {
+! CHECK: fir.store %[[VAL_8]] to %[[VAL_3]] : !fir.ref<i32>
+! CHECK: %[[VAL_9:.*]] = fir.load %[[VAL_7]] : !fir.ref<f32>
+! CHECK: %[[VAL_10:.*]] = fir.load %[[VAL_3]] : !fir.ref<i32>
+! CHECK: %[[VAL_11:.*]] = fir.convert %[[VAL_10]] : (i32) -> f32
+! CHECK: %[[VAL_12:.*]] = arith.addf %[[VAL_9]], %[[VAL_11]] fastmath<contract> : f32
+! CHECK: fir.store %[[VAL_12]] to %[[VAL_7]] : !fir.ref<f32>
+! CHECK: omp.yield
+! CHECK: }
+! CHECK: omp.terminator
+! CHECK: }
+! CHECK: return
+! CHECK: }
+
subroutine simple_real_reduction
real :: x
x = 0.0
@@ -102,22 +123,29 @@ subroutine simple_real_reduction
!$omp end parallel
end subroutine
-!CHECK-LABEL: func.func @_QPsimple_int_reduction_switch_order
-!CHECK: %[[XREF:.*]] = fir.alloca i32 {bindc_name = "x", uniq_name = "_QFsimple_int_reduction_switch_orderEx"}
-!CHECK: %[[C0_2:.*]] = arith.constant 0 : i32
-!CHECK: fir.store %[[C0_2]] to %[[XREF]] : !fir.ref<i32>
-!CHECK: omp.parallel
-!CHECK: %[[I_PVT_REF:.*]] = fir.alloca i32 {adapt.valuebyref, pinned}
-!CHECK: %[[C1_1:.*]] = arith.constant 1 : i32
-!CHECK: %[[C100:.*]] = arith.constant 100 : i32
-!CHECK: %[[C1_2:.*]] = arith.constant 1 : i32
-!CHECK: omp.wsloop reduction(@[[RED_I32_NAME]] -> %[[XREF]] : !fir.ref<i32>) for (%[[IVAL:.*]]) : i32 = (%[[C1_1]]) to (%[[C100]]) inclusive step (%[[C1_2]])
-!CHECK: fir.store %[[IVAL]] to %[[I_PVT_REF]] : !fir.ref<i32>
-!CHECK: %[[I_PVT_VAL:.*]] = fir.load %[[I_PVT_REF]] : !fir.ref<i32>
-!CHECK: omp.reduction %[[I_PVT_VAL]], %[[XREF]] : i32, !fir.ref<i32>
-!CHECK: omp.yield
-!CHECK: omp.terminator
-!CHECK: return
+! CHECK-LABEL: func.func @_QPsimple_int_reduction_switch_order() {
+! CHECK: %[[VAL_0:.*]] = fir.alloca i32 {bindc_name = "i", uniq_name = "_QFsimple_int_reduction_switch_orderEi"}
+! CHECK: %[[VAL_1:.*]] = fir.alloca i32 {bindc_name = "x", uniq_name = "_QFsimple_int_reduction_switch_orderEx"}
+! CHECK: %[[VAL_2:.*]] = arith.constant 0 : i32
+! CHECK: fir.store %[[VAL_2]] to %[[VAL_1]] : !fir.ref<i32>
+! CHECK: omp.parallel {
+! CHECK: %[[VAL_3:.*]] = fir.alloca i32 {adapt.valuebyref, pinned}
+! CHECK: %[[VAL_4:.*]] = arith.constant 1 : i32
+! CHECK: %[[VAL_5:.*]] = arith.constant 100 : i32
+! CHECK: %[[VAL_6:.*]] = arith.constant 1 : i32
+! CHECK: omp.wsloop reduction(@add_reduction_i_32 %[[VAL_1]] -> %[[VAL_7:.*]] : !fir.ref<i32>) for (%[[VAL_8:.*]]) : i32 = (%[[VAL_4]]) to (%[[VAL_5]]) inclusive step (%[[VAL_6]]) {
+! CHECK: fir.store %[[VAL_8]] to %[[VAL_3]] : !fir.ref<i32>
+! CHECK: %[[VAL_9:.*]] = fir.load %[[VAL_3]] : !fir.ref<i32>
+! CHECK: %[[VAL_10:.*]] = fir.load %[[VAL_7]] : !fir.ref<i32>
+! CHECK: %[[VAL_11:.*]] = arith.addi %[[VAL_9]], %[[VAL_10]] : i32
+! CHECK: fir.store %[[VAL_11]] to %[[VAL_7]] : !fir.ref<i32>
+! CHECK: omp.yield
+! CHECK: }
+! CHECK: omp.terminator
+! CHECK: }
+! CHECK: return
+! CHECK: }
+
subroutine simple_int_reduction_switch_order
integer :: x
x = 0
@@ -130,23 +158,30 @@ subroutine simple_int_reduction_switch_order
!$omp end parallel
end subroutine
-!CHECK-LABEL: func.func @_QPsimple_real_reduction_switch_order
-!CHECK: %[[XREF:.*]] = fir.alloca f32 {bindc_name = "x", uniq_name = "_QFsimple_real_reduction_switch_orderEx"}
-!CHECK: %[[C0_2:.*]] = arith.constant 0.000000e+00 : f32
-!CHECK: fir.store %[[C0_2]] to %[[XREF]] : !fir.ref<f32>
-!CHECK: omp.parallel
-!CHECK: %[[I_PVT_REF:.*]] = fir.alloca i32 {adapt.valuebyref, pinned}
-!CHECK: %[[C1_1:.*]] = arith.constant 1 : i32
-!CHECK: %[[C100:.*]] = arith.constant 100 : i32
-!CHECK: %[[C1_2:.*]] = arith.constant 1 : i32
-!CHECK: omp.wsloop reduction(@[[RED_F32_NAME]] -> %[[XREF]] : !fir.ref<f32>) for (%[[IVAL:.*]]) : i32 = (%[[C1_1]]) to (%[[C100]]) inclusive step (%[[C1_2]])
-!CHECK: fir.store %[[IVAL]] to %[[I_PVT_REF]] : !fir.ref<i32>
-!CHECK: %[[I_PVT_VAL_i32:.*]] = fir.load %[[I_PVT_REF]] : !fir.ref<i32>
-!CHECK: %[[I_PVT_VAL_f32:.*]] = fir.convert %[[I_PVT_VAL_i32]] : (i32) -> f32
-!CHECK: omp.reduction %[[I_PVT_VAL_f32]], %[[XREF]] : f32, !fir.ref<f32>
-!CHECK: omp.yield
-!CHECK: omp.terminator
-!CHECK: return
+! CHECK-LABEL: func.func @_QPsimple_real_reduction_switch_order() {
+! CHECK: %[[VAL_0:.*]] = fir.alloca i32 {bindc_name = "i", uniq_name = "_QFsimple_real_reduction_switch_orderEi"}
+! CHECK: %[[VAL_1:.*]] = fir.alloca f32 {bindc_name = "x", uniq_name = "_QFsimple_real_reduction_switch_orderEx"}
+! CHECK: %[[VAL_2:.*]] = arith.constant 0.000000e+00 : f32
+! CHECK: fir.store %[[VAL_2]] to %[[VAL_1]] : !fir.ref<f32>
+! CHECK: omp.parallel {
+! CHECK: %[[VAL_3:.*]] = fir.alloca i32 {adapt.valuebyref, pinned}
+! CHECK: %[[VAL_4:.*]] = arith.constant 1 : i32
+! CHECK: %[[VAL_5:.*]] = arith.constant 100 : i32
+! CHECK: %[[VAL_6:.*]] = arith.constant 1 : i32
+! CHECK: omp.wsloop reduction(@add_reduction_f_32 %[[VAL_1]] -> %[[VAL_7:.*]] : !fir.ref<f32>) for (%[[VAL_8:.*]]) : i32 = (%[[VAL_4]]) to (%[[VAL_5]]) inclusive step (%[[VAL_6]]) {
+! CHECK: fir.store %[[VAL_8]] to %[[VAL_3]] : !fir.ref<i32>
+! CHECK: %[[VAL_9:.*]] = fir.load %[[VAL_3]] : !fir.ref<i32>
+! CHECK: %[[VAL_10:.*]] = fir.convert %[[VAL_9]] : (i32) -> f32
+! CHECK: %[[VAL_11:.*]] = fir.load %[[VAL_7]] : !fir.ref<f32>
+! CHECK: %[[VAL_12:.*]] = arith.addf %[[VAL_10]], %[[VAL_11]] fastmath<contract> : f32
+! CHECK: fir.store %[[VAL_12]] to %[[VAL_7]] : !fir.ref<f32>
+! CHECK: omp.yield
+! CHECK: }
+! CHECK: omp.terminator
+! CHECK: }
+! CHECK: return
+! CHECK: }
+
subroutine simple_real_reduction_switch_order
real :: x
x = 0.0
@@ -159,23 +194,43 @@ subroutine simple_real_reduction_switch_order
!$omp end parallel
end subroutine
-!CHECK-LABEL: func.func @_QPmultiple_int_reductions_same_type
-!CHECK: %[[XREF:.*]] = fir.alloca i32 {bindc_name = "x", uniq_name = "_QFmultiple_int_reductions_same_typeEx"}
-!CHECK: %[[YREF:.*]] = fir.alloca i32 {bindc_name = "y", uniq_name = "_QFmultiple_int_reductions_same_typeEy"}
-!CHECK: %[[ZREF:.*]] = fir.alloca i32 {bindc_name = "z", uniq_name = "_QFmultiple_int_reductions_same_typeEz"}
-!CHECK: omp.parallel
-!CHECK: %[[I_PVT_REF:.*]] = fir.alloca i32 {adapt.valuebyref, pinned}
-!CHECK: omp.wsloop reduction(@[[RED_I32_NAME]] -> %[[XREF]] : !fir.ref<i32>, @[[RED_I32_NAME]] -> %[[YREF]] : !fir.ref<i32>, @[[RED_I32_NAME]] -> %[[ZREF]] : !fir.ref<i32>) for (%[[IVAL]]) : i32
-!CHECK: fir.store %[[IVAL]] to %[[I_PVT_REF]] : !fir.ref<i32>
-!CHECK: %[[I_PVT_VAL1:.*]] = fir.load %[[I_PVT_REF]] : !fir.ref<i32>
-!CHECK: omp.reduction %[[I_PVT_VAL1]], %[[XREF]] : i32, !fir.ref<i32>
-!CHECK: %[[I_PVT_VAL2:.*]] = fir.load %[[I_PVT_REF]] : !fir.ref<i32>
-!CHECK: omp.reduction %[[I_PVT_VAL2]], %[[YREF]] : i32, !fir.ref<i32>
-!CHECK: %[[I_PVT_VAL3:.*]] = fir.load %[[I_PVT_REF]] : !fir.ref<i32>
-!CHECK: omp.reduction %[[I_PVT_VAL3]], %[[ZREF]] : i32, !fir.ref<i32>
-!CHECK: omp.yield
-!CHECK: omp.terminator
-!CHECK: return
+! CHECK-LABEL: func.func @_QPmultiple_int_reductions_same_type() {
+! CHECK: %[[VAL_0:.*]] = fir.alloca i32 {bindc_name = "i", uniq_name = "_QFmultiple_int_reductions_same_typeEi"}
+! CHECK: %[[VAL_1:.*]] = fir.alloca i32 {bindc_name = "x", uniq_name = "_QFmultiple_int_reductions_same_typeEx"}
+! CHECK: %[[VAL_2:.*]] = fir.alloca i32 {bindc_name = "y", uniq_name = "_QFmultiple_int_reductions_same_typeEy"}
+! CHECK: %[[VAL_3:.*]] = fir.alloca i32 {bindc_name = "z", uniq_name = "_QFmultiple_int_reductions_same_typeEz"}
+! CHECK: %[[VAL_4:.*]] = arith.constant 0 : i32
+! CHECK: fir.store %[[VAL_4]] to %[[VAL_1]] : !fir.ref<i32>
+! CHECK: %[[VAL_5:.*]] = arith.constant 0 : i32
+! CHECK: fir.store %[[VAL_5]] to %[[VAL_2]] : !fir.ref<i32>
+! CHECK: %[[VAL_6:.*]] = arith.constant 0 : i32
+! CHECK: fir.store %[[VAL_6]] to %[[VAL_3]] : !fir.ref<i32>
+! CHECK: omp.parallel {
+! CHECK: %[[VAL_7:.*]] = fir.alloca i32 {adapt.valuebyref, pinned}
+! CHECK: %[[VAL_8:.*]] = arith.constant 1 : i32
+! CHECK: %[[VAL_9:.*]] = arith.constant 100 : i32
+! CHECK: %[[VAL_10:.*]] = arith.constant 1 : i32
+! CHECK: omp.wsloop reduction(@add_reduction_i_32 %[[VAL_1]] -> %[[VAL_11:.*]] : !fir.ref<i32>, @add_reduction_i_32 %[[VAL_2]] -> %[[VAL_12:.*]] : !fir.ref<i32>, @add_reduction_i_32 %[[VAL_3]] -> %[[VAL_13:.*]] : !fir.ref<i32>) for (%[[VAL_14:.*]]) : i32 = (%[[VAL_8]]) to (%[[VAL_9]]) inclusive step (%[[VAL_10]]) {
+! CHECK: fir.store %[[VAL_14]] to %[[VAL_7]] : !fir.ref<i32>
+! CHECK: %[[VAL_15:.*]] = fir.load %[[VAL_11]] : !fir.ref<i32>
+! CHECK: %[[VAL_16:.*]] = fir.load %[[VAL_7]] : !fir.ref<i32>
+! CHECK: %[[VAL_17:.*]] = arith.addi %[[VAL_15]], %[[VAL_16]] : i32
+! CHECK: fir.store %[[VAL_17]] to %[[VAL_11]] : !fir.ref<i32>
+! CHECK: %[[VAL_18:.*]] = fir.load %[[VAL_12]] : !fir.ref<i32>
+! CHECK: %[[VAL_19:.*]] = fir.load %[[VAL_7]] : !fir.ref<i32>
+! CHECK: %[[VAL_20:.*]] = arith.addi %[[VAL_18]], %[[VAL_19]] : i32
+! CHECK: fir.store %[[VAL_20]] to %[[VAL_12]] : !fir.ref<i32>
+! CHECK: %[[VAL_21:.*]] = fir.load %[[VAL_13]] : !fir.ref<i32>
+! CHECK: %[[VAL_22:.*]] = fir.load %[[VAL_7]] : !fir.ref<i32>
+! CHECK: %[[VAL_23:.*]] = arith.addi %[[VAL_21]], %[[VAL_22]] : i32
+! CHECK: fir.store %[[VAL_23]] to %[[VAL_13]] : !fir.ref<i32>
+! CHECK: omp.yield
+! CHECK: }
+! CHECK: omp.terminator
+! CHECK: }
+! CHECK: return
+! CHECK: }
+
subroutine multiple_int_reductions_same_type
integer :: x,y,z
x = 0
@@ -192,26 +247,46 @@ subroutine multiple_int_reductions_same_type
!$omp end parallel
end subroutine
-!CHECK-LABEL: func.func @_QPmultiple_real_reductions_same_type
-!CHECK: %[[XREF:.*]] = fir.alloca f32 {bindc_name = "x", uniq_name = "_QFmultiple_real_reductions_same_typeEx"}
-!CHECK: %[[YREF:.*]] = fir.alloca f32 {bindc_name = "y", uniq_name = "_QFmultiple_real_reductions_same_typeEy"}
-!CHECK: %[[ZREF:.*]] = fir.alloca f32 {bindc_name = "z", uniq_name = "_QFmultiple_real_reductions_same_typeEz"}
-!CHECK: omp.parallel
-!CHECK: %[[I_PVT_REF:.*]] = fir.alloca i32 {adapt.valuebyref, pinned}
-!CHECK: omp.wsloop reduction(@[[RED_F32_NAME]] -> %[[XREF]] : !fir.ref<f32>, @[[RED_F32_NAME]] -> %[[YREF]] : !fir.ref<f32>, @[[RED_F32_NAME]] -> %[[ZREF]] : !fir.ref<f32>) for (%[[IVAL]]) : i32
-!CHECK: fir.store %[[IVAL]] to %[[I_PVT_REF]] : !fir.ref<i32>
-!CHECK: %[[I_PVT_VAL1_I32:.*]] = fir.load %[[I_PVT_REF]] : !fir.ref<i32>
-!CHECK: %[[I_PVT_VAL1_F32:.*]] = fir.convert %[[I_PVT_VAL1_I32]] : (i32) -> f32
-!CHECK: omp.reduction %[[I_PVT_VAL1_F32]], %[[XREF]] : f32, !fir.ref<f32>
-!CHECK: %[[I_PVT_VAL2_I32:.*]] = fir.load %[[I_PVT_REF]] : !fir.ref<i32>
-!CHECK: %[[I_PVT_VAL2_F32:.*]] = fir.convert %[[I_PVT_VAL2_I32]] : (i32) -> f32
-!CHECK: omp.reduction %[[I_PVT_VAL2_F32]], %[[YREF]] : f32, !fir.ref<f32>
-!CHECK: %[[I_PVT_VAL3_I32:.*]] = fir.load %[[I_PVT_REF]] : !fir.ref<i32>
-!CHECK: %[[I_PVT_VAL3_F32:.*]] = fir.convert %[[I_PVT_VAL3_I32]] : (i32) -> f32
-!CHECK: omp.reduction %[[I_PVT_VAL3_F32]], %[[ZREF]] : f32, !fir.ref<f32>
-!CHECK: omp.yield
-!CHECK: omp.terminator
-!CHECK: return
+! CHECK-LABEL: func.func @_QPmultiple_real_reductions_same_type() {
+! CHECK: %[[VAL_0:.*]] = fir.alloca i32 {bindc_name = "i", uniq_name = "_QFmultiple_real_reductions_same_typeEi"}
+! CHECK: %[[VAL_1:.*]] = fir.alloca f32 {bindc_name = "x", uniq_name = "_QFmultiple_real_reductions_same_typeEx"}
+! CHECK: %[[VAL_2:.*]] = fir.alloca f32 {bindc_name = "y", uniq_name = "_QFmultiple_real_reductions_same_typeEy"}
+! CHECK: %[[VAL_3:.*]] = fir.alloca f32 {bindc_name = "z", uniq_name = "_QFmultiple_real_reductions_same_typeEz"}
+! CHECK: %[[VAL_4:.*]] = arith.constant 0.000000e+00 : f32
+! CHECK: fir.store %[[VAL_4]] to %[[VAL_1]] : !fir.ref<f32>
+! CHECK: %[[VAL_5:.*]] = arith.constant 0.000000e+00 : f32
+! CHECK: fir.store %[[VAL_5]] to %[[VAL_2]] : !fir.ref<f32>
+! CHECK: %[[VAL_6:.*]] = arith.constant 0.000000e+00 : f32
+! CHECK: fir.store %[[VAL_6]] to %[[VAL_3]] : !fir.ref<f32>
+! CHECK: omp.parallel {
+! CHECK: %[[VAL_7:.*]] = fir.alloca i32 {adapt.valuebyref, pinned}
+! CHECK: %[[VAL_8:.*]] = arith.constant 1 : i32
+! CHECK: %[[VAL_9:.*]] = arith.constant 100 : i32
+! CHECK: %[[VAL_10:.*]] = arith.constant 1 : i32
+! CHECK: omp.wsloop reduction(@add_reduction_f_32 %[[VAL_1]] -> %[[VAL_11:.*]] : !fir.ref<f32>, @add_reduction_f_32 %[[VAL_2]] -> %[[VAL_12:.*]] : !fir.ref<f32>, @add_reduction_f_32 %[[VAL_3]] -> %[[VAL_13:.*]] : !fir.ref<f32>) for (%[[VAL_14:.*]]) : i32 = (%[[VAL_8]]) to (%[[VAL_9]]) inclusive step (%[[VAL_10]]) {
+! CHECK: fir.store %[[VAL_14]] to %[[VAL_7]] : !fir.ref<i32>
+! CHECK: %[[VAL_15:.*]] = fir.load %[[VAL_11]] : !fir.ref<f32>
+! CHECK: %[[VAL_16:.*]] = fir.load %[[VAL_7]] : !fir.ref<i32>
+! CHECK: %[[VAL_17:.*]] = fir.convert %[[VAL_16]] : (i32) -> f32
+! CHECK: %[[VAL_18:.*]] = arith.addf %[[VAL_15]], %[[VAL_17]] fastmath<contract> : f32
+! CHECK: fir.store %[[VAL_18]] to %[[VAL_11]] : !fir.ref<f32>
+! CHECK: %[[VAL_19:.*]] = fir.load %[[VAL_12]] : !fir.ref<f32>
+! CHECK: %[[VAL_20:.*]] = fir.load %[[VAL_7]] : !fir.ref<i32>
+! CHECK: %[[VAL_21:.*]] = fir.convert %[[VAL_20]] : (i32) -> f32
+! CHECK: %[[VAL_22:.*]] = arith.addf %[[VAL_19]], %[[VAL_21]] fastmath<contract> : f32
+! CHECK: fir.store %[[VAL_22]] to %[[VAL_12]] : !fir.ref<f32>
+! CHECK: %[[VAL_23:.*]] = fir.load %[[VAL_13]] : !fir.ref<f32>
+! CHECK: %[[VAL_24:.*]] = fir.load %[[VAL_7]] : !fir.ref<i32>
+! CHECK: %[[VAL_25:.*]] = fir.convert %[[VAL_24]] : (i32) -> f32
+! CHECK: %[[VAL_26:.*]] = arith.addf %[[VAL_23]], %[[VAL_25]] fastmath<contract> : f32
+! CHECK: fir.store %[[VAL_26]] to %[[VAL_13]] : !fir.ref<f32>
+! CHECK: omp.yield
+! CHECK: }
+! CHECK: omp.terminator
+! CHECK: }
+! CHECK: return
+! CHECK: }
+
subroutine multiple_real_reductions_same_type
real :: x,y,z
x = 0.0
@@ -228,29 +303,54 @@ subroutine multiple_real_reductions_same_type
!$omp end parallel
end subroutine
-!CHECK-LABEL: func.func @_QPmultiple_reductions_different_type
-!CHECK: %[[WREF:.*]] = fir.alloca f64 {bindc_name = "w", uniq_name = "_QFmultiple_reductions_different_typeEw"}
-!CHECK: %[[XREF:.*]] = fir.alloca i32 {bindc_name = "x", uniq_name = "_QFmultiple_reductions_different_typeEx"}
-!CHECK: %[[YREF:.*]] = fir.alloca i64 {bindc_name = "y", uniq_name = "_QFmultiple_reductions_different_typeEy"}
-!CHECK: %[[ZREF:.*]] = fir.alloca f32 {bindc_name = "z", uniq_name = "_QFmultiple_reductions_different_typeEz"}
-!CHECK: omp.parallel
-!CHECK: %[[I_PVT_REF:.*]] = fir.alloca i32 {adapt.valuebyref, pinned}
-!CHECK: omp.wsloop reduction(@[[RED_I32_NAME]] -> %[[XREF]] : !fir.ref<i32>, @[[RED_I64_NAME]] -> %[[YREF]] : !fir.ref<i64>, @[[RED_F32_NAME]] -> %[[ZREF]] : !fir.ref<f32>, @[[RED_F64_NAME]] -> %[[WREF]] : !fir.ref<f64>) for (%[[IVAL:.*]]) : i32
-!CHECK: fir.store %[[IVAL]] to %[[I_PVT_REF]] : !fir.ref<i32>
-!CHECK: %[[I_PVT_VAL1_I32:.*]] = fir.load %[[I_PVT_REF]] : !fir.ref<i32>
-!CHECK: omp.reduction %[[I_PVT_VAL1_I32]], %[[XREF]] : i32, !fir.ref<i32>
-!CHECK: %[[I_PVT_VAL2_I32:.*]] = fir.load %[[I_PVT_REF]] : !fir.ref<i32>
-!CHECK: %[[I_PVT_VAL2_I64:.*]] = fir.convert %[[I_PVT_VAL2_I32]] : (i32) -> i64
-!CHECK: omp.reduction %[[I_PVT_VAL2_I64]], %[[YREF]] : i64, !fir.ref<i64>
-!CHECK: %[[I_PVT_VAL3_I32:.*]] = fir.load %[[I_PVT_REF]] : !fir.ref<i32>
-!CHECK: %[[I_PVT_VAL3_F32:.*]] = fir.convert %[[I_PVT_VAL3_I32]] : (i32) -> f32
-!CHECK: omp.reduction %[[I_PVT_VAL3_F32]], %[[ZREF]] : f32, !fir.ref<f32>
-!CHECK: %[[I_PVT_VAL4_I32:.*]] = fir.load %[[I_PVT_REF]] : !fir.ref<i32>
-!CHECK: %[[I_PVT_VAL4_F64:.*]] = fir.convert %[[I_PVT_VAL4_I32]] : (i32) -> f64
-!CHECK: omp.reduction %[[I_PVT_VAL4_F64]], %[[WREF]] : f64, !fir.ref<f64>
-!CHECK: omp.yield
-!CHECK: omp.terminator
-!CHECK: return
+! CHECK-LABEL: func.func @_QPmultiple_reductions_different_type() {
+! CHECK: %[[VAL_0:.*]] = fir.alloca i32 {bindc_name = "i", uniq_name = "_QFmultiple_reductions_different_typeEi"}
+! CHECK: %[[VAL_1:.*]] = fir.alloca f64 {bindc_name = "w", uniq_name = "_QFmultiple_reductions_different_typeEw"}
+! CHECK: %[[VAL_2:.*]] = fir.alloca i32 {bindc_name = "x", uniq_name = "_QFmultiple_reductions_different_typeEx"}
+! CHECK: %[[VAL_3:.*]] = fir.alloca i64 {bindc_name = "y", uniq_name = "_QFmultiple_reductions_different_typeEy"}
+! CHECK: %[[VAL_4:.*]] = fir.alloca f32 {bindc_name = "z", uniq_name = "_QFmultiple_reductions_different_typeEz"}
+! CHECK: %[[VAL_5:.*]] = arith.constant 0 : i32
+! CHECK: fir.store %[[VAL_5]] to %[[VAL_2]] : !fir.ref<i32>
+! CHECK: %[[VAL_6:.*]] = arith.constant 0 : i64
+! CHECK: fir.store %[[VAL_6]] to %[[VAL_3]] : !fir.ref<i64>
+! CHECK: %[[VAL_7:.*]] = arith.constant 0.000000e+00 : f32
+! CHECK: fir.store %[[VAL_7]] to %[[VAL_4]] : !fir.ref<f32>
+! CHECK: %[[VAL_8:.*]] = arith.constant 0.000000e+00 : f64
+! CHECK: fir.store %[[VAL_8]] to %[[VAL_1]] : !fir.ref<f64>
+! CHECK: omp.parallel {
+! CHECK: %[[VAL_9:.*]] = fir.alloca i32 {adapt.valuebyref, pinned}
+! CHECK: %[[VAL_10:.*]] = arith.constant 1 : i32
+! CHECK: %[[VAL_11:.*]] = arith.constant 100 : i32
+! CHECK: %[[VAL_12:.*]] = arith.constant 1 : i32
+! CHECK: omp.wsloop reduction(@add_reduction_i_32 %[[VAL_2]] -> %[[VAL_13:.*]] : !fir.ref<i32>, @add_reduction_i_64 %[[VAL_3]] -> %[[VAL_14:.*]] : !fir.ref<i64>, @add_reduction_f_32 %[[VAL_4]] -> %[[VAL_15:.*]] : !fir.ref<f32>, @add_reduction_f_64 %[[VAL_1]] -> %[[VAL_16:.*]] : !fir.ref<f64>) for (%[[VAL_17:.*]]) : i32 = (%[[VAL_10]]) to (%[[VAL_11]]) inclusive step (%[[VAL_12]]) {
+! CHECK: fir.store %[[VAL_17]] to %[[VAL_9]] : !fir.ref<i32>
+! CHECK: %[[VAL_18:.*]] = fir.load %[[VAL_13]] : !fir.ref<i32>
+! CHECK: %[[VAL_19:.*]] = fir.load %[[VAL_9]] : !fir.ref<i32>
+! CHECK: %[[VAL_20:.*]] = arith.addi %[[VAL_18]], %[[VAL_19]] : i32
+! CHECK: fir.store %[[VAL_20]] to %[[VAL_13]] : !fir.ref<i32>
+! CHECK: %[[VAL_21:.*]] = fir.load %[[VAL_14]] : !fir.ref<i64>
+! CHECK: %[[VAL_22:.*]] = fir.load %[[VAL_9]] : !fir.ref<i32>
+! CHECK: %[[VAL_23:.*]] = fir.convert %[[VAL_22]] : (i32) -> i64
+! CHECK: %[[VAL_24:.*]] = arith.addi %[[VAL_21]], %[[VAL_23]] : i64
+! CHECK: fir.store %[[VAL_24]] to %[[VAL_14]] : !fir.ref<i64>
+! CHECK: %[[VAL_25:.*]] = fir.load %[[VAL_15]] : !fir.ref<f32>
+! CHECK: %[[VAL_26:.*]] = fir.load %[[VAL_9]] : !fir.ref<i32>
+! CHECK: %[[VAL_27:.*]] = fir.convert %[[VAL_26]] : (i32) -> f32
+! CHECK: %[[VAL_28:.*]] = arith.addf %[[VAL_25]], %[[VAL_27]] fastmath<contract> : f32
+! CHECK: fir.store %[[VAL_28]] to %[[VAL_15]] : !fir.ref<f32>
+! CHECK: %[[VAL_29:.*]] = fir.load %[[VAL_16]] : !fir.ref<f64>
+! CHECK: %[[VAL_30:.*]] = fir.load %[[VAL_9]] : !fir.ref<i32>
+! CHECK: %[[VAL_31:.*]] = fir.convert %[[VAL_30]] : (i32) -> f64
+! CHECK: %[[VAL_32:.*]] = arith.addf %[[VAL_29]], %[[VAL_31]] fastmath<contract> : f64
+! CHECK: fir.store %[[VAL_32]] to %[[VAL_16]] : !fir.ref<f64>
+! CHECK: omp.yield
+! CHECK: }
+! CHECK: omp.terminator
+! CHECK: }
+! CHECK: return
+! CHECK: }
+
+
subroutine multiple_reductions_different_type
integer :: x
integer(kind=8) :: y
diff --git a/flang/test/Lower/OpenMP/FIR/wsloop-reduction-iand.f90 b/flang/test/Lower/OpenMP/FIR/wsloop-reduction-iand.f90
index ecbcac8..9ce1725 100644
--- a/flang/test/Lower/OpenMP/FIR/wsloop-reduction-iand.f90
+++ b/flang/test/Lower/OpenMP/FIR/wsloop-reduction-iand.f90
@@ -10,13 +10,15 @@
!CHECK: omp.yield(%[[IAND_VAL_I]] : i32)
!CHECK-LABEL: @_QPreduction_iand
-!CHECK-SAME: %[[Y_BOX:.*]]: !fir.box<!fir.array<?xi32>>
+!CHECK-SAME: %[[Y_BOX:.*]]: !fir.box<!fir.array<?xi32>>
!CHECK: %[[X_REF:.*]] = fir.alloca i32 {bindc_name = "x", uniq_name = "_QFreduction_iandEx"}
!CHECK: omp.parallel
-!CHECK: omp.wsloop reduction(@[[IAND_DECLARE_I]] -> %[[X_REF]] : !fir.ref<i32>) for
+!CHECK: omp.wsloop reduction(@[[IAND_DECLARE_I]] %[[X_REF]] -> %[[PRV:.+]] : !fir.ref<i32>) for
+!CHECK: %[[LPRV:.+]] = fir.load %[[PRV]] : !fir.ref<i32>
!CHECK: %[[Y_I_REF:.*]] = fir.coordinate_of %[[Y_BOX]]
!CHECK: %[[Y_I:.*]] = fir.load %[[Y_I_REF]] : !fir.ref<i32>
-!CHECK: omp.reduction %[[Y_I]], %[[X_REF]] : i32, !fir.ref<i32>
+!CHECK: %[[RES:.+]] = arith.andi %[[LPRV]], %[[Y_I]] : i32
+!CHECK: fir.store %[[RES]] to %[[PRV]] : !fir.ref<i32>
!CHECK: omp.yield
!CHECK: omp.terminator
diff --git a/flang/test/Lower/OpenMP/FIR/wsloop-reduction-ieor.f90 b/flang/test/Lower/OpenMP/FIR/wsloop-reduction-ieor.f90
index beb899f..f602741 100644
--- a/flang/test/Lower/OpenMP/FIR/wsloop-reduction-ieor.f90
+++ b/flang/test/Lower/OpenMP/FIR/wsloop-reduction-ieor.f90
@@ -10,13 +10,15 @@
!CHECK: omp.yield(%[[IEOR_VAL_I]] : i32)
!CHECK-LABEL: @_QPreduction_ieor
-!CHECK-SAME: %[[Y_BOX:.*]]: !fir.box<!fir.array<?xi32>>
+!CHECK-SAME: %[[Y_BOX:.*]]: !fir.box<!fir.array<?xi32>>
!CHECK: %[[X_REF:.*]] = fir.alloca i32 {bindc_name = "x", uniq_name = "_QFreduction_ieorEx"}
!CHECK: omp.parallel
-!CHECK: omp.wsloop reduction(@[[IEOR_DECLARE_I]] -> %[[X_REF]] : !fir.ref<i32>) for
+!CHECK: omp.wsloop reduction(@[[IEOR_DECLARE_I]] %[[X_REF]] -> %[[PRV:.+]] : !fir.ref<i32>) for
+!CHECK: %[[LPRV:.+]] = fir.load %[[PRV]] : !fir.ref<i32>
!CHECK: %[[Y_I_REF:.*]] = fir.coordinate_of %[[Y_BOX]]
!CHECK: %[[Y_I:.*]] = fir.load %[[Y_I_REF]] : !fir.ref<i32>
-!CHECK: omp.reduction %[[Y_I]], %[[X_REF]] : i32, !fir.ref<i32>
+!CHECK: %[[RES:.+]] = arith.xori %[[LPRV]], %[[Y_I]] : i32
+!CHECK: fir.store %[[RES]] to %[[PRV]] : !fir.ref<i32>
!CHECK: omp.yield
!CHECK: omp.terminator
diff --git a/flang/test/Lower/OpenMP/FIR/wsloop-reduction-ior.f90 b/flang/test/Lower/OpenMP/FIR/wsloop-reduction-ior.f90
index 50291d2..bc14361 100644
--- a/flang/test/Lower/OpenMP/FIR/wsloop-reduction-ior.f90
+++ b/flang/test/Lower/OpenMP/FIR/wsloop-reduction-ior.f90
@@ -13,10 +13,12 @@
!CHECK-SAME: %[[Y_BOX:.*]]: !fir.box<!fir.array<?xi32>>
!CHECK: %[[X_REF:.*]] = fir.alloca i32 {bindc_name = "x", uniq_name = "_QFreduction_iorEx"}
!CHECK: omp.parallel
-!CHECK: omp.wsloop reduction(@[[IOR_DECLARE_I]] -> %[[X_REF]] : !fir.ref<i32>) for
+!CHECK: omp.wsloop reduction(@[[IOR_DECLARE_I]] %[[X_REF]] -> %[[PRV:.+]] : !fir.ref<i32>) for
+!CHECK: %[[LPRV:.+]] = fir.load %[[PRV]] : !fir.ref<i32>
!CHECK: %[[Y_I_REF:.*]] = fir.coordinate_of %[[Y_BOX]]
!CHECK: %[[Y_I:.*]] = fir.load %[[Y_I_REF]] : !fir.ref<i32>
-!CHECK: omp.reduction %[[Y_I]], %[[X_REF]] : i32, !fir.ref<i32>
+!CHECK: %[[RES:.+]] = arith.ori %[[LPRV]], %[[Y_I]] : i32
+!CHECK: fir.store %[[RES]] to %[[PRV]] : !fir.ref<i32>
!CHECK: omp.yield
!CHECK: omp.terminator
diff --git a/flang/test/Lower/OpenMP/FIR/wsloop-reduction-logical-and.f90 b/flang/test/Lower/OpenMP/FIR/wsloop-reduction-logical-and.f90
deleted file mode 100644
index 3f40a05..0000000
--- a/flang/test/Lower/OpenMP/FIR/wsloop-reduction-logical-and.f90
+++ /dev/null
@@ -1,137 +0,0 @@
-! RUN: bbc -emit-fir -hlfir=false -fopenmp %s -o - | FileCheck %s
-! RUN: %flang_fc1 -emit-fir -flang-deprecated-no-hlfir -fopenmp %s -o - | FileCheck %s
-
-!CHECK-LABEL: omp.reduction.declare
-!CHECK-SAME: @[[RED_NAME:.*]] : !fir.logical<4> init {
-!CHECK: ^bb0(%{{.*}}: !fir.logical<4>):
-!CHECK: %true = arith.constant true
-!CHECK: %[[true_fir:.*]] = fir.convert %true : (i1) -> !fir.logical<4>
-!CHECK: omp.yield(%[[true_fir]] : !fir.logical<4>)
-!CHECK: } combiner {
-!CHECK: ^bb0(%[[ARG0:.*]]: !fir.logical<4>, %[[ARG1:.*]]: !fir.logical<4>):
-!CHECK: %[[arg0_i1:.*]] = fir.convert %[[ARG0]] : (!fir.logical<4>) -> i1
-!CHECK: %[[arg1_i1:.*]] = fir.convert %[[ARG1]] : (!fir.logical<4>) -> i1
-!CHECK: %[[RES:.*]] = arith.andi %[[arg0_i1]], %[[arg1_i1]] : i1
-!CHECK: %[[RES_logical:.*]] = fir.convert %[[RES]] : (i1) -> !fir.logical<4>
-!CHECK: omp.yield(%[[RES_logical]] : !fir.logical<4>)
-!CHECK: }
-
-!CHECK-LABEL: func.func @_QPsimple_reduction(
-!CHECK-SAME: %[[ARRAY:.*]]: !fir.ref<!fir.array<100x!fir.logical<4>>> {fir.bindc_name = "y"}) {
-!CHECK: %[[IREF:.*]] = fir.alloca i32 {bindc_name = "i", uniq_name = "_QFsimple_reductionEi"}
-!CHECK: %[[XREF:.*]] = fir.alloca !fir.logical<4> {bindc_name = "x", uniq_name = "_QFsimple_reductionEx"}
-!CHECK: omp.parallel
-!CHECK: %[[I_PVT_REF:.*]] = fir.alloca i32 {adapt.valuebyref, pinned}
-!CHECK: %[[C1_1:.*]] = arith.constant 1 : i32
-!CHECK: %[[C100:.*]] = arith.constant 100 : i32
-!CHECK: %[[C1_2:.*]] = arith.constant 1 : i32
-!CHECK: omp.wsloop reduction(@[[RED_NAME]] -> %[[XREF]] : !fir.ref<!fir.logical<4>>) for (%[[IVAL:.*]]) : i32 = (%[[C1_1]]) to (%[[C100]]) inclusive step (%[[C1_2]]) {
-!CHECK: fir.store %[[IVAL]] to %[[I_PVT_REF]] : !fir.ref<i32>
-!CHECK: %[[I_PVT_VAL:.*]] = fir.load %[[I_PVT_REF]] : !fir.ref<i32>
-!CHECK: %[[CONVI_64:.*]] = fir.convert %[[I_PVT_VAL]] : (i32) -> i64
-!CHECK: %[[C1_64:.*]] = arith.constant 1 : i64
-!CHECK: %[[SUBI:.*]] = arith.subi %[[CONVI_64]], %[[C1_64]] : i64
-!CHECK: %[[Y_PVT_REF:.*]] = fir.coordinate_of %[[ARRAY]], %[[SUBI]] : (!fir.ref<!fir.array<100x!fir.logical<4>>>, i64) -> !fir.ref<!fir.logical<4>>
-!CHECK: %[[YVAL:.*]] = fir.load %[[Y_PVT_REF]] : !fir.ref<!fir.logical<4>>
-!CHECK: omp.reduction %[[YVAL]], %[[XREF]] : !fir.logical<4>, !fir.ref<!fir.logical<4>>
-!CHECK: omp.yield
-!CHECK: omp.terminator
-!CHECK: return
-subroutine simple_reduction(y)
- logical :: x, y(100)
- x = .true.
- !$omp parallel
- !$omp do reduction(.and.:x)
- do i=1, 100
- x = x .and. y(i)
- end do
- !$omp end do
- !$omp end parallel
-end subroutine
-
-!CHECK-LABEL: func.func @_QPsimple_reduction_switch_order(
-!CHECK-SAME: %[[ARRAY:.*]]: !fir.ref<!fir.array<100x!fir.logical<4>>> {fir.bindc_name = "y"}) {
-!CHECK: %[[IREF:.*]] = fir.alloca i32 {bindc_name = "i", uniq_name = "_QFsimple_reduction_switch_orderEi"}
-!CHECK: %[[XREF:.*]] = fir.alloca !fir.logical<4> {bindc_name = "x", uniq_name = "_QFsimple_reduction_switch_orderEx"}
-!CHECK: omp.parallel
-!CHECK: %[[I_PVT_REF:.*]] = fir.alloca i32 {adapt.valuebyref, pinned}
-!CHECK: %[[C1_1:.*]] = arith.constant 1 : i32
-!CHECK: %[[C100:.*]] = arith.constant 100 : i32
-!CHECK: %[[C1_2:.*]] = arith.constant 1 : i32
-!CHECK: omp.wsloop reduction(@[[RED_NAME]] -> %[[XREF]] : !fir.ref<!fir.logical<4>>) for (%[[IVAL:.*]]) : i32 = (%[[C1_1]]) to (%[[C100]]) inclusive step (%[[C1_2]]) {
-!CHECK: fir.store %[[IVAL]] to %[[I_PVT_REF]] : !fir.ref<i32>
-!CHECK: %[[I_PVT_VAL:.*]] = fir.load %[[I_PVT_REF]] : !fir.ref<i32>
-!CHECK: %[[CONVI_64:.*]] = fir.convert %[[I_PVT_VAL]] : (i32) -> i64
-!CHECK: %[[C1_64:.*]] = arith.constant 1 : i64
-!CHECK: %[[SUBI:.*]] = arith.subi %[[CONVI_64]], %[[C1_64]] : i64
-!CHECK: %[[Y_PVT_REF:.*]] = fir.coordinate_of %[[ARRAY]], %[[SUBI]] : (!fir.ref<!fir.array<100x!fir.logical<4>>>, i64) -> !fir.ref<!fir.logical<4>>
-!CHECK: %[[YVAL:.*]] = fir.load %[[Y_PVT_REF]] : !fir.ref<!fir.logical<4>>
-!CHECK: omp.reduction %[[YVAL]], %[[XREF]] : !fir.logical<4>, !fir.ref<!fir.logical<4>>
-!CHECK: omp.yield
-!CHECK: omp.terminator
-!CHECK: return
-subroutine simple_reduction_switch_order(y)
- logical :: x, y(100)
- x = .true.
- !$omp parallel
- !$omp do reduction(.and.:x)
- do i=1, 100
- x = y(i) .and. x
- end do
- !$omp end do
- !$omp end parallel
-end subroutine
-
-!CHECK-LABEL: func.func @_QPmultiple_reductions
-!CHECK-SAME %[[ARRAY:.*]]: !fir.ref<!fir.array<100x!fir.logical<4>>> {fir.bindc_name = "w"}) {
-!CHECK: %[[IREF:.*]] = fir.alloca i32 {bindc_name = "i", uniq_name = "_QFmultiple_reductionsEi"}
-!CHECK: %[[XREF:.*]] = fir.alloca !fir.logical<4> {bindc_name = "x", uniq_name = "_QFmultiple_reductionsEx"}
-!CHECK: %[[YREF:.*]] = fir.alloca !fir.logical<4> {bindc_name = "y", uniq_name = "_QFmultiple_reductionsEy"}
-!CHECK: %[[ZREF:.*]] = fir.alloca !fir.logical<4> {bindc_name = "z", uniq_name = "_QFmultiple_reductionsEz"}
-!CHECK: omp.parallel
-!CHECK: %[[I_PVT_REF:.*]] = fir.alloca i32 {adapt.valuebyref, pinned}
-!CHECK: %[[C1_1:.*]] = arith.constant 1 : i32
-!CHECK: %[[C100:.*]] = arith.constant 100 : i32
-!CHECK: %[[C1_2:.*]] = arith.constant 1 : i32
-!CHECK: omp.wsloop reduction(@[[RED_NAME]] -> %[[XREF]] : !fir.ref<!fir.logical<4>>, @[[RED_NAME]] -> %[[YREF]] : !fir.ref<!fir.logical<4>>, @[[RED_NAME]] -> %[[ZREF]] : !fir.ref<!fir.logical<4>>) for (%[[IVAL:.*]]) : i32 = (%[[C1_1]]) to (%[[C100]]) inclusive step (%[[C1_2]]) {
-!CHECK: fir.store %[[IVAL]] to %[[I_PVT_REF]] : !fir.ref<i32>
-!CHECK: %[[I_PVT_VAL1:.*]] = fir.load %[[I_PVT_REF]] : !fir.ref<i32>
-!CHECK: %[[CONVI_64_1:.*]] = fir.convert %[[I_PVT_VAL1]] : (i32) -> i64
-!CHECK: %[[C1_64:.*]] = arith.constant 1 : i64
-!CHECK: %[[SUBI_1:.*]] = arith.subi %[[CONVI_64_1]], %[[C1_64]] : i64
-!CHECK: %[[W_PVT_REF_1:.*]] = fir.coordinate_of %[[ARRAY]], %[[SUBI_1]] : (!fir.ref<!fir.array<100x!fir.logical<4>>>, i64) -> !fir.ref<!fir.logical<4>>
-!CHECK: %[[WVAL:.*]] = fir.load %[[W_PVT_REF_1]] : !fir.ref<!fir.logical<4>>
-!CHECK: omp.reduction %[[WVAL]], %[[XREF]] : !fir.logical<4>, !fir.ref<!fir.logical<4>>
-!CHECK: %[[I_PVT_VAL2:.*]] = fir.load %[[I_PVT_REF]] : !fir.ref<i32>
-!CHECK: %[[CONVI_64_2:.*]] = fir.convert %[[I_PVT_VAL2]] : (i32) -> i64
-!CHECK: %[[C1_64:.*]] = arith.constant 1 : i64
-!CHECK: %[[SUBI_2:.*]] = arith.subi %[[CONVI_64_2]], %[[C1_64]] : i64
-!CHECK: %[[W_PVT_REF_2:.*]] = fir.coordinate_of %[[ARRAY]], %[[SUBI_2]] : (!fir.ref<!fir.array<100x!fir.logical<4>>>, i64) -> !fir.ref<!fir.logical<4>>
-!CHECK: %[[WVAL:.*]] = fir.load %[[W_PVT_REF_2]] : !fir.ref<!fir.logical<4>>
-!CHECK: omp.reduction %[[WVAL]], %[[YREF]] : !fir.logical<4>, !fir.ref<!fir.logical<4>>
-!CHECK: %[[I_PVT_VAL3:.*]] = fir.load %[[I_PVT_REF]] : !fir.ref<i32>
-!CHECK: %[[CONVI_64_3:.*]] = fir.convert %[[I_PVT_VAL3]] : (i32) -> i64
-!CHECK: %[[C1_64:.*]] = arith.constant 1 : i64
-!CHECK: %[[SUBI_3:.*]] = arith.subi %[[CONVI_64_3]], %[[C1_64]] : i64
-!CHECK: %[[W_PVT_REF_3:.*]] = fir.coordinate_of %[[ARRAY]], %[[SUBI_3]] : (!fir.ref<!fir.array<100x!fir.logical<4>>>, i64) -> !fir.ref<!fir.logical<4>>
-!CHECK: %[[WVAL:.*]] = fir.load %[[W_PVT_REF_3]] : !fir.ref<!fir.logical<4>>
-!CHECK: omp.reduction %[[WVAL]], %[[ZREF]] : !fir.logical<4>, !fir.ref<!fir.logical<4>>
-!CHECK: omp.yield
-!CHECK: omp.terminator
-!CHECK: return
-subroutine multiple_reductions(w)
- logical :: x,y,z,w(100)
- x = .true.
- y = .true.
- z = .true.
- !$omp parallel
- !$omp do reduction(.and.:x,y,z)
- do i=1, 100
- x = x .and. w(i)
- y = y .and. w(i)
- z = z .and. w(i)
- end do
- !$omp end do
- !$omp end parallel
-end subroutine
-
diff --git a/flang/test/Lower/OpenMP/FIR/wsloop-reduction-logical-eqv.f90 b/flang/test/Lower/OpenMP/FIR/wsloop-reduction-logical-eqv.f90
index 16180da..d5aacd7 100644
--- a/flang/test/Lower/OpenMP/FIR/wsloop-reduction-logical-eqv.f90
+++ b/flang/test/Lower/OpenMP/FIR/wsloop-reduction-logical-eqv.f90
@@ -1,42 +1,53 @@
! RUN: bbc -emit-fir -hlfir=false -fopenmp %s -o - | FileCheck %s
! RUN: %flang_fc1 -emit-fir -flang-deprecated-no-hlfir -fopenmp %s -o - | FileCheck %s
-!CHECK-LABEL: omp.reduction.declare
-!CHECK-SAME: @[[RED_NAME:.*]] : !fir.logical<4> init {
-!CHECK: ^bb0(%{{.*}}: !fir.logical<4>):
-!CHECK: %true = arith.constant true
-!CHECK: %[[true_fir:.*]] = fir.convert %true : (i1) -> !fir.logical<4>
-!CHECK: omp.yield(%[[true_fir]] : !fir.logical<4>)
-!CHECK: } combiner {
-!CHECK: ^bb0(%[[ARG0:.*]]: !fir.logical<4>, %[[ARG1:.*]]: !fir.logical<4>):
-!CHECK: %[[arg0_i1:.*]] = fir.convert %[[ARG0]] : (!fir.logical<4>) -> i1
-!CHECK: %[[arg1_i1:.*]] = fir.convert %[[ARG1]] : (!fir.logical<4>) -> i1
-!CHECK: %[[RES:.*]] = arith.cmpi eq, %[[arg0_i1]], %[[arg1_i1]] : i1
-!CHECK: %[[RES_logical:.*]] = fir.convert %[[RES]] : (i1) -> !fir.logical<4>
-!CHECK: omp.yield(%[[RES_logical]] : !fir.logical<4>)
-!CHECK: }
+! NOTE: Assertions have been autogenerated by utils/generate-test-checks.py
+
+! CHECK-LABEL: omp.reduction.declare @eqv_reduction : !fir.logical<4> init {
+! CHECK: ^bb0(%[[VAL_0:.*]]: !fir.logical<4>):
+! CHECK: %[[VAL_1:.*]] = arith.constant true
+! CHECK: %[[VAL_2:.*]] = fir.convert %[[VAL_1]] : (i1) -> !fir.logical<4>
+! CHECK: omp.yield(%[[VAL_2]] : !fir.logical<4>)
+
+! CHECK-LABEL: } combiner {
+! CHECK: ^bb0(%[[VAL_0:.*]]: !fir.logical<4>, %[[VAL_1:.*]]: !fir.logical<4>):
+! CHECK: %[[VAL_2:.*]] = fir.convert %[[VAL_0]] : (!fir.logical<4>) -> i1
+! CHECK: %[[VAL_3:.*]] = fir.convert %[[VAL_1]] : (!fir.logical<4>) -> i1
+! CHECK: %[[VAL_4:.*]] = arith.cmpi eq, %[[VAL_2]], %[[VAL_3]] : i1
+! CHECK: %[[VAL_5:.*]] = fir.convert %[[VAL_4]] : (i1) -> !fir.logical<4>
+! CHECK: omp.yield(%[[VAL_5]] : !fir.logical<4>)
+! CHECK: }
+
+! CHECK-LABEL: func.func @_QPsimple_reduction(
+! CHECK-SAME: %[[VAL_0:.*]]: !fir.ref<!fir.array<100x!fir.logical<4>>> {fir.bindc_name = "y"}) {
+! CHECK: %[[VAL_1:.*]] = fir.alloca i32 {bindc_name = "i", uniq_name = "_QFsimple_reductionEi"}
+! CHECK: %[[VAL_2:.*]] = fir.alloca !fir.logical<4> {bindc_name = "x", uniq_name = "_QFsimple_reductionEx"}
+! CHECK: %[[VAL_3:.*]] = arith.constant true
+! CHECK: %[[VAL_4:.*]] = fir.convert %[[VAL_3]] : (i1) -> !fir.logical<4>
+! CHECK: fir.store %[[VAL_4]] to %[[VAL_2]] : !fir.ref<!fir.logical<4>>
+! CHECK: omp.parallel {
+! CHECK: %[[VAL_5:.*]] = fir.alloca i32 {adapt.valuebyref, pinned}
+! CHECK: %[[VAL_6:.*]] = arith.constant 1 : i32
+! CHECK: %[[VAL_7:.*]] = arith.constant 100 : i32
+! CHECK: %[[VAL_8:.*]] = arith.constant 1 : i32
+! CHECK: omp.wsloop reduction(@eqv_reduction %[[VAL_2]] -> %[[VAL_9:.*]] : !fir.ref<!fir.logical<4>>) for (%[[VAL_10:.*]]) : i32 = (%[[VAL_6]]) to (%[[VAL_7]]) inclusive step (%[[VAL_8]]) {
+! CHECK: fir.store %[[VAL_10]] to %[[VAL_5]] : !fir.ref<i32>
+! CHECK: %[[VAL_11:.*]] = fir.load %[[VAL_9]] : !fir.ref<!fir.logical<4>>
+! CHECK: %[[VAL_12:.*]] = fir.load %[[VAL_5]] : !fir.ref<i32>
+! CHECK: %[[VAL_13:.*]] = fir.convert %[[VAL_12]] : (i32) -> i64
+! CHECK: %[[VAL_14:.*]] = arith.constant 1 : i64
+! CHECK: %[[VAL_15:.*]] = arith.subi %[[VAL_13]], %[[VAL_14]] : i64
+! CHECK: %[[VAL_16:.*]] = fir.coordinate_of %[[VAL_0]], %[[VAL_15]] : (!fir.ref<!fir.array<100x!fir.logical<4>>>, i64) -> !fir.ref<!fir.logical<4>>
+! CHECK: %[[VAL_17:.*]] = fir.load %[[VAL_16]] : !fir.ref<!fir.logical<4>>
+! CHECK: %[[VAL_18:.*]] = fir.convert %[[VAL_11]] : (!fir.logical<4>) -> i1
+! CHECK: %[[VAL_19:.*]] = fir.convert %[[VAL_17]] : (!fir.logical<4>) -> i1
+! CHECK: %[[VAL_20:.*]] = arith.cmpi eq, %[[VAL_18]], %[[VAL_19]] : i1
+! CHECK: %[[VAL_21:.*]] = fir.convert %[[VAL_20]] : (i1) -> !fir.logical<4>
+! CHECK: fir.store %[[VAL_21]] to %[[VAL_9]] : !fir.ref<!fir.logical<4>>
+! CHECK: omp.yield
+! CHECK: omp.terminator
+! CHECK: return
-!CHECK-LABEL: func.func @_QPsimple_reduction(
-!CHECK-SAME: %[[ARRAY:.*]]: !fir.ref<!fir.array<100x!fir.logical<4>>> {fir.bindc_name = "y"}) {
-!CHECK: %[[IREF:.*]] = fir.alloca i32 {bindc_name = "i", uniq_name = "_QFsimple_reductionEi"}
-!CHECK: %[[XREF:.*]] = fir.alloca !fir.logical<4> {bindc_name = "x", uniq_name = "_QFsimple_reductionEx"}
-!CHECK: omp.parallel
-!CHECK: %[[I_PVT_REF:.*]] = fir.alloca i32 {adapt.valuebyref, pinned}
-!CHECK: %[[C1_1:.*]] = arith.constant 1 : i32
-!CHECK: %[[C100:.*]] = arith.constant 100 : i32
-!CHECK: %[[C1_2:.*]] = arith.constant 1 : i32
-!CHECK: omp.wsloop reduction(@[[RED_NAME]] -> %[[XREF]] : !fir.ref<!fir.logical<4>>) for (%[[IVAL:.*]]) : i32 = (%[[C1_1]]) to (%[[C100]]) inclusive step (%[[C1_2]]) {
-!CHECK: fir.store %[[IVAL]] to %[[I_PVT_REF]] : !fir.ref<i32>
-!CHECK: %[[I_PVT_VAL:.*]] = fir.load %[[I_PVT_REF]] : !fir.ref<i32>
-!CHECK: %[[CONVI_64:.*]] = fir.convert %[[I_PVT_VAL]] : (i32) -> i64
-!CHECK: %[[C1_64:.*]] = arith.constant 1 : i64
-!CHECK: %[[SUBI:.*]] = arith.subi %[[CONVI_64]], %[[C1_64]] : i64
-!CHECK: %[[Y_PVT_REF:.*]] = fir.coordinate_of %[[ARRAY]], %[[SUBI]] : (!fir.ref<!fir.array<100x!fir.logical<4>>>, i64) -> !fir.ref<!fir.logical<4>>
-!CHECK: %[[YVAL:.*]] = fir.load %[[Y_PVT_REF]] : !fir.ref<!fir.logical<4>>
-!CHECK: omp.reduction %[[YVAL]], %[[XREF]] : !fir.logical<4>, !fir.ref<!fir.logical<4>>
-!CHECK: omp.yield
-!CHECK: omp.terminator
-!CHECK: return
subroutine simple_reduction(y)
logical :: x, y(100)
x = .true.
@@ -49,27 +60,36 @@ subroutine simple_reduction(y)
!$omp end parallel
end subroutine
-!CHECK-LABEL: func.func @_QPsimple_reduction_switch_order(
-!CHECK-SAME: %[[ARRAY:.*]]: !fir.ref<!fir.array<100x!fir.logical<4>>> {fir.bindc_name = "y"}) {
-!CHECK: %[[IREF:.*]] = fir.alloca i32 {bindc_name = "i", uniq_name = "_QFsimple_reduction_switch_orderEi"}
-!CHECK: %[[XREF:.*]] = fir.alloca !fir.logical<4> {bindc_name = "x", uniq_name = "_QFsimple_reduction_switch_orderEx"}
-!CHECK: omp.parallel
-!CHECK: %[[I_PVT_REF:.*]] = fir.alloca i32 {adapt.valuebyref, pinned}
-!CHECK: %[[C1_1:.*]] = arith.constant 1 : i32
-!CHECK: %[[C100:.*]] = arith.constant 100 : i32
-!CHECK: %[[C1_2:.*]] = arith.constant 1 : i32
-!CHECK: omp.wsloop reduction(@[[RED_NAME]] -> %[[XREF]] : !fir.ref<!fir.logical<4>>) for (%[[IVAL:.*]]) : i32 = (%[[C1_1]]) to (%[[C100]]) inclusive step (%[[C1_2]]) {
-!CHECK: fir.store %[[IVAL]] to %[[I_PVT_REF]] : !fir.ref<i32>
-!CHECK: %[[I_PVT_VAL:.*]] = fir.load %[[I_PVT_REF]] : !fir.ref<i32>
-!CHECK: %[[CONVI_64:.*]] = fir.convert %[[I_PVT_VAL]] : (i32) -> i64
-!CHECK: %[[C1_64:.*]] = arith.constant 1 : i64
-!CHECK: %[[SUBI:.*]] = arith.subi %[[CONVI_64]], %[[C1_64]] : i64
-!CHECK: %[[Y_PVT_REF:.*]] = fir.coordinate_of %[[ARRAY]], %[[SUBI]] : (!fir.ref<!fir.array<100x!fir.logical<4>>>, i64) -> !fir.ref<!fir.logical<4>>
-!CHECK: %[[YVAL:.*]] = fir.load %[[Y_PVT_REF]] : !fir.ref<!fir.logical<4>>
-!CHECK: omp.reduction %[[YVAL]], %[[XREF]] : !fir.logical<4>, !fir.ref<!fir.logical<4>>
-!CHECK: omp.yield
-!CHECK: omp.terminator
-!CHECK: return
+! CHECK-LABEL: func.func @_QPsimple_reduction_switch_order(
+! CHECK-SAME: %[[VAL_0:.*]]: !fir.ref<!fir.array<100x!fir.logical<4>>> {fir.bindc_name = "y"}) {
+! CHECK: %[[VAL_1:.*]] = fir.alloca i32 {bindc_name = "i", uniq_name = "_QFsimple_reduction_switch_orderEi"}
+! CHECK: %[[VAL_2:.*]] = fir.alloca !fir.logical<4> {bindc_name = "x", uniq_name = "_QFsimple_reduction_switch_orderEx"}
+! CHECK: %[[VAL_3:.*]] = arith.constant true
+! CHECK: %[[VAL_4:.*]] = fir.convert %[[VAL_3]] : (i1) -> !fir.logical<4>
+! CHECK: fir.store %[[VAL_4]] to %[[VAL_2]] : !fir.ref<!fir.logical<4>>
+! CHECK: omp.parallel {
+! CHECK: %[[VAL_5:.*]] = fir.alloca i32 {adapt.valuebyref, pinned}
+! CHECK: %[[VAL_6:.*]] = arith.constant 1 : i32
+! CHECK: %[[VAL_7:.*]] = arith.constant 100 : i32
+! CHECK: %[[VAL_8:.*]] = arith.constant 1 : i32
+! CHECK: omp.wsloop reduction(@eqv_reduction %[[VAL_2]] -> %[[VAL_9:.*]] : !fir.ref<!fir.logical<4>>) for (%[[VAL_10:.*]]) : i32 = (%[[VAL_6]]) to (%[[VAL_7]]) inclusive step (%[[VAL_8]]) {
+! CHECK: fir.store %[[VAL_10]] to %[[VAL_5]] : !fir.ref<i32>
+! CHECK: %[[VAL_11:.*]] = fir.load %[[VAL_5]] : !fir.ref<i32>
+! CHECK: %[[VAL_12:.*]] = fir.convert %[[VAL_11]] : (i32) -> i64
+! CHECK: %[[VAL_13:.*]] = arith.constant 1 : i64
+! CHECK: %[[VAL_14:.*]] = arith.subi %[[VAL_12]], %[[VAL_13]] : i64
+! CHECK: %[[VAL_15:.*]] = fir.coordinate_of %[[VAL_0]], %[[VAL_14]] : (!fir.ref<!fir.array<100x!fir.logical<4>>>, i64) -> !fir.ref<!fir.logical<4>>
+! CHECK: %[[VAL_16:.*]] = fir.load %[[VAL_15]] : !fir.ref<!fir.logical<4>>
+! CHECK: %[[VAL_17:.*]] = fir.load %[[VAL_9]] : !fir.ref<!fir.logical<4>>
+! CHECK: %[[VAL_18:.*]] = fir.convert %[[VAL_16]] : (!fir.logical<4>) -> i1
+! CHECK: %[[VAL_19:.*]] = fir.convert %[[VAL_17]] : (!fir.logical<4>) -> i1
+! CHECK: %[[VAL_20:.*]] = arith.cmpi eq, %[[VAL_18]], %[[VAL_19]] : i1
+! CHECK: %[[VAL_21:.*]] = fir.convert %[[VAL_20]] : (i1) -> !fir.logical<4>
+! CHECK: fir.store %[[VAL_21]] to %[[VAL_9]] : !fir.ref<!fir.logical<4>>
+! CHECK: omp.yield
+! CHECK: omp.terminator
+! CHECK: return
+
subroutine simple_reduction_switch_order(y)
logical :: x, y(100)
x = .true.
@@ -82,43 +102,68 @@ subroutine simple_reduction_switch_order(y)
!$omp end parallel
end subroutine
-!CHECK-LABEL: func.func @_QPmultiple_reductions
-!CHECK-SAME %[[ARRAY:.*]]: !fir.ref<!fir.array<100x!fir.logical<4>>> {fir.bindc_name = "w"}) {
-!CHECK: %[[IREF:.*]] = fir.alloca i32 {bindc_name = "i", uniq_name = "_QFmultiple_reductionsEi"}
-!CHECK: %[[XREF:.*]] = fir.alloca !fir.logical<4> {bindc_name = "x", uniq_name = "_QFmultiple_reductionsEx"}
-!CHECK: %[[YREF:.*]] = fir.alloca !fir.logical<4> {bindc_name = "y", uniq_name = "_QFmultiple_reductionsEy"}
-!CHECK: %[[ZREF:.*]] = fir.alloca !fir.logical<4> {bindc_name = "z", uniq_name = "_QFmultiple_reductionsEz"}
-!CHECK: omp.parallel
-!CHECK: %[[I_PVT_REF:.*]] = fir.alloca i32 {adapt.valuebyref, pinned}
-!CHECK: %[[C1_1:.*]] = arith.constant 1 : i32
-!CHECK: %[[C100:.*]] = arith.constant 100 : i32
-!CHECK: %[[C1_2:.*]] = arith.constant 1 : i32
-!CHECK: omp.wsloop reduction(@[[RED_NAME]] -> %[[XREF]] : !fir.ref<!fir.logical<4>>, @[[RED_NAME]] -> %[[YREF]] : !fir.ref<!fir.logical<4>>, @[[RED_NAME]] -> %[[ZREF]] : !fir.ref<!fir.logical<4>>) for (%[[IVAL:.*]]) : i32 = (%[[C1_1]]) to (%[[C100]]) inclusive step (%[[C1_2]]) {
-!CHECK: fir.store %[[IVAL]] to %[[I_PVT_REF]] : !fir.ref<i32>
-!CHECK: %[[I_PVT_VAL1:.*]] = fir.load %[[I_PVT_REF]] : !fir.ref<i32>
-!CHECK: %[[CONVI_64_1:.*]] = fir.convert %[[I_PVT_VAL1]] : (i32) -> i64
-!CHECK: %[[C1_64:.*]] = arith.constant 1 : i64
-!CHECK: %[[SUBI_1:.*]] = arith.subi %[[CONVI_64_1]], %[[C1_64]] : i64
-!CHECK: %[[W_PVT_REF_1:.*]] = fir.coordinate_of %[[ARRAY]], %[[SUBI_1]] : (!fir.ref<!fir.array<100x!fir.logical<4>>>, i64) -> !fir.ref<!fir.logical<4>>
-!CHECK: %[[WVAL:.*]] = fir.load %[[W_PVT_REF_1]] : !fir.ref<!fir.logical<4>>
-!CHECK: omp.reduction %[[WVAL]], %[[XREF]] : !fir.logical<4>, !fir.ref<!fir.logical<4>>
-!CHECK: %[[I_PVT_VAL2:.*]] = fir.load %[[I_PVT_REF]] : !fir.ref<i32>
-!CHECK: %[[CONVI_64_2:.*]] = fir.convert %[[I_PVT_VAL2]] : (i32) -> i64
-!CHECK: %[[C1_64:.*]] = arith.constant 1 : i64
-!CHECK: %[[SUBI_2:.*]] = arith.subi %[[CONVI_64_2]], %[[C1_64]] : i64
-!CHECK: %[[W_PVT_REF_2:.*]] = fir.coordinate_of %[[ARRAY]], %[[SUBI_2]] : (!fir.ref<!fir.array<100x!fir.logical<4>>>, i64) -> !fir.ref<!fir.logical<4>>
-!CHECK: %[[WVAL:.*]] = fir.load %[[W_PVT_REF_2]] : !fir.ref<!fir.logical<4>>
-!CHECK: omp.reduction %[[WVAL]], %[[YREF]] : !fir.logical<4>, !fir.ref<!fir.logical<4>>
-!CHECK: %[[I_PVT_VAL3:.*]] = fir.load %[[I_PVT_REF]] : !fir.ref<i32>
-!CHECK: %[[CONVI_64_3:.*]] = fir.convert %[[I_PVT_VAL3]] : (i32) -> i64
-!CHECK: %[[C1_64:.*]] = arith.constant 1 : i64
-!CHECK: %[[SUBI_3:.*]] = arith.subi %[[CONVI_64_3]], %[[C1_64]] : i64
-!CHECK: %[[W_PVT_REF_3:.*]] = fir.coordinate_of %[[ARRAY]], %[[SUBI_3]] : (!fir.ref<!fir.array<100x!fir.logical<4>>>, i64) -> !fir.ref<!fir.logical<4>>
-!CHECK: %[[WVAL:.*]] = fir.load %[[W_PVT_REF_3]] : !fir.ref<!fir.logical<4>>
-!CHECK: omp.reduction %[[WVAL]], %[[ZREF]] : !fir.logical<4>, !fir.ref<!fir.logical<4>>
-!CHECK: omp.yield
-!CHECK: omp.terminator
-!CHECK: return
+! CHECK-LABEL: func.func @_QPmultiple_reductions(
+! CHECK-SAME: %[[VAL_0:.*]]: !fir.ref<!fir.array<100x!fir.logical<4>>> {fir.bindc_name = "w"}) {
+! CHECK: %[[VAL_1:.*]] = fir.alloca i32 {bindc_name = "i", uniq_name = "_QFmultiple_reductionsEi"}
+! CHECK: %[[VAL_2:.*]] = fir.alloca !fir.logical<4> {bindc_name = "x", uniq_name = "_QFmultiple_reductionsEx"}
+! CHECK: %[[VAL_3:.*]] = fir.alloca !fir.logical<4> {bindc_name = "y", uniq_name = "_QFmultiple_reductionsEy"}
+! CHECK: %[[VAL_4:.*]] = fir.alloca !fir.logical<4> {bindc_name = "z", uniq_name = "_QFmultiple_reductionsEz"}
+! CHECK: %[[VAL_5:.*]] = arith.constant true
+! CHECK: %[[VAL_6:.*]] = fir.convert %[[VAL_5]] : (i1) -> !fir.logical<4>
+! CHECK: fir.store %[[VAL_6]] to %[[VAL_2]] : !fir.ref<!fir.logical<4>>
+! CHECK: %[[VAL_7:.*]] = arith.constant true
+! CHECK: %[[VAL_8:.*]] = fir.convert %[[VAL_7]] : (i1) -> !fir.logical<4>
+! CHECK: fir.store %[[VAL_8]] to %[[VAL_3]] : !fir.ref<!fir.logical<4>>
+! CHECK: %[[VAL_9:.*]] = arith.constant true
+! CHECK: %[[VAL_10:.*]] = fir.convert %[[VAL_9]] : (i1) -> !fir.logical<4>
+! CHECK: fir.store %[[VAL_10]] to %[[VAL_4]] : !fir.ref<!fir.logical<4>>
+! CHECK: omp.parallel {
+! CHECK: %[[VAL_11:.*]] = fir.alloca i32 {adapt.valuebyref, pinned}
+! CHECK: %[[VAL_12:.*]] = arith.constant 1 : i32
+! CHECK: %[[VAL_13:.*]] = arith.constant 100 : i32
+! CHECK: %[[VAL_14:.*]] = arith.constant 1 : i32
+! CHECK: omp.wsloop reduction(@eqv_reduction %[[VAL_2]] -> %[[VAL_15:.*]] : !fir.ref<!fir.logical<4>>, @eqv_reduction %[[VAL_3]] -> %[[VAL_16:.*]] : !fir.ref<!fir.logical<4>>, @eqv_reduction %[[VAL_4]] -> %[[VAL_17:.*]] : !fir.ref<!fir.logical<4>>) for (%[[VAL_18:.*]]) : i32 = (%[[VAL_12]]) to (%[[VAL_13]]) inclusive step (%[[VAL_14]]) {
+! CHECK: fir.store %[[VAL_18]] to %[[VAL_11]] : !fir.ref<i32>
+! CHECK: %[[VAL_19:.*]] = fir.load %[[VAL_15]] : !fir.ref<!fir.logical<4>>
+! CHECK: %[[VAL_20:.*]] = fir.load %[[VAL_11]] : !fir.ref<i32>
+! CHECK: %[[VAL_21:.*]] = fir.convert %[[VAL_20]] : (i32) -> i64
+! CHECK: %[[VAL_22:.*]] = arith.constant 1 : i64
+! CHECK: %[[VAL_23:.*]] = arith.subi %[[VAL_21]], %[[VAL_22]] : i64
+! CHECK: %[[VAL_24:.*]] = fir.coordinate_of %[[VAL_0]], %[[VAL_23]] : (!fir.ref<!fir.array<100x!fir.logical<4>>>, i64) -> !fir.ref<!fir.logical<4>>
+! CHECK: %[[VAL_25:.*]] = fir.load %[[VAL_24]] : !fir.ref<!fir.logical<4>>
+! CHECK: %[[VAL_26:.*]] = fir.convert %[[VAL_19]] : (!fir.logical<4>) -> i1
+! CHECK: %[[VAL_27:.*]] = fir.convert %[[VAL_25]] : (!fir.logical<4>) -> i1
+! CHECK: %[[VAL_28:.*]] = arith.cmpi eq, %[[VAL_26]], %[[VAL_27]] : i1
+! CHECK: %[[VAL_29:.*]] = fir.convert %[[VAL_28]] : (i1) -> !fir.logical<4>
+! CHECK: fir.store %[[VAL_29]] to %[[VAL_15]] : !fir.ref<!fir.logical<4>>
+! CHECK: %[[VAL_30:.*]] = fir.load %[[VAL_16]] : !fir.ref<!fir.logical<4>>
+! CHECK: %[[VAL_31:.*]] = fir.load %[[VAL_11]] : !fir.ref<i32>
+! CHECK: %[[VAL_32:.*]] = fir.convert %[[VAL_31]] : (i32) -> i64
+! CHECK: %[[VAL_33:.*]] = arith.constant 1 : i64
+! CHECK: %[[VAL_34:.*]] = arith.subi %[[VAL_32]], %[[VAL_33]] : i64
+! CHECK: %[[VAL_35:.*]] = fir.coordinate_of %[[VAL_0]], %[[VAL_34]] : (!fir.ref<!fir.array<100x!fir.logical<4>>>, i64) -> !fir.ref<!fir.logical<4>>
+! CHECK: %[[VAL_36:.*]] = fir.load %[[VAL_35]] : !fir.ref<!fir.logical<4>>
+! CHECK: %[[VAL_37:.*]] = fir.convert %[[VAL_30]] : (!fir.logical<4>) -> i1
+! CHECK: %[[VAL_38:.*]] = fir.convert %[[VAL_36]] : (!fir.logical<4>) -> i1
+! CHECK: %[[VAL_39:.*]] = arith.cmpi eq, %[[VAL_37]], %[[VAL_38]] : i1
+! CHECK: %[[VAL_40:.*]] = fir.convert %[[VAL_39]] : (i1) -> !fir.logical<4>
+! CHECK: fir.store %[[VAL_40]] to %[[VAL_16]] : !fir.ref<!fir.logical<4>>
+! CHECK: %[[VAL_41:.*]] = fir.load %[[VAL_17]] : !fir.ref<!fir.logical<4>>
+! CHECK: %[[VAL_42:.*]] = fir.load %[[VAL_11]] : !fir.ref<i32>
+! CHECK: %[[VAL_43:.*]] = fir.convert %[[VAL_42]] : (i32) -> i64
+! CHECK: %[[VAL_44:.*]] = arith.constant 1 : i64
+! CHECK: %[[VAL_45:.*]] = arith.subi %[[VAL_43]], %[[VAL_44]] : i64
+! CHECK: %[[VAL_46:.*]] = fir.coordinate_of %[[VAL_0]], %[[VAL_45]] : (!fir.ref<!fir.array<100x!fir.logical<4>>>, i64) -> !fir.ref<!fir.logical<4>>
+! CHECK: %[[VAL_47:.*]] = fir.load %[[VAL_46]] : !fir.ref<!fir.logical<4>>
+! CHECK: %[[VAL_48:.*]] = fir.convert %[[VAL_41]] : (!fir.logical<4>) -> i1
+! CHECK: %[[VAL_49:.*]] = fir.convert %[[VAL_47]] : (!fir.logical<4>) -> i1
+! CHECK: %[[VAL_50:.*]] = arith.cmpi eq, %[[VAL_48]], %[[VAL_49]] : i1
+! CHECK: %[[VAL_51:.*]] = fir.convert %[[VAL_50]] : (i1) -> !fir.logical<4>
+! CHECK: fir.store %[[VAL_51]] to %[[VAL_17]] : !fir.ref<!fir.logical<4>>
+! CHECK: omp.yield
+! CHECK: omp.terminator
+! CHECK: return
+
subroutine multiple_reductions(w)
logical :: x,y,z,w(100)
x = .true.
diff --git a/flang/test/Lower/OpenMP/FIR/wsloop-reduction-logical-neqv.f90 b/flang/test/Lower/OpenMP/FIR/wsloop-reduction-logical-neqv.f90
index 372f131..9f44e0e 100644
--- a/flang/test/Lower/OpenMP/FIR/wsloop-reduction-logical-neqv.f90
+++ b/flang/test/Lower/OpenMP/FIR/wsloop-reduction-logical-neqv.f90
@@ -1,42 +1,54 @@
! RUN: bbc -emit-fir -hlfir=false -fopenmp %s -o - | FileCheck %s
! RUN: %flang_fc1 -emit-fir -flang-deprecated-no-hlfir -fopenmp %s -o - | FileCheck %s
-!CHECK-LABEL: omp.reduction.declare
-!CHECK-SAME: @[[RED_NAME:.*]] : !fir.logical<4> init {
-!CHECK: ^bb0(%{{.*}}: !fir.logical<4>):
-!CHECK: %false = arith.constant false
-!CHECK: %[[false_fir:.*]] = fir.convert %false : (i1) -> !fir.logical<4>
-!CHECK: omp.yield(%[[false_fir]] : !fir.logical<4>)
-!CHECK: } combiner {
-!CHECK: ^bb0(%[[ARG0:.*]]: !fir.logical<4>, %[[ARG1:.*]]: !fir.logical<4>):
-!CHECK: %[[arg0_i1:.*]] = fir.convert %[[ARG0]] : (!fir.logical<4>) -> i1
-!CHECK: %[[arg1_i1:.*]] = fir.convert %[[ARG1]] : (!fir.logical<4>) -> i1
-!CHECK: %[[RES:.*]] = arith.cmpi ne, %[[arg0_i1]], %[[arg1_i1]] : i1
-!CHECK: %[[RES_logical:.*]] = fir.convert %[[RES]] : (i1) -> !fir.logical<4>
-!CHECK: omp.yield(%[[RES_logical]] : !fir.logical<4>)
-!CHECK: }
+! NOTE: Assertions have been autogenerated by utils/generate-test-checks.py
+
+
+! CHECK-LABEL: omp.reduction.declare @neqv_reduction : !fir.logical<4> init {
+! CHECK: ^bb0(%[[VAL_0:.*]]: !fir.logical<4>):
+! CHECK: %[[VAL_1:.*]] = arith.constant false
+! CHECK: %[[VAL_2:.*]] = fir.convert %[[VAL_1]] : (i1) -> !fir.logical<4>
+! CHECK: omp.yield(%[[VAL_2]] : !fir.logical<4>)
+
+! CHECK-LABEL: } combiner {
+! CHECK: ^bb0(%[[VAL_0:.*]]: !fir.logical<4>, %[[VAL_1:.*]]: !fir.logical<4>):
+! CHECK: %[[VAL_2:.*]] = fir.convert %[[VAL_0]] : (!fir.logical<4>) -> i1
+! CHECK: %[[VAL_3:.*]] = fir.convert %[[VAL_1]] : (!fir.logical<4>) -> i1
+! CHECK: %[[VAL_4:.*]] = arith.cmpi ne, %[[VAL_2]], %[[VAL_3]] : i1
+! CHECK: %[[VAL_5:.*]] = fir.convert %[[VAL_4]] : (i1) -> !fir.logical<4>
+! CHECK: omp.yield(%[[VAL_5]] : !fir.logical<4>)
+! CHECK: }
+
+! CHECK-LABEL: func.func @_QPsimple_reduction(
+! CHECK-SAME: %[[VAL_0:.*]]: !fir.ref<!fir.array<100x!fir.logical<4>>> {fir.bindc_name = "y"}) {
+! CHECK: %[[VAL_1:.*]] = fir.alloca i32 {bindc_name = "i", uniq_name = "_QFsimple_reductionEi"}
+! CHECK: %[[VAL_2:.*]] = fir.alloca !fir.logical<4> {bindc_name = "x", uniq_name = "_QFsimple_reductionEx"}
+! CHECK: %[[VAL_3:.*]] = arith.constant true
+! CHECK: %[[VAL_4:.*]] = fir.convert %[[VAL_3]] : (i1) -> !fir.logical<4>
+! CHECK: fir.store %[[VAL_4]] to %[[VAL_2]] : !fir.ref<!fir.logical<4>>
+! CHECK: omp.parallel {
+! CHECK: %[[VAL_5:.*]] = fir.alloca i32 {adapt.valuebyref, pinned}
+! CHECK: %[[VAL_6:.*]] = arith.constant 1 : i32
+! CHECK: %[[VAL_7:.*]] = arith.constant 100 : i32
+! CHECK: %[[VAL_8:.*]] = arith.constant 1 : i32
+! CHECK: omp.wsloop reduction(@neqv_reduction %[[VAL_2]] -> %[[VAL_9:.*]] : !fir.ref<!fir.logical<4>>) for (%[[VAL_10:.*]]) : i32 = (%[[VAL_6]]) to (%[[VAL_7]]) inclusive step (%[[VAL_8]]) {
+! CHECK: fir.store %[[VAL_10]] to %[[VAL_5]] : !fir.ref<i32>
+! CHECK: %[[VAL_11:.*]] = fir.load %[[VAL_9]] : !fir.ref<!fir.logical<4>>
+! CHECK: %[[VAL_12:.*]] = fir.load %[[VAL_5]] : !fir.ref<i32>
+! CHECK: %[[VAL_13:.*]] = fir.convert %[[VAL_12]] : (i32) -> i64
+! CHECK: %[[VAL_14:.*]] = arith.constant 1 : i64
+! CHECK: %[[VAL_15:.*]] = arith.subi %[[VAL_13]], %[[VAL_14]] : i64
+! CHECK: %[[VAL_16:.*]] = fir.coordinate_of %[[VAL_0]], %[[VAL_15]] : (!fir.ref<!fir.array<100x!fir.logical<4>>>, i64) -> !fir.ref<!fir.logical<4>>
+! CHECK: %[[VAL_17:.*]] = fir.load %[[VAL_16]] : !fir.ref<!fir.logical<4>>
+! CHECK: %[[VAL_18:.*]] = fir.convert %[[VAL_11]] : (!fir.logical<4>) -> i1
+! CHECK: %[[VAL_19:.*]] = fir.convert %[[VAL_17]] : (!fir.logical<4>) -> i1
+! CHECK: %[[VAL_20:.*]] = arith.cmpi ne, %[[VAL_18]], %[[VAL_19]] : i1
+! CHECK: %[[VAL_21:.*]] = fir.convert %[[VAL_20]] : (i1) -> !fir.logical<4>
+! CHECK: fir.store %[[VAL_21]] to %[[VAL_9]] : !fir.ref<!fir.logical<4>>
+! CHECK: omp.yield
+! CHECK: omp.terminator
+! CHECK: return
-!CHECK-LABEL: func.func @_QPsimple_reduction(
-!CHECK-SAME: %[[ARRAY:.*]]: !fir.ref<!fir.array<100x!fir.logical<4>>> {fir.bindc_name = "y"}) {
-!CHECK: %[[IREF:.*]] = fir.alloca i32 {bindc_name = "i", uniq_name = "_QFsimple_reductionEi"}
-!CHECK: %[[XREF:.*]] = fir.alloca !fir.logical<4> {bindc_name = "x", uniq_name = "_QFsimple_reductionEx"}
-!CHECK: omp.parallel
-!CHECK: %[[I_PVT_REF:.*]] = fir.alloca i32 {adapt.valuebyref, pinned}
-!CHECK: %[[C1_1:.*]] = arith.constant 1 : i32
-!CHECK: %[[C100:.*]] = arith.constant 100 : i32
-!CHECK: %[[C1_2:.*]] = arith.constant 1 : i32
-!CHECK: omp.wsloop reduction(@[[RED_NAME]] -> %[[XREF]] : !fir.ref<!fir.logical<4>>) for (%[[IVAL:.*]]) : i32 = (%[[C1_1]]) to (%[[C100]]) inclusive step (%[[C1_2]]) {
-!CHECK: fir.store %[[IVAL]] to %[[I_PVT_REF]] : !fir.ref<i32>
-!CHECK: %[[I_PVT_VAL:.*]] = fir.load %[[I_PVT_REF]] : !fir.ref<i32>
-!CHECK: %[[CONVI_64:.*]] = fir.convert %[[I_PVT_VAL]] : (i32) -> i64
-!CHECK: %[[C1_64:.*]] = arith.constant 1 : i64
-!CHECK: %[[SUBI:.*]] = arith.subi %[[CONVI_64]], %[[C1_64]] : i64
-!CHECK: %[[Y_PVT_REF:.*]] = fir.coordinate_of %[[ARRAY]], %[[SUBI]] : (!fir.ref<!fir.array<100x!fir.logical<4>>>, i64) -> !fir.ref<!fir.logical<4>>
-!CHECK: %[[YVAL:.*]] = fir.load %[[Y_PVT_REF]] : !fir.ref<!fir.logical<4>>
-!CHECK: omp.reduction %[[YVAL]], %[[XREF]] : !fir.logical<4>, !fir.ref<!fir.logical<4>>
-!CHECK: omp.yield
-!CHECK: omp.terminator
-!CHECK: return
subroutine simple_reduction(y)
logical :: x, y(100)
x = .true.
@@ -49,27 +61,36 @@ subroutine simple_reduction(y)
!$omp end parallel
end subroutine
-!CHECK-LABEL: func.func @_QPsimple_reduction_switch_order(
-!CHECK-SAME: %[[ARRAY:.*]]: !fir.ref<!fir.array<100x!fir.logical<4>>> {fir.bindc_name = "y"}) {
-!CHECK: %[[IREF:.*]] = fir.alloca i32 {bindc_name = "i", uniq_name = "_QFsimple_reduction_switch_orderEi"}
-!CHECK: %[[XREF:.*]] = fir.alloca !fir.logical<4> {bindc_name = "x", uniq_name = "_QFsimple_reduction_switch_orderEx"}
-!CHECK: omp.parallel
-!CHECK: %[[I_PVT_REF:.*]] = fir.alloca i32 {adapt.valuebyref, pinned}
-!CHECK: %[[C1_1:.*]] = arith.constant 1 : i32
-!CHECK: %[[C100:.*]] = arith.constant 100 : i32
-!CHECK: %[[C1_2:.*]] = arith.constant 1 : i32
-!CHECK: omp.wsloop reduction(@[[RED_NAME]] -> %[[XREF]] : !fir.ref<!fir.logical<4>>) for (%[[IVAL:.*]]) : i32 = (%[[C1_1]]) to (%[[C100]]) inclusive step (%[[C1_2]]) {
-!CHECK: fir.store %[[IVAL]] to %[[I_PVT_REF]] : !fir.ref<i32>
-!CHECK: %[[I_PVT_VAL:.*]] = fir.load %[[I_PVT_REF]] : !fir.ref<i32>
-!CHECK: %[[CONVI_64:.*]] = fir.convert %[[I_PVT_VAL]] : (i32) -> i64
-!CHECK: %[[C1_64:.*]] = arith.constant 1 : i64
-!CHECK: %[[SUBI:.*]] = arith.subi %[[CONVI_64]], %[[C1_64]] : i64
-!CHECK: %[[Y_PVT_REF:.*]] = fir.coordinate_of %[[ARRAY]], %[[SUBI]] : (!fir.ref<!fir.array<100x!fir.logical<4>>>, i64) -> !fir.ref<!fir.logical<4>>
-!CHECK: %[[YVAL:.*]] = fir.load %[[Y_PVT_REF]] : !fir.ref<!fir.logical<4>>
-!CHECK: omp.reduction %[[YVAL]], %[[XREF]] : !fir.logical<4>, !fir.ref<!fir.logical<4>>
-!CHECK: omp.yield
-!CHECK: omp.terminator
-!CHECK: return
+! CHECK-LABEL: func.func @_QPsimple_reduction_switch_order(
+! CHECK-SAME: %[[VAL_0:.*]]: !fir.ref<!fir.array<100x!fir.logical<4>>> {fir.bindc_name = "y"}) {
+! CHECK: %[[VAL_1:.*]] = fir.alloca i32 {bindc_name = "i", uniq_name = "_QFsimple_reduction_switch_orderEi"}
+! CHECK: %[[VAL_2:.*]] = fir.alloca !fir.logical<4> {bindc_name = "x", uniq_name = "_QFsimple_reduction_switch_orderEx"}
+! CHECK: %[[VAL_3:.*]] = arith.constant true
+! CHECK: %[[VAL_4:.*]] = fir.convert %[[VAL_3]] : (i1) -> !fir.logical<4>
+! CHECK: fir.store %[[VAL_4]] to %[[VAL_2]] : !fir.ref<!fir.logical<4>>
+! CHECK: omp.parallel {
+! CHECK: %[[VAL_5:.*]] = fir.alloca i32 {adapt.valuebyref, pinned}
+! CHECK: %[[VAL_6:.*]] = arith.constant 1 : i32
+! CHECK: %[[VAL_7:.*]] = arith.constant 100 : i32
+! CHECK: %[[VAL_8:.*]] = arith.constant 1 : i32
+! CHECK: omp.wsloop reduction(@neqv_reduction %[[VAL_2]] -> %[[VAL_9:.*]] : !fir.ref<!fir.logical<4>>) for (%[[VAL_10:.*]]) : i32 = (%[[VAL_6]]) to (%[[VAL_7]]) inclusive step (%[[VAL_8]]) {
+! CHECK: fir.store %[[VAL_10]] to %[[VAL_5]] : !fir.ref<i32>
+! CHECK: %[[VAL_11:.*]] = fir.load %[[VAL_5]] : !fir.ref<i32>
+! CHECK: %[[VAL_12:.*]] = fir.convert %[[VAL_11]] : (i32) -> i64
+! CHECK: %[[VAL_13:.*]] = arith.constant 1 : i64
+! CHECK: %[[VAL_14:.*]] = arith.subi %[[VAL_12]], %[[VAL_13]] : i64
+! CHECK: %[[VAL_15:.*]] = fir.coordinate_of %[[VAL_0]], %[[VAL_14]] : (!fir.ref<!fir.array<100x!fir.logical<4>>>, i64) -> !fir.ref<!fir.logical<4>>
+! CHECK: %[[VAL_16:.*]] = fir.load %[[VAL_15]] : !fir.ref<!fir.logical<4>>
+! CHECK: %[[VAL_17:.*]] = fir.load %[[VAL_9]] : !fir.ref<!fir.logical<4>>
+! CHECK: %[[VAL_18:.*]] = fir.convert %[[VAL_16]] : (!fir.logical<4>) -> i1
+! CHECK: %[[VAL_19:.*]] = fir.convert %[[VAL_17]] : (!fir.logical<4>) -> i1
+! CHECK: %[[VAL_20:.*]] = arith.cmpi ne, %[[VAL_18]], %[[VAL_19]] : i1
+! CHECK: %[[VAL_21:.*]] = fir.convert %[[VAL_20]] : (i1) -> !fir.logical<4>
+! CHECK: fir.store %[[VAL_21]] to %[[VAL_9]] : !fir.ref<!fir.logical<4>>
+! CHECK: omp.yield
+! CHECK: omp.terminator
+! CHECK: return
+
subroutine simple_reduction_switch_order(y)
logical :: x, y(100)
x = .true.
@@ -82,43 +103,69 @@ subroutine simple_reduction_switch_order(y)
!$omp end parallel
end subroutine
-!CHECK-LABEL: func.func @_QPmultiple_reductions
-!CHECK-SAME %[[ARRAY:.*]]: !fir.ref<!fir.array<100x!fir.logical<4>>> {fir.bindc_name = "w"}) {
-!CHECK: %[[IREF:.*]] = fir.alloca i32 {bindc_name = "i", uniq_name = "_QFmultiple_reductionsEi"}
-!CHECK: %[[XREF:.*]] = fir.alloca !fir.logical<4> {bindc_name = "x", uniq_name = "_QFmultiple_reductionsEx"}
-!CHECK: %[[YREF:.*]] = fir.alloca !fir.logical<4> {bindc_name = "y", uniq_name = "_QFmultiple_reductionsEy"}
-!CHECK: %[[ZREF:.*]] = fir.alloca !fir.logical<4> {bindc_name = "z", uniq_name = "_QFmultiple_reductionsEz"}
-!CHECK: omp.parallel
-!CHECK: %[[I_PVT_REF:.*]] = fir.alloca i32 {adapt.valuebyref, pinned}
-!CHECK: %[[C1_1:.*]] = arith.constant 1 : i32
-!CHECK: %[[C100:.*]] = arith.constant 100 : i32
-!CHECK: %[[C1_2:.*]] = arith.constant 1 : i32
-!CHECK: omp.wsloop reduction(@[[RED_NAME]] -> %[[XREF]] : !fir.ref<!fir.logical<4>>, @[[RED_NAME]] -> %[[YREF]] : !fir.ref<!fir.logical<4>>, @[[RED_NAME]] -> %[[ZREF]] : !fir.ref<!fir.logical<4>>) for (%[[IVAL:.*]]) : i32 = (%[[C1_1]]) to (%[[C100]]) inclusive step (%[[C1_2]]) {
-!CHECK: fir.store %[[IVAL]] to %[[I_PVT_REF]] : !fir.ref<i32>
-!CHECK: %[[I_PVT_VAL1:.*]] = fir.load %[[I_PVT_REF]] : !fir.ref<i32>
-!CHECK: %[[CONVI_64_1:.*]] = fir.convert %[[I_PVT_VAL1]] : (i32) -> i64
-!CHECK: %[[C1_64:.*]] = arith.constant 1 : i64
-!CHECK: %[[SUBI_1:.*]] = arith.subi %[[CONVI_64_1]], %[[C1_64]] : i64
-!CHECK: %[[W_PVT_REF_1:.*]] = fir.coordinate_of %[[ARRAY]], %[[SUBI_1]] : (!fir.ref<!fir.array<100x!fir.logical<4>>>, i64) -> !fir.ref<!fir.logical<4>>
-!CHECK: %[[WVAL:.*]] = fir.load %[[W_PVT_REF_1]] : !fir.ref<!fir.logical<4>>
-!CHECK: omp.reduction %[[WVAL]], %[[XREF]] : !fir.logical<4>, !fir.ref<!fir.logical<4>>
-!CHECK: %[[I_PVT_VAL2:.*]] = fir.load %[[I_PVT_REF]] : !fir.ref<i32>
-!CHECK: %[[CONVI_64_2:.*]] = fir.convert %[[I_PVT_VAL2]] : (i32) -> i64
-!CHECK: %[[C1_64:.*]] = arith.constant 1 : i64
-!CHECK: %[[SUBI_2:.*]] = arith.subi %[[CONVI_64_2]], %[[C1_64]] : i64
-!CHECK: %[[W_PVT_REF_2:.*]] = fir.coordinate_of %[[ARRAY]], %[[SUBI_2]] : (!fir.ref<!fir.array<100x!fir.logical<4>>>, i64) -> !fir.ref<!fir.logical<4>>
-!CHECK: %[[WVAL:.*]] = fir.load %[[W_PVT_REF_2]] : !fir.ref<!fir.logical<4>>
-!CHECK: omp.reduction %[[WVAL]], %[[YREF]] : !fir.logical<4>, !fir.ref<!fir.logical<4>>
-!CHECK: %[[I_PVT_VAL3:.*]] = fir.load %[[I_PVT_REF]] : !fir.ref<i32>
-!CHECK: %[[CONVI_64_3:.*]] = fir.convert %[[I_PVT_VAL3]] : (i32) -> i64
-!CHECK: %[[C1_64:.*]] = arith.constant 1 : i64
-!CHECK: %[[SUBI_3:.*]] = arith.subi %[[CONVI_64_3]], %[[C1_64]] : i64
-!CHECK: %[[W_PVT_REF_3:.*]] = fir.coordinate_of %[[ARRAY]], %[[SUBI_3]] : (!fir.ref<!fir.array<100x!fir.logical<4>>>, i64) -> !fir.ref<!fir.logical<4>>
-!CHECK: %[[WVAL:.*]] = fir.load %[[W_PVT_REF_3]] : !fir.ref<!fir.logical<4>>
-!CHECK: omp.reduction %[[WVAL]], %[[ZREF]] : !fir.logical<4>, !fir.ref<!fir.logical<4>>
-!CHECK: omp.yield
-!CHECK: omp.terminator
-!CHECK: return
+! CHECK-LABEL: func.func @_QPmultiple_reductions(
+! CHECK-SAME: %[[VAL_0:.*]]: !fir.ref<!fir.array<100x!fir.logical<4>>> {fir.bindc_name = "w"}) {
+! CHECK: %[[VAL_1:.*]] = fir.alloca i32 {bindc_name = "i", uniq_name = "_QFmultiple_reductionsEi"}
+! CHECK: %[[VAL_2:.*]] = fir.alloca !fir.logical<4> {bindc_name = "x", uniq_name = "_QFmultiple_reductionsEx"}
+! CHECK: %[[VAL_3:.*]] = fir.alloca !fir.logical<4> {bindc_name = "y", uniq_name = "_QFmultiple_reductionsEy"}
+! CHECK: %[[VAL_4:.*]] = fir.alloca !fir.logical<4> {bindc_name = "z", uniq_name = "_QFmultiple_reductionsEz"}
+! CHECK: %[[VAL_5:.*]] = arith.constant true
+! CHECK: %[[VAL_6:.*]] = fir.convert %[[VAL_5]] : (i1) -> !fir.logical<4>
+! CHECK: fir.store %[[VAL_6]] to %[[VAL_2]] : !fir.ref<!fir.logical<4>>
+! CHECK: %[[VAL_7:.*]] = arith.constant true
+! CHECK: %[[VAL_8:.*]] = fir.convert %[[VAL_7]] : (i1) -> !fir.logical<4>
+! CHECK: fir.store %[[VAL_8]] to %[[VAL_3]] : !fir.ref<!fir.logical<4>>
+! CHECK: %[[VAL_9:.*]] = arith.constant true
+! CHECK: %[[VAL_10:.*]] = fir.convert %[[VAL_9]] : (i1) -> !fir.logical<4>
+! CHECK: fir.store %[[VAL_10]] to %[[VAL_4]] : !fir.ref<!fir.logical<4>>
+! CHECK: omp.parallel {
+! CHECK: %[[VAL_11:.*]] = fir.alloca i32 {adapt.valuebyref, pinned}
+! CHECK: %[[VAL_12:.*]] = arith.constant 1 : i32
+! CHECK: %[[VAL_13:.*]] = arith.constant 100 : i32
+! CHECK: %[[VAL_14:.*]] = arith.constant 1 : i32
+! CHECK: omp.wsloop reduction(@neqv_reduction %[[VAL_2]] -> %[[VAL_15:.*]] : !fir.ref<!fir.logical<4>>, @neqv_reduction %[[VAL_3]] -> %[[VAL_16:.*]] : !fir.ref<!fir.logical<4>>, @neqv_reduction %[[VAL_4]] -> %[[VAL_17:.*]] : !fir.ref<!fir.logical<4>>) for (%[[VAL_18:.*]]) : i32 = (%[[VAL_12]]) to (%[[VAL_13]]) inclusive step (%[[VAL_14]]) {
+! CHECK: fir.store %[[VAL_18]] to %[[VAL_11]] : !fir.ref<i32>
+! CHECK: %[[VAL_19:.*]] = fir.load %[[VAL_15]] : !fir.ref<!fir.logical<4>>
+! CHECK: %[[VAL_20:.*]] = fir.load %[[VAL_11]] : !fir.ref<i32>
+! CHECK: %[[VAL_21:.*]] = fir.convert %[[VAL_20]] : (i32) -> i64
+! CHECK: %[[VAL_22:.*]] = arith.constant 1 : i64
+! CHECK: %[[VAL_23:.*]] = arith.subi %[[VAL_21]], %[[VAL_22]] : i64
+! CHECK: %[[VAL_24:.*]] = fir.coordinate_of %[[VAL_0]], %[[VAL_23]] : (!fir.ref<!fir.array<100x!fir.logical<4>>>, i64) -> !fir.ref<!fir.logical<4>>
+! CHECK: %[[VAL_25:.*]] = fir.load %[[VAL_24]] : !fir.ref<!fir.logical<4>>
+! CHECK: %[[VAL_26:.*]] = fir.convert %[[VAL_19]] : (!fir.logical<4>) -> i1
+! CHECK: %[[VAL_27:.*]] = fir.convert %[[VAL_25]] : (!fir.logical<4>) -> i1
+! CHECK: %[[VAL_28:.*]] = arith.cmpi ne, %[[VAL_26]], %[[VAL_27]] : i1
+! CHECK: %[[VAL_29:.*]] = fir.convert %[[VAL_28]] : (i1) -> !fir.logical<4>
+! CHECK: fir.store %[[VAL_29]] to %[[VAL_15]] : !fir.ref<!fir.logical<4>>
+! CHECK: %[[VAL_30:.*]] = fir.load %[[VAL_16]] : !fir.ref<!fir.logical<4>>
+! CHECK: %[[VAL_31:.*]] = fir.load %[[VAL_11]] : !fir.ref<i32>
+! CHECK: %[[VAL_32:.*]] = fir.convert %[[VAL_31]] : (i32) -> i64
+! CHECK: %[[VAL_33:.*]] = arith.constant 1 : i64
+! CHECK: %[[VAL_34:.*]] = arith.subi %[[VAL_32]], %[[VAL_33]] : i64
+! CHECK: %[[VAL_35:.*]] = fir.coordinate_of %[[VAL_0]], %[[VAL_34]] : (!fir.ref<!fir.array<100x!fir.logical<4>>>, i64) -> !fir.ref<!fir.logical<4>>
+! CHECK: %[[VAL_36:.*]] = fir.load %[[VAL_35]] : !fir.ref<!fir.logical<4>>
+! CHECK: %[[VAL_37:.*]] = fir.convert %[[VAL_30]] : (!fir.logical<4>) -> i1
+! CHECK: %[[VAL_38:.*]] = fir.convert %[[VAL_36]] : (!fir.logical<4>) -> i1
+! CHECK: %[[VAL_39:.*]] = arith.cmpi ne, %[[VAL_37]], %[[VAL_38]] : i1
+! CHECK: %[[VAL_40:.*]] = fir.convert %[[VAL_39]] : (i1) -> !fir.logical<4>
+! CHECK: fir.store %[[VAL_40]] to %[[VAL_16]] : !fir.ref<!fir.logical<4>>
+! CHECK: %[[VAL_41:.*]] = fir.load %[[VAL_17]] : !fir.ref<!fir.logical<4>>
+! CHECK: %[[VAL_42:.*]] = fir.load %[[VAL_11]] : !fir.ref<i32>
+! CHECK: %[[VAL_43:.*]] = fir.convert %[[VAL_42]] : (i32) -> i64
+! CHECK: %[[VAL_44:.*]] = arith.constant 1 : i64
+! CHECK: %[[VAL_45:.*]] = arith.subi %[[VAL_43]], %[[VAL_44]] : i64
+! CHECK: %[[VAL_46:.*]] = fir.coordinate_of %[[VAL_0]], %[[VAL_45]] : (!fir.ref<!fir.array<100x!fir.logical<4>>>, i64) -> !fir.ref<!fir.logical<4>>
+! CHECK: %[[VAL_47:.*]] = fir.load %[[VAL_46]] : !fir.ref<!fir.logical<4>>
+! CHECK: %[[VAL_48:.*]] = fir.convert %[[VAL_41]] : (!fir.logical<4>) -> i1
+! CHECK: %[[VAL_49:.*]] = fir.convert %[[VAL_47]] : (!fir.logical<4>) -> i1
+! CHECK: %[[VAL_50:.*]] = arith.cmpi ne, %[[VAL_48]], %[[VAL_49]] : i1
+! CHECK: %[[VAL_51:.*]] = fir.convert %[[VAL_50]] : (i1) -> !fir.logical<4>
+! CHECK: fir.store %[[VAL_51]] to %[[VAL_17]] : !fir.ref<!fir.logical<4>>
+! CHECK: omp.yield
+! CHECK: omp.terminator
+! CHECK: return
+
+
subroutine multiple_reductions(w)
logical :: x,y,z,w(100)
x = .true.
diff --git a/flang/test/Lower/OpenMP/FIR/wsloop-reduction-logical-or.f90 b/flang/test/Lower/OpenMP/FIR/wsloop-reduction-logical-or.f90
deleted file mode 100644
index 597014c..0000000
--- a/flang/test/Lower/OpenMP/FIR/wsloop-reduction-logical-or.f90
+++ /dev/null
@@ -1,137 +0,0 @@
-! RUN: bbc -emit-fir -hlfir=false -fopenmp %s -o - | FileCheck %s
-! RUN: %flang_fc1 -emit-fir -flang-deprecated-no-hlfir -fopenmp %s -o - | FileCheck %s
-
-!CHECK-LABEL: omp.reduction.declare
-!CHECK-SAME: @[[RED_NAME:.*]] : !fir.logical<4> init {
-!CHECK: ^bb0(%{{.*}}: !fir.logical<4>):
-!CHECK: %false = arith.constant false
-!CHECK: %[[false_fir:.*]] = fir.convert %false : (i1) -> !fir.logical<4>
-!CHECK: omp.yield(%[[false_fir]] : !fir.logical<4>)
-!CHECK: } combiner {
-!CHECK: ^bb0(%[[ARG0:.*]]: !fir.logical<4>, %[[ARG1:.*]]: !fir.logical<4>):
-!CHECK: %[[arg0_i1:.*]] = fir.convert %[[ARG0]] : (!fir.logical<4>) -> i1
-!CHECK: %[[arg1_i1:.*]] = fir.convert %[[ARG1]] : (!fir.logical<4>) -> i1
-!CHECK: %[[RES:.*]] = arith.ori %[[arg0_i1]], %[[arg1_i1]] : i1
-!CHECK: %[[RES_logical:.*]] = fir.convert %[[RES]] : (i1) -> !fir.logical<4>
-!CHECK: omp.yield(%[[RES_logical]] : !fir.logical<4>)
-!CHECK: }
-
-!CHECK-LABEL: func.func @_QPsimple_reduction(
-!CHECK-SAME: %[[ARRAY:.*]]: !fir.ref<!fir.array<100x!fir.logical<4>>> {fir.bindc_name = "y"}) {
-!CHECK: %[[IREF:.*]] = fir.alloca i32 {bindc_name = "i", uniq_name = "_QFsimple_reductionEi"}
-!CHECK: %[[XREF:.*]] = fir.alloca !fir.logical<4> {bindc_name = "x", uniq_name = "_QFsimple_reductionEx"}
-!CHECK: omp.parallel
-!CHECK: %[[I_PVT_REF:.*]] = fir.alloca i32 {adapt.valuebyref, pinned}
-!CHECK: %[[C1_1:.*]] = arith.constant 1 : i32
-!CHECK: %[[C100:.*]] = arith.constant 100 : i32
-!CHECK: %[[C1_2:.*]] = arith.constant 1 : i32
-!CHECK: omp.wsloop reduction(@[[RED_NAME]] -> %[[XREF]] : !fir.ref<!fir.logical<4>>) for (%[[IVAL:.*]]) : i32 = (%[[C1_1]]) to (%[[C100]]) inclusive step (%[[C1_2]]) {
-!CHECK: fir.store %[[IVAL]] to %[[I_PVT_REF]] : !fir.ref<i32>
-!CHECK: %[[I_PVT_VAL:.*]] = fir.load %[[I_PVT_REF]] : !fir.ref<i32>
-!CHECK: %[[CONVI_64:.*]] = fir.convert %[[I_PVT_VAL]] : (i32) -> i64
-!CHECK: %[[C1_64:.*]] = arith.constant 1 : i64
-!CHECK: %[[SUBI:.*]] = arith.subi %[[CONVI_64]], %[[C1_64]] : i64
-!CHECK: %[[Y_PVT_REF:.*]] = fir.coordinate_of %[[ARRAY]], %[[SUBI]] : (!fir.ref<!fir.array<100x!fir.logical<4>>>, i64) -> !fir.ref<!fir.logical<4>>
-!CHECK: %[[YVAL:.*]] = fir.load %[[Y_PVT_REF]] : !fir.ref<!fir.logical<4>>
-!CHECK: omp.reduction %[[YVAL]], %[[XREF]] : !fir.logical<4>, !fir.ref<!fir.logical<4>>
-!CHECK: omp.yield
-!CHECK: omp.terminator
-!CHECK: return
-subroutine simple_reduction(y)
- logical :: x, y(100)
- x = .true.
- !$omp parallel
- !$omp do reduction(.or.:x)
- do i=1, 100
- x = x .or. y(i)
- end do
- !$omp end do
- !$omp end parallel
-end subroutine
-
-!CHECK-LABEL: func.func @_QPsimple_reduction_switch_order(
-!CHECK-SAME: %[[ARRAY:.*]]: !fir.ref<!fir.array<100x!fir.logical<4>>> {fir.bindc_name = "y"}) {
-!CHECK: %[[IREF:.*]] = fir.alloca i32 {bindc_name = "i", uniq_name = "_QFsimple_reduction_switch_orderEi"}
-!CHECK: %[[XREF:.*]] = fir.alloca !fir.logical<4> {bindc_name = "x", uniq_name = "_QFsimple_reduction_switch_orderEx"}
-!CHECK: omp.parallel
-!CHECK: %[[I_PVT_REF:.*]] = fir.alloca i32 {adapt.valuebyref, pinned}
-!CHECK: %[[C1_1:.*]] = arith.constant 1 : i32
-!CHECK: %[[C100:.*]] = arith.constant 100 : i32
-!CHECK: %[[C1_2:.*]] = arith.constant 1 : i32
-!CHECK: omp.wsloop reduction(@[[RED_NAME]] -> %[[XREF]] : !fir.ref<!fir.logical<4>>) for (%[[IVAL:.*]]) : i32 = (%[[C1_1]]) to (%[[C100]]) inclusive step (%[[C1_2]]) {
-!CHECK: fir.store %[[IVAL]] to %[[I_PVT_REF]] : !fir.ref<i32>
-!CHECK: %[[I_PVT_VAL:.*]] = fir.load %[[I_PVT_REF]] : !fir.ref<i32>
-!CHECK: %[[CONVI_64:.*]] = fir.convert %[[I_PVT_VAL]] : (i32) -> i64
-!CHECK: %[[C1_64:.*]] = arith.constant 1 : i64
-!CHECK: %[[SUBI:.*]] = arith.subi %[[CONVI_64]], %[[C1_64]] : i64
-!CHECK: %[[Y_PVT_REF:.*]] = fir.coordinate_of %[[ARRAY]], %[[SUBI]] : (!fir.ref<!fir.array<100x!fir.logical<4>>>, i64) -> !fir.ref<!fir.logical<4>>
-!CHECK: %[[YVAL:.*]] = fir.load %[[Y_PVT_REF]] : !fir.ref<!fir.logical<4>>
-!CHECK: omp.reduction %[[YVAL]], %[[XREF]] : !fir.logical<4>, !fir.ref<!fir.logical<4>>
-!CHECK: omp.yield
-!CHECK: omp.terminator
-!CHECK: return
-subroutine simple_reduction_switch_order(y)
- logical :: x, y(100)
- x = .true.
- !$omp parallel
- !$omp do reduction(.or.:x)
- do i=1, 100
- x = y(i) .or. x
- end do
- !$omp end do
- !$omp end parallel
-end subroutine
-
-!CHECK-LABEL: func.func @_QPmultiple_reductions
-!CHECK-SAME %[[ARRAY:.*]]: !fir.ref<!fir.array<100x!fir.logical<4>>> {fir.bindc_name = "w"}) {
-!CHECK: %[[IREF:.*]] = fir.alloca i32 {bindc_name = "i", uniq_name = "_QFmultiple_reductionsEi"}
-!CHECK: %[[XREF:.*]] = fir.alloca !fir.logical<4> {bindc_name = "x", uniq_name = "_QFmultiple_reductionsEx"}
-!CHECK: %[[YREF:.*]] = fir.alloca !fir.logical<4> {bindc_name = "y", uniq_name = "_QFmultiple_reductionsEy"}
-!CHECK: %[[ZREF:.*]] = fir.alloca !fir.logical<4> {bindc_name = "z", uniq_name = "_QFmultiple_reductionsEz"}
-!CHECK: omp.parallel
-!CHECK: %[[I_PVT_REF:.*]] = fir.alloca i32 {adapt.valuebyref, pinned}
-!CHECK: %[[C1_1:.*]] = arith.constant 1 : i32
-!CHECK: %[[C100:.*]] = arith.constant 100 : i32
-!CHECK: %[[C1_2:.*]] = arith.constant 1 : i32
-!CHECK: omp.wsloop reduction(@[[RED_NAME]] -> %[[XREF]] : !fir.ref<!fir.logical<4>>, @[[RED_NAME]] -> %[[YREF]] : !fir.ref<!fir.logical<4>>, @[[RED_NAME]] -> %[[ZREF]] : !fir.ref<!fir.logical<4>>) for (%[[IVAL:.*]]) : i32 = (%[[C1_1]]) to (%[[C100]]) inclusive step (%[[C1_2]]) {
-!CHECK: fir.store %[[IVAL]] to %[[I_PVT_REF]] : !fir.ref<i32>
-!CHECK: %[[I_PVT_VAL1:.*]] = fir.load %[[I_PVT_REF]] : !fir.ref<i32>
-!CHECK: %[[CONVI_64_1:.*]] = fir.convert %[[I_PVT_VAL1]] : (i32) -> i64
-!CHECK: %[[C1_64:.*]] = arith.constant 1 : i64
-!CHECK: %[[SUBI_1:.*]] = arith.subi %[[CONVI_64_1]], %[[C1_64]] : i64
-!CHECK: %[[W_PVT_REF_1:.*]] = fir.coordinate_of %[[ARRAY]], %[[SUBI_1]] : (!fir.ref<!fir.array<100x!fir.logical<4>>>, i64) -> !fir.ref<!fir.logical<4>>
-!CHECK: %[[WVAL:.*]] = fir.load %[[W_PVT_REF_1]] : !fir.ref<!fir.logical<4>>
-!CHECK: omp.reduction %[[WVAL]], %[[XREF]] : !fir.logical<4>, !fir.ref<!fir.logical<4>>
-!CHECK: %[[I_PVT_VAL2:.*]] = fir.load %[[I_PVT_REF]] : !fir.ref<i32>
-!CHECK: %[[CONVI_64_2:.*]] = fir.convert %[[I_PVT_VAL2]] : (i32) -> i64
-!CHECK: %[[C1_64:.*]] = arith.constant 1 : i64
-!CHECK: %[[SUBI_2:.*]] = arith.subi %[[CONVI_64_2]], %[[C1_64]] : i64
-!CHECK: %[[W_PVT_REF_2:.*]] = fir.coordinate_of %[[ARRAY]], %[[SUBI_2]] : (!fir.ref<!fir.array<100x!fir.logical<4>>>, i64) -> !fir.ref<!fir.logical<4>>
-!CHECK: %[[WVAL:.*]] = fir.load %[[W_PVT_REF_2]] : !fir.ref<!fir.logical<4>>
-!CHECK: omp.reduction %[[WVAL]], %[[YREF]] : !fir.logical<4>, !fir.ref<!fir.logical<4>>
-!CHECK: %[[I_PVT_VAL3:.*]] = fir.load %[[I_PVT_REF]] : !fir.ref<i32>
-!CHECK: %[[CONVI_64_3:.*]] = fir.convert %[[I_PVT_VAL3]] : (i32) -> i64
-!CHECK: %[[C1_64:.*]] = arith.constant 1 : i64
-!CHECK: %[[SUBI_3:.*]] = arith.subi %[[CONVI_64_3]], %[[C1_64]] : i64
-!CHECK: %[[W_PVT_REF_3:.*]] = fir.coordinate_of %[[ARRAY]], %[[SUBI_3]] : (!fir.ref<!fir.array<100x!fir.logical<4>>>, i64) -> !fir.ref<!fir.logical<4>>
-!CHECK: %[[WVAL:.*]] = fir.load %[[W_PVT_REF_3]] : !fir.ref<!fir.logical<4>>
-!CHECK: omp.reduction %[[WVAL]], %[[ZREF]] : !fir.logical<4>, !fir.ref<!fir.logical<4>>
-!CHECK: omp.yield
-!CHECK: omp.terminator
-!CHECK: return
-subroutine multiple_reductions(w)
- logical :: x,y,z,w(100)
- x = .true.
- y = .true.
- z = .true.
- !$omp parallel
- !$omp do reduction(.or.:x,y,z)
- do i=1, 100
- x = x .or. w(i)
- y = y .or. w(i)
- z = z .or. w(i)
- end do
- !$omp end do
- !$omp end parallel
-end subroutine
-
diff --git a/flang/test/Lower/OpenMP/FIR/wsloop-reduction-max.f90 b/flang/test/Lower/OpenMP/FIR/wsloop-reduction-max.f90
index 0f01b46..af79658 100644
--- a/flang/test/Lower/OpenMP/FIR/wsloop-reduction-max.f90
+++ b/flang/test/Lower/OpenMP/FIR/wsloop-reduction-max.f90
@@ -21,21 +21,24 @@
!CHECK-SAME: %[[Y_BOX:.*]]: !fir.box<!fir.array<?xi32>>
!CHECK: %[[X_REF:.*]] = fir.alloca i32 {bindc_name = "x", uniq_name = "_QFreduction_max_intEx"}
!CHECK: omp.parallel
-!CHECK: omp.wsloop reduction(@[[MAX_DECLARE_I]] -> %[[X_REF]] : !fir.ref<i32>) for
+!CHECK: omp.wsloop reduction(@[[MAX_DECLARE_I]] %[[X_REF]] -> %[[PRV:.+]] : !fir.ref<i32>) for
+!CHECK: %[[LPRV:.+]] = fir.load %[[PRV]] : !fir.ref<i32>
!CHECK: %[[Y_I_REF:.*]] = fir.coordinate_of %[[Y_BOX]]
!CHECK: %[[Y_I:.*]] = fir.load %[[Y_I_REF]] : !fir.ref<i32>
-!CHECK: omp.reduction %[[Y_I]], %[[X_REF]] : i32, !fir.ref<i32>
-!CHECK: omp.yield
+!CHECK: %[[RES:.+]] = arith.cmpi sgt, %[[LPRV]], %[[Y_I]] : i32
+!CHECK: %[[SEL:.+]] = arith.select %[[RES]], %[[LPRV]], %[[Y_I]]
+!CHECK: fir.store %[[SEL]] to %[[PRV]] : !fir.ref<i32>
!CHECK: omp.terminator
!CHECK-LABEL: @_QPreduction_max_real
!CHECK-SAME: %[[Y_BOX:.*]]: !fir.box<!fir.array<?xf32>>
!CHECK: %[[X_REF:.*]] = fir.alloca f32 {bindc_name = "x", uniq_name = "_QFreduction_max_realEx"}
!CHECK: omp.parallel
-!CHECK: omp.wsloop reduction(@[[MAX_DECLARE_F]] -> %[[X_REF]] : !fir.ref<f32>) for
+!CHECK: omp.wsloop reduction(@[[MAX_DECLARE_F]] %[[X_REF]] -> %[[PRV:.+]] : !fir.ref<f32>) for
+!CHECK: %[[LPRV:.+]] = fir.load %[[PRV]] : !fir.ref<f32>
!CHECK: %[[Y_I_REF:.*]] = fir.coordinate_of %[[Y_BOX]]
!CHECK: %[[Y_I:.*]] = fir.load %[[Y_I_REF]] : !fir.ref<f32>
-!CHECK: omp.reduction %[[Y_I]], %[[X_REF]] : f32, !fir.ref<f32>
+!CHECK: %[[RES:.+]] = arith.cmpf ogt, %[[Y_I]], %[[LPRV]] {{.*}} : f32
!CHECK: omp.yield
!CHECK: omp.terminator
diff --git a/flang/test/Lower/OpenMP/FIR/wsloop-reduction-min.f90 b/flang/test/Lower/OpenMP/FIR/wsloop-reduction-min.f90
index 22cdd41..1095718 100644
--- a/flang/test/Lower/OpenMP/FIR/wsloop-reduction-min.f90
+++ b/flang/test/Lower/OpenMP/FIR/wsloop-reduction-min.f90
@@ -21,10 +21,13 @@
!CHECK-SAME: %[[Y_BOX:.*]]: !fir.box<!fir.array<?xi32>>
!CHECK: %[[X_REF:.*]] = fir.alloca i32 {bindc_name = "x", uniq_name = "_QFreduction_min_intEx"}
!CHECK: omp.parallel
-!CHECK: omp.wsloop reduction(@[[MIN_DECLARE_I]] -> %[[X_REF]] : !fir.ref<i32>) for
+!CHECK: omp.wsloop reduction(@[[MIN_DECLARE_I]] %[[X_REF]] -> %[[PRV:.+]] : !fir.ref<i32>) for
+!CHECK: %[[LPRV:.+]] = fir.load %[[PRV]] : !fir.ref<i32>
!CHECK: %[[Y_I_REF:.*]] = fir.coordinate_of %[[Y_BOX]]
!CHECK: %[[Y_I:.*]] = fir.load %[[Y_I_REF]] : !fir.ref<i32>
-!CHECK: omp.reduction %[[Y_I]], %[[X_REF]] : i32, !fir.ref<i32>
+!CHECK: %[[RES:.+]] = arith.cmpi slt, %[[LPRV]], %[[Y_I]] : i32
+!CHECK: %[[SEL:.+]] = arith.select %[[RES]], %[[LPRV]], %[[Y_I]]
+!CHECK: fir.store %[[SEL]] to %[[PRV]] : !fir.ref<i32>
!CHECK: omp.yield
!CHECK: omp.terminator
@@ -32,10 +35,11 @@
!CHECK-SAME: %[[Y_BOX:.*]]: !fir.box<!fir.array<?xf32>>
!CHECK: %[[X_REF:.*]] = fir.alloca f32 {bindc_name = "x", uniq_name = "_QFreduction_min_realEx"}
!CHECK: omp.parallel
-!CHECK: omp.wsloop reduction(@[[MIN_DECLARE_F]] -> %[[X_REF]] : !fir.ref<f32>) for
+!CHECK: omp.wsloop reduction(@[[MIN_DECLARE_F]] %[[X_REF]] -> %[[PRV:.+]] : !fir.ref<f32>) for
+!CHECK: %[[LPRV:.+]] = fir.load %[[PRV]] : !fir.ref<f32>
!CHECK: %[[Y_I_REF:.*]] = fir.coordinate_of %[[Y_BOX]]
!CHECK: %[[Y_I:.*]] = fir.load %[[Y_I_REF]] : !fir.ref<f32>
-!CHECK: omp.reduction %[[Y_I]], %[[X_REF]] : f32, !fir.ref<f32>
+!CHECK: %[[RES:.+]] = arith.cmpf ogt, %[[Y_I]], %[[LPRV]] {{.*}} : f32
!CHECK: omp.yield
!CHECK: omp.terminator
diff --git a/flang/test/Lower/OpenMP/FIR/wsloop-reduction-mul.f90 b/flang/test/Lower/OpenMP/FIR/wsloop-reduction-mul.f90
deleted file mode 100644
index 1c27f55..0000000
--- a/flang/test/Lower/OpenMP/FIR/wsloop-reduction-mul.f90
+++ /dev/null
@@ -1,274 +0,0 @@
-! RUN: bbc -emit-fir -hlfir=false -fopenmp %s -o - | FileCheck %s
-! RUN: %flang_fc1 -emit-fir -flang-deprecated-no-hlfir -fopenmp %s -o - | FileCheck %s
-
-!CHECK-LABEL: omp.reduction.declare
-!CHECK-SAME: @[[RED_F64_NAME:.*]] : f64 init {
-!CHECK: ^bb0(%{{.*}}: f64):
-!CHECK: %[[C0_1:.*]] = arith.constant 1.000000e+00 : f64
-!CHECK: omp.yield(%[[C0_1]] : f64)
-!CHECK: } combiner {
-!CHECK: ^bb0(%[[ARG0:.*]]: f64, %[[ARG1:.*]]: f64):
-!CHECK: %[[RES:.*]] = arith.mulf %[[ARG0]], %[[ARG1]] {{.*}}: f64
-!CHECK: omp.yield(%[[RES]] : f64)
-!CHECK: }
-
-!CHECK-LABEL: omp.reduction.declare
-!CHECK-SAME: @[[RED_I64_NAME:.*]] : i64 init {
-!CHECK: ^bb0(%{{.*}}: i64):
-!CHECK: %[[C1_1:.*]] = arith.constant 1 : i64
-!CHECK: omp.yield(%[[C1_1]] : i64)
-!CHECK: } combiner {
-!CHECK: ^bb0(%[[ARG0:.*]]: i64, %[[ARG1:.*]]: i64):
-!CHECK: %[[RES:.*]] = arith.muli %[[ARG0]], %[[ARG1]] : i64
-!CHECK: omp.yield(%[[RES]] : i64)
-!CHECK: }
-
-!CHECK-LABEL: omp.reduction.declare
-!CHECK-SAME: @[[RED_F32_NAME:.*]] : f32 init {
-!CHECK: ^bb0(%{{.*}}: f32):
-!CHECK: %[[C0_1:.*]] = arith.constant 1.000000e+00 : f32
-!CHECK: omp.yield(%[[C0_1]] : f32)
-!CHECK: } combiner {
-!CHECK: ^bb0(%[[ARG0:.*]]: f32, %[[ARG1:.*]]: f32):
-!CHECK: %[[RES:.*]] = arith.mulf %[[ARG0]], %[[ARG1]] {{.*}}: f32
-!CHECK: omp.yield(%[[RES]] : f32)
-!CHECK: }
-
-!CHECK-LABEL: omp.reduction.declare
-!CHECK-SAME: @[[RED_I32_NAME:.*]] : i32 init {
-!CHECK: ^bb0(%{{.*}}: i32):
-!CHECK: %[[C1_1:.*]] = arith.constant 1 : i32
-!CHECK: omp.yield(%[[C1_1]] : i32)
-!CHECK: } combiner {
-!CHECK: ^bb0(%[[ARG0:.*]]: i32, %[[ARG1:.*]]: i32):
-!CHECK: %[[RES:.*]] = arith.muli %[[ARG0]], %[[ARG1]] : i32
-!CHECK: omp.yield(%[[RES]] : i32)
-!CHECK: }
-
-!CHECK-LABEL: func.func @_QPsimple_int_reduction
-!CHECK: %[[XREF:.*]] = fir.alloca i32 {bindc_name = "x", uniq_name = "_QFsimple_int_reductionEx"}
-!CHECK: %[[C1_2:.*]] = arith.constant 1 : i32
-!CHECK: fir.store %[[C1_2]] to %[[XREF]] : !fir.ref<i32>
-!CHECK: omp.parallel
-!CHECK: %[[I_PVT_REF:.*]] = fir.alloca i32 {adapt.valuebyref, pinned}
-!CHECK: %[[C1_1:.*]] = arith.constant 1 : i32
-!CHECK: %[[C10:.*]] = arith.constant 10 : i32
-!CHECK: %[[C1_2:.*]] = arith.constant 1 : i32
-!CHECK: omp.wsloop reduction(@[[RED_I32_NAME]] -> %[[XREF]] : !fir.ref<i32>) for (%[[IVAL:.*]]) : i32 = (%[[C1_1]]) to (%[[C10]]) inclusive step (%[[C1_2]])
-!CHECK: fir.store %[[IVAL]] to %[[I_PVT_REF]] : !fir.ref<i32>
-!CHECK: %[[I_PVT_VAL:.*]] = fir.load %[[I_PVT_REF]] : !fir.ref<i32>
-!CHECK: omp.reduction %[[I_PVT_VAL]], %[[XREF]] : i32, !fir.ref<i32>
-!CHECK: omp.yield
-!CHECK: omp.terminator
-!CHECK: return
-
-subroutine simple_int_reduction
- integer :: x
- x = 1
- !$omp parallel
- !$omp do reduction(*:x)
- do i=1, 10
- x = x * i
- end do
- !$omp end do
- !$omp end parallel
-end subroutine
-
-!CHECK-LABEL: func.func @_QPsimple_real_reduction
-!CHECK: %[[XREF:.*]] = fir.alloca f32 {bindc_name = "x", uniq_name = "_QFsimple_real_reductionEx"}
-!CHECK: %[[C0_2:.*]] = arith.constant 1.000000e+00 : f32
-!CHECK: fir.store %[[C0_2]] to %[[XREF]] : !fir.ref<f32>
-!CHECK: omp.parallel
-!CHECK: %[[I_PVT_REF:.*]] = fir.alloca i32 {adapt.valuebyref, pinned}
-!CHECK: %[[C1_1:.*]] = arith.constant 1 : i32
-!CHECK: %[[C100:.*]] = arith.constant 10 : i32
-!CHECK: %[[C1_2:.*]] = arith.constant 1 : i32
-!CHECK: omp.wsloop reduction(@[[RED_F32_NAME]] -> %[[XREF]] : !fir.ref<f32>) for (%[[IVAL:.*]]) : i32 = (%[[C1_1]]) to (%[[C100]]) inclusive step (%[[C1_2]])
-!CHECK: fir.store %[[IVAL]] to %[[I_PVT_REF]] : !fir.ref<i32>
-!CHECK: %[[I_PVT_VAL_i32:.*]] = fir.load %[[I_PVT_REF]] : !fir.ref<i32>
-!CHECK: %[[I_PVT_VAL_f32:.*]] = fir.convert %[[I_PVT_VAL_i32]] : (i32) -> f32
-!CHECK: omp.reduction %[[I_PVT_VAL_f32]], %[[XREF]] : f32, !fir.ref<f32>
-!CHECK: omp.yield
-!CHECK: omp.terminator
-!CHECK: return
-subroutine simple_real_reduction
- real :: x
- x = 1.0
- !$omp parallel
- !$omp do reduction(*:x)
- do i=1, 10
- x = x * i
- end do
- !$omp end do
- !$omp end parallel
-end subroutine
-
-!CHECK-LABEL: func.func @_QPsimple_int_reduction_switch_order
-!CHECK: %[[XREF:.*]] = fir.alloca i32 {bindc_name = "x", uniq_name = "_QFsimple_int_reduction_switch_orderEx"}
-!CHECK: %[[C1_2:.*]] = arith.constant 1 : i32
-!CHECK: fir.store %[[C1_2]] to %[[XREF]] : !fir.ref<i32>
-!CHECK: omp.parallel
-!CHECK: %[[I_PVT_REF:.*]] = fir.alloca i32 {adapt.valuebyref, pinned}
-!CHECK: %[[C1_1:.*]] = arith.constant 1 : i32
-!CHECK: %[[C10:.*]] = arith.constant 10 : i32
-!CHECK: %[[C1_2:.*]] = arith.constant 1 : i32
-!CHECK: omp.wsloop reduction(@[[RED_I32_NAME]] -> %[[XREF]] : !fir.ref<i32>) for (%[[IVAL:.*]]) : i32 = (%[[C1_1]]) to (%[[C10]]) inclusive step (%[[C1_2]])
-!CHECK: fir.store %[[IVAL]] to %[[I_PVT_REF]] : !fir.ref<i32>
-!CHECK: %[[I_PVT_VAL:.*]] = fir.load %[[I_PVT_REF]] : !fir.ref<i32>
-!CHECK: omp.reduction %[[I_PVT_VAL]], %[[XREF]] : i32, !fir.ref<i32>
-!CHECK: omp.yield
-!CHECK: omp.terminator
-!CHECK: return
-subroutine simple_int_reduction_switch_order
- integer :: x
- x = 1
- !$omp parallel
- !$omp do reduction(*:x)
- do i=1, 10
- x = i * x
- end do
- !$omp end do
- !$omp end parallel
-end subroutine
-
-!CHECK-LABEL: func.func @_QPsimple_real_reduction_switch_order
-!CHECK: %[[XREF:.*]] = fir.alloca f32 {bindc_name = "x", uniq_name = "_QFsimple_real_reduction_switch_orderEx"}
-!CHECK: %[[C0_2:.*]] = arith.constant 1.000000e+00 : f32
-!CHECK: fir.store %[[C0_2]] to %[[XREF]] : !fir.ref<f32>
-!CHECK: omp.parallel
-!CHECK: %[[I_PVT_REF:.*]] = fir.alloca i32 {adapt.valuebyref, pinned}
-!CHECK: %[[C1_1:.*]] = arith.constant 1 : i32
-!CHECK: %[[C100:.*]] = arith.constant 10 : i32
-!CHECK: %[[C1_2:.*]] = arith.constant 1 : i32
-!CHECK: omp.wsloop reduction(@[[RED_F32_NAME]] -> %[[XREF]] : !fir.ref<f32>) for (%[[IVAL:.*]]) : i32 = (%[[C1_1]]) to (%[[C100]]) inclusive step (%[[C1_2]])
-!CHECK: fir.store %[[IVAL]] to %[[I_PVT_REF]] : !fir.ref<i32>
-!CHECK: %[[I_PVT_VAL_i32:.*]] = fir.load %[[I_PVT_REF]] : !fir.ref<i32>
-!CHECK: %[[I_PVT_VAL_f32:.*]] = fir.convert %[[I_PVT_VAL_i32]] : (i32) -> f32
-!CHECK: omp.reduction %[[I_PVT_VAL_f32]], %[[XREF]] : f32, !fir.ref<f32>
-!CHECK: omp.yield
-!CHECK: omp.terminator
-!CHECK: return
-subroutine simple_real_reduction_switch_order
- real :: x
- x = 1.0
- !$omp parallel
- !$omp do reduction(*:x)
- do i=1, 10
- x = i * x
- end do
- !$omp end do
- !$omp end parallel
-end subroutine
-
-!CHECK-LABEL: func.func @_QPmultiple_int_reductions_same_type
-!CHECK: %[[XREF:.*]] = fir.alloca i32 {bindc_name = "x", uniq_name = "_QFmultiple_int_reductions_same_typeEx"}
-!CHECK: %[[YREF:.*]] = fir.alloca i32 {bindc_name = "y", uniq_name = "_QFmultiple_int_reductions_same_typeEy"}
-!CHECK: %[[ZREF:.*]] = fir.alloca i32 {bindc_name = "z", uniq_name = "_QFmultiple_int_reductions_same_typeEz"}
-!CHECK: omp.parallel
-!CHECK: %[[I_PVT_REF:.*]] = fir.alloca i32 {adapt.valuebyref, pinned}
-!CHECK: omp.wsloop reduction(@[[RED_I32_NAME]] -> %[[XREF]] : !fir.ref<i32>, @[[RED_I32_NAME]] -> %[[YREF]] : !fir.ref<i32>, @[[RED_I32_NAME]] -> %[[ZREF]] : !fir.ref<i32>) for (%[[IVAL]]) : i32
-!CHECK: fir.store %[[IVAL]] to %[[I_PVT_REF]] : !fir.ref<i32>
-!CHECK: %[[I_PVT_VAL1:.*]] = fir.load %[[I_PVT_REF]] : !fir.ref<i32>
-!CHECK: omp.reduction %[[I_PVT_VAL1]], %[[XREF]] : i32, !fir.ref<i32>
-!CHECK: %[[I_PVT_VAL2:.*]] = fir.load %[[I_PVT_REF]] : !fir.ref<i32>
-!CHECK: omp.reduction %[[I_PVT_VAL2]], %[[YREF]] : i32, !fir.ref<i32>
-!CHECK: %[[I_PVT_VAL3:.*]] = fir.load %[[I_PVT_REF]] : !fir.ref<i32>
-!CHECK: omp.reduction %[[I_PVT_VAL3]], %[[ZREF]] : i32, !fir.ref<i32>
-!CHECK: omp.yield
-!CHECK: omp.terminator
-!CHECK: return
-subroutine multiple_int_reductions_same_type
- integer :: x,y,z
- x = 1
- y = 1
- z = 1
- !$omp parallel
- !$omp do reduction(*:x,y,z)
- do i=1, 10
- x = x * i
- y = y * i
- z = z * i
- end do
- !$omp end do
- !$omp end parallel
-end subroutine
-
-!CHECK-LABEL: func.func @_QPmultiple_real_reductions_same_type
-!CHECK: %[[XREF:.*]] = fir.alloca f32 {bindc_name = "x", uniq_name = "_QFmultiple_real_reductions_same_typeEx"}
-!CHECK: %[[YREF:.*]] = fir.alloca f32 {bindc_name = "y", uniq_name = "_QFmultiple_real_reductions_same_typeEy"}
-!CHECK: %[[ZREF:.*]] = fir.alloca f32 {bindc_name = "z", uniq_name = "_QFmultiple_real_reductions_same_typeEz"}
-!CHECK: omp.parallel
-!CHECK: %[[I_PVT_REF:.*]] = fir.alloca i32 {adapt.valuebyref, pinned}
-!CHECK: omp.wsloop reduction(@[[RED_F32_NAME]] -> %[[XREF]] : !fir.ref<f32>, @[[RED_F32_NAME]] -> %[[YREF]] : !fir.ref<f32>, @[[RED_F32_NAME]] -> %[[ZREF]] : !fir.ref<f32>) for (%[[IVAL]]) : i32
-!CHECK: fir.store %[[IVAL]] to %[[I_PVT_REF]] : !fir.ref<i32>
-!CHECK: %[[I_PVT_VAL1_I32:.*]] = fir.load %[[I_PVT_REF]] : !fir.ref<i32>
-!CHECK: %[[I_PVT_VAL1_F32:.*]] = fir.convert %[[I_PVT_VAL1_I32]] : (i32) -> f32
-!CHECK: omp.reduction %[[I_PVT_VAL1_F32]], %[[XREF]] : f32, !fir.ref<f32>
-!CHECK: %[[I_PVT_VAL2_I32:.*]] = fir.load %[[I_PVT_REF]] : !fir.ref<i32>
-!CHECK: %[[I_PVT_VAL2_F32:.*]] = fir.convert %[[I_PVT_VAL2_I32]] : (i32) -> f32
-!CHECK: omp.reduction %[[I_PVT_VAL2_F32]], %[[YREF]] : f32, !fir.ref<f32>
-!CHECK: %[[I_PVT_VAL3_I32:.*]] = fir.load %[[I_PVT_REF]] : !fir.ref<i32>
-!CHECK: %[[I_PVT_VAL3_F32:.*]] = fir.convert %[[I_PVT_VAL3_I32]] : (i32) -> f32
-!CHECK: omp.reduction %[[I_PVT_VAL3_F32]], %[[ZREF]] : f32, !fir.ref<f32>
-!CHECK: omp.yield
-!CHECK: omp.terminator
-!CHECK: return
-subroutine multiple_real_reductions_same_type
- real :: x,y,z
- x = 1
- y = 1
- z = 1
- !$omp parallel
- !$omp do reduction(*:x,y,z)
- do i=1, 10
- x = x * i
- y = y * i
- z = z * i
- end do
- !$omp end do
- !$omp end parallel
-end subroutine
-
-!CHECK-LABEL: func.func @_QPmultiple_reductions_different_type
-!CHECK: %[[WREF:.*]] = fir.alloca f64 {bindc_name = "w", uniq_name = "_QFmultiple_reductions_different_typeEw"}
-!CHECK: %[[XREF:.*]] = fir.alloca i32 {bindc_name = "x", uniq_name = "_QFmultiple_reductions_different_typeEx"}
-!CHECK: %[[YREF:.*]] = fir.alloca i64 {bindc_name = "y", uniq_name = "_QFmultiple_reductions_different_typeEy"}
-!CHECK: %[[ZREF:.*]] = fir.alloca f32 {bindc_name = "z", uniq_name = "_QFmultiple_reductions_different_typeEz"}
-!CHECK: omp.parallel
-!CHECK: %[[I_PVT_REF:.*]] = fir.alloca i32 {adapt.valuebyref, pinned}
-!CHECK: omp.wsloop reduction(@[[RED_I32_NAME]] -> %2 : !fir.ref<i32>, @[[RED_I64_NAME]] -> %3 : !fir.ref<i64>, @[[RED_F32_NAME]] -> %4 : !fir.ref<f32>, @[[RED_F64_NAME]] -> %1 : !fir.ref<f64>) for (%[[IVAL:.*]]) : i32
-!CHECK: fir.store %[[IVAL]] to %[[I_PVT_REF]] : !fir.ref<i32>
-!CHECK: %[[I_PVT_VAL1_I32:.*]] = fir.load %[[I_PVT_REF]] : !fir.ref<i32>
-!CHECK: omp.reduction %[[I_PVT_VAL1_I32]], %[[XREF]] : i32, !fir.ref<i32>
-!CHECK: %[[I_PVT_VAL2_I32:.*]] = fir.load %[[I_PVT_REF]] : !fir.ref<i32>
-!CHECK: %[[I_PVT_VAL2_I64:.*]] = fir.convert %[[I_PVT_VAL2_I32]] : (i32) -> i64
-!CHECK: omp.reduction %[[I_PVT_VAL2_I64]], %[[YREF]] : i64, !fir.ref<i64>
-!CHECK: %[[I_PVT_VAL3_I32:.*]] = fir.load %[[I_PVT_REF]] : !fir.ref<i32>
-!CHECK: %[[I_PVT_VAL3_F32:.*]] = fir.convert %[[I_PVT_VAL3_I32]] : (i32) -> f32
-!CHECK: omp.reduction %[[I_PVT_VAL3_F32]], %[[ZREF]] : f32, !fir.ref<f32>
-!CHECK: %[[I_PVT_VAL4_I32:.*]] = fir.load %[[I_PVT_REF]] : !fir.ref<i32>
-!CHECK: %[[I_PVT_VAL4_F64:.*]] = fir.convert %[[I_PVT_VAL4_I32]] : (i32) -> f64
-!CHECK: omp.reduction %[[I_PVT_VAL4_F64]], %[[WREF]] : f64, !fir.ref<f64>
-!CHECK: omp.yield
-!CHECK: omp.terminator
-!CHECK: return
-subroutine multiple_reductions_different_type
- integer :: x
- integer(kind=8) :: y
- real :: z
- real(kind=8) :: w
- x = 1
- y = 1
- z = 1
- w = 1
- !$omp parallel
- !$omp do reduction(*:x,y,z,w)
- do i=1, 10
- x = x * i
- y = y * i
- z = z * i
- w = w * i
- end do
- !$omp end do
- !$omp end parallel
-end subroutine
diff --git a/flang/test/Lower/OpenMP/default-clause.f90 b/flang/test/Lower/OpenMP/default-clause.f90
index 0a7443e..0e11874 100644
--- a/flang/test/Lower/OpenMP/default-clause.f90
+++ b/flang/test/Lower/OpenMP/default-clause.f90
@@ -352,7 +352,7 @@ subroutine skipped_default_clause_checks()
type(it)::iii
!CHECK: omp.parallel {
-!CHECK: omp.wsloop reduction(@min_i_32 -> %[[VAL_Z_DECLARE]]#0 : !fir.ref<i32>) for (%[[ARG:.*]]) {{.*}} {
+!CHECK: omp.wsloop reduction(@min_i_32 %[[VAL_Z_DECLARE]]#0 -> %[[PRV:.+]] : !fir.ref<i32>) for (%[[ARG:.*]]) {{.*}} {
!CHECK: omp.yield
!CHECK: }
!CHECK: omp.terminator
diff --git a/flang/test/Lower/OpenMP/wsloop-reduction-add-hlfir.f90 b/flang/test/Lower/OpenMP/wsloop-reduction-add-hlfir.f90
index 97ee665..4d30282 100644
--- a/flang/test/Lower/OpenMP/wsloop-reduction-add-hlfir.f90
+++ b/flang/test/Lower/OpenMP/wsloop-reduction-add-hlfir.f90
@@ -1,35 +1,44 @@
! RUN: bbc -emit-hlfir -fopenmp %s -o - | FileCheck %s
! RUN: %flang_fc1 -emit-hlfir -fopenmp %s -o - | FileCheck %s
-!CHECK-LABEL: omp.reduction.declare
-!CHECK-SAME: @[[RED_I32_NAME:.*]] : i32 init {
-!CHECK: ^bb0(%{{.*}}: i32):
-!CHECK: %[[C0_1:.*]] = arith.constant 0 : i32
-!CHECK: omp.yield(%[[C0_1]] : i32)
-!CHECK: } combiner {
-!CHECK: ^bb0(%[[ARG0:.*]]: i32, %[[ARG1:.*]]: i32):
-!CHECK: %[[RES:.*]] = arith.addi %[[ARG0]], %[[ARG1]] : i32
-!CHECK: omp.yield(%[[RES]] : i32)
-!CHECK: }
+! NOTE: Assertions have been autogenerated by utils/generate-test-checks.py
+
+! CHECK-LABEL: omp.reduction.declare @add_reduction_i_32 : i32 init {
+! CHECK: ^bb0(%[[VAL_0:.*]]: i32):
+! CHECK: %[[VAL_1:.*]] = arith.constant 0 : i32
+! CHECK: omp.yield(%[[VAL_1]] : i32)
+
+! CHECK-LABEL: } combiner {
+! CHECK: ^bb0(%[[VAL_0:.*]]: i32, %[[VAL_1:.*]]: i32):
+! CHECK: %[[VAL_2:.*]] = arith.addi %[[VAL_0]], %[[VAL_1]] : i32
+! CHECK: omp.yield(%[[VAL_2]] : i32)
+! CHECK: }
+
+! CHECK-LABEL: func.func @_QPsimple_int_reduction()
+! CHECK: %[[VAL_0:.*]] = fir.alloca i32 {bindc_name = "i", uniq_name = "_QFsimple_int_reductionEi"}
+! CHECK: %[[VAL_1:.*]]:2 = hlfir.declare %[[VAL_0]] {uniq_name = "_QFsimple_int_reductionEi"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
+! CHECK: %[[VAL_2:.*]] = fir.alloca i32 {bindc_name = "x", uniq_name = "_QFsimple_int_reductionEx"}
+! CHECK: %[[VAL_3:.*]]:2 = hlfir.declare %[[VAL_2]] {uniq_name = "_QFsimple_int_reductionEx"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
+! CHECK: %[[VAL_4:.*]] = arith.constant 0 : i32
+! CHECK: hlfir.assign %[[VAL_4]] to %[[VAL_3]]#0 : i32, !fir.ref<i32>
+! CHECK: omp.parallel {
+! CHECK: %[[VAL_5:.*]] = fir.alloca i32 {adapt.valuebyref, pinned}
+! CHECK: %[[VAL_6:.*]]:2 = hlfir.declare %[[VAL_5]] {uniq_name = "_QFsimple_int_reductionEi"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
+! CHECK: %[[VAL_7:.*]] = arith.constant 1 : i32
+! CHECK: %[[VAL_8:.*]] = arith.constant 100 : i32
+! CHECK: %[[VAL_9:.*]] = arith.constant 1 : i32
+! CHECK: omp.wsloop reduction(@add_reduction_i_32 %[[VAL_3]]#0 -> %[[VAL_10:.*]] : !fir.ref<i32>) for (%[[VAL_11:.*]]) : i32 = (%[[VAL_7]]) to (%[[VAL_8]]) inclusive step (%[[VAL_9]])
+! CHECK: fir.store %[[VAL_11]] to %[[VAL_6]]#1 : !fir.ref<i32>
+! CHECK: %[[VAL_12:.*]]:2 = hlfir.declare %[[VAL_10]] {uniq_name = "_QFsimple_int_reductionEx"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
+! CHECK: %[[VAL_13:.*]] = fir.load %[[VAL_12]]#0 : !fir.ref<i32>
+! CHECK: %[[VAL_14:.*]] = fir.load %[[VAL_6]]#0 : !fir.ref<i32>
+! CHECK: %[[VAL_15:.*]] = arith.addi %[[VAL_13]], %[[VAL_14]] : i32
+! CHECK: hlfir.assign %[[VAL_15]] to %[[VAL_12]]#0 : i32, !fir.ref<i32>
+! CHECK: omp.yield
+! CHECK: omp.terminator
+! CHECK: return
+
-!CHECK-LABEL: func.func @_QPsimple_int_reduction
-!CHECK: %[[XREF:.*]] = fir.alloca i32 {bindc_name = "x", uniq_name = "_QFsimple_int_reductionEx"}
-!CHECK: %[[XDECL:.*]]:2 = hlfir.declare %[[XREF]] {uniq_name = "_QFsimple_int_reductionEx"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
-!CHECK: %[[C0_2:.*]] = arith.constant 0 : i32
-!CHECK: hlfir.assign %[[C0_2]] to %[[XDECL]]#0 : i32, !fir.ref<i32>
-!CHECK: omp.parallel
-!CHECK: %[[I_PVT_REF:.*]] = fir.alloca i32 {adapt.valuebyref, pinned}
-!CHECK: %[[I_PVT_DECL:.*]]:2 = hlfir.declare %[[I_PVT_REF]] {uniq_name = "_QFsimple_int_reductionEi"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
-!CHECK: %[[C1_1:.*]] = arith.constant 1 : i32
-!CHECK: %[[C100:.*]] = arith.constant 100 : i32
-!CHECK: %[[C1_2:.*]] = arith.constant 1 : i32
-!CHECK: omp.wsloop reduction(@[[RED_I32_NAME]] -> %[[XDECL]]#0 : !fir.ref<i32>) for (%[[IVAL:.*]]) : i32 = (%[[C1_1]]) to (%[[C100]]) inclusive step (%[[C1_2]])
-!CHECK: fir.store %[[IVAL]] to %[[I_PVT_DECL]]#1 : !fir.ref<i32>
-!CHECK: %[[I_PVT_VAL:.*]] = fir.load %[[I_PVT_DECL]]#0 : !fir.ref<i32>
-!CHECK: omp.reduction %[[I_PVT_VAL]], %[[XDECL]]#0 : i32, !fir.ref<i32>
-!CHECK: omp.yield
-!CHECK: omp.terminator
-!CHECK: return
subroutine simple_int_reduction
integer :: x
x = 0
diff --git a/flang/test/Lower/OpenMP/wsloop-reduction-add.f90 b/flang/test/Lower/OpenMP/wsloop-reduction-add.f90
index 92c0075..7df4f37 100644
--- a/flang/test/Lower/OpenMP/wsloop-reduction-add.f90
+++ b/flang/test/Lower/OpenMP/wsloop-reduction-add.f90
@@ -1,68 +1,87 @@
! RUN: bbc -emit-hlfir -fopenmp %s -o - | FileCheck %s
! RUN: %flang_fc1 -emit-hlfir -fopenmp %s -o - | FileCheck %s
-!CHECK-LABEL: omp.reduction.declare
-!CHECK-SAME: @[[RED_F64_NAME:.*]] : f64 init {
-!CHECK: ^bb0(%{{.*}}: f64):
-!CHECK: %[[C0_1:.*]] = arith.constant 0.000000e+00 : f64
-!CHECK: omp.yield(%[[C0_1]] : f64)
-!CHECK: } combiner {
-!CHECK: ^bb0(%[[ARG0:.*]]: f64, %[[ARG1:.*]]: f64):
-!CHECK: %[[RES:.*]] = arith.addf %[[ARG0]], %[[ARG1]] {{.*}}: f64
-!CHECK: omp.yield(%[[RES]] : f64)
-!CHECK: }
-!CHECK-LABEL: omp.reduction.declare
-!CHECK-SAME: @[[RED_I64_NAME:.*]] : i64 init {
-!CHECK: ^bb0(%{{.*}}: i64):
-!CHECK: %[[C0_1:.*]] = arith.constant 0 : i64
-!CHECK: omp.yield(%[[C0_1]] : i64)
-!CHECK: } combiner {
-!CHECK: ^bb0(%[[ARG0:.*]]: i64, %[[ARG1:.*]]: i64):
-!CHECK: %[[RES:.*]] = arith.addi %[[ARG0]], %[[ARG1]] : i64
-!CHECK: omp.yield(%[[RES]] : i64)
-!CHECK: }
+! NOTE: Assertions have been autogenerated by utils/generate-test-checks.py
-!CHECK-LABEL: omp.reduction.declare
-!CHECK-SAME: @[[RED_F32_NAME:.*]] : f32 init {
-!CHECK: ^bb0(%{{.*}}: f32):
-!CHECK: %[[C0_1:.*]] = arith.constant 0.000000e+00 : f32
-!CHECK: omp.yield(%[[C0_1]] : f32)
-!CHECK: } combiner {
-!CHECK: ^bb0(%[[ARG0:.*]]: f32, %[[ARG1:.*]]: f32):
-!CHECK: %[[RES:.*]] = arith.addf %[[ARG0]], %[[ARG1]] {{.*}}: f32
-!CHECK: omp.yield(%[[RES]] : f32)
-!CHECK: }
+! The script is designed to make adding checks to
+! a test case fast, it is *not* designed to be authoritative
+! about what constitutes a good test! The CHECK should be
+! minimized and named to reflect the test intent.
-!CHECK-LABEL: omp.reduction.declare
-!CHECK-SAME: @[[RED_I32_NAME:.*]] : i32 init {
-!CHECK: ^bb0(%{{.*}}: i32):
-!CHECK: %[[C0_1:.*]] = arith.constant 0 : i32
-!CHECK: omp.yield(%[[C0_1]] : i32)
-!CHECK: } combiner {
-!CHECK: ^bb0(%[[ARG0:.*]]: i32, %[[ARG1:.*]]: i32):
-!CHECK: %[[RES:.*]] = arith.addi %[[ARG0]], %[[ARG1]] : i32
-!CHECK: omp.yield(%[[RES]] : i32)
-!CHECK: }
-!CHECK-LABEL: func.func @_QPsimple_int_reduction
-!CHECK: %[[XREF:.*]] = fir.alloca i32 {bindc_name = "x", uniq_name = "_QFsimple_int_reductionEx"}
-!CHECK: %[[X_DECL:.*]]:2 = hlfir.declare %[[XREF]] {uniq_name = "_QFsimple_int_reductionEx"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
-!CHECK: %[[C0_2:.*]] = arith.constant 0 : i32
-!CHECK: hlfir.assign %[[C0_2]] to %[[X_DECL]]#0 : i32, !fir.ref<i32>
-!CHECK: omp.parallel
-!CHECK: %[[I_PVT_REF:.*]] = fir.alloca i32 {adapt.valuebyref, pinned}
-!CHECK: %[[I_PVT_DECL:.*]]:2 = hlfir.declare %[[I_PVT_REF]] {uniq_name = "_QFsimple_int_reductionEi"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
-!CHECK: %[[C1_1:.*]] = arith.constant 1 : i32
-!CHECK: %[[C100:.*]] = arith.constant 100 : i32
-!CHECK: %[[C1_2:.*]] = arith.constant 1 : i32
-!CHECK: omp.wsloop reduction(@[[RED_I32_NAME]] -> %[[X_DECL]]#0 : !fir.ref<i32>) for (%[[IVAL:.*]]) : i32 = (%[[C1_1]]) to (%[[C100]]) inclusive step (%[[C1_2]])
-!CHECK: fir.store %[[IVAL]] to %[[I_PVT_DECL]]#1 : !fir.ref<i32>
-!CHECK: %[[I_PVT_VAL:.*]] = fir.load %[[I_PVT_DECL]]#0 : !fir.ref<i32>
-!CHECK: omp.reduction %[[I_PVT_VAL]], %[[X_DECL]]#0 : i32, !fir.ref<i32>
-!CHECK: omp.yield
-!CHECK: omp.terminator
-!CHECK: return
+
+! CHECK-LABEL: omp.reduction.declare @add_reduction_f_64 : f64 init {
+! CHECK: ^bb0(%[[VAL_0:.*]]: f64):
+! CHECK: %[[VAL_1:.*]] = arith.constant 0.000000e+00 : f64
+! CHECK: omp.yield(%[[VAL_1]] : f64)
+
+! CHECK-LABEL: } combiner {
+! CHECK: ^bb0(%[[VAL_0:.*]]: f64, %[[VAL_1:.*]]: f64):
+! CHECK: %[[VAL_2:.*]] = arith.addf %[[VAL_0]], %[[VAL_1]] fastmath<contract> : f64
+! CHECK: omp.yield(%[[VAL_2]] : f64)
+! CHECK: }
+
+! CHECK-LABEL: omp.reduction.declare @add_reduction_i_64 : i64 init {
+! CHECK: ^bb0(%[[VAL_0:.*]]: i64):
+! CHECK: %[[VAL_1:.*]] = arith.constant 0 : i64
+! CHECK: omp.yield(%[[VAL_1]] : i64)
+
+! CHECK-LABEL: } combiner {
+! CHECK: ^bb0(%[[VAL_0:.*]]: i64, %[[VAL_1:.*]]: i64):
+! CHECK: %[[VAL_2:.*]] = arith.addi %[[VAL_0]], %[[VAL_1]] : i64
+! CHECK: omp.yield(%[[VAL_2]] : i64)
+! CHECK: }
+
+! CHECK-LABEL: omp.reduction.declare @add_reduction_f_32 : f32 init {
+! CHECK: ^bb0(%[[VAL_0:.*]]: f32):
+! CHECK: %[[VAL_1:.*]] = arith.constant 0.000000e+00 : f32
+! CHECK: omp.yield(%[[VAL_1]] : f32)
+
+! CHECK-LABEL: } combiner {
+! CHECK: ^bb0(%[[VAL_0:.*]]: f32, %[[VAL_1:.*]]: f32):
+! CHECK: %[[VAL_2:.*]] = arith.addf %[[VAL_0]], %[[VAL_1]] fastmath<contract> : f32
+! CHECK: omp.yield(%[[VAL_2]] : f32)
+! CHECK: }
+
+! CHECK-LABEL: omp.reduction.declare @add_reduction_i_32 : i32 init {
+! CHECK: ^bb0(%[[VAL_0:.*]]: i32):
+! CHECK: %[[VAL_1:.*]] = arith.constant 0 : i32
+! CHECK: omp.yield(%[[VAL_1]] : i32)
+
+! CHECK-LABEL: } combiner {
+! CHECK: ^bb0(%[[VAL_0:.*]]: i32, %[[VAL_1:.*]]: i32):
+! CHECK: %[[VAL_2:.*]] = arith.addi %[[VAL_0]], %[[VAL_1]] : i32
+! CHECK: omp.yield(%[[VAL_2]] : i32)
+! CHECK: }
+
+! CHECK-LABEL: func.func @_QPsimple_int_reduction() {
+! CHECK: %[[VAL_0:.*]] = fir.alloca i32 {bindc_name = "i", uniq_name = "_QFsimple_int_reductionEi"}
+! CHECK: %[[VAL_1:.*]]:2 = hlfir.declare %[[VAL_0]] {uniq_name = "_QFsimple_int_reductionEi"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
+! CHECK: %[[VAL_2:.*]] = fir.alloca i32 {bindc_name = "x", uniq_name = "_QFsimple_int_reductionEx"}
+! CHECK: %[[VAL_3:.*]]:2 = hlfir.declare %[[VAL_2]] {uniq_name = "_QFsimple_int_reductionEx"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
+! CHECK: %[[VAL_4:.*]] = arith.constant 0 : i32
+! CHECK: hlfir.assign %[[VAL_4]] to %[[VAL_3]]#0 : i32, !fir.ref<i32>
+! CHECK: omp.parallel {
+! CHECK: %[[VAL_5:.*]] = fir.alloca i32 {adapt.valuebyref, pinned}
+! CHECK: %[[VAL_6:.*]]:2 = hlfir.declare %[[VAL_5]] {uniq_name = "_QFsimple_int_reductionEi"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
+! CHECK: %[[VAL_7:.*]] = arith.constant 1 : i32
+! CHECK: %[[VAL_8:.*]] = arith.constant 100 : i32
+! CHECK: %[[VAL_9:.*]] = arith.constant 1 : i32
+! CHECK: omp.wsloop reduction(@add_reduction_i_32 %[[VAL_3]]#0 -> %[[VAL_10:.*]] : !fir.ref<i32>) for (%[[VAL_11:.*]]) : i32 = (%[[VAL_7]]) to (%[[VAL_8]]) inclusive step (%[[VAL_9]]) {
+! CHECK: fir.store %[[VAL_11]] to %[[VAL_6]]#1 : !fir.ref<i32>
+! CHECK: %[[VAL_12:.*]]:2 = hlfir.declare %[[VAL_10]] {uniq_name = "_QFsimple_int_reductionEx"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
+! CHECK: %[[VAL_13:.*]] = fir.load %[[VAL_12]]#0 : !fir.ref<i32>
+! CHECK: %[[VAL_14:.*]] = fir.load %[[VAL_6]]#0 : !fir.ref<i32>
+! CHECK: %[[VAL_15:.*]] = arith.addi %[[VAL_13]], %[[VAL_14]] : i32
+! CHECK: hlfir.assign %[[VAL_15]] to %[[VAL_12]]#0 : i32, !fir.ref<i32>
+! CHECK: omp.yield
+! CHECK: }
+! CHECK: omp.terminator
+! CHECK: }
+! CHECK: return
+! CHECK: }
+
subroutine simple_int_reduction
integer :: x
x = 0
@@ -75,25 +94,35 @@ subroutine simple_int_reduction
!$omp end parallel
end subroutine
-!CHECK-LABEL: func.func @_QPsimple_real_reduction
-!CHECK: %[[XREF:.*]] = fir.alloca f32 {bindc_name = "x", uniq_name = "_QFsimple_real_reductionEx"}
-!CHECK: %[[X_DECL:.*]]:2 = hlfir.declare %[[XREF]] {uniq_name = "_QFsimple_real_reductionEx"} : (!fir.ref<f32>) -> (!fir.ref<f32>, !fir.ref<f32>)
-!CHECK: %[[C0_2:.*]] = arith.constant 0.000000e+00 : f32
-!CHECK: hlfir.assign %[[C0_2]] to %[[X_DECL]]#0 : f32, !fir.ref<f32>
-!CHECK: omp.parallel
-!CHECK: %[[I_PVT_REF:.*]] = fir.alloca i32 {adapt.valuebyref, pinned}
-!CHECK: %[[I_PVT_DECL:.*]]:2 = hlfir.declare %[[I_PVT_REF]] {uniq_name = "_QFsimple_real_reductionEi"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
-!CHECK: %[[C1_1:.*]] = arith.constant 1 : i32
-!CHECK: %[[C100:.*]] = arith.constant 100 : i32
-!CHECK: %[[C1_2:.*]] = arith.constant 1 : i32
-!CHECK: omp.wsloop reduction(@[[RED_F32_NAME]] -> %[[X_DECL]]#0 : !fir.ref<f32>) for (%[[IVAL:.*]]) : i32 = (%[[C1_1]]) to (%[[C100]]) inclusive step (%[[C1_2]])
-!CHECK: fir.store %[[IVAL]] to %[[I_PVT_DECL]]#1 : !fir.ref<i32>
-!CHECK: %[[I_PVT_VAL_i32:.*]] = fir.load %[[I_PVT_DECL]]#0 : !fir.ref<i32>
-!CHECK: %[[I_PVT_VAL_f32:.*]] = fir.convert %[[I_PVT_VAL_i32]] : (i32) -> f32
-!CHECK: omp.reduction %[[I_PVT_VAL_f32]], %[[X_DECL]]#0 : f32, !fir.ref<f32>
-!CHECK: omp.yield
-!CHECK: omp.terminator
-!CHECK: return
+
+! CHECK-LABEL: func.func @_QPsimple_real_reduction() {
+! CHECK: %[[VAL_0:.*]] = fir.alloca i32 {bindc_name = "i", uniq_name = "_QFsimple_real_reductionEi"}
+! CHECK: %[[VAL_1:.*]]:2 = hlfir.declare %[[VAL_0]] {uniq_name = "_QFsimple_real_reductionEi"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
+! CHECK: %[[VAL_2:.*]] = fir.alloca f32 {bindc_name = "x", uniq_name = "_QFsimple_real_reductionEx"}
+! CHECK: %[[VAL_3:.*]]:2 = hlfir.declare %[[VAL_2]] {uniq_name = "_QFsimple_real_reductionEx"} : (!fir.ref<f32>) -> (!fir.ref<f32>, !fir.ref<f32>)
+! CHECK: %[[VAL_4:.*]] = arith.constant 0.000000e+00 : f32
+! CHECK: hlfir.assign %[[VAL_4]] to %[[VAL_3]]#0 : f32, !fir.ref<f32>
+! CHECK: omp.parallel {
+! CHECK: %[[VAL_5:.*]] = fir.alloca i32 {adapt.valuebyref, pinned}
+! CHECK: %[[VAL_6:.*]]:2 = hlfir.declare %[[VAL_5]] {uniq_name = "_QFsimple_real_reductionEi"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
+! CHECK: %[[VAL_7:.*]] = arith.constant 1 : i32
+! CHECK: %[[VAL_8:.*]] = arith.constant 100 : i32
+! CHECK: %[[VAL_9:.*]] = arith.constant 1 : i32
+! CHECK: omp.wsloop reduction(@add_reduction_f_32 %[[VAL_3]]#0 -> %[[VAL_10:.*]] : !fir.ref<f32>) for (%[[VAL_11:.*]]) : i32 = (%[[VAL_7]]) to (%[[VAL_8]]) inclusive step (%[[VAL_9]]) {
+! CHECK: fir.store %[[VAL_11]] to %[[VAL_6]]#1 : !fir.ref<i32>
+! CHECK: %[[VAL_12:.*]]:2 = hlfir.declare %[[VAL_10]] {uniq_name = "_QFsimple_real_reductionEx"} : (!fir.ref<f32>) -> (!fir.ref<f32>, !fir.ref<f32>)
+! CHECK: %[[VAL_13:.*]] = fir.load %[[VAL_12]]#0 : !fir.ref<f32>
+! CHECK: %[[VAL_14:.*]] = fir.load %[[VAL_6]]#0 : !fir.ref<i32>
+! CHECK: %[[VAL_15:.*]] = fir.convert %[[VAL_14]] : (i32) -> f32
+! CHECK: %[[VAL_16:.*]] = arith.addf %[[VAL_13]], %[[VAL_15]] fastmath<contract> : f32
+! CHECK: hlfir.assign %[[VAL_16]] to %[[VAL_12]]#0 : f32, !fir.ref<f32>
+! CHECK: omp.yield
+! CHECK: }
+! CHECK: omp.terminator
+! CHECK: }
+! CHECK: return
+! CHECK: }
+
subroutine simple_real_reduction
real :: x
x = 0.0
@@ -106,24 +135,34 @@ subroutine simple_real_reduction
!$omp end parallel
end subroutine
-!CHECK-LABEL: func.func @_QPsimple_int_reduction_switch_order
-!CHECK: %[[XREF:.*]] = fir.alloca i32 {bindc_name = "x", uniq_name = "_QFsimple_int_reduction_switch_orderEx"}
-!CHECK: %[[X_DECL:.*]]:2 = hlfir.declare %2 {uniq_name = "_QFsimple_int_reduction_switch_orderEx"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
-!CHECK: %[[C0_2:.*]] = arith.constant 0 : i32
-!CHECK: hlfir.assign %[[C0_2]] to %[[X_DECL]]#0 : i32, !fir.ref<i32>
-!CHECK: omp.parallel
-!CHECK: %[[I_PVT_REF:.*]] = fir.alloca i32 {adapt.valuebyref, pinned}
-!CHECK: %[[I_PVT_DECL:.*]]:2 = hlfir.declare %[[I_PVT_REF]] {uniq_name = "_QFsimple_int_reduction_switch_orderEi"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
-!CHECK: %[[C1_1:.*]] = arith.constant 1 : i32
-!CHECK: %[[C100:.*]] = arith.constant 100 : i32
-!CHECK: %[[C1_2:.*]] = arith.constant 1 : i32
-!CHECK: omp.wsloop reduction(@[[RED_I32_NAME]] -> %[[X_DECL]]#0 : !fir.ref<i32>) for (%[[IVAL:.*]]) : i32 = (%[[C1_1]]) to (%[[C100]]) inclusive step (%[[C1_2]])
-!CHECK: fir.store %[[IVAL]] to %[[I_PVT_DECL]]#1 : !fir.ref<i32>
-!CHECK: %[[I_PVT_VAL:.*]] = fir.load %[[I_PVT_DECL]]#0 : !fir.ref<i32>
-!CHECK: omp.reduction %[[I_PVT_VAL]], %[[X_DECL]]#0 : i32, !fir.ref<i32>
-!CHECK: omp.yield
-!CHECK: omp.terminator
-!CHECK: return
+
+! CHECK-LABEL: func.func @_QPsimple_int_reduction_switch_order() {
+! CHECK: %[[VAL_0:.*]] = fir.alloca i32 {bindc_name = "i", uniq_name = "_QFsimple_int_reduction_switch_orderEi"}
+! CHECK: %[[VAL_1:.*]]:2 = hlfir.declare %[[VAL_0]] {uniq_name = "_QFsimple_int_reduction_switch_orderEi"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
+! CHECK: %[[VAL_2:.*]] = fir.alloca i32 {bindc_name = "x", uniq_name = "_QFsimple_int_reduction_switch_orderEx"}
+! CHECK: %[[VAL_3:.*]]:2 = hlfir.declare %[[VAL_2]] {uniq_name = "_QFsimple_int_reduction_switch_orderEx"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
+! CHECK: %[[VAL_4:.*]] = arith.constant 0 : i32
+! CHECK: hlfir.assign %[[VAL_4]] to %[[VAL_3]]#0 : i32, !fir.ref<i32>
+! CHECK: omp.parallel {
+! CHECK: %[[VAL_5:.*]] = fir.alloca i32 {adapt.valuebyref, pinned}
+! CHECK: %[[VAL_6:.*]]:2 = hlfir.declare %[[VAL_5]] {uniq_name = "_QFsimple_int_reduction_switch_orderEi"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
+! CHECK: %[[VAL_7:.*]] = arith.constant 1 : i32
+! CHECK: %[[VAL_8:.*]] = arith.constant 100 : i32
+! CHECK: %[[VAL_9:.*]] = arith.constant 1 : i32
+! CHECK: omp.wsloop reduction(@add_reduction_i_32 %[[VAL_3]]#0 -> %[[VAL_10:.*]] : !fir.ref<i32>) for (%[[VAL_11:.*]]) : i32 = (%[[VAL_7]]) to (%[[VAL_8]]) inclusive step (%[[VAL_9]]) {
+! CHECK: fir.store %[[VAL_11]] to %[[VAL_6]]#1 : !fir.ref<i32>
+! CHECK: %[[VAL_12:.*]]:2 = hlfir.declare %[[VAL_10]] {uniq_name = "_QFsimple_int_reduction_switch_orderEx"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
+! CHECK: %[[VAL_13:.*]] = fir.load %[[VAL_6]]#0 : !fir.ref<i32>
+! CHECK: %[[VAL_14:.*]] = fir.load %[[VAL_12]]#0 : !fir.ref<i32>
+! CHECK: %[[VAL_15:.*]] = arith.addi %[[VAL_13]], %[[VAL_14]] : i32
+! CHECK: hlfir.assign %[[VAL_15]] to %[[VAL_12]]#0 : i32, !fir.ref<i32>
+! CHECK: omp.yield
+! CHECK: }
+! CHECK: omp.terminator
+! CHECK: }
+! CHECK: return
+! CHECK: }
+
subroutine simple_int_reduction_switch_order
integer :: x
x = 0
@@ -136,25 +175,34 @@ subroutine simple_int_reduction_switch_order
!$omp end parallel
end subroutine
-!CHECK-LABEL: func.func @_QPsimple_real_reduction_switch_order
-!CHECK: %[[XREF:.*]] = fir.alloca f32 {bindc_name = "x", uniq_name = "_QFsimple_real_reduction_switch_orderEx"}
-!CHECK: %[[X_DECL:.*]]:2 = hlfir.declare %[[XREF]] {uniq_name = "_QFsimple_real_reduction_switch_orderEx"} : (!fir.ref<f32>) -> (!fir.ref<f32>, !fir.ref<f32>)
-!CHECK: %[[C0_2:.*]] = arith.constant 0.000000e+00 : f32
-!CHECK: hlfir.assign %[[C0_2]] to %[[X_DECL]]#0 : f32, !fir.ref<f32>
-!CHECK: omp.parallel
-!CHECK: %[[I_PVT_REF:.*]] = fir.alloca i32 {adapt.valuebyref, pinned}
-!CHECK: %[[I_PVT_DECL:.*]]:2 = hlfir.declare %[[I_PVT_REF]] {uniq_name = "_QFsimple_real_reduction_switch_orderEi"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
-!CHECK: %[[C1_1:.*]] = arith.constant 1 : i32
-!CHECK: %[[C100:.*]] = arith.constant 100 : i32
-!CHECK: %[[C1_2:.*]] = arith.constant 1 : i32
-!CHECK: omp.wsloop reduction(@[[RED_F32_NAME]] -> %[[X_DECL]]#0 : !fir.ref<f32>) for (%[[IVAL:.*]]) : i32 = (%[[C1_1]]) to (%[[C100]]) inclusive step (%[[C1_2]])
-!CHECK: fir.store %[[IVAL]] to %[[I_PVT_DECL]]#1 : !fir.ref<i32>
-!CHECK: %[[I_PVT_VAL_i32:.*]] = fir.load %[[I_PVT_DECL]]#0 : !fir.ref<i32>
-!CHECK: %[[I_PVT_VAL_f32:.*]] = fir.convert %[[I_PVT_VAL_i32]] : (i32) -> f32
-!CHECK: omp.reduction %[[I_PVT_VAL_f32]], %[[X_DECL]]#0 : f32, !fir.ref<f32>
-!CHECK: omp.yield
-!CHECK: omp.terminator
-!CHECK: return
+! CHECK-LABEL: func.func @_QPsimple_real_reduction_switch_order() {
+! CHECK: %[[VAL_0:.*]] = fir.alloca i32 {bindc_name = "i", uniq_name = "_QFsimple_real_reduction_switch_orderEi"}
+! CHECK: %[[VAL_1:.*]]:2 = hlfir.declare %[[VAL_0]] {uniq_name = "_QFsimple_real_reduction_switch_orderEi"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
+! CHECK: %[[VAL_2:.*]] = fir.alloca f32 {bindc_name = "x", uniq_name = "_QFsimple_real_reduction_switch_orderEx"}
+! CHECK: %[[VAL_3:.*]]:2 = hlfir.declare %[[VAL_2]] {uniq_name = "_QFsimple_real_reduction_switch_orderEx"} : (!fir.ref<f32>) -> (!fir.ref<f32>, !fir.ref<f32>)
+! CHECK: %[[VAL_4:.*]] = arith.constant 0.000000e+00 : f32
+! CHECK: hlfir.assign %[[VAL_4]] to %[[VAL_3]]#0 : f32, !fir.ref<f32>
+! CHECK: omp.parallel {
+! CHECK: %[[VAL_5:.*]] = fir.alloca i32 {adapt.valuebyref, pinned}
+! CHECK: %[[VAL_6:.*]]:2 = hlfir.declare %[[VAL_5]] {uniq_name = "_QFsimple_real_reduction_switch_orderEi"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
+! CHECK: %[[VAL_7:.*]] = arith.constant 1 : i32
+! CHECK: %[[VAL_8:.*]] = arith.constant 100 : i32
+! CHECK: %[[VAL_9:.*]] = arith.constant 1 : i32
+! CHECK: omp.wsloop reduction(@add_reduction_f_32 %[[VAL_3]]#0 -> %[[VAL_10:.*]] : !fir.ref<f32>) for (%[[VAL_11:.*]]) : i32 = (%[[VAL_7]]) to (%[[VAL_8]]) inclusive step (%[[VAL_9]]) {
+! CHECK: fir.store %[[VAL_11]] to %[[VAL_6]]#1 : !fir.ref<i32>
+! CHECK: %[[VAL_12:.*]]:2 = hlfir.declare %[[VAL_10]] {uniq_name = "_QFsimple_real_reduction_switch_orderEx"} : (!fir.ref<f32>) -> (!fir.ref<f32>, !fir.ref<f32>)
+! CHECK: %[[VAL_13:.*]] = fir.load %[[VAL_6]]#0 : !fir.ref<i32>
+! CHECK: %[[VAL_14:.*]] = fir.convert %[[VAL_13]] : (i32) -> f32
+! CHECK: %[[VAL_15:.*]] = fir.load %[[VAL_12]]#0 : !fir.ref<f32>
+! CHECK: %[[VAL_16:.*]] = arith.addf %[[VAL_14]], %[[VAL_15]] fastmath<contract> : f32
+! CHECK: hlfir.assign %[[VAL_16]] to %[[VAL_12]]#0 : f32, !fir.ref<f32>
+! CHECK: omp.yield
+! CHECK: }
+! CHECK: omp.terminator
+! CHECK: }
+! CHECK: return
+! CHECK: }
+
subroutine simple_real_reduction_switch_order
real :: x
x = 0.0
@@ -167,27 +215,51 @@ subroutine simple_real_reduction_switch_order
!$omp end parallel
end subroutine
-!CHECK-LABEL: func.func @_QPmultiple_int_reductions_same_type
-!CHECK: %[[XREF:.*]] = fir.alloca i32 {bindc_name = "x", uniq_name = "_QFmultiple_int_reductions_same_typeEx"}
-!CHECK: %[[X_DECL:.*]]:2 = hlfir.declare %[[XREF]] {uniq_name = "_QFmultiple_int_reductions_same_typeEx"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
-!CHECK: %[[YREF:.*]] = fir.alloca i32 {bindc_name = "y", uniq_name = "_QFmultiple_int_reductions_same_typeEy"}
-!CHECK: %[[Y_DECL:.*]]:2 = hlfir.declare %[[YREF]] {uniq_name = "_QFmultiple_int_reductions_same_typeEy"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
-!CHECK: %[[ZREF:.*]] = fir.alloca i32 {bindc_name = "z", uniq_name = "_QFmultiple_int_reductions_same_typeEz"}
-!CHECK: %[[Z_DECL:.*]]:2 = hlfir.declare %[[ZREF]] {uniq_name = "_QFmultiple_int_reductions_same_typeEz"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
-!CHECK: omp.parallel
-!CHECK: %[[I_PVT_REF:.*]] = fir.alloca i32 {adapt.valuebyref, pinned}
-!CHECK: %[[I_PVT_DECL:.*]]:2 = hlfir.declare %[[I_PVT_REF]] {uniq_name = "_QFmultiple_int_reductions_same_typeEi"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
-!CHECK: omp.wsloop reduction(@[[RED_I32_NAME]] -> %[[X_DECL]]#0 : !fir.ref<i32>, @[[RED_I32_NAME]] -> %[[Y_DECL]]#0 : !fir.ref<i32>, @[[RED_I32_NAME]] -> %[[Z_DECL]]#0 : !fir.ref<i32>) for (%[[IVAL]]) : i32
-!CHECK: fir.store %[[IVAL]] to %[[I_PVT_DECL]]#1 : !fir.ref<i32>
-!CHECK: %[[I_PVT_VAL1:.*]] = fir.load %[[I_PVT_DECL]]#0 : !fir.ref<i32>
-!CHECK: omp.reduction %[[I_PVT_VAL1]], %[[X_DECL]]#0 : i32, !fir.ref<i32>
-!CHECK: %[[I_PVT_VAL2:.*]] = fir.load %[[I_PVT_DECL]]#0 : !fir.ref<i32>
-!CHECK: omp.reduction %[[I_PVT_VAL2]], %[[Y_DECL]]#0 : i32, !fir.ref<i32>
-!CHECK: %[[I_PVT_VAL3:.*]] = fir.load %[[I_PVT_DECL]]#0 : !fir.ref<i32>
-!CHECK: omp.reduction %[[I_PVT_VAL3]], %[[Z_DECL]]#0 : i32, !fir.ref<i32>
-!CHECK: omp.yield
-!CHECK: omp.terminator
-!CHECK: return
+! CHECK-LABEL: func.func @_QPmultiple_int_reductions_same_type() {
+! CHECK: %[[VAL_0:.*]] = fir.alloca i32 {bindc_name = "i", uniq_name = "_QFmultiple_int_reductions_same_typeEi"}
+! CHECK: %[[VAL_1:.*]]:2 = hlfir.declare %[[VAL_0]] {uniq_name = "_QFmultiple_int_reductions_same_typeEi"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
+! CHECK: %[[VAL_2:.*]] = fir.alloca i32 {bindc_name = "x", uniq_name = "_QFmultiple_int_reductions_same_typeEx"}
+! CHECK: %[[VAL_3:.*]]:2 = hlfir.declare %[[VAL_2]] {uniq_name = "_QFmultiple_int_reductions_same_typeEx"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
+! CHECK: %[[VAL_4:.*]] = fir.alloca i32 {bindc_name = "y", uniq_name = "_QFmultiple_int_reductions_same_typeEy"}
+! CHECK: %[[VAL_5:.*]]:2 = hlfir.declare %[[VAL_4]] {uniq_name = "_QFmultiple_int_reductions_same_typeEy"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
+! CHECK: %[[VAL_6:.*]] = fir.alloca i32 {bindc_name = "z", uniq_name = "_QFmultiple_int_reductions_same_typeEz"}
+! CHECK: %[[VAL_7:.*]]:2 = hlfir.declare %[[VAL_6]] {uniq_name = "_QFmultiple_int_reductions_same_typeEz"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
+! CHECK: %[[VAL_8:.*]] = arith.constant 0 : i32
+! CHECK: hlfir.assign %[[VAL_8]] to %[[VAL_3]]#0 : i32, !fir.ref<i32>
+! CHECK: %[[VAL_9:.*]] = arith.constant 0 : i32
+! CHECK: hlfir.assign %[[VAL_9]] to %[[VAL_5]]#0 : i32, !fir.ref<i32>
+! CHECK: %[[VAL_10:.*]] = arith.constant 0 : i32
+! CHECK: hlfir.assign %[[VAL_10]] to %[[VAL_7]]#0 : i32, !fir.ref<i32>
+! CHECK: omp.parallel {
+! CHECK: %[[VAL_11:.*]] = fir.alloca i32 {adapt.valuebyref, pinned}
+! CHECK: %[[VAL_12:.*]]:2 = hlfir.declare %[[VAL_11]] {uniq_name = "_QFmultiple_int_reductions_same_typeEi"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
+! CHECK: %[[VAL_13:.*]] = arith.constant 1 : i32
+! CHECK: %[[VAL_14:.*]] = arith.constant 100 : i32
+! CHECK: %[[VAL_15:.*]] = arith.constant 1 : i32
+! CHECK: omp.wsloop reduction(@add_reduction_i_32 %[[VAL_3]]#0 -> %[[VAL_16:.*]] : !fir.ref<i32>, @add_reduction_i_32 %[[VAL_5]]#0 -> %[[VAL_17:.*]] : !fir.ref<i32>, @add_reduction_i_32 %[[VAL_7]]#0 -> %[[VAL_18:.*]] : !fir.ref<i32>) for (%[[VAL_19:.*]]) : i32 = (%[[VAL_13]]) to (%[[VAL_14]]) inclusive step (%[[VAL_15]]) {
+! CHECK: fir.store %[[VAL_19]] to %[[VAL_12]]#1 : !fir.ref<i32>
+! CHECK: %[[VAL_20:.*]]:2 = hlfir.declare %[[VAL_16]] {uniq_name = "_QFmultiple_int_reductions_same_typeEx"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
+! CHECK: %[[VAL_21:.*]]:2 = hlfir.declare %[[VAL_17]] {uniq_name = "_QFmultiple_int_reductions_same_typeEy"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
+! CHECK: %[[VAL_22:.*]]:2 = hlfir.declare %[[VAL_18]] {uniq_name = "_QFmultiple_int_reductions_same_typeEz"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
+! CHECK: %[[VAL_23:.*]] = fir.load %[[VAL_20]]#0 : !fir.ref<i32>
+! CHECK: %[[VAL_24:.*]] = fir.load %[[VAL_12]]#0 : !fir.ref<i32>
+! CHECK: %[[VAL_25:.*]] = arith.addi %[[VAL_23]], %[[VAL_24]] : i32
+! CHECK: hlfir.assign %[[VAL_25]] to %[[VAL_20]]#0 : i32, !fir.ref<i32>
+! CHECK: %[[VAL_26:.*]] = fir.load %[[VAL_21]]#0 : !fir.ref<i32>
+! CHECK: %[[VAL_27:.*]] = fir.load %[[VAL_12]]#0 : !fir.ref<i32>
+! CHECK: %[[VAL_28:.*]] = arith.addi %[[VAL_26]], %[[VAL_27]] : i32
+! CHECK: hlfir.assign %[[VAL_28]] to %[[VAL_21]]#0 : i32, !fir.ref<i32>
+! CHECK: %[[VAL_29:.*]] = fir.load %[[VAL_22]]#0 : !fir.ref<i32>
+! CHECK: %[[VAL_30:.*]] = fir.load %[[VAL_12]]#0 : !fir.ref<i32>
+! CHECK: %[[VAL_31:.*]] = arith.addi %[[VAL_29]], %[[VAL_30]] : i32
+! CHECK: hlfir.assign %[[VAL_31]] to %[[VAL_22]]#0 : i32, !fir.ref<i32>
+! CHECK: omp.yield
+! CHECK: }
+! CHECK: omp.terminator
+! CHECK: }
+! CHECK: return
+! CHECK: }
+
subroutine multiple_int_reductions_same_type
integer :: x,y,z
x = 0
@@ -204,30 +276,54 @@ subroutine multiple_int_reductions_same_type
!$omp end parallel
end subroutine
-!CHECK-LABEL: func.func @_QPmultiple_real_reductions_same_type
-!CHECK: %[[XREF:.*]] = fir.alloca f32 {bindc_name = "x", uniq_name = "_QFmultiple_real_reductions_same_typeEx"}
-!CHECK: %[[X_DECL]]:2 = hlfir.declare %[[XREF]] {uniq_name = "_QFmultiple_real_reductions_same_typeEx"} : (!fir.ref<f32>) -> (!fir.ref<f32>, !fir.ref<f32>)
-!CHECK: %[[YREF:.*]] = fir.alloca f32 {bindc_name = "y", uniq_name = "_QFmultiple_real_reductions_same_typeEy"}
-!CHECK: %[[Y_DECL]]:2 = hlfir.declare %[[YREF]] {uniq_name = "_QFmultiple_real_reductions_same_typeEy"} : (!fir.ref<f32>) -> (!fir.ref<f32>, !fir.ref<f32>)
-!CHECK: %[[ZREF:.*]] = fir.alloca f32 {bindc_name = "z", uniq_name = "_QFmultiple_real_reductions_same_typeEz"}
-!CHECK: %[[Z_DECL]]:2 = hlfir.declare %[[ZREF]] {uniq_name = "_QFmultiple_real_reductions_same_typeEz"} : (!fir.ref<f32>) -> (!fir.ref<f32>, !fir.ref<f32>)
-!CHECK: omp.parallel
-!CHECK: %[[I_PVT_REF:.*]] = fir.alloca i32 {adapt.valuebyref, pinned}
-!CHECK: %[[I_PVT_DECL:.*]]:2 = hlfir.declare %[[I_PVT_REF]] {uniq_name = "_QFmultiple_real_reductions_same_typeEi"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
-!CHECK: omp.wsloop reduction(@[[RED_F32_NAME]] -> %[[X_DECL]]#0 : !fir.ref<f32>, @[[RED_F32_NAME]] -> %[[Y_DECL]]#0 : !fir.ref<f32>, @[[RED_F32_NAME]] -> %[[Z_DECL]]#0 : !fir.ref<f32>) for (%[[IVAL]]) : i32
-!CHECK: fir.store %[[IVAL]] to %[[I_PVT_DECL]]#1 : !fir.ref<i32>
-!CHECK: %[[I_PVT_VAL1_I32:.*]] = fir.load %[[I_PVT_DECL]]#0 : !fir.ref<i32>
-!CHECK: %[[I_PVT_VAL1_F32:.*]] = fir.convert %[[I_PVT_VAL1_I32]] : (i32) -> f32
-!CHECK: omp.reduction %[[I_PVT_VAL1_F32]], %[[X_DECL]]#0 : f32, !fir.ref<f32>
-!CHECK: %[[I_PVT_VAL2_I32:.*]] = fir.load %[[I_PVT_DECL]]#0 : !fir.ref<i32>
-!CHECK: %[[I_PVT_VAL2_F32:.*]] = fir.convert %[[I_PVT_VAL2_I32]] : (i32) -> f32
-!CHECK: omp.reduction %[[I_PVT_VAL2_F32]], %[[Y_DECL]]#0 : f32, !fir.ref<f32>
-!CHECK: %[[I_PVT_VAL3_I32:.*]] = fir.load %[[I_PVT_DECL]]#0 : !fir.ref<i32>
-!CHECK: %[[I_PVT_VAL3_F32:.*]] = fir.convert %[[I_PVT_VAL3_I32]] : (i32) -> f32
-!CHECK: omp.reduction %[[I_PVT_VAL3_F32]], %[[Z_DECL]]#0 : f32, !fir.ref<f32>
-!CHECK: omp.yield
-!CHECK: omp.terminator
-!CHECK: return
+! CHECK-LABEL: func.func @_QPmultiple_real_reductions_same_type() {
+! CHECK: %[[VAL_0:.*]] = fir.alloca i32 {bindc_name = "i", uniq_name = "_QFmultiple_real_reductions_same_typeEi"}
+! CHECK: %[[VAL_1:.*]]:2 = hlfir.declare %[[VAL_0]] {uniq_name = "_QFmultiple_real_reductions_same_typeEi"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
+! CHECK: %[[VAL_2:.*]] = fir.alloca f32 {bindc_name = "x", uniq_name = "_QFmultiple_real_reductions_same_typeEx"}
+! CHECK: %[[VAL_3:.*]]:2 = hlfir.declare %[[VAL_2]] {uniq_name = "_QFmultiple_real_reductions_same_typeEx"} : (!fir.ref<f32>) -> (!fir.ref<f32>, !fir.ref<f32>)
+! CHECK: %[[VAL_4:.*]] = fir.alloca f32 {bindc_name = "y", uniq_name = "_QFmultiple_real_reductions_same_typeEy"}
+! CHECK: %[[VAL_5:.*]]:2 = hlfir.declare %[[VAL_4]] {uniq_name = "_QFmultiple_real_reductions_same_typeEy"} : (!fir.ref<f32>) -> (!fir.ref<f32>, !fir.ref<f32>)
+! CHECK: %[[VAL_6:.*]] = fir.alloca f32 {bindc_name = "z", uniq_name = "_QFmultiple_real_reductions_same_typeEz"}
+! CHECK: %[[VAL_7:.*]]:2 = hlfir.declare %[[VAL_6]] {uniq_name = "_QFmultiple_real_reductions_same_typeEz"} : (!fir.ref<f32>) -> (!fir.ref<f32>, !fir.ref<f32>)
+! CHECK: %[[VAL_8:.*]] = arith.constant 0.000000e+00 : f32
+! CHECK: hlfir.assign %[[VAL_8]] to %[[VAL_3]]#0 : f32, !fir.ref<f32>
+! CHECK: %[[VAL_9:.*]] = arith.constant 0.000000e+00 : f32
+! CHECK: hlfir.assign %[[VAL_9]] to %[[VAL_5]]#0 : f32, !fir.ref<f32>
+! CHECK: %[[VAL_10:.*]] = arith.constant 0.000000e+00 : f32
+! CHECK: hlfir.assign %[[VAL_10]] to %[[VAL_7]]#0 : f32, !fir.ref<f32>
+! CHECK: omp.parallel {
+! CHECK: %[[VAL_11:.*]] = fir.alloca i32 {adapt.valuebyref, pinned}
+! CHECK: %[[VAL_12:.*]]:2 = hlfir.declare %[[VAL_11]] {uniq_name = "_QFmultiple_real_reductions_same_typeEi"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
+! CHECK: %[[VAL_13:.*]] = arith.constant 1 : i32
+! CHECK: %[[VAL_14:.*]] = arith.constant 100 : i32
+! CHECK: %[[VAL_15:.*]] = arith.constant 1 : i32
+! CHECK: omp.wsloop reduction(@add_reduction_f_32 %[[VAL_3]]#0 -> %[[VAL_16:.*]] : !fir.ref<f32>, @add_reduction_f_32 %[[VAL_5]]#0 -> %[[VAL_17:.*]] : !fir.ref<f32>, @add_reduction_f_32 %[[VAL_7]]#0 -> %[[VAL_18:.*]] : !fir.ref<f32>) for (%[[VAL_19:.*]]) : i32 = (%[[VAL_13]]) to (%[[VAL_14]]) inclusive step (%[[VAL_15]]) {
+! CHECK: fir.store %[[VAL_19]] to %[[VAL_12]]#1 : !fir.ref<i32>
+! CHECK: %[[VAL_20:.*]]:2 = hlfir.declare %[[VAL_16]] {uniq_name = "_QFmultiple_real_reductions_same_typeEx"} : (!fir.ref<f32>) -> (!fir.ref<f32>, !fir.ref<f32>)
+! CHECK: %[[VAL_21:.*]]:2 = hlfir.declare %[[VAL_17]] {uniq_name = "_QFmultiple_real_reductions_same_typeEy"} : (!fir.ref<f32>) -> (!fir.ref<f32>, !fir.ref<f32>)
+! CHECK: %[[VAL_22:.*]]:2 = hlfir.declare %[[VAL_18]] {uniq_name = "_QFmultiple_real_reductions_same_typeEz"} : (!fir.ref<f32>) -> (!fir.ref<f32>, !fir.ref<f32>)
+! CHECK: %[[VAL_23:.*]] = fir.load %[[VAL_20]]#0 : !fir.ref<f32>
+! CHECK: %[[VAL_24:.*]] = fir.load %[[VAL_12]]#0 : !fir.ref<i32>
+! CHECK: %[[VAL_25:.*]] = fir.convert %[[VAL_24]] : (i32) -> f32
+! CHECK: %[[VAL_26:.*]] = arith.addf %[[VAL_23]], %[[VAL_25]] fastmath<contract> : f32
+! CHECK: hlfir.assign %[[VAL_26]] to %[[VAL_20]]#0 : f32, !fir.ref<f32>
+! CHECK: %[[VAL_27:.*]] = fir.load %[[VAL_21]]#0 : !fir.ref<f32>
+! CHECK: %[[VAL_28:.*]] = fir.load %[[VAL_12]]#0 : !fir.ref<i32>
+! CHECK: %[[VAL_29:.*]] = fir.convert %[[VAL_28]] : (i32) -> f32
+! CHECK: %[[VAL_30:.*]] = arith.addf %[[VAL_27]], %[[VAL_29]] fastmath<contract> : f32
+! CHECK: hlfir.assign %[[VAL_30]] to %[[VAL_21]]#0 : f32, !fir.ref<f32>
+! CHECK: %[[VAL_31:.*]] = fir.load %[[VAL_22]]#0 : !fir.ref<f32>
+! CHECK: %[[VAL_32:.*]] = fir.load %[[VAL_12]]#0 : !fir.ref<i32>
+! CHECK: %[[VAL_33:.*]] = fir.convert %[[VAL_32]] : (i32) -> f32
+! CHECK: %[[VAL_34:.*]] = arith.addf %[[VAL_31]], %[[VAL_33]] fastmath<contract> : f32
+! CHECK: hlfir.assign %[[VAL_34]] to %[[VAL_22]]#0 : f32, !fir.ref<f32>
+! CHECK: omp.yield
+! CHECK: }
+! CHECK: omp.terminator
+! CHECK: }
+! CHECK: return
+! CHECK: }
+
subroutine multiple_real_reductions_same_type
real :: x,y,z
x = 0.0
@@ -244,34 +340,63 @@ subroutine multiple_real_reductions_same_type
!$omp end parallel
end subroutine
-!CHECK-LABEL: func.func @_QPmultiple_reductions_different_type
-!CHECK: %[[WREF:.*]] = fir.alloca f64 {bindc_name = "w", uniq_name = "_QFmultiple_reductions_different_typeEw"}
-!CHECK: %[[W_DECL:.*]]:2 = hlfir.declare %[[WREF]] {uniq_name = "_QFmultiple_reductions_different_typeEw"} : (!fir.ref<f64>) -> (!fir.ref<f64>, !fir.ref<f64>)
-!CHECK: %[[XREF:.*]] = fir.alloca i32 {bindc_name = "x", uniq_name = "_QFmultiple_reductions_different_typeEx"}
-!CHECK: %[[X_DECL:.*]]:2 = hlfir.declare %[[XREF]] {uniq_name = "_QFmultiple_reductions_different_typeEx"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
-!CHECK: %[[YREF:.*]] = fir.alloca i64 {bindc_name = "y", uniq_name = "_QFmultiple_reductions_different_typeEy"}
-!CHECK: %[[Y_DECL:.*]]:2 = hlfir.declare %[[YREF]] {uniq_name = "_QFmultiple_reductions_different_typeEy"} : (!fir.ref<i64>) -> (!fir.ref<i64>, !fir.ref<i64>)
-!CHECK: %[[ZREF:.*]] = fir.alloca f32 {bindc_name = "z", uniq_name = "_QFmultiple_reductions_different_typeEz"}
-!CHECK: %[[Z_DECL:.*]]:2 = hlfir.declare %[[ZREF]] {uniq_name = "_QFmultiple_reductions_different_typeEz"} : (!fir.ref<f32>) -> (!fir.ref<f32>, !fir.ref<f32>)
-!CHECK: omp.parallel
-!CHECK: %[[I_PVT_REF:.*]] = fir.alloca i32 {adapt.valuebyref, pinned}
-!CHECK: %[[I_PVT_DECL:.*]]:2 = hlfir.declare %[[I_PVT_REF]] {uniq_name = "_QFmultiple_reductions_different_typeEi"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
-!CHECK: omp.wsloop reduction(@[[RED_I32_NAME]] -> %[[X_DECL]]#0 : !fir.ref<i32>, @[[RED_I64_NAME]] -> %[[Y_DECL]]#0 : !fir.ref<i64>, @[[RED_F32_NAME]] -> %[[Z_DECL]]#0 : !fir.ref<f32>, @[[RED_F64_NAME]] -> %[[W_DECL]]#0 : !fir.ref<f64>) for (%[[IVAL:.*]]) : i32
-!CHECK: fir.store %[[IVAL]] to %[[I_PVT_DECL]]#1 : !fir.ref<i32>
-!CHECK: %[[I_PVT_VAL1_I32:.*]] = fir.load %[[I_PVT_DECL]]#0 : !fir.ref<i32>
-!CHECK: omp.reduction %[[I_PVT_VAL1_I32]], %[[X_DECL]]#0 : i32, !fir.ref<i32>
-!CHECK: %[[I_PVT_VAL2_I32:.*]] = fir.load %[[I_PVT_DECL]]#0 : !fir.ref<i32>
-!CHECK: %[[I_PVT_VAL2_I64:.*]] = fir.convert %[[I_PVT_VAL2_I32]] : (i32) -> i64
-!CHECK: omp.reduction %[[I_PVT_VAL2_I64]], %[[Y_DECL]]#0 : i64, !fir.ref<i64>
-!CHECK: %[[I_PVT_VAL3_I32:.*]] = fir.load %[[I_PVT_DECL]]#0 : !fir.ref<i32>
-!CHECK: %[[I_PVT_VAL3_F32:.*]] = fir.convert %[[I_PVT_VAL3_I32]] : (i32) -> f32
-!CHECK: omp.reduction %[[I_PVT_VAL3_F32]], %[[Z_DECL]]#0 : f32, !fir.ref<f32>
-!CHECK: %[[I_PVT_VAL4_I32:.*]] = fir.load %[[I_PVT_DECL]]#0 : !fir.ref<i32>
-!CHECK: %[[I_PVT_VAL4_F64:.*]] = fir.convert %[[I_PVT_VAL4_I32]] : (i32) -> f64
-!CHECK: omp.reduction %[[I_PVT_VAL4_F64]], %[[W_DECL]]#0 : f64, !fir.ref<f64>
-!CHECK: omp.yield
-!CHECK: omp.terminator
-!CHECK: return
+! CHECK-LABEL: func.func @_QPmultiple_reductions_different_type() {
+! CHECK: %[[VAL_0:.*]] = fir.alloca i32 {bindc_name = "i", uniq_name = "_QFmultiple_reductions_different_typeEi"}
+! CHECK: %[[VAL_1:.*]]:2 = hlfir.declare %[[VAL_0]] {uniq_name = "_QFmultiple_reductions_different_typeEi"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
+! CHECK: %[[VAL_2:.*]] = fir.alloca f64 {bindc_name = "w", uniq_name = "_QFmultiple_reductions_different_typeEw"}
+! CHECK: %[[VAL_3:.*]]:2 = hlfir.declare %[[VAL_2]] {uniq_name = "_QFmultiple_reductions_different_typeEw"} : (!fir.ref<f64>) -> (!fir.ref<f64>, !fir.ref<f64>)
+! CHECK: %[[VAL_4:.*]] = fir.alloca i32 {bindc_name = "x", uniq_name = "_QFmultiple_reductions_different_typeEx"}
+! CHECK: %[[VAL_5:.*]]:2 = hlfir.declare %[[VAL_4]] {uniq_name = "_QFmultiple_reductions_different_typeEx"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
+! CHECK: %[[VAL_6:.*]] = fir.alloca i64 {bindc_name = "y", uniq_name = "_QFmultiple_reductions_different_typeEy"}
+! CHECK: %[[VAL_7:.*]]:2 = hlfir.declare %[[VAL_6]] {uniq_name = "_QFmultiple_reductions_different_typeEy"} : (!fir.ref<i64>) -> (!fir.ref<i64>, !fir.ref<i64>)
+! CHECK: %[[VAL_8:.*]] = fir.alloca f32 {bindc_name = "z", uniq_name = "_QFmultiple_reductions_different_typeEz"}
+! CHECK: %[[VAL_9:.*]]:2 = hlfir.declare %[[VAL_8]] {uniq_name = "_QFmultiple_reductions_different_typeEz"} : (!fir.ref<f32>) -> (!fir.ref<f32>, !fir.ref<f32>)
+! CHECK: %[[VAL_10:.*]] = arith.constant 0 : i32
+! CHECK: hlfir.assign %[[VAL_10]] to %[[VAL_5]]#0 : i32, !fir.ref<i32>
+! CHECK: %[[VAL_11:.*]] = arith.constant 0 : i64
+! CHECK: hlfir.assign %[[VAL_11]] to %[[VAL_7]]#0 : i64, !fir.ref<i64>
+! CHECK: %[[VAL_12:.*]] = arith.constant 0.000000e+00 : f32
+! CHECK: hlfir.assign %[[VAL_12]] to %[[VAL_9]]#0 : f32, !fir.ref<f32>
+! CHECK: %[[VAL_13:.*]] = arith.constant 0.000000e+00 : f64
+! CHECK: hlfir.assign %[[VAL_13]] to %[[VAL_3]]#0 : f64, !fir.ref<f64>
+! CHECK: omp.parallel {
+! CHECK: %[[VAL_14:.*]] = fir.alloca i32 {adapt.valuebyref, pinned}
+! CHECK: %[[VAL_15:.*]]:2 = hlfir.declare %[[VAL_14]] {uniq_name = "_QFmultiple_reductions_different_typeEi"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
+! CHECK: %[[VAL_16:.*]] = arith.constant 1 : i32
+! CHECK: %[[VAL_17:.*]] = arith.constant 100 : i32
+! CHECK: %[[VAL_18:.*]] = arith.constant 1 : i32
+! CHECK: omp.wsloop reduction(@add_reduction_i_32 %[[VAL_5]]#0 -> %[[VAL_19:.*]] : !fir.ref<i32>, @add_reduction_i_64 %[[VAL_7]]#0 -> %[[VAL_20:.*]] : !fir.ref<i64>, @add_reduction_f_32 %[[VAL_9]]#0 -> %[[VAL_21:.*]] : !fir.ref<f32>, @add_reduction_f_64 %[[VAL_3]]#0 -> %[[VAL_22:.*]] : !fir.ref<f64>) for (%[[VAL_23:.*]]) : i32 = (%[[VAL_16]]) to (%[[VAL_17]]) inclusive step (%[[VAL_18]]) {
+! CHECK: fir.store %[[VAL_23]] to %[[VAL_15]]#1 : !fir.ref<i32>
+! CHECK: %[[VAL_24:.*]]:2 = hlfir.declare %[[VAL_19]] {uniq_name = "_QFmultiple_reductions_different_typeEx"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
+! CHECK: %[[VAL_25:.*]]:2 = hlfir.declare %[[VAL_20]] {uniq_name = "_QFmultiple_reductions_different_typeEy"} : (!fir.ref<i64>) -> (!fir.ref<i64>, !fir.ref<i64>)
+! CHECK: %[[VAL_26:.*]]:2 = hlfir.declare %[[VAL_21]] {uniq_name = "_QFmultiple_reductions_different_typeEz"} : (!fir.ref<f32>) -> (!fir.ref<f32>, !fir.ref<f32>)
+! CHECK: %[[VAL_27:.*]]:2 = hlfir.declare %[[VAL_22]] {uniq_name = "_QFmultiple_reductions_different_typeEw"} : (!fir.ref<f64>) -> (!fir.ref<f64>, !fir.ref<f64>)
+! CHECK: %[[VAL_28:.*]] = fir.load %[[VAL_24]]#0 : !fir.ref<i32>
+! CHECK: %[[VAL_29:.*]] = fir.load %[[VAL_15]]#0 : !fir.ref<i32>
+! CHECK: %[[VAL_30:.*]] = arith.addi %[[VAL_28]], %[[VAL_29]] : i32
+! CHECK: hlfir.assign %[[VAL_30]] to %[[VAL_24]]#0 : i32, !fir.ref<i32>
+! CHECK: %[[VAL_31:.*]] = fir.load %[[VAL_25]]#0 : !fir.ref<i64>
+! CHECK: %[[VAL_32:.*]] = fir.load %[[VAL_15]]#0 : !fir.ref<i32>
+! CHECK: %[[VAL_33:.*]] = fir.convert %[[VAL_32]] : (i32) -> i64
+! CHECK: %[[VAL_34:.*]] = arith.addi %[[VAL_31]], %[[VAL_33]] : i64
+! CHECK: hlfir.assign %[[VAL_34]] to %[[VAL_25]]#0 : i64, !fir.ref<i64>
+! CHECK: %[[VAL_35:.*]] = fir.load %[[VAL_26]]#0 : !fir.ref<f32>
+! CHECK: %[[VAL_36:.*]] = fir.load %[[VAL_15]]#0 : !fir.ref<i32>
+! CHECK: %[[VAL_37:.*]] = fir.convert %[[VAL_36]] : (i32) -> f32
+! CHECK: %[[VAL_38:.*]] = arith.addf %[[VAL_35]], %[[VAL_37]] fastmath<contract> : f32
+! CHECK: hlfir.assign %[[VAL_38]] to %[[VAL_26]]#0 : f32, !fir.ref<f32>
+! CHECK: %[[VAL_39:.*]] = fir.load %[[VAL_27]]#0 : !fir.ref<f64>
+! CHECK: %[[VAL_40:.*]] = fir.load %[[VAL_15]]#0 : !fir.ref<i32>
+! CHECK: %[[VAL_41:.*]] = fir.convert %[[VAL_40]] : (i32) -> f64
+! CHECK: %[[VAL_42:.*]] = arith.addf %[[VAL_39]], %[[VAL_41]] fastmath<contract> : f64
+! CHECK: hlfir.assign %[[VAL_42]] to %[[VAL_27]]#0 : f64, !fir.ref<f64>
+! CHECK: omp.yield
+! CHECK: }
+! CHECK: omp.terminator
+! CHECK: }
+! CHECK: return
+! CHECK: }
+
subroutine multiple_reductions_different_type
integer :: x
integer(kind=8) :: y
diff --git a/flang/test/Lower/OpenMP/wsloop-reduction-iand.f90 b/flang/test/Lower/OpenMP/wsloop-reduction-iand.f90
index 29cd536..9588531 100644
--- a/flang/test/Lower/OpenMP/wsloop-reduction-iand.f90
+++ b/flang/test/Lower/OpenMP/wsloop-reduction-iand.f90
@@ -1,32 +1,48 @@
! RUN: bbc -emit-hlfir -fopenmp %s -o - | FileCheck %s
! RUN: %flang_fc1 -emit-hlfir -fopenmp %s -o - | FileCheck %s
-!CHECK: omp.reduction.declare @[[IAND_DECLARE_I:.*]] : i32 init {
-!CHECK: %[[ZERO_VAL_I:.*]] = arith.constant -1 : i32
-!CHECK: omp.yield(%[[ZERO_VAL_I]] : i32)
-!CHECK: combiner
-!CHECK: ^bb0(%[[ARG0_I:.*]]: i32, %[[ARG1_I:.*]]: i32):
-!CHECK: %[[IAND_VAL_I:.*]] = arith.andi %[[ARG0_I]], %[[ARG1_I]] : i32
-!CHECK: omp.yield(%[[IAND_VAL_I]] : i32)
+! NOTE: Assertions have been autogenerated by utils/generate-test-checks.py
+
+! CHECK-LABEL: omp.reduction.declare @iand_i_32 : i32 init {
+! CHECK: ^bb0(%[[VAL_0:.*]]: i32):
+! CHECK: %[[VAL_1:.*]] = arith.constant -1 : i32
+! CHECK: omp.yield(%[[VAL_1]] : i32)
+
+! CHECK-LABEL: } combiner {
+! CHECK: ^bb0(%[[VAL_0:.*]]: i32, %[[VAL_1:.*]]: i32):
+! CHECK: %[[VAL_2:.*]] = arith.andi %[[VAL_0]], %[[VAL_1]] : i32
+! CHECK: omp.yield(%[[VAL_2]] : i32)
+! CHECK: }
+
+! CHECK-LABEL: func.func @_QPreduction_iand(
+! CHECK-SAME: %[[VAL_0:.*]]: !fir.box<!fir.array<?xi32>> {fir.bindc_name = "y"}) {
+! CHECK: %[[VAL_1:.*]] = fir.alloca i32 {bindc_name = "i", uniq_name = "_QFreduction_iandEi"}
+! CHECK: %[[VAL_2:.*]]:2 = hlfir.declare %[[VAL_1]] {uniq_name = "_QFreduction_iandEi"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
+! CHECK: %[[VAL_3:.*]] = fir.alloca i32 {bindc_name = "x", uniq_name = "_QFreduction_iandEx"}
+! CHECK: %[[VAL_4:.*]]:2 = hlfir.declare %[[VAL_3]] {uniq_name = "_QFreduction_iandEx"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
+! CHECK: %[[VAL_5:.*]]:2 = hlfir.declare %[[VAL_0]] {uniq_name = "_QFreduction_iandEy"} : (!fir.box<!fir.array<?xi32>>) -> (!fir.box<!fir.array<?xi32>>, !fir.box<!fir.array<?xi32>>)
+! CHECK: %[[VAL_6:.*]] = arith.constant 0 : i32
+! CHECK: hlfir.assign %[[VAL_6]] to %[[VAL_4]]#0 : i32, !fir.ref<i32>
+! CHECK: omp.parallel {
+! CHECK: %[[VAL_7:.*]] = fir.alloca i32 {adapt.valuebyref, pinned}
+! CHECK: %[[VAL_8:.*]]:2 = hlfir.declare %[[VAL_7]] {uniq_name = "_QFreduction_iandEi"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
+! CHECK: %[[VAL_9:.*]] = arith.constant 1 : i32
+! CHECK: %[[VAL_10:.*]] = arith.constant 100 : i32
+! CHECK: %[[VAL_11:.*]] = arith.constant 1 : i32
+! CHECK: omp.wsloop reduction(@iand_i_32 %[[VAL_4]]#0 -> %[[VAL_12:.*]] : !fir.ref<i32>) for (%[[VAL_13:.*]]) : i32 = (%[[VAL_9]]) to (%[[VAL_10]]) inclusive step (%[[VAL_11]]) {
+! CHECK: fir.store %[[VAL_13]] to %[[VAL_8]]#1 : !fir.ref<i32>
+! CHECK: %[[VAL_14:.*]]:2 = hlfir.declare %[[VAL_12]] {uniq_name = "_QFreduction_iandEx"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
+! CHECK: %[[VAL_15:.*]] = fir.load %[[VAL_8]]#0 : !fir.ref<i32>
+! CHECK: %[[VAL_16:.*]] = fir.convert %[[VAL_15]] : (i32) -> i64
+! CHECK: %[[VAL_17:.*]] = hlfir.designate %[[VAL_5]]#0 (%[[VAL_16]]) : (!fir.box<!fir.array<?xi32>>, i64) -> !fir.ref<i32>
+! CHECK: %[[VAL_18:.*]] = fir.load %[[VAL_14]]#0 : !fir.ref<i32>
+! CHECK: %[[VAL_19:.*]] = fir.load %[[VAL_17]] : !fir.ref<i32>
+! CHECK: %[[VAL_20:.*]] = arith.andi %[[VAL_18]], %[[VAL_19]] : i32
+! CHECK: hlfir.assign %[[VAL_20]] to %[[VAL_14]]#0 : i32, !fir.ref<i32>
+! CHECK: omp.yield
+! CHECK: omp.terminator
-!CHECK-LABEL: @_QPreduction_iand
-!CHECK-SAME: %[[Y_BOX:.*]]: !fir.box<!fir.array<?xi32>>
-!CHECK: %[[X_REF:.*]] = fir.alloca i32 {bindc_name = "x", uniq_name = "_QFreduction_iandEx"}
-!CHECK: %[[X_DECL:.*]]:2 = hlfir.declare %[[X_REF]] {uniq_name = "_QFreduction_iandEx"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
-!CHECK: %[[Y_DECL:.*]]:2 = hlfir.declare %[[Y_BOX]] {uniq_name = "_QFreduction_iandEy"} : (!fir.box<!fir.array<?xi32>>) -> (!fir.box<!fir.array<?xi32>>, !fir.box<!fir.array<?xi32>>)
-!CHECK: omp.parallel
-!CHECK: %[[I_REF:.*]] = fir.alloca i32 {adapt.valuebyref, pinned}
-!CHECK: %[[I_DECL:.*]]:2 = hlfir.declare %[[I_REF]] {uniq_name = "_QFreduction_iandEi"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
-!CHECK: omp.wsloop reduction(@[[IAND_DECLARE_I]] -> %[[X_DECL]]#0 : !fir.ref<i32>) for
-!CHECK: fir.store %{{.*}} to %[[I_DECL]]#1 : !fir.ref<i32>
-!CHECK: %[[I_32:.*]] = fir.load %[[I_DECL]]#0 : !fir.ref<i32>
-!CHECK: %[[I_64:.*]] = fir.convert %[[I_32]] : (i32) -> i64
-!CHECK: %[[Y_I_REF:.*]] = hlfir.designate %[[Y_DECL]]#0 (%[[I_64]]) : (!fir.box<!fir.array<?xi32>>, i64) -> !fir.ref<i32>
-!CHECK: %[[Y_I:.*]] = fir.load %[[Y_I_REF]] : !fir.ref<i32>
-!CHECK: omp.reduction %[[Y_I]], %[[X_DECL]]#0 : i32, !fir.ref<i32>
-!CHECK: omp.yield
-!CHECK: omp.terminator
subroutine reduction_iand(y)
integer :: x, y(:)
diff --git a/flang/test/Lower/OpenMP/wsloop-reduction-ieor.f90 b/flang/test/Lower/OpenMP/wsloop-reduction-ieor.f90
index 3131d1b..a14a371 100644
--- a/flang/test/Lower/OpenMP/wsloop-reduction-ieor.f90
+++ b/flang/test/Lower/OpenMP/wsloop-reduction-ieor.f90
@@ -10,7 +10,7 @@
!CHECK: omp.yield(%[[IEOR_VAL_I]] : i32)
!CHECK-LABEL: @_QPreduction_ieor
-!CHECK-SAME: %[[Y_BOX:.*]]: !fir.box<!fir.array<?xi32>>
+!CHECK-SAME: %[[Y_BOX:.*]]: !fir.box<!fir.array<?xi32>>
!CHECK: %[[X_REF:.*]] = fir.alloca i32 {bindc_name = "x", uniq_name = "_QFreduction_ieorEx"}
!CHECK: %[[X_DECL:.*]]:2 = hlfir.declare %[[X_REF]] {uniq_name = "_QFreduction_ieorEx"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
!CHECK: %[[Y_DECL:.*]]:2 = hlfir.declare %[[Y_BOX]] {uniq_name = "_QFreduction_ieorEy"} : (!fir.box<!fir.array<?xi32>>) -> (!fir.box<!fir.array<?xi32>>, !fir.box<!fir.array<?xi32>>)
@@ -19,13 +19,16 @@
!CHECK: omp.parallel
!CHECK: %[[I_REF:.*]] = fir.alloca i32 {adapt.valuebyref, pinned}
!CHECK: %[[I_DECL:.*]]:2 = hlfir.declare %[[I_REF]] {uniq_name = "_QFreduction_ieorEi"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
-!CHECK: omp.wsloop reduction(@[[IEOR_DECLARE_I]] -> %[[X_DECL]]#0 : !fir.ref<i32>) for
+!CHECK: omp.wsloop reduction(@[[IEOR_DECLARE_I]] %[[X_DECL]]#0 -> %[[PRV:.+]] : !fir.ref<i32>) for
!CHECK: fir.store %{{.*}} to %[[I_DECL]]#1 : !fir.ref<i32>
+!CHECK: %[[PRV_DECL:.+]]:2 = hlfir.declare %[[PRV]] {{.*}} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
!CHECK: %[[I_32:.*]] = fir.load %[[I_DECL]]#0 : !fir.ref<i32>
!CHECK: %[[I_64:.*]] = fir.convert %[[I_32]] : (i32) -> i64
!CHECK: %[[Y_I_REF:.*]] = hlfir.designate %[[Y_DECL]]#0 (%[[I_64]]) : (!fir.box<!fir.array<?xi32>>, i64) -> !fir.ref<i32>
+!CHECK: %[[LPRV:.+]] = fir.load %[[PRV_DECL]]#0 : !fir.ref<i32>
!CHECK: %[[Y_I:.*]] = fir.load %[[Y_I_REF]] : !fir.ref<i32>
-!CHECK: omp.reduction %[[Y_I]], %[[X_DECL]]#0 : i32, !fir.ref<i32>
+!CHECK: %[[RES:.+]] = arith.xori %[[LPRV]], %[[Y_I]] : i32
+!CHECK: hlfir.assign %[[RES]] to %[[PRV_DECL]]#0 : i32, !fir.ref<i32>
!CHECK: omp.yield
!CHECK: omp.terminator
diff --git a/flang/test/Lower/OpenMP/wsloop-reduction-ior.f90 b/flang/test/Lower/OpenMP/wsloop-reduction-ior.f90
index 5e3d5bd..3b5e327 100644
--- a/flang/test/Lower/OpenMP/wsloop-reduction-ior.f90
+++ b/flang/test/Lower/OpenMP/wsloop-reduction-ior.f90
@@ -1,31 +1,48 @@
! RUN: bbc -emit-hlfir -fopenmp %s -o - | FileCheck %s
! RUN: %flang_fc1 -emit-hlfir -fopenmp %s -o - | FileCheck %s
-!CHECK: omp.reduction.declare @[[IOR_DECLARE_I:.*]] : i32 init {
-!CHECK: %[[ZERO_VAL_I:.*]] = arith.constant 0 : i32
-!CHECK: omp.yield(%[[ZERO_VAL_I]] : i32)
-!CHECK: combiner
-!CHECK: ^bb0(%[[ARG0_I:.*]]: i32, %[[ARG1_I:.*]]: i32):
-!CHECK: %[[IOR_VAL_I:.*]] = arith.ori %[[ARG0_I]], %[[ARG1_I]] : i32
-!CHECK: omp.yield(%[[IOR_VAL_I]] : i32)
+! NOTE: Assertions have been autogenerated by utils/generate-test-checks.py
+
+! CHECK-LABEL: omp.reduction.declare @ior_i_32 : i32 init {
+! CHECK: ^bb0(%[[VAL_0:.*]]: i32):
+! CHECK: %[[VAL_1:.*]] = arith.constant 0 : i32
+! CHECK: omp.yield(%[[VAL_1]] : i32)
+
+! CHECK-LABEL: } combiner {
+! CHECK: ^bb0(%[[VAL_0:.*]]: i32, %[[VAL_1:.*]]: i32):
+! CHECK: %[[VAL_2:.*]] = arith.ori %[[VAL_0]], %[[VAL_1]] : i32
+! CHECK: omp.yield(%[[VAL_2]] : i32)
+! CHECK: }
+
+! CHECK-LABEL: func.func @_QPreduction_ior(
+! CHECK-SAME: %[[VAL_0:.*]]: !fir.box<!fir.array<?xi32>> {fir.bindc_name = "y"}) {
+! CHECK: %[[VAL_1:.*]] = fir.alloca i32 {bindc_name = "i", uniq_name = "_QFreduction_iorEi"}
+! CHECK: %[[VAL_2:.*]]:2 = hlfir.declare %[[VAL_1]] {uniq_name = "_QFreduction_iorEi"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
+! CHECK: %[[VAL_3:.*]] = fir.alloca i32 {bindc_name = "x", uniq_name = "_QFreduction_iorEx"}
+! CHECK: %[[VAL_4:.*]]:2 = hlfir.declare %[[VAL_3]] {uniq_name = "_QFreduction_iorEx"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
+! CHECK: %[[VAL_5:.*]]:2 = hlfir.declare %[[VAL_0]] {uniq_name = "_QFreduction_iorEy"} : (!fir.box<!fir.array<?xi32>>) -> (!fir.box<!fir.array<?xi32>>, !fir.box<!fir.array<?xi32>>)
+! CHECK: %[[VAL_6:.*]] = arith.constant 0 : i32
+! CHECK: hlfir.assign %[[VAL_6]] to %[[VAL_4]]#0 : i32, !fir.ref<i32>
+! CHECK: omp.parallel
+! CHECK: %[[VAL_7:.*]] = fir.alloca i32 {adapt.valuebyref, pinned}
+! CHECK: %[[VAL_8:.*]]:2 = hlfir.declare %[[VAL_7]] {uniq_name = "_QFreduction_iorEi"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
+! CHECK: %[[VAL_9:.*]] = arith.constant 1 : i32
+! CHECK: %[[VAL_10:.*]] = arith.constant 100 : i32
+! CHECK: %[[VAL_11:.*]] = arith.constant 1 : i32
+! CHECK: omp.wsloop reduction(@ior_i_32 %[[VAL_4]]#0 -> %[[VAL_12:.*]] : !fir.ref<i32>) for (%[[VAL_13:.*]]) : i32 = (%[[VAL_9]]) to (%[[VAL_10]]) inclusive step (%[[VAL_11]])
+! CHECK: fir.store %[[VAL_13]] to %[[VAL_8]]#1 : !fir.ref<i32>
+! CHECK: %[[VAL_14:.*]]:2 = hlfir.declare %[[VAL_12]] {uniq_name = "_QFreduction_iorEx"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
+! CHECK: %[[VAL_15:.*]] = fir.load %[[VAL_8]]#0 : !fir.ref<i32>
+! CHECK: %[[VAL_16:.*]] = fir.convert %[[VAL_15]] : (i32) -> i64
+! CHECK: %[[VAL_17:.*]] = hlfir.designate %[[VAL_5]]#0 (%[[VAL_16]]) : (!fir.box<!fir.array<?xi32>>, i64) -> !fir.ref<i32>
+! CHECK: %[[VAL_18:.*]] = fir.load %[[VAL_14]]#0 : !fir.ref<i32>
+! CHECK: %[[VAL_19:.*]] = fir.load %[[VAL_17]] : !fir.ref<i32>
+! CHECK: %[[VAL_20:.*]] = arith.ori %[[VAL_18]], %[[VAL_19]] : i32
+! CHECK: hlfir.assign %[[VAL_20]] to %[[VAL_14]]#0 : i32, !fir.ref<i32>
+! CHECK: omp.yield
+! CHECK: omp.terminator
+
-!CHECK-LABEL: @_QPreduction_ior
-!CHECK-SAME: %[[Y_BOX:.*]]: !fir.box<!fir.array<?xi32>>
-!CHECK: %[[X_REF:.*]] = fir.alloca i32 {bindc_name = "x", uniq_name = "_QFreduction_iorEx"}
-!CHECK: %[[X_DECL:.*]]:2 = hlfir.declare %[[X_REF]] {uniq_name = "_QFreduction_iorEx"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
-!CHECK: %[[Y_DECL:.*]]:2 = hlfir.declare %[[Y_BOX]] {uniq_name = "_QFreduction_iorEy"} : (!fir.box<!fir.array<?xi32>>) -> (!fir.box<!fir.array<?xi32>>, !fir.box<!fir.array<?xi32>>)
-!CHECK: omp.parallel
-!CHECK: %[[I_REF:.*]] = fir.alloca i32 {adapt.valuebyref, pinned}
-!CHECK: %[[I_DECL:.*]]:2 = hlfir.declare %[[I_REF]] {uniq_name = "_QFreduction_iorEi"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
-!CHECK: omp.wsloop reduction(@[[IOR_DECLARE_I]] -> %[[X_DECL]]#0 : !fir.ref<i32>) for
-!CHECK: fir.store %{{.*}} to %[[I_DECL]]#1 : !fir.ref<i32>
-!CHECK: %[[I_32:.*]] = fir.load %[[I_DECL]]#0 : !fir.ref<i32>
-!CHECK: %[[I_64:.*]] = fir.convert %[[I_32]] : (i32) -> i64
-!CHECK: %[[Y_I_REF:.*]] = hlfir.designate %[[Y_DECL]]#0 (%[[I_64]]) : (!fir.box<!fir.array<?xi32>>, i64) -> !fir.ref<i32>
-!CHECK: %[[Y_I:.*]] = fir.load %[[Y_I_REF]] : !fir.ref<i32>
-!CHECK: omp.reduction %[[Y_I]], %[[X_DECL]]#0 : i32, !fir.ref<i32>
-!CHECK: omp.yield
-!CHECK: omp.terminator
subroutine reduction_ior(y)
integer :: x, y(:)
diff --git a/flang/test/Lower/OpenMP/wsloop-reduction-logical-and.f90 b/flang/test/Lower/OpenMP/wsloop-reduction-logical-and.f90
index 243c8a1..17d3216 100644
--- a/flang/test/Lower/OpenMP/wsloop-reduction-logical-and.f90
+++ b/flang/test/Lower/OpenMP/wsloop-reduction-logical-and.f90
@@ -1,77 +1,106 @@
! RUN: bbc -emit-hlfir -fopenmp %s -o - | FileCheck %s
! RUN: %flang_fc1 -emit-hlfir -fopenmp %s -o - | FileCheck %s
-!CHECK-LABEL: omp.reduction.declare
-!CHECK-SAME: @[[RED_NAME:.*]] : !fir.logical<4> init {
-!CHECK: ^bb0(%{{.*}}: !fir.logical<4>):
-!CHECK: %true = arith.constant true
-!CHECK: %[[true_fir:.*]] = fir.convert %true : (i1) -> !fir.logical<4>
-!CHECK: omp.yield(%[[true_fir]] : !fir.logical<4>)
-!CHECK: } combiner {
-!CHECK: ^bb0(%[[ARG0:.*]]: !fir.logical<4>, %[[ARG1:.*]]: !fir.logical<4>):
-!CHECK: %[[arg0_i1:.*]] = fir.convert %[[ARG0]] : (!fir.logical<4>) -> i1
-!CHECK: %[[arg1_i1:.*]] = fir.convert %[[ARG1]] : (!fir.logical<4>) -> i1
-!CHECK: %[[RES:.*]] = arith.andi %[[arg0_i1]], %[[arg1_i1]] : i1
-!CHECK: %[[RES_logical:.*]] = fir.convert %[[RES]] : (i1) -> !fir.logical<4>
-!CHECK: omp.yield(%[[RES_logical]] : !fir.logical<4>)
-!CHECK: }
+! NOTE: Assertions have been autogenerated by utils/generate-test-checks.py
+
+! CHECK-LABEL: omp.reduction.declare @and_reduction : !fir.logical<4> init {
+! CHECK: ^bb0(%[[VAL_0:.*]]: !fir.logical<4>):
+! CHECK: %[[VAL_1:.*]] = arith.constant true
+! CHECK: %[[VAL_2:.*]] = fir.convert %[[VAL_1]] : (i1) -> !fir.logical<4>
+! CHECK: omp.yield(%[[VAL_2]] : !fir.logical<4>)
+
+! CHECK-LABEL: } combiner {
+! CHECK: ^bb0(%[[VAL_0:.*]]: !fir.logical<4>, %[[VAL_1:.*]]: !fir.logical<4>):
+! CHECK: %[[VAL_2:.*]] = fir.convert %[[VAL_0]] : (!fir.logical<4>) -> i1
+! CHECK: %[[VAL_3:.*]] = fir.convert %[[VAL_1]] : (!fir.logical<4>) -> i1
+! CHECK: %[[VAL_4:.*]] = arith.andi %[[VAL_2]], %[[VAL_3]] : i1
+! CHECK: %[[VAL_5:.*]] = fir.convert %[[VAL_4]] : (i1) -> !fir.logical<4>
+! CHECK: omp.yield(%[[VAL_5]] : !fir.logical<4>)
+! CHECK: }
+
+! CHECK-LABEL: func.func @_QPsimple_reduction(
+! CHECK-SAME: %[[VAL_0:.*]]: !fir.ref<!fir.array<100x!fir.logical<4>>> {fir.bindc_name = "y"}) {
+! CHECK: %[[VAL_1:.*]] = fir.alloca i32 {bindc_name = "i", uniq_name = "_QFsimple_reductionEi"}
+! CHECK: %[[VAL_2:.*]]:2 = hlfir.declare %[[VAL_1]] {uniq_name = "_QFsimple_reductionEi"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
+! CHECK: %[[VAL_3:.*]] = fir.alloca !fir.logical<4> {bindc_name = "x", uniq_name = "_QFsimple_reductionEx"}
+! CHECK: %[[VAL_4:.*]]:2 = hlfir.declare %[[VAL_3]] {uniq_name = "_QFsimple_reductionEx"} : (!fir.ref<!fir.logical<4>>) -> (!fir.ref<!fir.logical<4>>, !fir.ref<!fir.logical<4>>)
+! CHECK: %[[VAL_5:.*]] = arith.constant 100 : index
+! CHECK: %[[VAL_6:.*]] = fir.shape %[[VAL_5]] : (index) -> !fir.shape<1>
+! CHECK: %[[VAL_7:.*]]:2 = hlfir.declare %[[VAL_0]](%[[VAL_6]]) {uniq_name = "_QFsimple_reductionEy"} : (!fir.ref<!fir.array<100x!fir.logical<4>>>, !fir.shape<1>) -> (!fir.ref<!fir.array<100x!fir.logical<4>>>, !fir.ref<!fir.array<100x!fir.logical<4>>>)
+! CHECK: %[[VAL_8:.*]] = arith.constant true
+! CHECK: %[[VAL_9:.*]] = fir.convert %[[VAL_8]] : (i1) -> !fir.logical<4>
+! CHECK: hlfir.assign %[[VAL_9]] to %[[VAL_4]]#0 : !fir.logical<4>, !fir.ref<!fir.logical<4>>
+! CHECK: omp.parallel {
+! CHECK: %[[VAL_10:.*]] = fir.alloca i32 {adapt.valuebyref, pinned}
+! CHECK: %[[VAL_11:.*]]:2 = hlfir.declare %[[VAL_10]] {uniq_name = "_QFsimple_reductionEi"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
+! CHECK: %[[VAL_12:.*]] = arith.constant 1 : i32
+! CHECK: %[[VAL_13:.*]] = arith.constant 100 : i32
+! CHECK: %[[VAL_14:.*]] = arith.constant 1 : i32
+! CHECK: omp.wsloop reduction(@and_reduction %[[VAL_4]]#0 -> %[[VAL_15:.*]] : !fir.ref<!fir.logical<4>>) for (%[[VAL_16:.*]]) : i32 = (%[[VAL_12]]) to (%[[VAL_13]]) inclusive step (%[[VAL_14]]) {
+! CHECK: fir.store %[[VAL_16]] to %[[VAL_11]]#1 : !fir.ref<i32>
+! CHECK: %[[VAL_17:.*]]:2 = hlfir.declare %[[VAL_15]] {uniq_name = "_QFsimple_reductionEx"} : (!fir.ref<!fir.logical<4>>) -> (!fir.ref<!fir.logical<4>>, !fir.ref<!fir.logical<4>>)
+! CHECK: %[[VAL_18:.*]] = fir.load %[[VAL_17]]#0 : !fir.ref<!fir.logical<4>>
+! CHECK: %[[VAL_19:.*]] = fir.load %[[VAL_11]]#0 : !fir.ref<i32>
+! CHECK: %[[VAL_20:.*]] = fir.convert %[[VAL_19]] : (i32) -> i64
+! CHECK: %[[VAL_21:.*]] = hlfir.designate %[[VAL_7]]#0 (%[[VAL_20]]) : (!fir.ref<!fir.array<100x!fir.logical<4>>>, i64) -> !fir.ref<!fir.logical<4>>
+! CHECK: %[[VAL_22:.*]] = fir.load %[[VAL_21]] : !fir.ref<!fir.logical<4>>
+! CHECK: %[[VAL_23:.*]] = fir.convert %[[VAL_18]] : (!fir.logical<4>) -> i1
+! CHECK: %[[VAL_24:.*]] = fir.convert %[[VAL_22]] : (!fir.logical<4>) -> i1
+! CHECK: %[[VAL_25:.*]] = arith.andi %[[VAL_23]], %[[VAL_24]] : i1
+! CHECK: %[[VAL_26:.*]] = fir.convert %[[VAL_25]] : (i1) -> !fir.logical<4>
+! CHECK: hlfir.assign %[[VAL_26]] to %[[VAL_17]]#0 : !fir.logical<4>, !fir.ref<!fir.logical<4>>
+! CHECK: omp.yield
+! CHECK: omp.terminator
+! CHECK: return
-!CHECK-LABEL: func.func @_QPsimple_reduction(
-!CHECK-SAME: %[[ARRAY:.*]]: !fir.ref<!fir.array<100x!fir.logical<4>>> {fir.bindc_name = "y"}) {
-!CHECK: %[[IREF:.*]] = fir.alloca i32 {bindc_name = "i", uniq_name = "_QFsimple_reductionEi"}
-!CHECK: %[[XREF:.*]] = fir.alloca !fir.logical<4> {bindc_name = "x", uniq_name = "_QFsimple_reductionEx"}
-!CHECK: %[[X_DECL:.*]]:2 = hlfir.declare %[[XREF]] {uniq_name = "_QFsimple_reductionEx"} : (!fir.ref<!fir.logical<4>>) -> (!fir.ref<!fir.logical<4>>, !fir.ref<!fir.logical<4>>)
-!CHECK: %[[Y_DECL:.*]]:2 = hlfir.declare %[[ARRAY]](%4) {uniq_name = "_QFsimple_reductionEy"} : (!fir.ref<!fir.array<100x!fir.logical<4>>>, !fir.shape<1>) -> (!fir.ref<!fir.array<100x!fir.logical<4>>>, !fir.ref<!fir.array<100x!fir.logical<4>>>)
-!CHECK: omp.parallel
-!CHECK: %[[I_PVT_REF:.*]] = fir.alloca i32 {adapt.valuebyref, pinned}
-!CHECK: %[[I_PVT_DECL:.*]]:2 = hlfir.declare %[[I_PVT_REF]] {uniq_name = "_QFsimple_reductionEi"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
-!CHECK: %[[C1_1:.*]] = arith.constant 1 : i32
-!CHECK: %[[C100:.*]] = arith.constant 100 : i32
-!CHECK: %[[C1_2:.*]] = arith.constant 1 : i32
-!CHECK: omp.wsloop reduction(@[[RED_NAME]] -> %[[X_DECL]]#0 : !fir.ref<!fir.logical<4>>) for (%[[IVAL:.*]]) : i32 = (%[[C1_1]]) to (%[[C100]]) inclusive step (%[[C1_2]]) {
-!CHECK: fir.store %[[IVAL]] to %[[I_PVT_DECL]]#1 : !fir.ref<i32>
-!CHECK: %[[I_PVT:.*]] = fir.load %[[I_PVT_DECL]]#0 : !fir.ref<i32>
-!CHECK: %[[I_PVT_64:.*]] = fir.convert %[[I_PVT]] : (i32) -> i64
-!CHECK: %[[Y_I_REF:.*]] = hlfir.designate %[[Y_DECL]]#0 (%[[I_PVT_64]]) : (!fir.ref<!fir.array<100x!fir.logical<4>>>, i64) -> !fir.ref<!fir.logical<4>>
-!CHECK: %[[Y_I_VAL:.*]] = fir.load %[[Y_I_REF]] : !fir.ref<!fir.logical<4>>
-!CHECK: omp.reduction %[[Y_I_VAL]], %[[X_DECL]]#0 : !fir.logical<4>, !fir.ref<!fir.logical<4>>
-!CHECK: omp.yield
-!CHECK: omp.terminator
-!CHECK: return
subroutine simple_reduction(y)
logical :: x, y(100)
x = .true.
!$omp parallel
!$omp do reduction(.and.:x)
do i=1, 100
- x = x .and. y(i)
+ x = x .and. y(i)
end do
!$omp end do
!$omp end parallel
-end subroutine
+end subroutine simple_reduction
+
+
+! CHECK-LABEL: func.func @_QPsimple_reduction_switch_order(
+! CHECK-SAME: %[[VAL_0:.*]]: !fir.ref<!fir.array<100x!fir.logical<4>>> {fir.bindc_name = "y"}) {
+! CHECK: %[[VAL_1:.*]] = fir.alloca i32 {bindc_name = "i", uniq_name = "_QFsimple_reduction_switch_orderEi"}
+! CHECK: %[[VAL_2:.*]]:2 = hlfir.declare %[[VAL_1]] {uniq_name = "_QFsimple_reduction_switch_orderEi"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
+! CHECK: %[[VAL_3:.*]] = fir.alloca !fir.logical<4> {bindc_name = "x", uniq_name = "_QFsimple_reduction_switch_orderEx"}
+! CHECK: %[[VAL_4:.*]]:2 = hlfir.declare %[[VAL_3]] {uniq_name = "_QFsimple_reduction_switch_orderEx"} : (!fir.ref<!fir.logical<4>>) -> (!fir.ref<!fir.logical<4>>, !fir.ref<!fir.logical<4>>)
+! CHECK: %[[VAL_5:.*]] = arith.constant 100 : index
+! CHECK: %[[VAL_6:.*]] = fir.shape %[[VAL_5]] : (index) -> !fir.shape<1>
+! CHECK: %[[VAL_7:.*]]:2 = hlfir.declare %[[VAL_0]](%[[VAL_6]]) {uniq_name = "_QFsimple_reduction_switch_orderEy"} : (!fir.ref<!fir.array<100x!fir.logical<4>>>, !fir.shape<1>) -> (!fir.ref<!fir.array<100x!fir.logical<4>>>, !fir.ref<!fir.array<100x!fir.logical<4>>>)
+! CHECK: %[[VAL_8:.*]] = arith.constant true
+! CHECK: %[[VAL_9:.*]] = fir.convert %[[VAL_8]] : (i1) -> !fir.logical<4>
+! CHECK: hlfir.assign %[[VAL_9]] to %[[VAL_4]]#0 : !fir.logical<4>, !fir.ref<!fir.logical<4>>
+! CHECK: omp.parallel {
+! CHECK: %[[VAL_10:.*]] = fir.alloca i32 {adapt.valuebyref, pinned}
+! CHECK: %[[VAL_11:.*]]:2 = hlfir.declare %[[VAL_10]] {uniq_name = "_QFsimple_reduction_switch_orderEi"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
+! CHECK: %[[VAL_12:.*]] = arith.constant 1 : i32
+! CHECK: %[[VAL_13:.*]] = arith.constant 100 : i32
+! CHECK: %[[VAL_14:.*]] = arith.constant 1 : i32
+! CHECK: omp.wsloop reduction(@and_reduction %[[VAL_4]]#0 -> %[[VAL_15:.*]] : !fir.ref<!fir.logical<4>>) for (%[[VAL_16:.*]]) : i32 = (%[[VAL_12]]) to (%[[VAL_13]]) inclusive step (%[[VAL_14]]) {
+! CHECK: fir.store %[[VAL_16]] to %[[VAL_11]]#1 : !fir.ref<i32>
+! CHECK: %[[VAL_17:.*]]:2 = hlfir.declare %[[VAL_15]] {uniq_name = "_QFsimple_reduction_switch_orderEx"} : (!fir.ref<!fir.logical<4>>) -> (!fir.ref<!fir.logical<4>>, !fir.ref<!fir.logical<4>>)
+! CHECK: %[[VAL_18:.*]] = fir.load %[[VAL_11]]#0 : !fir.ref<i32>
+! CHECK: %[[VAL_19:.*]] = fir.convert %[[VAL_18]] : (i32) -> i64
+! CHECK: %[[VAL_20:.*]] = hlfir.designate %[[VAL_7]]#0 (%[[VAL_19]]) : (!fir.ref<!fir.array<100x!fir.logical<4>>>, i64) -> !fir.ref<!fir.logical<4>>
+! CHECK: %[[VAL_21:.*]] = fir.load %[[VAL_20]] : !fir.ref<!fir.logical<4>>
+! CHECK: %[[VAL_22:.*]] = fir.load %[[VAL_17]]#0 : !fir.ref<!fir.logical<4>>
+! CHECK: %[[VAL_23:.*]] = fir.convert %[[VAL_21]] : (!fir.logical<4>) -> i1
+! CHECK: %[[VAL_24:.*]] = fir.convert %[[VAL_22]] : (!fir.logical<4>) -> i1
+! CHECK: %[[VAL_25:.*]] = arith.andi %[[VAL_23]], %[[VAL_24]] : i1
+! CHECK: %[[VAL_26:.*]] = fir.convert %[[VAL_25]] : (i1) -> !fir.logical<4>
+! CHECK: hlfir.assign %[[VAL_26]] to %[[VAL_17]]#0 : !fir.logical<4>, !fir.ref<!fir.logical<4>>
+! CHECK: omp.yield
+! CHECK: omp.terminator
+! CHECK: return
-!CHECK-LABEL: func.func @_QPsimple_reduction_switch_order(
-!CHECK-SAME: %[[ARRAY:.*]]: !fir.ref<!fir.array<100x!fir.logical<4>>> {fir.bindc_name = "y"}) {
-!CHECK: %[[IREF:.*]] = fir.alloca i32 {bindc_name = "i", uniq_name = "_QFsimple_reduction_switch_orderEi"}
-!CHECK: %[[XREF:.*]] = fir.alloca !fir.logical<4> {bindc_name = "x", uniq_name = "_QFsimple_reduction_switch_orderEx"}
-!CHECK: %[[X_DECL:.*]]:2 = hlfir.declare %[[XREF]] {uniq_name = "_QFsimple_reduction_switch_orderEx"} : (!fir.ref<!fir.logical<4>>) -> (!fir.ref<!fir.logical<4>>, !fir.ref<!fir.logical<4>>)
-!CHECK: %[[Y_DECL:.*]]:2 = hlfir.declare %[[ARRAY]](%{{.*}}) {uniq_name = "_QFsimple_reduction_switch_orderEy"} : (!fir.ref<!fir.array<100x!fir.logical<4>>>, !fir.shape<1>) -> (!fir.ref<!fir.array<100x!fir.logical<4>>>, !fir.ref<!fir.array<100x!fir.logical<4>>>)
-!CHECK: omp.parallel
-!CHECK: %[[I_PVT_REF:.*]] = fir.alloca i32 {adapt.valuebyref, pinned}
-!CHECK: %[[I_PVT_DECL:.*]]:2 = hlfir.declare %[[I_PVT_REF]] {uniq_name = "_QFsimple_reduction_switch_orderEi"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
-!CHECK: %[[C1_1:.*]] = arith.constant 1 : i32
-!CHECK: %[[C100:.*]] = arith.constant 100 : i32
-!CHECK: %[[C1_2:.*]] = arith.constant 1 : i32
-!CHECK: omp.wsloop reduction(@[[RED_NAME]] -> %[[X_DECL]]#0 : !fir.ref<!fir.logical<4>>) for (%[[IVAL:.*]]) : i32 = (%[[C1_1]]) to (%[[C100]]) inclusive step (%[[C1_2]]) {
-!CHECK: fir.store %[[IVAL]] to %[[I_PVT_DECL]]#1 : !fir.ref<i32>
-!CHECK: %[[I_PVT_VAL:.*]] = fir.load %[[I_PVT_DECL]]#0 : !fir.ref<i32>
-!CHECK: %[[CONVI_64:.*]] = fir.convert %[[I_PVT_VAL]] : (i32) -> i64
-!CHECK: %[[Y_I_REF:.*]] = hlfir.designate %[[Y_DECL]]#0 (%[[CONVI_64]]) : (!fir.ref<!fir.array<100x!fir.logical<4>>>, i64) -> !fir.ref<!fir.logical<4>>
-!CHECK: %[[YVAL:.*]] = fir.load %[[Y_I_REF]] : !fir.ref<!fir.logical<4>>
-!CHECK: omp.reduction %[[YVAL]], %[[X_DECL]]#0 : !fir.logical<4>, !fir.ref<!fir.logical<4>>
-!CHECK: omp.yield
-!CHECK: omp.terminator
-!CHECK: return
subroutine simple_reduction_switch_order(y)
logical :: x, y(100)
x = .true.
@@ -84,43 +113,75 @@ subroutine simple_reduction_switch_order(y)
!$omp end parallel
end subroutine
-!CHECK-LABEL: func.func @_QPmultiple_reductions
-!CHECK-SAME %[[ARRAY:.*]]: !fir.ref<!fir.array<100x!fir.logical<4>>> {fir.bindc_name = "w"}) {
-!CHECK: %[[IREF:.*]] = fir.alloca i32 {bindc_name = "i", uniq_name = "_QFmultiple_reductionsEi"}
-!CHECK: %[[I_DECL:.*]]:2 = hlfir.declare %[[IREF]] {uniq_name = "_QFmultiple_reductionsEi"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
-!CHECK: %[[W_DECL:.*]]:2 = hlfir.declare %[[ARRAY]](%{{.*}}) {uniq_name = "_QFmultiple_reductionsEw"} : (!fir.ref<!fir.array<100x!fir.logical<4>>>, !fir.shape<1>) -> (!fir.ref<!fir.array<100x!fir.logical<4>>>, !fir.ref<!fir.array<100x!fir.logical<4>>>)
-!CHECK: %[[XREF:.*]] = fir.alloca !fir.logical<4> {bindc_name = "x", uniq_name = "_QFmultiple_reductionsEx"}
-!CHECK: %[[X_DECL:.*]]:2 = hlfir.declare %[[XREF]] {uniq_name = "_QFmultiple_reductionsEx"} : (!fir.ref<!fir.logical<4>>) -> (!fir.ref<!fir.logical<4>>, !fir.ref<!fir.logical<4>>)
-!CHECK: %[[YREF:.*]] = fir.alloca !fir.logical<4> {bindc_name = "y", uniq_name = "_QFmultiple_reductionsEy"}
-!CHECK: %[[Y_DECL:.*]]:2 = hlfir.declare %[[YREF]] {uniq_name = "_QFmultiple_reductionsEy"} : (!fir.ref<!fir.logical<4>>) -> (!fir.ref<!fir.logical<4>>, !fir.ref<!fir.logical<4>>)
-!CHECK: %[[ZREF:.*]] = fir.alloca !fir.logical<4> {bindc_name = "z", uniq_name = "_QFmultiple_reductionsEz"}
-!CHECK: %[[Z_DECL:.*]]:2 = hlfir.declare %[[ZREF]] {uniq_name = "_QFmultiple_reductionsEz"} : (!fir.ref<!fir.logical<4>>) -> (!fir.ref<!fir.logical<4>>, !fir.ref<!fir.logical<4>>)
-!CHECK: omp.parallel
-!CHECK: %[[I_PVT_REF:.*]] = fir.alloca i32 {adapt.valuebyref, pinned}
-!CHECK: %[[I_PVT_DECL:.*]]:2 = hlfir.declare %[[I_PVT_REF]] {uniq_name = "_QFmultiple_reductionsEi"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
-!CHECK: %[[C1_1:.*]] = arith.constant 1 : i32
-!CHECK: %[[C100:.*]] = arith.constant 100 : i32
-!CHECK: %[[C1_2:.*]] = arith.constant 1 : i32
-!CHECK: omp.wsloop reduction(@[[RED_NAME]] -> %[[X_DECL]]#0 : !fir.ref<!fir.logical<4>>, @[[RED_NAME]] -> %[[Y_DECL]]#0 : !fir.ref<!fir.logical<4>>, @[[RED_NAME]] -> %[[Z_DECL]]#0 : !fir.ref<!fir.logical<4>>) for (%[[IVAL:.*]]) : i32 = (%[[C1_1]]) to (%[[C100]]) inclusive step (%[[C1_2]]) {
-!CHECK: fir.store %[[IVAL]] to %[[I_PVT_DECL]]#1 : !fir.ref<i32>
-!CHECK: %[[I_PVT_VAL1:.*]] = fir.load %[[I_PVT_DECL]]#0 : !fir.ref<i32>
-!CHECK: %[[CONVI_64_1:.*]] = fir.convert %[[I_PVT_VAL1]] : (i32) -> i64
-!CHECK: %[[W_I_REF:.*]] = hlfir.designate %[[W_DECL]]#0 (%[[CONVI_64_1]]) : (!fir.ref<!fir.array<100x!fir.logical<4>>>, i64) -> !fir.ref<!fir.logical<4>>
-!CHECK: %[[W_I_VAL:.*]] = fir.load %[[W_I_REF]] : !fir.ref<!fir.logical<4>>
-!CHECK: omp.reduction %[[W_I_VAL]], %[[X_DECL]]#0 : !fir.logical<4>, !fir.ref<!fir.logical<4>>
-!CHECK: %[[I_PVT_VAL2:.*]] = fir.load %[[I_PVT_DECL]]#0 : !fir.ref<i32>
-!CHECK: %[[CONVI_64_2:.*]] = fir.convert %[[I_PVT_VAL2]] : (i32) -> i64
-!CHECK: %[[W_I_REF:.*]] = hlfir.designate %[[W_DECL]]#0 (%[[CONVI_64_2]]) : (!fir.ref<!fir.array<100x!fir.logical<4>>>, i64) -> !fir.ref<!fir.logical<4>>
-!CHECK: %[[W_I_VAL:.*]] = fir.load %[[W_I_REF]] : !fir.ref<!fir.logical<4>>
-!CHECK: omp.reduction %[[W_I_VAL]], %[[Y_DECL]]#0 : !fir.logical<4>, !fir.ref<!fir.logical<4>>
-!CHECK: %[[I_PVT_VAL2:.*]] = fir.load %[[I_PVT_DECL]]#0 : !fir.ref<i32>
-!CHECK: %[[CONVI_64_2:.*]] = fir.convert %[[I_PVT_VAL2]] : (i32) -> i64
-!CHECK: %[[W_I_REF:.*]] = hlfir.designate %[[W_DECL]]#0 (%[[CONVI_64_2]]) : (!fir.ref<!fir.array<100x!fir.logical<4>>>, i64) -> !fir.ref<!fir.logical<4>>
-!CHECK: %[[W_I_VAL:.*]] = fir.load %[[W_I_REF]] : !fir.ref<!fir.logical<4>>
-!CHECK: omp.reduction %[[W_I_VAL]], %[[Z_DECL]]#0 : !fir.logical<4>, !fir.ref<!fir.logical<4>>
-!CHECK: omp.yield
-!CHECK: omp.terminator
-!CHECK: return
+! CHECK-LABEL: func.func @_QPmultiple_reductions(
+! CHECK-SAME: %[[VAL_0:.*]]: !fir.ref<!fir.array<100x!fir.logical<4>>> {fir.bindc_name = "w"}) {
+! CHECK: %[[VAL_1:.*]] = fir.alloca i32 {bindc_name = "i", uniq_name = "_QFmultiple_reductionsEi"}
+! CHECK: %[[VAL_2:.*]]:2 = hlfir.declare %[[VAL_1]] {uniq_name = "_QFmultiple_reductionsEi"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
+! CHECK: %[[VAL_3:.*]] = arith.constant 100 : index
+! CHECK: %[[VAL_4:.*]] = fir.shape %[[VAL_3]] : (index) -> !fir.shape<1>
+! CHECK: %[[VAL_5:.*]]:2 = hlfir.declare %[[VAL_0]](%[[VAL_4]]) {uniq_name = "_QFmultiple_reductionsEw"} : (!fir.ref<!fir.array<100x!fir.logical<4>>>, !fir.shape<1>) -> (!fir.ref<!fir.array<100x!fir.logical<4>>>, !fir.ref<!fir.array<100x!fir.logical<4>>>)
+! CHECK: %[[VAL_6:.*]] = fir.alloca !fir.logical<4> {bindc_name = "x", uniq_name = "_QFmultiple_reductionsEx"}
+! CHECK: %[[VAL_7:.*]]:2 = hlfir.declare %[[VAL_6]] {uniq_name = "_QFmultiple_reductionsEx"} : (!fir.ref<!fir.logical<4>>) -> (!fir.ref<!fir.logical<4>>, !fir.ref<!fir.logical<4>>)
+! CHECK: %[[VAL_8:.*]] = fir.alloca !fir.logical<4> {bindc_name = "y", uniq_name = "_QFmultiple_reductionsEy"}
+! CHECK: %[[VAL_9:.*]]:2 = hlfir.declare %[[VAL_8]] {uniq_name = "_QFmultiple_reductionsEy"} : (!fir.ref<!fir.logical<4>>) -> (!fir.ref<!fir.logical<4>>, !fir.ref<!fir.logical<4>>)
+! CHECK: %[[VAL_10:.*]] = fir.alloca !fir.logical<4> {bindc_name = "z", uniq_name = "_QFmultiple_reductionsEz"}
+! CHECK: %[[VAL_11:.*]]:2 = hlfir.declare %[[VAL_10]] {uniq_name = "_QFmultiple_reductionsEz"} : (!fir.ref<!fir.logical<4>>) -> (!fir.ref<!fir.logical<4>>, !fir.ref<!fir.logical<4>>)
+! CHECK: %[[VAL_12:.*]] = arith.constant true
+! CHECK: %[[VAL_13:.*]] = fir.convert %[[VAL_12]] : (i1) -> !fir.logical<4>
+! CHECK: hlfir.assign %[[VAL_13]] to %[[VAL_7]]#0 : !fir.logical<4>, !fir.ref<!fir.logical<4>>
+! CHECK: %[[VAL_14:.*]] = arith.constant true
+! CHECK: %[[VAL_15:.*]] = fir.convert %[[VAL_14]] : (i1) -> !fir.logical<4>
+! CHECK: hlfir.assign %[[VAL_15]] to %[[VAL_9]]#0 : !fir.logical<4>, !fir.ref<!fir.logical<4>>
+! CHECK: %[[VAL_16:.*]] = arith.constant true
+! CHECK: %[[VAL_17:.*]] = fir.convert %[[VAL_16]] : (i1) -> !fir.logical<4>
+! CHECK: hlfir.assign %[[VAL_17]] to %[[VAL_11]]#0 : !fir.logical<4>, !fir.ref<!fir.logical<4>>
+! CHECK: omp.parallel {
+! CHECK: %[[VAL_18:.*]] = fir.alloca i32 {adapt.valuebyref, pinned}
+! CHECK: %[[VAL_19:.*]]:2 = hlfir.declare %[[VAL_18]] {uniq_name = "_QFmultiple_reductionsEi"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
+! CHECK: %[[VAL_20:.*]] = arith.constant 1 : i32
+! CHECK: %[[VAL_21:.*]] = arith.constant 100 : i32
+! CHECK: %[[VAL_22:.*]] = arith.constant 1 : i32
+! CHECK: omp.wsloop reduction(@and_reduction %[[VAL_7]]#0 -> %[[VAL_23:.*]] : !fir.ref<!fir.logical<4>>, @and_reduction %[[VAL_9]]#0 -> %[[VAL_24:.*]] : !fir.ref<!fir.logical<4>>, @and_reduction %[[VAL_11]]#0 -> %[[VAL_25:.*]] : !fir.ref<!fir.logical<4>>) for (%[[VAL_26:.*]]) : i32 = (%[[VAL_20]]) to (%[[VAL_21]]) inclusive step (%[[VAL_22]]) {
+! CHECK: fir.store %[[VAL_26]] to %[[VAL_19]]#1 : !fir.ref<i32>
+! CHECK: %[[VAL_27:.*]]:2 = hlfir.declare %[[VAL_23]] {uniq_name = "_QFmultiple_reductionsEx"} : (!fir.ref<!fir.logical<4>>) -> (!fir.ref<!fir.logical<4>>, !fir.ref<!fir.logical<4>>)
+! CHECK: %[[VAL_28:.*]]:2 = hlfir.declare %[[VAL_24]] {uniq_name = "_QFmultiple_reductionsEy"} : (!fir.ref<!fir.logical<4>>) -> (!fir.ref<!fir.logical<4>>, !fir.ref<!fir.logical<4>>)
+! CHECK: %[[VAL_29:.*]]:2 = hlfir.declare %[[VAL_25]] {uniq_name = "_QFmultiple_reductionsEz"} : (!fir.ref<!fir.logical<4>>) -> (!fir.ref<!fir.logical<4>>, !fir.ref<!fir.logical<4>>)
+! CHECK: %[[VAL_30:.*]] = fir.load %[[VAL_27]]#0 : !fir.ref<!fir.logical<4>>
+! CHECK: %[[VAL_31:.*]] = fir.load %[[VAL_19]]#0 : !fir.ref<i32>
+! CHECK: %[[VAL_32:.*]] = fir.convert %[[VAL_31]] : (i32) -> i64
+! CHECK: %[[VAL_33:.*]] = hlfir.designate %[[VAL_5]]#0 (%[[VAL_32]]) : (!fir.ref<!fir.array<100x!fir.logical<4>>>, i64) -> !fir.ref<!fir.logical<4>>
+! CHECK: %[[VAL_34:.*]] = fir.load %[[VAL_33]] : !fir.ref<!fir.logical<4>>
+! CHECK: %[[VAL_35:.*]] = fir.convert %[[VAL_30]] : (!fir.logical<4>) -> i1
+! CHECK: %[[VAL_36:.*]] = fir.convert %[[VAL_34]] : (!fir.logical<4>) -> i1
+! CHECK: %[[VAL_37:.*]] = arith.andi %[[VAL_35]], %[[VAL_36]] : i1
+! CHECK: %[[VAL_38:.*]] = fir.convert %[[VAL_37]] : (i1) -> !fir.logical<4>
+! CHECK: hlfir.assign %[[VAL_38]] to %[[VAL_27]]#0 : !fir.logical<4>, !fir.ref<!fir.logical<4>>
+! CHECK: %[[VAL_39:.*]] = fir.load %[[VAL_28]]#0 : !fir.ref<!fir.logical<4>>
+! CHECK: %[[VAL_40:.*]] = fir.load %[[VAL_19]]#0 : !fir.ref<i32>
+! CHECK: %[[VAL_41:.*]] = fir.convert %[[VAL_40]] : (i32) -> i64
+! CHECK: %[[VAL_42:.*]] = hlfir.designate %[[VAL_5]]#0 (%[[VAL_41]]) : (!fir.ref<!fir.array<100x!fir.logical<4>>>, i64) -> !fir.ref<!fir.logical<4>>
+! CHECK: %[[VAL_43:.*]] = fir.load %[[VAL_42]] : !fir.ref<!fir.logical<4>>
+! CHECK: %[[VAL_44:.*]] = fir.convert %[[VAL_39]] : (!fir.logical<4>) -> i1
+! CHECK: %[[VAL_45:.*]] = fir.convert %[[VAL_43]] : (!fir.logical<4>) -> i1
+! CHECK: %[[VAL_46:.*]] = arith.andi %[[VAL_44]], %[[VAL_45]] : i1
+! CHECK: %[[VAL_47:.*]] = fir.convert %[[VAL_46]] : (i1) -> !fir.logical<4>
+! CHECK: hlfir.assign %[[VAL_47]] to %[[VAL_28]]#0 : !fir.logical<4>, !fir.ref<!fir.logical<4>>
+! CHECK: %[[VAL_48:.*]] = fir.load %[[VAL_29]]#0 : !fir.ref<!fir.logical<4>>
+! CHECK: %[[VAL_49:.*]] = fir.load %[[VAL_19]]#0 : !fir.ref<i32>
+! CHECK: %[[VAL_50:.*]] = fir.convert %[[VAL_49]] : (i32) -> i64
+! CHECK: %[[VAL_51:.*]] = hlfir.designate %[[VAL_5]]#0 (%[[VAL_50]]) : (!fir.ref<!fir.array<100x!fir.logical<4>>>, i64) -> !fir.ref<!fir.logical<4>>
+! CHECK: %[[VAL_52:.*]] = fir.load %[[VAL_51]] : !fir.ref<!fir.logical<4>>
+! CHECK: %[[VAL_53:.*]] = fir.convert %[[VAL_48]] : (!fir.logical<4>) -> i1
+! CHECK: %[[VAL_54:.*]] = fir.convert %[[VAL_52]] : (!fir.logical<4>) -> i1
+! CHECK: %[[VAL_55:.*]] = arith.andi %[[VAL_53]], %[[VAL_54]] : i1
+! CHECK: %[[VAL_56:.*]] = fir.convert %[[VAL_55]] : (i1) -> !fir.logical<4>
+! CHECK: hlfir.assign %[[VAL_56]] to %[[VAL_29]]#0 : !fir.logical<4>, !fir.ref<!fir.logical<4>>
+! CHECK: omp.yield
+! CHECK: omp.terminator
+! CHECK: return
+
+
+
subroutine multiple_reductions(w)
logical :: x,y,z,w(100)
x = .true.
diff --git a/flang/test/Lower/OpenMP/wsloop-reduction-logical-eqv.f90 b/flang/test/Lower/OpenMP/wsloop-reduction-logical-eqv.f90
index f25ed63..8204e4c 100644
--- a/flang/test/Lower/OpenMP/wsloop-reduction-logical-eqv.f90
+++ b/flang/test/Lower/OpenMP/wsloop-reduction-logical-eqv.f90
@@ -1,43 +1,58 @@
! RUN: bbc -emit-hlfir -fopenmp %s -o - | FileCheck %s
! RUN: %flang_fc1 -emit-hlfir -fopenmp %s -o - | FileCheck %s
-!CHECK-LABEL: omp.reduction.declare
-!CHECK-SAME: @[[RED_NAME:.*]] : !fir.logical<4> init {
-!CHECK: ^bb0(%{{.*}}: !fir.logical<4>):
-!CHECK: %true = arith.constant true
-!CHECK: %[[true_fir:.*]] = fir.convert %true : (i1) -> !fir.logical<4>
-!CHECK: omp.yield(%[[true_fir]] : !fir.logical<4>)
-!CHECK: } combiner {
-!CHECK: ^bb0(%[[ARG0:.*]]: !fir.logical<4>, %[[ARG1:.*]]: !fir.logical<4>):
-!CHECK: %[[arg0_i1:.*]] = fir.convert %[[ARG0]] : (!fir.logical<4>) -> i1
-!CHECK: %[[arg1_i1:.*]] = fir.convert %[[ARG1]] : (!fir.logical<4>) -> i1
-!CHECK: %[[RES:.*]] = arith.cmpi eq, %[[arg0_i1]], %[[arg1_i1]] : i1
-!CHECK: %[[RES_logical:.*]] = fir.convert %[[RES]] : (i1) -> !fir.logical<4>
-!CHECK: omp.yield(%[[RES_logical]] : !fir.logical<4>)
-!CHECK: }
+! NOTE: Assertions have been autogenerated by utils/generate-test-checks.py
+
+! CHECK-LABEL: omp.reduction.declare @eqv_reduction : !fir.logical<4> init {
+! CHECK: ^bb0(%[[VAL_0:.*]]: !fir.logical<4>):
+! CHECK: %[[VAL_1:.*]] = arith.constant true
+! CHECK: %[[VAL_2:.*]] = fir.convert %[[VAL_1]] : (i1) -> !fir.logical<4>
+! CHECK: omp.yield(%[[VAL_2]] : !fir.logical<4>)
+
+! CHECK-LABEL: } combiner {
+! CHECK: ^bb0(%[[VAL_0:.*]]: !fir.logical<4>, %[[VAL_1:.*]]: !fir.logical<4>):
+! CHECK: %[[VAL_2:.*]] = fir.convert %[[VAL_0]] : (!fir.logical<4>) -> i1
+! CHECK: %[[VAL_3:.*]] = fir.convert %[[VAL_1]] : (!fir.logical<4>) -> i1
+! CHECK: %[[VAL_4:.*]] = arith.cmpi eq, %[[VAL_2]], %[[VAL_3]] : i1
+! CHECK: %[[VAL_5:.*]] = fir.convert %[[VAL_4]] : (i1) -> !fir.logical<4>
+! CHECK: omp.yield(%[[VAL_5]] : !fir.logical<4>)
+! CHECK: }
+
+! CHECK-LABEL: func.func @_QPsimple_reduction(
+! CHECK-SAME: %[[VAL_0:.*]]: !fir.ref<!fir.array<100x!fir.logical<4>>> {fir.bindc_name = "y"}) {
+! CHECK: %[[VAL_1:.*]] = fir.alloca i32 {bindc_name = "i", uniq_name = "_QFsimple_reductionEi"}
+! CHECK: %[[VAL_2:.*]]:2 = hlfir.declare %[[VAL_1]] {uniq_name = "_QFsimple_reductionEi"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
+! CHECK: %[[VAL_3:.*]] = fir.alloca !fir.logical<4> {bindc_name = "x", uniq_name = "_QFsimple_reductionEx"}
+! CHECK: %[[VAL_4:.*]]:2 = hlfir.declare %[[VAL_3]] {uniq_name = "_QFsimple_reductionEx"} : (!fir.ref<!fir.logical<4>>) -> (!fir.ref<!fir.logical<4>>, !fir.ref<!fir.logical<4>>)
+! CHECK: %[[VAL_5:.*]] = arith.constant 100 : index
+! CHECK: %[[VAL_6:.*]] = fir.shape %[[VAL_5]] : (index) -> !fir.shape<1>
+! CHECK: %[[VAL_7:.*]]:2 = hlfir.declare %[[VAL_0]](%[[VAL_6]]) {uniq_name = "_QFsimple_reductionEy"} : (!fir.ref<!fir.array<100x!fir.logical<4>>>, !fir.shape<1>) -> (!fir.ref<!fir.array<100x!fir.logical<4>>>, !fir.ref<!fir.array<100x!fir.logical<4>>>)
+! CHECK: %[[VAL_8:.*]] = arith.constant true
+! CHECK: %[[VAL_9:.*]] = fir.convert %[[VAL_8]] : (i1) -> !fir.logical<4>
+! CHECK: hlfir.assign %[[VAL_9]] to %[[VAL_4]]#0 : !fir.logical<4>, !fir.ref<!fir.logical<4>>
+! CHECK: omp.parallel {
+! CHECK: %[[VAL_10:.*]] = fir.alloca i32 {adapt.valuebyref, pinned}
+! CHECK: %[[VAL_11:.*]]:2 = hlfir.declare %[[VAL_10]] {uniq_name = "_QFsimple_reductionEi"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
+! CHECK: %[[VAL_12:.*]] = arith.constant 1 : i32
+! CHECK: %[[VAL_13:.*]] = arith.constant 100 : i32
+! CHECK: %[[VAL_14:.*]] = arith.constant 1 : i32
+! CHECK: omp.wsloop reduction(@eqv_reduction %[[VAL_4]]#0 -> %[[VAL_15:.*]] : !fir.ref<!fir.logical<4>>) for (%[[VAL_16:.*]]) : i32 = (%[[VAL_12]]) to (%[[VAL_13]]) inclusive step (%[[VAL_14]]) {
+! CHECK: fir.store %[[VAL_16]] to %[[VAL_11]]#1 : !fir.ref<i32>
+! CHECK: %[[VAL_17:.*]]:2 = hlfir.declare %[[VAL_15]] {uniq_name = "_QFsimple_reductionEx"} : (!fir.ref<!fir.logical<4>>) -> (!fir.ref<!fir.logical<4>>, !fir.ref<!fir.logical<4>>)
+! CHECK: %[[VAL_18:.*]] = fir.load %[[VAL_17]]#0 : !fir.ref<!fir.logical<4>>
+! CHECK: %[[VAL_19:.*]] = fir.load %[[VAL_11]]#0 : !fir.ref<i32>
+! CHECK: %[[VAL_20:.*]] = fir.convert %[[VAL_19]] : (i32) -> i64
+! CHECK: %[[VAL_21:.*]] = hlfir.designate %[[VAL_7]]#0 (%[[VAL_20]]) : (!fir.ref<!fir.array<100x!fir.logical<4>>>, i64) -> !fir.ref<!fir.logical<4>>
+! CHECK: %[[VAL_22:.*]] = fir.load %[[VAL_21]] : !fir.ref<!fir.logical<4>>
+! CHECK: %[[VAL_23:.*]] = fir.convert %[[VAL_18]] : (!fir.logical<4>) -> i1
+! CHECK: %[[VAL_24:.*]] = fir.convert %[[VAL_22]] : (!fir.logical<4>) -> i1
+! CHECK: %[[VAL_25:.*]] = arith.cmpi eq, %[[VAL_23]], %[[VAL_24]] : i1
+! CHECK: %[[VAL_26:.*]] = fir.convert %[[VAL_25]] : (i1) -> !fir.logical<4>
+! CHECK: hlfir.assign %[[VAL_26]] to %[[VAL_17]]#0 : !fir.logical<4>, !fir.ref<!fir.logical<4>>
+! CHECK: omp.yield
+! CHECK: omp.terminator
+! CHECK: return
-!CHECK-LABEL: func.func @_QPsimple_reduction(
-!CHECK-SAME: %[[ARRAY:.*]]: !fir.ref<!fir.array<100x!fir.logical<4>>> {fir.bindc_name = "y"}) {
-!CHECK: %[[IREF:.*]] = fir.alloca i32 {bindc_name = "i", uniq_name = "_QFsimple_reductionEi"}
-!CHECK: %[[XREF:.*]] = fir.alloca !fir.logical<4> {bindc_name = "x", uniq_name = "_QFsimple_reductionEx"}
-!CHECK: %[[X_DECL:.*]]:2 = hlfir.declare %[[XREF]] {uniq_name = "_QFsimple_reductionEx"} : (!fir.ref<!fir.logical<4>>) -> (!fir.ref<!fir.logical<4>>, !fir.ref<!fir.logical<4>>)
-!CHECK: %[[Y_DECL:.*]]:2 = hlfir.declare %[[ARRAY]](%4) {uniq_name = "_QFsimple_reductionEy"} : (!fir.ref<!fir.array<100x!fir.logical<4>>>, !fir.shape<1>) -> (!fir.ref<!fir.array<100x!fir.logical<4>>>, !fir.ref<!fir.array<100x!fir.logical<4>>>)
-!CHECK: omp.parallel
-!CHECK: %[[I_PVT_REF:.*]] = fir.alloca i32 {adapt.valuebyref, pinned}
-!CHECK: %[[I_PVT_DECL:.*]]:2 = hlfir.declare %[[I_PVT_REF]] {uniq_name = "_QFsimple_reductionEi"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
-!CHECK: %[[C1_1:.*]] = arith.constant 1 : i32
-!CHECK: %[[C100:.*]] = arith.constant 100 : i32
-!CHECK: %[[C1_2:.*]] = arith.constant 1 : i32
-!CHECK: omp.wsloop reduction(@[[RED_NAME]] -> %[[X_DECL]]#0 : !fir.ref<!fir.logical<4>>) for (%[[IVAL:.*]]) : i32 = (%[[C1_1]]) to (%[[C100]]) inclusive step (%[[C1_2]]) {
-!CHECK: fir.store %[[IVAL]] to %[[I_PVT_DECL]]#1 : !fir.ref<i32>
-!CHECK: %[[I_PVT:.*]] = fir.load %[[I_PVT_DECL]]#0 : !fir.ref<i32>
-!CHECK: %[[I_PVT_64:.*]] = fir.convert %[[I_PVT]] : (i32) -> i64
-!CHECK: %[[Y_I_REF:.*]] = hlfir.designate %[[Y_DECL]]#0 (%[[I_PVT_64]]) : (!fir.ref<!fir.array<100x!fir.logical<4>>>, i64) -> !fir.ref<!fir.logical<4>>
-!CHECK: %[[Y_I_VAL:.*]] = fir.load %[[Y_I_REF]] : !fir.ref<!fir.logical<4>>
-!CHECK: omp.reduction %[[Y_I_VAL]], %[[X_DECL]]#0 : !fir.logical<4>, !fir.ref<!fir.logical<4>>
-!CHECK: omp.yield
-!CHECK: omp.terminator
-!CHECK: return
subroutine simple_reduction(y)
logical :: x, y(100)
x = .true.
@@ -50,28 +65,41 @@ subroutine simple_reduction(y)
!$omp end parallel
end subroutine
-!CHECK-LABEL: func.func @_QPsimple_reduction_switch_order(
-!CHECK-SAME: %[[ARRAY:.*]]: !fir.ref<!fir.array<100x!fir.logical<4>>> {fir.bindc_name = "y"}) {
-!CHECK: %[[IREF:.*]] = fir.alloca i32 {bindc_name = "i", uniq_name = "_QFsimple_reduction_switch_orderEi"}
-!CHECK: %[[XREF:.*]] = fir.alloca !fir.logical<4> {bindc_name = "x", uniq_name = "_QFsimple_reduction_switch_orderEx"}
-!CHECK: %[[X_DECL:.*]]:2 = hlfir.declare %[[XREF]] {uniq_name = "_QFsimple_reduction_switch_orderEx"} : (!fir.ref<!fir.logical<4>>) -> (!fir.ref<!fir.logical<4>>, !fir.ref<!fir.logical<4>>)
-!CHECK: %[[Y_DECL:.*]]:2 = hlfir.declare %[[ARRAY]](%{{.*}}) {uniq_name = "_QFsimple_reduction_switch_orderEy"} : (!fir.ref<!fir.array<100x!fir.logical<4>>>, !fir.shape<1>) -> (!fir.ref<!fir.array<100x!fir.logical<4>>>, !fir.ref<!fir.array<100x!fir.logical<4>>>)
-!CHECK: omp.parallel
-!CHECK: %[[I_PVT_REF:.*]] = fir.alloca i32 {adapt.valuebyref, pinned}
-!CHECK: %[[I_PVT_DECL:.*]]:2 = hlfir.declare %[[I_PVT_REF]] {uniq_name = "_QFsimple_reduction_switch_orderEi"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
-!CHECK: %[[C1_1:.*]] = arith.constant 1 : i32
-!CHECK: %[[C100:.*]] = arith.constant 100 : i32
-!CHECK: %[[C1_2:.*]] = arith.constant 1 : i32
-!CHECK: omp.wsloop reduction(@[[RED_NAME]] -> %[[X_DECL]]#0 : !fir.ref<!fir.logical<4>>) for (%[[IVAL:.*]]) : i32 = (%[[C1_1]]) to (%[[C100]]) inclusive step (%[[C1_2]]) {
-!CHECK: fir.store %[[IVAL]] to %[[I_PVT_DECL]]#1 : !fir.ref<i32>
-!CHECK: %[[I_PVT_VAL:.*]] = fir.load %[[I_PVT_DECL]]#0 : !fir.ref<i32>
-!CHECK: %[[CONVI_64:.*]] = fir.convert %[[I_PVT_VAL]] : (i32) -> i64
-!CHECK: %[[Y_I_REF:.*]] = hlfir.designate %[[Y_DECL]]#0 (%[[CONVI_64]]) : (!fir.ref<!fir.array<100x!fir.logical<4>>>, i64) -> !fir.ref<!fir.logical<4>>
-!CHECK: %[[YVAL:.*]] = fir.load %[[Y_I_REF]] : !fir.ref<!fir.logical<4>>
-!CHECK: omp.reduction %[[YVAL]], %[[X_DECL]]#0 : !fir.logical<4>, !fir.ref<!fir.logical<4>>
-!CHECK: omp.yield
-!CHECK: omp.terminator
-!CHECK: return
+! CHECK-LABEL: func.func @_QPsimple_reduction_switch_order(
+! CHECK-SAME: %[[VAL_0:.*]]: !fir.ref<!fir.array<100x!fir.logical<4>>> {fir.bindc_name = "y"}) {
+! CHECK: %[[VAL_1:.*]] = fir.alloca i32 {bindc_name = "i", uniq_name = "_QFsimple_reduction_switch_orderEi"}
+! CHECK: %[[VAL_2:.*]]:2 = hlfir.declare %[[VAL_1]] {uniq_name = "_QFsimple_reduction_switch_orderEi"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
+! CHECK: %[[VAL_3:.*]] = fir.alloca !fir.logical<4> {bindc_name = "x", uniq_name = "_QFsimple_reduction_switch_orderEx"}
+! CHECK: %[[VAL_4:.*]]:2 = hlfir.declare %[[VAL_3]] {uniq_name = "_QFsimple_reduction_switch_orderEx"} : (!fir.ref<!fir.logical<4>>) -> (!fir.ref<!fir.logical<4>>, !fir.ref<!fir.logical<4>>)
+! CHECK: %[[VAL_5:.*]] = arith.constant 100 : index
+! CHECK: %[[VAL_6:.*]] = fir.shape %[[VAL_5]] : (index) -> !fir.shape<1>
+! CHECK: %[[VAL_7:.*]]:2 = hlfir.declare %[[VAL_0]](%[[VAL_6]]) {uniq_name = "_QFsimple_reduction_switch_orderEy"} : (!fir.ref<!fir.array<100x!fir.logical<4>>>, !fir.shape<1>) -> (!fir.ref<!fir.array<100x!fir.logical<4>>>, !fir.ref<!fir.array<100x!fir.logical<4>>>)
+! CHECK: %[[VAL_8:.*]] = arith.constant true
+! CHECK: %[[VAL_9:.*]] = fir.convert %[[VAL_8]] : (i1) -> !fir.logical<4>
+! CHECK: hlfir.assign %[[VAL_9]] to %[[VAL_4]]#0 : !fir.logical<4>, !fir.ref<!fir.logical<4>>
+! CHECK: omp.parallel {
+! CHECK: %[[VAL_10:.*]] = fir.alloca i32 {adapt.valuebyref, pinned}
+! CHECK: %[[VAL_11:.*]]:2 = hlfir.declare %[[VAL_10]] {uniq_name = "_QFsimple_reduction_switch_orderEi"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
+! CHECK: %[[VAL_12:.*]] = arith.constant 1 : i32
+! CHECK: %[[VAL_13:.*]] = arith.constant 100 : i32
+! CHECK: %[[VAL_14:.*]] = arith.constant 1 : i32
+! CHECK: omp.wsloop reduction(@eqv_reduction %[[VAL_4]]#0 -> %[[VAL_15:.*]] : !fir.ref<!fir.logical<4>>) for (%[[VAL_16:.*]]) : i32 = (%[[VAL_12]]) to (%[[VAL_13]]) inclusive step (%[[VAL_14]]) {
+! CHECK: fir.store %[[VAL_16]] to %[[VAL_11]]#1 : !fir.ref<i32>
+! CHECK: %[[VAL_17:.*]]:2 = hlfir.declare %[[VAL_15]] {uniq_name = "_QFsimple_reduction_switch_orderEx"} : (!fir.ref<!fir.logical<4>>) -> (!fir.ref<!fir.logical<4>>, !fir.ref<!fir.logical<4>>)
+! CHECK: %[[VAL_18:.*]] = fir.load %[[VAL_11]]#0 : !fir.ref<i32>
+! CHECK: %[[VAL_19:.*]] = fir.convert %[[VAL_18]] : (i32) -> i64
+! CHECK: %[[VAL_20:.*]] = hlfir.designate %[[VAL_7]]#0 (%[[VAL_19]]) : (!fir.ref<!fir.array<100x!fir.logical<4>>>, i64) -> !fir.ref<!fir.logical<4>>
+! CHECK: %[[VAL_21:.*]] = fir.load %[[VAL_20]] : !fir.ref<!fir.logical<4>>
+! CHECK: %[[VAL_22:.*]] = fir.load %[[VAL_17]]#0 : !fir.ref<!fir.logical<4>>
+! CHECK: %[[VAL_23:.*]] = fir.convert %[[VAL_21]] : (!fir.logical<4>) -> i1
+! CHECK: %[[VAL_24:.*]] = fir.convert %[[VAL_22]] : (!fir.logical<4>) -> i1
+! CHECK: %[[VAL_25:.*]] = arith.cmpi eq, %[[VAL_23]], %[[VAL_24]] : i1
+! CHECK: %[[VAL_26:.*]] = fir.convert %[[VAL_25]] : (i1) -> !fir.logical<4>
+! CHECK: hlfir.assign %[[VAL_26]] to %[[VAL_17]]#0 : !fir.logical<4>, !fir.ref<!fir.logical<4>>
+! CHECK: omp.yield
+! CHECK: omp.terminator
+! CHECK: return
+
subroutine simple_reduction_switch_order(y)
logical :: x, y(100)
x = .true.
@@ -84,44 +112,73 @@ subroutine simple_reduction_switch_order(y)
!$omp end parallel
end subroutine
-!CHECK-LABEL: func.func @_QPmultiple_reductions
-!CHECK-SAME %[[ARRAY:.*]]: !fir.ref<!fir.array<100x!fir.logical<4>>> {fir.bindc_name = "w"}) {
-!CHECK: %[[IREF:.*]] = fir.alloca i32 {bindc_name = "i", uniq_name = "_QFmultiple_reductionsEi"}
-!CHECK: %[[I_DECL:.*]]:2 = hlfir.declare %[[IREF]] {uniq_name = "_QFmultiple_reductionsEi"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
-!CHECK: %[[W_DECL:.*]]:2 = hlfir.declare %[[ARRAY]](%{{.*}}) {uniq_name = "_QFmultiple_reductionsEw"} : (!fir.ref<!fir.array<100x!fir.logical<4>>>, !fir.shape<1>) -> (!fir.ref<!fir.array<100x!fir.logical<4>>>, !fir.ref<!fir.array<100x!fir.logical<4>>>)
-!CHECK: %[[XREF:.*]] = fir.alloca !fir.logical<4> {bindc_name = "x", uniq_name = "_QFmultiple_reductionsEx"}
-!CHECK: %[[X_DECL:.*]]:2 = hlfir.declare %[[XREF]] {uniq_name = "_QFmultiple_reductionsEx"} : (!fir.ref<!fir.logical<4>>) -> (!fir.ref<!fir.logical<4>>, !fir.ref<!fir.logical<4>>)
-!CHECK: %[[YREF:.*]] = fir.alloca !fir.logical<4> {bindc_name = "y", uniq_name = "_QFmultiple_reductionsEy"}
-!CHECK: %[[Y_DECL:.*]]:2 = hlfir.declare %[[YREF]] {uniq_name = "_QFmultiple_reductionsEy"} : (!fir.ref<!fir.logical<4>>) -> (!fir.ref<!fir.logical<4>>, !fir.ref<!fir.logical<4>>)
-!CHECK: %[[ZREF:.*]] = fir.alloca !fir.logical<4> {bindc_name = "z", uniq_name = "_QFmultiple_reductionsEz"}
-!CHECK: %[[Z_DECL:.*]]:2 = hlfir.declare %[[ZREF]] {uniq_name = "_QFmultiple_reductionsEz"} : (!fir.ref<!fir.logical<4>>) -> (!fir.ref<!fir.logical<4>>, !fir.ref<!fir.logical<4>>)
-!CHECK: omp.parallel
-!CHECK: %[[I_PVT_REF:.*]] = fir.alloca i32 {adapt.valuebyref, pinned}
-!CHECK: %[[I_PVT_DECL:.*]]:2 = hlfir.declare %[[I_PVT_REF]] {uniq_name = "_QFmultiple_reductionsEi"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
-!CHECK: %[[C1_1:.*]] = arith.constant 1 : i32
-!CHECK: %[[C100:.*]] = arith.constant 100 : i32
-!CHECK: %[[C1_2:.*]] = arith.constant 1 : i32
-!CHECK: omp.wsloop reduction(@[[RED_NAME]] -> %[[X_DECL]]#0 : !fir.ref<!fir.logical<4>>, @[[RED_NAME]] -> %[[Y_DECL]]#0 :
-!!fir.ref<!fir.logical<4>>, @[[RED_NAME]] -> %[[Z_DECL]]#0 : !fir.ref<!fir.logical<4>>) for (%[[IVAL:.*]]) : i32 = (%[[C1_1]]) to (%[[C100]]) inclusive step (%[[C1_2]]) {
-!CHECK: fir.store %[[IVAL]] to %[[I_PVT_DECL]]#1 : !fir.ref<i32>
-!CHECK: %[[I_PVT_VAL1:.*]] = fir.load %[[I_PVT_DECL]]#0 : !fir.ref<i32>
-!CHECK: %[[CONVI_64_1:.*]] = fir.convert %[[I_PVT_VAL1]] : (i32) -> i64
-!CHECK: %[[W_I_REF:.*]] = hlfir.designate %[[W_DECL]]#0 (%[[CONVI_64_1]]) : (!fir.ref<!fir.array<100x!fir.logical<4>>>, i64) -> !fir.ref<!fir.logical<4>>
-!CHECK: %[[W_I_VAL:.*]] = fir.load %[[W_I_REF]] : !fir.ref<!fir.logical<4>>
-!CHECK: omp.reduction %[[W_I_VAL]], %[[X_DECL]]#0 : !fir.logical<4>, !fir.ref<!fir.logical<4>>
-!CHECK: %[[I_PVT_VAL2:.*]] = fir.load %[[I_PVT_DECL]]#0 : !fir.ref<i32>
-!CHECK: %[[CONVI_64_2:.*]] = fir.convert %[[I_PVT_VAL2]] : (i32) -> i64
-!CHECK: %[[W_I_REF:.*]] = hlfir.designate %[[W_DECL]]#0 (%[[CONVI_64_2]]) : (!fir.ref<!fir.array<100x!fir.logical<4>>>, i64) -> !fir.ref<!fir.logical<4>>
-!CHECK: %[[W_I_VAL:.*]] = fir.load %[[W_I_REF]] : !fir.ref<!fir.logical<4>>
-!CHECK: omp.reduction %[[W_I_VAL]], %[[Y_DECL]]#0 : !fir.logical<4>, !fir.ref<!fir.logical<4>>
-!CHECK: %[[I_PVT_VAL2:.*]] = fir.load %[[I_PVT_DECL]]#0 : !fir.ref<i32>
-!CHECK: %[[CONVI_64_2:.*]] = fir.convert %[[I_PVT_VAL2]] : (i32) -> i64
-!CHECK: %[[W_I_REF:.*]] = hlfir.designate %[[W_DECL]]#0 (%[[CONVI_64_2]]) : (!fir.ref<!fir.array<100x!fir.logical<4>>>, i64) -> !fir.ref<!fir.logical<4>>
-!CHECK: %[[W_I_VAL:.*]] = fir.load %[[W_I_REF]] : !fir.ref<!fir.logical<4>>
-!CHECK: omp.reduction %[[W_I_VAL]], %[[Z_DECL]]#0 : !fir.logical<4>, !fir.ref<!fir.logical<4>>
-!CHECK: omp.yield
-!CHECK: omp.terminator
-!CHECK: return
+! CHECK-LABEL: func.func @_QPmultiple_reductions(
+! CHECK-SAME: %[[VAL_0:.*]]: !fir.ref<!fir.array<100x!fir.logical<4>>> {fir.bindc_name = "w"}) {
+! CHECK: %[[VAL_1:.*]] = fir.alloca i32 {bindc_name = "i", uniq_name = "_QFmultiple_reductionsEi"}
+! CHECK: %[[VAL_2:.*]]:2 = hlfir.declare %[[VAL_1]] {uniq_name = "_QFmultiple_reductionsEi"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
+! CHECK: %[[VAL_3:.*]] = arith.constant 100 : index
+! CHECK: %[[VAL_4:.*]] = fir.shape %[[VAL_3]] : (index) -> !fir.shape<1>
+! CHECK: %[[VAL_5:.*]]:2 = hlfir.declare %[[VAL_0]](%[[VAL_4]]) {uniq_name = "_QFmultiple_reductionsEw"} : (!fir.ref<!fir.array<100x!fir.logical<4>>>, !fir.shape<1>) -> (!fir.ref<!fir.array<100x!fir.logical<4>>>, !fir.ref<!fir.array<100x!fir.logical<4>>>)
+! CHECK: %[[VAL_6:.*]] = fir.alloca !fir.logical<4> {bindc_name = "x", uniq_name = "_QFmultiple_reductionsEx"}
+! CHECK: %[[VAL_7:.*]]:2 = hlfir.declare %[[VAL_6]] {uniq_name = "_QFmultiple_reductionsEx"} : (!fir.ref<!fir.logical<4>>) -> (!fir.ref<!fir.logical<4>>, !fir.ref<!fir.logical<4>>)
+! CHECK: %[[VAL_8:.*]] = fir.alloca !fir.logical<4> {bindc_name = "y", uniq_name = "_QFmultiple_reductionsEy"}
+! CHECK: %[[VAL_9:.*]]:2 = hlfir.declare %[[VAL_8]] {uniq_name = "_QFmultiple_reductionsEy"} : (!fir.ref<!fir.logical<4>>) -> (!fir.ref<!fir.logical<4>>, !fir.ref<!fir.logical<4>>)
+! CHECK: %[[VAL_10:.*]] = fir.alloca !fir.logical<4> {bindc_name = "z", uniq_name = "_QFmultiple_reductionsEz"}
+! CHECK: %[[VAL_11:.*]]:2 = hlfir.declare %[[VAL_10]] {uniq_name = "_QFmultiple_reductionsEz"} : (!fir.ref<!fir.logical<4>>) -> (!fir.ref<!fir.logical<4>>, !fir.ref<!fir.logical<4>>)
+! CHECK: %[[VAL_12:.*]] = arith.constant true
+! CHECK: %[[VAL_13:.*]] = fir.convert %[[VAL_12]] : (i1) -> !fir.logical<4>
+! CHECK: hlfir.assign %[[VAL_13]] to %[[VAL_7]]#0 : !fir.logical<4>, !fir.ref<!fir.logical<4>>
+! CHECK: %[[VAL_14:.*]] = arith.constant true
+! CHECK: %[[VAL_15:.*]] = fir.convert %[[VAL_14]] : (i1) -> !fir.logical<4>
+! CHECK: hlfir.assign %[[VAL_15]] to %[[VAL_9]]#0 : !fir.logical<4>, !fir.ref<!fir.logical<4>>
+! CHECK: %[[VAL_16:.*]] = arith.constant true
+! CHECK: %[[VAL_17:.*]] = fir.convert %[[VAL_16]] : (i1) -> !fir.logical<4>
+! CHECK: hlfir.assign %[[VAL_17]] to %[[VAL_11]]#0 : !fir.logical<4>, !fir.ref<!fir.logical<4>>
+! CHECK: omp.parallel {
+! CHECK: %[[VAL_18:.*]] = fir.alloca i32 {adapt.valuebyref, pinned}
+! CHECK: %[[VAL_19:.*]]:2 = hlfir.declare %[[VAL_18]] {uniq_name = "_QFmultiple_reductionsEi"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
+! CHECK: %[[VAL_20:.*]] = arith.constant 1 : i32
+! CHECK: %[[VAL_21:.*]] = arith.constant 100 : i32
+! CHECK: %[[VAL_22:.*]] = arith.constant 1 : i32
+! CHECK: omp.wsloop reduction(@eqv_reduction %[[VAL_7]]#0 -> %[[VAL_23:.*]] : !fir.ref<!fir.logical<4>>, @eqv_reduction %[[VAL_9]]#0 -> %[[VAL_24:.*]] : !fir.ref<!fir.logical<4>>, @eqv_reduction %[[VAL_11]]#0 -> %[[VAL_25:.*]] : !fir.ref<!fir.logical<4>>) for (%[[VAL_26:.*]]) : i32 = (%[[VAL_20]]) to (%[[VAL_21]]) inclusive step (%[[VAL_22]]) {
+! CHECK: fir.store %[[VAL_26]] to %[[VAL_19]]#1 : !fir.ref<i32>
+! CHECK: %[[VAL_27:.*]]:2 = hlfir.declare %[[VAL_23]] {uniq_name = "_QFmultiple_reductionsEx"} : (!fir.ref<!fir.logical<4>>) -> (!fir.ref<!fir.logical<4>>, !fir.ref<!fir.logical<4>>)
+! CHECK: %[[VAL_28:.*]]:2 = hlfir.declare %[[VAL_24]] {uniq_name = "_QFmultiple_reductionsEy"} : (!fir.ref<!fir.logical<4>>) -> (!fir.ref<!fir.logical<4>>, !fir.ref<!fir.logical<4>>)
+! CHECK: %[[VAL_29:.*]]:2 = hlfir.declare %[[VAL_25]] {uniq_name = "_QFmultiple_reductionsEz"} : (!fir.ref<!fir.logical<4>>) -> (!fir.ref<!fir.logical<4>>, !fir.ref<!fir.logical<4>>)
+! CHECK: %[[VAL_30:.*]] = fir.load %[[VAL_27]]#0 : !fir.ref<!fir.logical<4>>
+! CHECK: %[[VAL_31:.*]] = fir.load %[[VAL_19]]#0 : !fir.ref<i32>
+! CHECK: %[[VAL_32:.*]] = fir.convert %[[VAL_31]] : (i32) -> i64
+! CHECK: %[[VAL_33:.*]] = hlfir.designate %[[VAL_5]]#0 (%[[VAL_32]]) : (!fir.ref<!fir.array<100x!fir.logical<4>>>, i64) -> !fir.ref<!fir.logical<4>>
+! CHECK: %[[VAL_34:.*]] = fir.load %[[VAL_33]] : !fir.ref<!fir.logical<4>>
+! CHECK: %[[VAL_35:.*]] = fir.convert %[[VAL_30]] : (!fir.logical<4>) -> i1
+! CHECK: %[[VAL_36:.*]] = fir.convert %[[VAL_34]] : (!fir.logical<4>) -> i1
+! CHECK: %[[VAL_37:.*]] = arith.cmpi eq, %[[VAL_35]], %[[VAL_36]] : i1
+! CHECK: %[[VAL_38:.*]] = fir.convert %[[VAL_37]] : (i1) -> !fir.logical<4>
+! CHECK: hlfir.assign %[[VAL_38]] to %[[VAL_27]]#0 : !fir.logical<4>, !fir.ref<!fir.logical<4>>
+! CHECK: %[[VAL_39:.*]] = fir.load %[[VAL_28]]#0 : !fir.ref<!fir.logical<4>>
+! CHECK: %[[VAL_40:.*]] = fir.load %[[VAL_19]]#0 : !fir.ref<i32>
+! CHECK: %[[VAL_41:.*]] = fir.convert %[[VAL_40]] : (i32) -> i64
+! CHECK: %[[VAL_42:.*]] = hlfir.designate %[[VAL_5]]#0 (%[[VAL_41]]) : (!fir.ref<!fir.array<100x!fir.logical<4>>>, i64) -> !fir.ref<!fir.logical<4>>
+! CHECK: %[[VAL_43:.*]] = fir.load %[[VAL_42]] : !fir.ref<!fir.logical<4>>
+! CHECK: %[[VAL_44:.*]] = fir.convert %[[VAL_39]] : (!fir.logical<4>) -> i1
+! CHECK: %[[VAL_45:.*]] = fir.convert %[[VAL_43]] : (!fir.logical<4>) -> i1
+! CHECK: %[[VAL_46:.*]] = arith.cmpi eq, %[[VAL_44]], %[[VAL_45]] : i1
+! CHECK: %[[VAL_47:.*]] = fir.convert %[[VAL_46]] : (i1) -> !fir.logical<4>
+! CHECK: hlfir.assign %[[VAL_47]] to %[[VAL_28]]#0 : !fir.logical<4>, !fir.ref<!fir.logical<4>>
+! CHECK: %[[VAL_48:.*]] = fir.load %[[VAL_29]]#0 : !fir.ref<!fir.logical<4>>
+! CHECK: %[[VAL_49:.*]] = fir.load %[[VAL_19]]#0 : !fir.ref<i32>
+! CHECK: %[[VAL_50:.*]] = fir.convert %[[VAL_49]] : (i32) -> i64
+! CHECK: %[[VAL_51:.*]] = hlfir.designate %[[VAL_5]]#0 (%[[VAL_50]]) : (!fir.ref<!fir.array<100x!fir.logical<4>>>, i64) -> !fir.ref<!fir.logical<4>>
+! CHECK: %[[VAL_52:.*]] = fir.load %[[VAL_51]] : !fir.ref<!fir.logical<4>>
+! CHECK: %[[VAL_53:.*]] = fir.convert %[[VAL_48]] : (!fir.logical<4>) -> i1
+! CHECK: %[[VAL_54:.*]] = fir.convert %[[VAL_52]] : (!fir.logical<4>) -> i1
+! CHECK: %[[VAL_55:.*]] = arith.cmpi eq, %[[VAL_53]], %[[VAL_54]] : i1
+! CHECK: %[[VAL_56:.*]] = fir.convert %[[VAL_55]] : (i1) -> !fir.logical<4>
+! CHECK: hlfir.assign %[[VAL_56]] to %[[VAL_29]]#0 : !fir.logical<4>, !fir.ref<!fir.logical<4>>
+! CHECK: omp.yield
+! CHECK: omp.terminator
+! CHECK: return
+
subroutine multiple_reductions(w)
logical :: x,y,z,w(100)
x = .true.
diff --git a/flang/test/Lower/OpenMP/wsloop-reduction-logical-neqv.f90 b/flang/test/Lower/OpenMP/wsloop-reduction-logical-neqv.f90
index 54227cb..623368a 100644
--- a/flang/test/Lower/OpenMP/wsloop-reduction-logical-neqv.f90
+++ b/flang/test/Lower/OpenMP/wsloop-reduction-logical-neqv.f90
@@ -1,43 +1,58 @@
! RUN: bbc -emit-hlfir -fopenmp %s -o - | FileCheck %s
! RUN: %flang_fc1 -emit-hlfir -fopenmp %s -o - | FileCheck %s
-!CHECK-LABEL: omp.reduction.declare
-!CHECK-SAME: @[[RED_NAME:.*]] : !fir.logical<4> init {
-!CHECK: ^bb0(%{{.*}}: !fir.logical<4>):
-!CHECK: %false = arith.constant false
-!CHECK: %[[false_fir:.*]] = fir.convert %false : (i1) -> !fir.logical<4>
-!CHECK: omp.yield(%[[false_fir]] : !fir.logical<4>)
-!CHECK: } combiner {
-!CHECK: ^bb0(%[[ARG0:.*]]: !fir.logical<4>, %[[ARG1:.*]]: !fir.logical<4>):
-!CHECK: %[[arg0_i1:.*]] = fir.convert %[[ARG0]] : (!fir.logical<4>) -> i1
-!CHECK: %[[arg1_i1:.*]] = fir.convert %[[ARG1]] : (!fir.logical<4>) -> i1
-!CHECK: %[[RES:.*]] = arith.cmpi ne, %[[arg0_i1]], %[[arg1_i1]] : i1
-!CHECK: %[[RES_logical:.*]] = fir.convert %[[RES]] : (i1) -> !fir.logical<4>
-!CHECK: omp.yield(%[[RES_logical]] : !fir.logical<4>)
-!CHECK: }
+! NOTE: Assertions have been autogenerated by utils/generate-test-checks.py
+
+! CHECK-LABEL: omp.reduction.declare @neqv_reduction : !fir.logical<4> init {
+! CHECK: ^bb0(%[[VAL_0:.*]]: !fir.logical<4>):
+! CHECK: %[[VAL_1:.*]] = arith.constant false
+! CHECK: %[[VAL_2:.*]] = fir.convert %[[VAL_1]] : (i1) -> !fir.logical<4>
+! CHECK: omp.yield(%[[VAL_2]] : !fir.logical<4>)
+
+! CHECK-LABEL: } combiner {
+! CHECK: ^bb0(%[[VAL_0:.*]]: !fir.logical<4>, %[[VAL_1:.*]]: !fir.logical<4>):
+! CHECK: %[[VAL_2:.*]] = fir.convert %[[VAL_0]] : (!fir.logical<4>) -> i1
+! CHECK: %[[VAL_3:.*]] = fir.convert %[[VAL_1]] : (!fir.logical<4>) -> i1
+! CHECK: %[[VAL_4:.*]] = arith.cmpi ne, %[[VAL_2]], %[[VAL_3]] : i1
+! CHECK: %[[VAL_5:.*]] = fir.convert %[[VAL_4]] : (i1) -> !fir.logical<4>
+! CHECK: omp.yield(%[[VAL_5]] : !fir.logical<4>)
+! CHECK: }
+
+! CHECK-LABEL: func.func @_QPsimple_reduction(
+! CHECK-SAME: %[[VAL_0:.*]]: !fir.ref<!fir.array<100x!fir.logical<4>>> {fir.bindc_name = "y"}) {
+! CHECK: %[[VAL_1:.*]] = fir.alloca i32 {bindc_name = "i", uniq_name = "_QFsimple_reductionEi"}
+! CHECK: %[[VAL_2:.*]]:2 = hlfir.declare %[[VAL_1]] {uniq_name = "_QFsimple_reductionEi"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
+! CHECK: %[[VAL_3:.*]] = fir.alloca !fir.logical<4> {bindc_name = "x", uniq_name = "_QFsimple_reductionEx"}
+! CHECK: %[[VAL_4:.*]]:2 = hlfir.declare %[[VAL_3]] {uniq_name = "_QFsimple_reductionEx"} : (!fir.ref<!fir.logical<4>>) -> (!fir.ref<!fir.logical<4>>, !fir.ref<!fir.logical<4>>)
+! CHECK: %[[VAL_5:.*]] = arith.constant 100 : index
+! CHECK: %[[VAL_6:.*]] = fir.shape %[[VAL_5]] : (index) -> !fir.shape<1>
+! CHECK: %[[VAL_7:.*]]:2 = hlfir.declare %[[VAL_0]](%[[VAL_6]]) {uniq_name = "_QFsimple_reductionEy"} : (!fir.ref<!fir.array<100x!fir.logical<4>>>, !fir.shape<1>) -> (!fir.ref<!fir.array<100x!fir.logical<4>>>, !fir.ref<!fir.array<100x!fir.logical<4>>>)
+! CHECK: %[[VAL_8:.*]] = arith.constant true
+! CHECK: %[[VAL_9:.*]] = fir.convert %[[VAL_8]] : (i1) -> !fir.logical<4>
+! CHECK: hlfir.assign %[[VAL_9]] to %[[VAL_4]]#0 : !fir.logical<4>, !fir.ref<!fir.logical<4>>
+! CHECK: omp.parallel {
+! CHECK: %[[VAL_10:.*]] = fir.alloca i32 {adapt.valuebyref, pinned}
+! CHECK: %[[VAL_11:.*]]:2 = hlfir.declare %[[VAL_10]] {uniq_name = "_QFsimple_reductionEi"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
+! CHECK: %[[VAL_12:.*]] = arith.constant 1 : i32
+! CHECK: %[[VAL_13:.*]] = arith.constant 100 : i32
+! CHECK: %[[VAL_14:.*]] = arith.constant 1 : i32
+! CHECK: omp.wsloop reduction(@neqv_reduction %[[VAL_4]]#0 -> %[[VAL_15:.*]] : !fir.ref<!fir.logical<4>>) for (%[[VAL_16:.*]]) : i32 = (%[[VAL_12]]) to (%[[VAL_13]]) inclusive step (%[[VAL_14]]) {
+! CHECK: fir.store %[[VAL_16]] to %[[VAL_11]]#1 : !fir.ref<i32>
+! CHECK: %[[VAL_17:.*]]:2 = hlfir.declare %[[VAL_15]] {uniq_name = "_QFsimple_reductionEx"} : (!fir.ref<!fir.logical<4>>) -> (!fir.ref<!fir.logical<4>>, !fir.ref<!fir.logical<4>>)
+! CHECK: %[[VAL_18:.*]] = fir.load %[[VAL_17]]#0 : !fir.ref<!fir.logical<4>>
+! CHECK: %[[VAL_19:.*]] = fir.load %[[VAL_11]]#0 : !fir.ref<i32>
+! CHECK: %[[VAL_20:.*]] = fir.convert %[[VAL_19]] : (i32) -> i64
+! CHECK: %[[VAL_21:.*]] = hlfir.designate %[[VAL_7]]#0 (%[[VAL_20]]) : (!fir.ref<!fir.array<100x!fir.logical<4>>>, i64) -> !fir.ref<!fir.logical<4>>
+! CHECK: %[[VAL_22:.*]] = fir.load %[[VAL_21]] : !fir.ref<!fir.logical<4>>
+! CHECK: %[[VAL_23:.*]] = fir.convert %[[VAL_18]] : (!fir.logical<4>) -> i1
+! CHECK: %[[VAL_24:.*]] = fir.convert %[[VAL_22]] : (!fir.logical<4>) -> i1
+! CHECK: %[[VAL_25:.*]] = arith.cmpi ne, %[[VAL_23]], %[[VAL_24]] : i1
+! CHECK: %[[VAL_26:.*]] = fir.convert %[[VAL_25]] : (i1) -> !fir.logical<4>
+! CHECK: hlfir.assign %[[VAL_26]] to %[[VAL_17]]#0 : !fir.logical<4>, !fir.ref<!fir.logical<4>>
+! CHECK: omp.yield
+! CHECK: omp.terminator
+! CHECK: return
-!CHECK-LABEL: func.func @_QPsimple_reduction(
-!CHECK-SAME: %[[ARRAY:.*]]: !fir.ref<!fir.array<100x!fir.logical<4>>> {fir.bindc_name = "y"}) {
-!CHECK: %[[IREF:.*]] = fir.alloca i32 {bindc_name = "i", uniq_name = "_QFsimple_reductionEi"}
-!CHECK: %[[XREF:.*]] = fir.alloca !fir.logical<4> {bindc_name = "x", uniq_name = "_QFsimple_reductionEx"}
-!CHECK: %[[X_DECL:.*]]:2 = hlfir.declare %[[XREF]] {uniq_name = "_QFsimple_reductionEx"} : (!fir.ref<!fir.logical<4>>) -> (!fir.ref<!fir.logical<4>>, !fir.ref<!fir.logical<4>>)
-!CHECK: %[[Y_DECL:.*]]:2 = hlfir.declare %[[ARRAY]](%4) {uniq_name = "_QFsimple_reductionEy"} : (!fir.ref<!fir.array<100x!fir.logical<4>>>, !fir.shape<1>) -> (!fir.ref<!fir.array<100x!fir.logical<4>>>, !fir.ref<!fir.array<100x!fir.logical<4>>>)
-!CHECK: omp.parallel
-!CHECK: %[[I_PVT_REF:.*]] = fir.alloca i32 {adapt.valuebyref, pinned}
-!CHECK: %[[I_PVT_DECL:.*]]:2 = hlfir.declare %[[I_PVT_REF]] {uniq_name = "_QFsimple_reductionEi"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
-!CHECK: %[[C1_1:.*]] = arith.constant 1 : i32
-!CHECK: %[[C100:.*]] = arith.constant 100 : i32
-!CHECK: %[[C1_2:.*]] = arith.constant 1 : i32
-!CHECK: omp.wsloop reduction(@[[RED_NAME]] -> %[[X_DECL]]#0 : !fir.ref<!fir.logical<4>>) for (%[[IVAL:.*]]) : i32 = (%[[C1_1]]) to (%[[C100]]) inclusive step (%[[C1_2]]) {
-!CHECK: fir.store %[[IVAL]] to %[[I_PVT_DECL]]#1 : !fir.ref<i32>
-!CHECK: %[[I_PVT:.*]] = fir.load %[[I_PVT_DECL]]#0 : !fir.ref<i32>
-!CHECK: %[[I_PVT_64:.*]] = fir.convert %[[I_PVT]] : (i32) -> i64
-!CHECK: %[[Y_I_REF:.*]] = hlfir.designate %[[Y_DECL]]#0 (%[[I_PVT_64]]) : (!fir.ref<!fir.array<100x!fir.logical<4>>>, i64) -> !fir.ref<!fir.logical<4>>
-!CHECK: %[[Y_I_VAL:.*]] = fir.load %[[Y_I_REF]] : !fir.ref<!fir.logical<4>>
-!CHECK: omp.reduction %[[Y_I_VAL]], %[[X_DECL]]#0 : !fir.logical<4>, !fir.ref<!fir.logical<4>>
-!CHECK: omp.yield
-!CHECK: omp.terminator
-!CHECK: return
subroutine simple_reduction(y)
logical :: x, y(100)
x = .true.
@@ -50,28 +65,43 @@ subroutine simple_reduction(y)
!$omp end parallel
end subroutine
-!CHECK-LABEL: func.func @_QPsimple_reduction_switch_order(
-!CHECK-SAME: %[[ARRAY:.*]]: !fir.ref<!fir.array<100x!fir.logical<4>>> {fir.bindc_name = "y"}) {
-!CHECK: %[[IREF:.*]] = fir.alloca i32 {bindc_name = "i", uniq_name = "_QFsimple_reduction_switch_orderEi"}
-!CHECK: %[[XREF:.*]] = fir.alloca !fir.logical<4> {bindc_name = "x", uniq_name = "_QFsimple_reduction_switch_orderEx"}
-!CHECK: %[[X_DECL:.*]]:2 = hlfir.declare %[[XREF]] {uniq_name = "_QFsimple_reduction_switch_orderEx"} : (!fir.ref<!fir.logical<4>>) -> (!fir.ref<!fir.logical<4>>, !fir.ref<!fir.logical<4>>)
-!CHECK: %[[Y_DECL:.*]]:2 = hlfir.declare %[[ARRAY]](%{{.*}}) {uniq_name = "_QFsimple_reduction_switch_orderEy"} : (!fir.ref<!fir.array<100x!fir.logical<4>>>, !fir.shape<1>) -> (!fir.ref<!fir.array<100x!fir.logical<4>>>, !fir.ref<!fir.array<100x!fir.logical<4>>>)
-!CHECK: omp.parallel
-!CHECK: %[[I_PVT_REF:.*]] = fir.alloca i32 {adapt.valuebyref, pinned}
-!CHECK: %[[I_PVT_DECL:.*]]:2 = hlfir.declare %[[I_PVT_REF]] {uniq_name = "_QFsimple_reduction_switch_orderEi"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
-!CHECK: %[[C1_1:.*]] = arith.constant 1 : i32
-!CHECK: %[[C100:.*]] = arith.constant 100 : i32
-!CHECK: %[[C1_2:.*]] = arith.constant 1 : i32
-!CHECK: omp.wsloop reduction(@[[RED_NAME]] -> %[[X_DECL]]#0 : !fir.ref<!fir.logical<4>>) for (%[[IVAL:.*]]) : i32 = (%[[C1_1]]) to (%[[C100]]) inclusive step (%[[C1_2]]) {
-!CHECK: fir.store %[[IVAL]] to %[[I_PVT_DECL]]#1 : !fir.ref<i32>
-!CHECK: %[[I_PVT_VAL:.*]] = fir.load %[[I_PVT_DECL]]#0 : !fir.ref<i32>
-!CHECK: %[[CONVI_64:.*]] = fir.convert %[[I_PVT_VAL]] : (i32) -> i64
-!CHECK: %[[Y_I_REF:.*]] = hlfir.designate %[[Y_DECL]]#0 (%[[CONVI_64]]) : (!fir.ref<!fir.array<100x!fir.logical<4>>>, i64) -> !fir.ref<!fir.logical<4>>
-!CHECK: %[[YVAL:.*]] = fir.load %[[Y_I_REF]] : !fir.ref<!fir.logical<4>>
-!CHECK: omp.reduction %[[YVAL]], %[[X_DECL]]#0 : !fir.logical<4>, !fir.ref<!fir.logical<4>>
-!CHECK: omp.yield
-!CHECK: omp.terminator
-!CHECK: return
+
+! CHECK-LABEL: func.func @_QPsimple_reduction_switch_order(
+! CHECK-SAME: %[[VAL_0:.*]]: !fir.ref<!fir.array<100x!fir.logical<4>>> {fir.bindc_name = "y"}) {
+! CHECK: %[[VAL_1:.*]] = fir.alloca i32 {bindc_name = "i", uniq_name = "_QFsimple_reduction_switch_orderEi"}
+! CHECK: %[[VAL_2:.*]]:2 = hlfir.declare %[[VAL_1]] {uniq_name = "_QFsimple_reduction_switch_orderEi"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
+! CHECK: %[[VAL_3:.*]] = fir.alloca !fir.logical<4> {bindc_name = "x", uniq_name = "_QFsimple_reduction_switch_orderEx"}
+! CHECK: %[[VAL_4:.*]]:2 = hlfir.declare %[[VAL_3]] {uniq_name = "_QFsimple_reduction_switch_orderEx"} : (!fir.ref<!fir.logical<4>>) -> (!fir.ref<!fir.logical<4>>, !fir.ref<!fir.logical<4>>)
+! CHECK: %[[VAL_5:.*]] = arith.constant 100 : index
+! CHECK: %[[VAL_6:.*]] = fir.shape %[[VAL_5]] : (index) -> !fir.shape<1>
+! CHECK: %[[VAL_7:.*]]:2 = hlfir.declare %[[VAL_0]](%[[VAL_6]]) {uniq_name = "_QFsimple_reduction_switch_orderEy"} : (!fir.ref<!fir.array<100x!fir.logical<4>>>, !fir.shape<1>) -> (!fir.ref<!fir.array<100x!fir.logical<4>>>, !fir.ref<!fir.array<100x!fir.logical<4>>>)
+! CHECK: %[[VAL_8:.*]] = arith.constant true
+! CHECK: %[[VAL_9:.*]] = fir.convert %[[VAL_8]] : (i1) -> !fir.logical<4>
+! CHECK: hlfir.assign %[[VAL_9]] to %[[VAL_4]]#0 : !fir.logical<4>, !fir.ref<!fir.logical<4>>
+! CHECK: omp.parallel {
+! CHECK: %[[VAL_10:.*]] = fir.alloca i32 {adapt.valuebyref, pinned}
+! CHECK: %[[VAL_11:.*]]:2 = hlfir.declare %[[VAL_10]] {uniq_name = "_QFsimple_reduction_switch_orderEi"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
+! CHECK: %[[VAL_12:.*]] = arith.constant 1 : i32
+! CHECK: %[[VAL_13:.*]] = arith.constant 100 : i32
+! CHECK: %[[VAL_14:.*]] = arith.constant 1 : i32
+! CHECK: omp.wsloop reduction(@neqv_reduction %[[VAL_4]]#0 -> %[[VAL_15:.*]] : !fir.ref<!fir.logical<4>>) for (%[[VAL_16:.*]]) : i32 = (%[[VAL_12]]) to (%[[VAL_13]]) inclusive step (%[[VAL_14]]) {
+! CHECK: fir.store %[[VAL_16]] to %[[VAL_11]]#1 : !fir.ref<i32>
+! CHECK: %[[VAL_17:.*]]:2 = hlfir.declare %[[VAL_15]] {uniq_name = "_QFsimple_reduction_switch_orderEx"} : (!fir.ref<!fir.logical<4>>) -> (!fir.ref<!fir.logical<4>>, !fir.ref<!fir.logical<4>>)
+! CHECK: %[[VAL_18:.*]] = fir.load %[[VAL_11]]#0 : !fir.ref<i32>
+! CHECK: %[[VAL_19:.*]] = fir.convert %[[VAL_18]] : (i32) -> i64
+! CHECK: %[[VAL_20:.*]] = hlfir.designate %[[VAL_7]]#0 (%[[VAL_19]]) : (!fir.ref<!fir.array<100x!fir.logical<4>>>, i64) -> !fir.ref<!fir.logical<4>>
+! CHECK: %[[VAL_21:.*]] = fir.load %[[VAL_20]] : !fir.ref<!fir.logical<4>>
+! CHECK: %[[VAL_22:.*]] = fir.load %[[VAL_17]]#0 : !fir.ref<!fir.logical<4>>
+! CHECK: %[[VAL_23:.*]] = fir.convert %[[VAL_21]] : (!fir.logical<4>) -> i1
+! CHECK: %[[VAL_24:.*]] = fir.convert %[[VAL_22]] : (!fir.logical<4>) -> i1
+! CHECK: %[[VAL_25:.*]] = arith.cmpi ne, %[[VAL_23]], %[[VAL_24]] : i1
+! CHECK: %[[VAL_26:.*]] = fir.convert %[[VAL_25]] : (i1) -> !fir.logical<4>
+! CHECK: hlfir.assign %[[VAL_26]] to %[[VAL_17]]#0 : !fir.logical<4>, !fir.ref<!fir.logical<4>>
+! CHECK: omp.yield
+! CHECK: omp.terminator
+! CHECK: return
+
+
subroutine simple_reduction_switch_order(y)
logical :: x, y(100)
x = .true.
@@ -84,44 +114,76 @@ subroutine simple_reduction_switch_order(y)
!$omp end parallel
end subroutine
-!CHECK-LABEL: func.func @_QPmultiple_reductions
-!CHECK-SAME %[[ARRAY:.*]]: !fir.ref<!fir.array<100x!fir.logical<4>>> {fir.bindc_name = "w"}) {
-!CHECK: %[[IREF:.*]] = fir.alloca i32 {bindc_name = "i", uniq_name = "_QFmultiple_reductionsEi"}
-!CHECK: %[[I_DECL:.*]]:2 = hlfir.declare %[[IREF]] {uniq_name = "_QFmultiple_reductionsEi"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
-!CHECK: %[[W_DECL:.*]]:2 = hlfir.declare %[[ARRAY]](%{{.*}}) {uniq_name = "_QFmultiple_reductionsEw"} : (!fir.ref<!fir.array<100x!fir.logical<4>>>, !fir.shape<1>) -> (!fir.ref<!fir.array<100x!fir.logical<4>>>, !fir.ref<!fir.array<100x!fir.logical<4>>>)
-!CHECK: %[[XREF:.*]] = fir.alloca !fir.logical<4> {bindc_name = "x", uniq_name = "_QFmultiple_reductionsEx"}
-!CHECK: %[[X_DECL:.*]]:2 = hlfir.declare %[[XREF]] {uniq_name = "_QFmultiple_reductionsEx"} : (!fir.ref<!fir.logical<4>>) -> (!fir.ref<!fir.logical<4>>, !fir.ref<!fir.logical<4>>)
-!CHECK: %[[YREF:.*]] = fir.alloca !fir.logical<4> {bindc_name = "y", uniq_name = "_QFmultiple_reductionsEy"}
-!CHECK: %[[Y_DECL:.*]]:2 = hlfir.declare %[[YREF]] {uniq_name = "_QFmultiple_reductionsEy"} : (!fir.ref<!fir.logical<4>>) -> (!fir.ref<!fir.logical<4>>, !fir.ref<!fir.logical<4>>)
-!CHECK: %[[ZREF:.*]] = fir.alloca !fir.logical<4> {bindc_name = "z", uniq_name = "_QFmultiple_reductionsEz"}
-!CHECK: %[[Z_DECL:.*]]:2 = hlfir.declare %[[ZREF]] {uniq_name = "_QFmultiple_reductionsEz"} : (!fir.ref<!fir.logical<4>>) -> (!fir.ref<!fir.logical<4>>, !fir.ref<!fir.logical<4>>)
-!CHECK: omp.parallel
-!CHECK: %[[I_PVT_REF:.*]] = fir.alloca i32 {adapt.valuebyref, pinned}
-!CHECK: %[[I_PVT_DECL:.*]]:2 = hlfir.declare %[[I_PVT_REF]] {uniq_name = "_QFmultiple_reductionsEi"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
-!CHECK: %[[C1_1:.*]] = arith.constant 1 : i32
-!CHECK: %[[C100:.*]] = arith.constant 100 : i32
-!CHECK: %[[C1_2:.*]] = arith.constant 1 : i32
-!CHECK: omp.wsloop reduction(@[[RED_NAME]] -> %[[X_DECL]]#0 : !fir.ref<!fir.logical<4>>, @[[RED_NAME]] -> %[[Y_DECL]]#0 :
-!!fir.ref<!fir.logical<4>>, @[[RED_NAME]] -> %[[Z_DECL]]#0 : !fir.ref<!fir.logical<4>>) for (%[[IVAL:.*]]) : i32 = (%[[C1_1]]) to (%[[C100]]) inclusive step (%[[C1_2]]) {
-!CHECK: fir.store %[[IVAL]] to %[[I_PVT_DECL]]#1 : !fir.ref<i32>
-!CHECK: %[[I_PVT_VAL1:.*]] = fir.load %[[I_PVT_DECL]]#0 : !fir.ref<i32>
-!CHECK: %[[CONVI_64_1:.*]] = fir.convert %[[I_PVT_VAL1]] : (i32) -> i64
-!CHECK: %[[W_I_REF:.*]] = hlfir.designate %[[W_DECL]]#0 (%[[CONVI_64_1]]) : (!fir.ref<!fir.array<100x!fir.logical<4>>>, i64) -> !fir.ref<!fir.logical<4>>
-!CHECK: %[[W_I_VAL:.*]] = fir.load %[[W_I_REF]] : !fir.ref<!fir.logical<4>>
-!CHECK: omp.reduction %[[W_I_VAL]], %[[X_DECL]]#0 : !fir.logical<4>, !fir.ref<!fir.logical<4>>
-!CHECK: %[[I_PVT_VAL2:.*]] = fir.load %[[I_PVT_DECL]]#0 : !fir.ref<i32>
-!CHECK: %[[CONVI_64_2:.*]] = fir.convert %[[I_PVT_VAL2]] : (i32) -> i64
-!CHECK: %[[W_I_REF:.*]] = hlfir.designate %[[W_DECL]]#0 (%[[CONVI_64_2]]) : (!fir.ref<!fir.array<100x!fir.logical<4>>>, i64) -> !fir.ref<!fir.logical<4>>
-!CHECK: %[[W_I_VAL:.*]] = fir.load %[[W_I_REF]] : !fir.ref<!fir.logical<4>>
-!CHECK: omp.reduction %[[W_I_VAL]], %[[Y_DECL]]#0 : !fir.logical<4>, !fir.ref<!fir.logical<4>>
-!CHECK: %[[I_PVT_VAL2:.*]] = fir.load %[[I_PVT_DECL]]#0 : !fir.ref<i32>
-!CHECK: %[[CONVI_64_2:.*]] = fir.convert %[[I_PVT_VAL2]] : (i32) -> i64
-!CHECK: %[[W_I_REF:.*]] = hlfir.designate %[[W_DECL]]#0 (%[[CONVI_64_2]]) : (!fir.ref<!fir.array<100x!fir.logical<4>>>, i64) -> !fir.ref<!fir.logical<4>>
-!CHECK: %[[W_I_VAL:.*]] = fir.load %[[W_I_REF]] : !fir.ref<!fir.logical<4>>
-!CHECK: omp.reduction %[[W_I_VAL]], %[[Z_DECL]]#0 : !fir.logical<4>, !fir.ref<!fir.logical<4>>
-!CHECK: omp.yield
-!CHECK: omp.terminator
-!CHECK: return
+
+! CHECK-LABEL: func.func @_QPmultiple_reductions(
+! CHECK-SAME: %[[VAL_0:.*]]: !fir.ref<!fir.array<100x!fir.logical<4>>> {fir.bindc_name = "w"}) {
+! CHECK: %[[VAL_1:.*]] = fir.alloca i32 {bindc_name = "i", uniq_name = "_QFmultiple_reductionsEi"}
+! CHECK: %[[VAL_2:.*]]:2 = hlfir.declare %[[VAL_1]] {uniq_name = "_QFmultiple_reductionsEi"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
+! CHECK: %[[VAL_3:.*]] = arith.constant 100 : index
+! CHECK: %[[VAL_4:.*]] = fir.shape %[[VAL_3]] : (index) -> !fir.shape<1>
+! CHECK: %[[VAL_5:.*]]:2 = hlfir.declare %[[VAL_0]](%[[VAL_4]]) {uniq_name = "_QFmultiple_reductionsEw"} : (!fir.ref<!fir.array<100x!fir.logical<4>>>, !fir.shape<1>) -> (!fir.ref<!fir.array<100x!fir.logical<4>>>, !fir.ref<!fir.array<100x!fir.logical<4>>>)
+! CHECK: %[[VAL_6:.*]] = fir.alloca !fir.logical<4> {bindc_name = "x", uniq_name = "_QFmultiple_reductionsEx"}
+! CHECK: %[[VAL_7:.*]]:2 = hlfir.declare %[[VAL_6]] {uniq_name = "_QFmultiple_reductionsEx"} : (!fir.ref<!fir.logical<4>>) -> (!fir.ref<!fir.logical<4>>, !fir.ref<!fir.logical<4>>)
+! CHECK: %[[VAL_8:.*]] = fir.alloca !fir.logical<4> {bindc_name = "y", uniq_name = "_QFmultiple_reductionsEy"}
+! CHECK: %[[VAL_9:.*]]:2 = hlfir.declare %[[VAL_8]] {uniq_name = "_QFmultiple_reductionsEy"} : (!fir.ref<!fir.logical<4>>) -> (!fir.ref<!fir.logical<4>>, !fir.ref<!fir.logical<4>>)
+! CHECK: %[[VAL_10:.*]] = fir.alloca !fir.logical<4> {bindc_name = "z", uniq_name = "_QFmultiple_reductionsEz"}
+! CHECK: %[[VAL_11:.*]]:2 = hlfir.declare %[[VAL_10]] {uniq_name = "_QFmultiple_reductionsEz"} : (!fir.ref<!fir.logical<4>>) -> (!fir.ref<!fir.logical<4>>, !fir.ref<!fir.logical<4>>)
+! CHECK: %[[VAL_12:.*]] = arith.constant true
+! CHECK: %[[VAL_13:.*]] = fir.convert %[[VAL_12]] : (i1) -> !fir.logical<4>
+! CHECK: hlfir.assign %[[VAL_13]] to %[[VAL_7]]#0 : !fir.logical<4>, !fir.ref<!fir.logical<4>>
+! CHECK: %[[VAL_14:.*]] = arith.constant true
+! CHECK: %[[VAL_15:.*]] = fir.convert %[[VAL_14]] : (i1) -> !fir.logical<4>
+! CHECK: hlfir.assign %[[VAL_15]] to %[[VAL_9]]#0 : !fir.logical<4>, !fir.ref<!fir.logical<4>>
+! CHECK: %[[VAL_16:.*]] = arith.constant true
+! CHECK: %[[VAL_17:.*]] = fir.convert %[[VAL_16]] : (i1) -> !fir.logical<4>
+! CHECK: hlfir.assign %[[VAL_17]] to %[[VAL_11]]#0 : !fir.logical<4>, !fir.ref<!fir.logical<4>>
+! CHECK: omp.parallel {
+! CHECK: %[[VAL_18:.*]] = fir.alloca i32 {adapt.valuebyref, pinned}
+! CHECK: %[[VAL_19:.*]]:2 = hlfir.declare %[[VAL_18]] {uniq_name = "_QFmultiple_reductionsEi"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
+! CHECK: %[[VAL_20:.*]] = arith.constant 1 : i32
+! CHECK: %[[VAL_21:.*]] = arith.constant 100 : i32
+! CHECK: %[[VAL_22:.*]] = arith.constant 1 : i32
+! CHECK: omp.wsloop reduction(@neqv_reduction %[[VAL_7]]#0 -> %[[VAL_23:.*]] : !fir.ref<!fir.logical<4>>, @neqv_reduction %[[VAL_9]]#0 -> %[[VAL_24:.*]] : !fir.ref<!fir.logical<4>>, @neqv_reduction %[[VAL_11]]#0 -> %[[VAL_25:.*]] : !fir.ref<!fir.logical<4>>) for (%[[VAL_26:.*]]) : i32 = (%[[VAL_20]]) to (%[[VAL_21]]) inclusive step (%[[VAL_22]]) {
+! CHECK: fir.store %[[VAL_26]] to %[[VAL_19]]#1 : !fir.ref<i32>
+! CHECK: %[[VAL_27:.*]]:2 = hlfir.declare %[[VAL_23]] {uniq_name = "_QFmultiple_reductionsEx"} : (!fir.ref<!fir.logical<4>>) -> (!fir.ref<!fir.logical<4>>, !fir.ref<!fir.logical<4>>)
+! CHECK: %[[VAL_28:.*]]:2 = hlfir.declare %[[VAL_24]] {uniq_name = "_QFmultiple_reductionsEy"} : (!fir.ref<!fir.logical<4>>) -> (!fir.ref<!fir.logical<4>>, !fir.ref<!fir.logical<4>>)
+! CHECK: %[[VAL_29:.*]]:2 = hlfir.declare %[[VAL_25]] {uniq_name = "_QFmultiple_reductionsEz"} : (!fir.ref<!fir.logical<4>>) -> (!fir.ref<!fir.logical<4>>, !fir.ref<!fir.logical<4>>)
+! CHECK: %[[VAL_30:.*]] = fir.load %[[VAL_27]]#0 : !fir.ref<!fir.logical<4>>
+! CHECK: %[[VAL_31:.*]] = fir.load %[[VAL_19]]#0 : !fir.ref<i32>
+! CHECK: %[[VAL_32:.*]] = fir.convert %[[VAL_31]] : (i32) -> i64
+! CHECK: %[[VAL_33:.*]] = hlfir.designate %[[VAL_5]]#0 (%[[VAL_32]]) : (!fir.ref<!fir.array<100x!fir.logical<4>>>, i64) -> !fir.ref<!fir.logical<4>>
+! CHECK: %[[VAL_34:.*]] = fir.load %[[VAL_33]] : !fir.ref<!fir.logical<4>>
+! CHECK: %[[VAL_35:.*]] = fir.convert %[[VAL_30]] : (!fir.logical<4>) -> i1
+! CHECK: %[[VAL_36:.*]] = fir.convert %[[VAL_34]] : (!fir.logical<4>) -> i1
+! CHECK: %[[VAL_37:.*]] = arith.cmpi ne, %[[VAL_35]], %[[VAL_36]] : i1
+! CHECK: %[[VAL_38:.*]] = fir.convert %[[VAL_37]] : (i1) -> !fir.logical<4>
+! CHECK: hlfir.assign %[[VAL_38]] to %[[VAL_27]]#0 : !fir.logical<4>, !fir.ref<!fir.logical<4>>
+! CHECK: %[[VAL_39:.*]] = fir.load %[[VAL_28]]#0 : !fir.ref<!fir.logical<4>>
+! CHECK: %[[VAL_40:.*]] = fir.load %[[VAL_19]]#0 : !fir.ref<i32>
+! CHECK: %[[VAL_41:.*]] = fir.convert %[[VAL_40]] : (i32) -> i64
+! CHECK: %[[VAL_42:.*]] = hlfir.designate %[[VAL_5]]#0 (%[[VAL_41]]) : (!fir.ref<!fir.array<100x!fir.logical<4>>>, i64) -> !fir.ref<!fir.logical<4>>
+! CHECK: %[[VAL_43:.*]] = fir.load %[[VAL_42]] : !fir.ref<!fir.logical<4>>
+! CHECK: %[[VAL_44:.*]] = fir.convert %[[VAL_39]] : (!fir.logical<4>) -> i1
+! CHECK: %[[VAL_45:.*]] = fir.convert %[[VAL_43]] : (!fir.logical<4>) -> i1
+! CHECK: %[[VAL_46:.*]] = arith.cmpi ne, %[[VAL_44]], %[[VAL_45]] : i1
+! CHECK: %[[VAL_47:.*]] = fir.convert %[[VAL_46]] : (i1) -> !fir.logical<4>
+! CHECK: hlfir.assign %[[VAL_47]] to %[[VAL_28]]#0 : !fir.logical<4>, !fir.ref<!fir.logical<4>>
+! CHECK: %[[VAL_48:.*]] = fir.load %[[VAL_29]]#0 : !fir.ref<!fir.logical<4>>
+! CHECK: %[[VAL_49:.*]] = fir.load %[[VAL_19]]#0 : !fir.ref<i32>
+! CHECK: %[[VAL_50:.*]] = fir.convert %[[VAL_49]] : (i32) -> i64
+! CHECK: %[[VAL_51:.*]] = hlfir.designate %[[VAL_5]]#0 (%[[VAL_50]]) : (!fir.ref<!fir.array<100x!fir.logical<4>>>, i64) -> !fir.ref<!fir.logical<4>>
+! CHECK: %[[VAL_52:.*]] = fir.load %[[VAL_51]] : !fir.ref<!fir.logical<4>>
+! CHECK: %[[VAL_53:.*]] = fir.convert %[[VAL_48]] : (!fir.logical<4>) -> i1
+! CHECK: %[[VAL_54:.*]] = fir.convert %[[VAL_52]] : (!fir.logical<4>) -> i1
+! CHECK: %[[VAL_55:.*]] = arith.cmpi ne, %[[VAL_53]], %[[VAL_54]] : i1
+! CHECK: %[[VAL_56:.*]] = fir.convert %[[VAL_55]] : (i1) -> !fir.logical<4>
+! CHECK: hlfir.assign %[[VAL_56]] to %[[VAL_29]]#0 : !fir.logical<4>, !fir.ref<!fir.logical<4>>
+! CHECK: omp.yield
+! CHECK: omp.terminator
+! CHECK: return
+! CHECK: }
+
+
subroutine multiple_reductions(w)
logical :: x,y,z,w(100)
x = .true.
diff --git a/flang/test/Lower/OpenMP/wsloop-reduction-logical-or.f90 b/flang/test/Lower/OpenMP/wsloop-reduction-logical-or.f90
index 4f59ea7..f1ae1bc 100644
--- a/flang/test/Lower/OpenMP/wsloop-reduction-logical-or.f90
+++ b/flang/test/Lower/OpenMP/wsloop-reduction-logical-or.f90
@@ -1,43 +1,58 @@
! RUN: bbc -emit-hlfir -fopenmp %s -o - | FileCheck %s
! RUN: %flang_fc1 -emit-hlfir -fopenmp %s -o - | FileCheck %s
-!CHECK-LABEL: omp.reduction.declare
-!CHECK-SAME: @[[RED_NAME:.*]] : !fir.logical<4> init {
-!CHECK: ^bb0(%{{.*}}: !fir.logical<4>):
-!CHECK: %false = arith.constant false
-!CHECK: %[[false_fir:.*]] = fir.convert %false : (i1) -> !fir.logical<4>
-!CHECK: omp.yield(%[[false_fir]] : !fir.logical<4>)
-!CHECK: } combiner {
-!CHECK: ^bb0(%[[ARG0:.*]]: !fir.logical<4>, %[[ARG1:.*]]: !fir.logical<4>):
-!CHECK: %[[arg0_i1:.*]] = fir.convert %[[ARG0]] : (!fir.logical<4>) -> i1
-!CHECK: %[[arg1_i1:.*]] = fir.convert %[[ARG1]] : (!fir.logical<4>) -> i1
-!CHECK: %[[RES:.*]] = arith.ori %[[arg0_i1]], %[[arg1_i1]] : i1
-!CHECK: %[[RES_logical:.*]] = fir.convert %[[RES]] : (i1) -> !fir.logical<4>
-!CHECK: omp.yield(%[[RES_logical]] : !fir.logical<4>)
-!CHECK: }
+! NOTE: Assertions have been autogenerated by utils/generate-test-checks.py
+
+! CHECK-LABEL: omp.reduction.declare @or_reduction : !fir.logical<4> init {
+! CHECK: ^bb0(%[[VAL_0:.*]]: !fir.logical<4>):
+! CHECK: %[[VAL_1:.*]] = arith.constant false
+! CHECK: %[[VAL_2:.*]] = fir.convert %[[VAL_1]] : (i1) -> !fir.logical<4>
+! CHECK: omp.yield(%[[VAL_2]] : !fir.logical<4>)
+
+! CHECK-LABEL: } combiner {
+! CHECK: ^bb0(%[[VAL_0:.*]]: !fir.logical<4>, %[[VAL_1:.*]]: !fir.logical<4>):
+! CHECK: %[[VAL_2:.*]] = fir.convert %[[VAL_0]] : (!fir.logical<4>) -> i1
+! CHECK: %[[VAL_3:.*]] = fir.convert %[[VAL_1]] : (!fir.logical<4>) -> i1
+! CHECK: %[[VAL_4:.*]] = arith.ori %[[VAL_2]], %[[VAL_3]] : i1
+! CHECK: %[[VAL_5:.*]] = fir.convert %[[VAL_4]] : (i1) -> !fir.logical<4>
+! CHECK: omp.yield(%[[VAL_5]] : !fir.logical<4>)
+! CHECK: }
+
+! CHECK-LABEL: func.func @_QPsimple_reduction(
+! CHECK-SAME: %[[VAL_0:.*]]: !fir.ref<!fir.array<100x!fir.logical<4>>> {fir.bindc_name = "y"}) {
+! CHECK: %[[VAL_1:.*]] = fir.alloca i32 {bindc_name = "i", uniq_name = "_QFsimple_reductionEi"}
+! CHECK: %[[VAL_2:.*]]:2 = hlfir.declare %[[VAL_1]] {uniq_name = "_QFsimple_reductionEi"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
+! CHECK: %[[VAL_3:.*]] = fir.alloca !fir.logical<4> {bindc_name = "x", uniq_name = "_QFsimple_reductionEx"}
+! CHECK: %[[VAL_4:.*]]:2 = hlfir.declare %[[VAL_3]] {uniq_name = "_QFsimple_reductionEx"} : (!fir.ref<!fir.logical<4>>) -> (!fir.ref<!fir.logical<4>>, !fir.ref<!fir.logical<4>>)
+! CHECK: %[[VAL_5:.*]] = arith.constant 100 : index
+! CHECK: %[[VAL_6:.*]] = fir.shape %[[VAL_5]] : (index) -> !fir.shape<1>
+! CHECK: %[[VAL_7:.*]]:2 = hlfir.declare %[[VAL_0]](%[[VAL_6]]) {uniq_name = "_QFsimple_reductionEy"} : (!fir.ref<!fir.array<100x!fir.logical<4>>>, !fir.shape<1>) -> (!fir.ref<!fir.array<100x!fir.logical<4>>>, !fir.ref<!fir.array<100x!fir.logical<4>>>)
+! CHECK: %[[VAL_8:.*]] = arith.constant true
+! CHECK: %[[VAL_9:.*]] = fir.convert %[[VAL_8]] : (i1) -> !fir.logical<4>
+! CHECK: hlfir.assign %[[VAL_9]] to %[[VAL_4]]#0 : !fir.logical<4>, !fir.ref<!fir.logical<4>>
+! CHECK: omp.parallel {
+! CHECK: %[[VAL_10:.*]] = fir.alloca i32 {adapt.valuebyref, pinned}
+! CHECK: %[[VAL_11:.*]]:2 = hlfir.declare %[[VAL_10]] {uniq_name = "_QFsimple_reductionEi"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
+! CHECK: %[[VAL_12:.*]] = arith.constant 1 : i32
+! CHECK: %[[VAL_13:.*]] = arith.constant 100 : i32
+! CHECK: %[[VAL_14:.*]] = arith.constant 1 : i32
+! CHECK: omp.wsloop reduction(@or_reduction %[[VAL_4]]#0 -> %[[VAL_15:.*]] : !fir.ref<!fir.logical<4>>) for (%[[VAL_16:.*]]) : i32 = (%[[VAL_12]]) to (%[[VAL_13]]) inclusive step (%[[VAL_14]]) {
+! CHECK: fir.store %[[VAL_16]] to %[[VAL_11]]#1 : !fir.ref<i32>
+! CHECK: %[[VAL_17:.*]]:2 = hlfir.declare %[[VAL_15]] {uniq_name = "_QFsimple_reductionEx"} : (!fir.ref<!fir.logical<4>>) -> (!fir.ref<!fir.logical<4>>, !fir.ref<!fir.logical<4>>)
+! CHECK: %[[VAL_18:.*]] = fir.load %[[VAL_17]]#0 : !fir.ref<!fir.logical<4>>
+! CHECK: %[[VAL_19:.*]] = fir.load %[[VAL_11]]#0 : !fir.ref<i32>
+! CHECK: %[[VAL_20:.*]] = fir.convert %[[VAL_19]] : (i32) -> i64
+! CHECK: %[[VAL_21:.*]] = hlfir.designate %[[VAL_7]]#0 (%[[VAL_20]]) : (!fir.ref<!fir.array<100x!fir.logical<4>>>, i64) -> !fir.ref<!fir.logical<4>>
+! CHECK: %[[VAL_22:.*]] = fir.load %[[VAL_21]] : !fir.ref<!fir.logical<4>>
+! CHECK: %[[VAL_23:.*]] = fir.convert %[[VAL_18]] : (!fir.logical<4>) -> i1
+! CHECK: %[[VAL_24:.*]] = fir.convert %[[VAL_22]] : (!fir.logical<4>) -> i1
+! CHECK: %[[VAL_25:.*]] = arith.ori %[[VAL_23]], %[[VAL_24]] : i1
+! CHECK: %[[VAL_26:.*]] = fir.convert %[[VAL_25]] : (i1) -> !fir.logical<4>
+! CHECK: hlfir.assign %[[VAL_26]] to %[[VAL_17]]#0 : !fir.logical<4>, !fir.ref<!fir.logical<4>>
+! CHECK: omp.yield
+! CHECK: omp.terminator
+! CHECK: return
-!CHECK-LABEL: func.func @_QPsimple_reduction(
-!CHECK-SAME: %[[ARRAY:.*]]: !fir.ref<!fir.array<100x!fir.logical<4>>> {fir.bindc_name = "y"}) {
-!CHECK: %[[IREF:.*]] = fir.alloca i32 {bindc_name = "i", uniq_name = "_QFsimple_reductionEi"}
-!CHECK: %[[XREF:.*]] = fir.alloca !fir.logical<4> {bindc_name = "x", uniq_name = "_QFsimple_reductionEx"}
-!CHECK: %[[X_DECL:.*]]:2 = hlfir.declare %[[XREF]] {uniq_name = "_QFsimple_reductionEx"} : (!fir.ref<!fir.logical<4>>) -> (!fir.ref<!fir.logical<4>>, !fir.ref<!fir.logical<4>>)
-!CHECK: %[[Y_DECL:.*]]:2 = hlfir.declare %[[ARRAY]](%4) {uniq_name = "_QFsimple_reductionEy"} : (!fir.ref<!fir.array<100x!fir.logical<4>>>, !fir.shape<1>) -> (!fir.ref<!fir.array<100x!fir.logical<4>>>, !fir.ref<!fir.array<100x!fir.logical<4>>>)
-!CHECK: omp.parallel
-!CHECK: %[[I_PVT_REF:.*]] = fir.alloca i32 {adapt.valuebyref, pinned}
-!CHECK: %[[I_PVT_DECL:.*]]:2 = hlfir.declare %[[I_PVT_REF]] {uniq_name = "_QFsimple_reductionEi"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
-!CHECK: %[[C1_1:.*]] = arith.constant 1 : i32
-!CHECK: %[[C100:.*]] = arith.constant 100 : i32
-!CHECK: %[[C1_2:.*]] = arith.constant 1 : i32
-!CHECK: omp.wsloop reduction(@[[RED_NAME]] -> %[[X_DECL]]#0 : !fir.ref<!fir.logical<4>>) for (%[[IVAL:.*]]) : i32 = (%[[C1_1]]) to (%[[C100]]) inclusive step (%[[C1_2]]) {
-!CHECK: fir.store %[[IVAL]] to %[[I_PVT_DECL]]#1 : !fir.ref<i32>
-!CHECK: %[[I_PVT:.*]] = fir.load %[[I_PVT_DECL]]#0 : !fir.ref<i32>
-!CHECK: %[[I_PVT_64:.*]] = fir.convert %[[I_PVT]] : (i32) -> i64
-!CHECK: %[[Y_I_REF:.*]] = hlfir.designate %[[Y_DECL]]#0 (%[[I_PVT_64]]) : (!fir.ref<!fir.array<100x!fir.logical<4>>>, i64) -> !fir.ref<!fir.logical<4>>
-!CHECK: %[[Y_I_VAL:.*]] = fir.load %[[Y_I_REF]] : !fir.ref<!fir.logical<4>>
-!CHECK: omp.reduction %[[Y_I_VAL]], %[[X_DECL]]#0 : !fir.logical<4>, !fir.ref<!fir.logical<4>>
-!CHECK: omp.yield
-!CHECK: omp.terminator
-!CHECK: return
subroutine simple_reduction(y)
logical :: x, y(100)
x = .true.
@@ -50,28 +65,41 @@ subroutine simple_reduction(y)
!$omp end parallel
end subroutine
-!CHECK-LABEL: func.func @_QPsimple_reduction_switch_order(
-!CHECK-SAME: %[[ARRAY:.*]]: !fir.ref<!fir.array<100x!fir.logical<4>>> {fir.bindc_name = "y"}) {
-!CHECK: %[[IREF:.*]] = fir.alloca i32 {bindc_name = "i", uniq_name = "_QFsimple_reduction_switch_orderEi"}
-!CHECK: %[[XREF:.*]] = fir.alloca !fir.logical<4> {bindc_name = "x", uniq_name = "_QFsimple_reduction_switch_orderEx"}
-!CHECK: %[[X_DECL:.*]]:2 = hlfir.declare %[[XREF]] {uniq_name = "_QFsimple_reduction_switch_orderEx"} : (!fir.ref<!fir.logical<4>>) -> (!fir.ref<!fir.logical<4>>, !fir.ref<!fir.logical<4>>)
-!CHECK: %[[Y_DECL:.*]]:2 = hlfir.declare %[[ARRAY]](%{{.*}}) {uniq_name = "_QFsimple_reduction_switch_orderEy"} : (!fir.ref<!fir.array<100x!fir.logical<4>>>, !fir.shape<1>) -> (!fir.ref<!fir.array<100x!fir.logical<4>>>, !fir.ref<!fir.array<100x!fir.logical<4>>>)
-!CHECK: omp.parallel
-!CHECK: %[[I_PVT_REF:.*]] = fir.alloca i32 {adapt.valuebyref, pinned}
-!CHECK: %[[I_PVT_DECL:.*]]:2 = hlfir.declare %[[I_PVT_REF]] {uniq_name = "_QFsimple_reduction_switch_orderEi"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
-!CHECK: %[[C1_1:.*]] = arith.constant 1 : i32
-!CHECK: %[[C100:.*]] = arith.constant 100 : i32
-!CHECK: %[[C1_2:.*]] = arith.constant 1 : i32
-!CHECK: omp.wsloop reduction(@[[RED_NAME]] -> %[[X_DECL]]#0 : !fir.ref<!fir.logical<4>>) for (%[[IVAL:.*]]) : i32 = (%[[C1_1]]) to (%[[C100]]) inclusive step (%[[C1_2]]) {
-!CHECK: fir.store %[[IVAL]] to %[[I_PVT_DECL]]#1 : !fir.ref<i32>
-!CHECK: %[[I_PVT_VAL:.*]] = fir.load %[[I_PVT_DECL]]#0 : !fir.ref<i32>
-!CHECK: %[[CONVI_64:.*]] = fir.convert %[[I_PVT_VAL]] : (i32) -> i64
-!CHECK: %[[Y_I_REF:.*]] = hlfir.designate %[[Y_DECL]]#0 (%[[CONVI_64]]) : (!fir.ref<!fir.array<100x!fir.logical<4>>>, i64) -> !fir.ref<!fir.logical<4>>
-!CHECK: %[[YVAL:.*]] = fir.load %[[Y_I_REF]] : !fir.ref<!fir.logical<4>>
-!CHECK: omp.reduction %[[YVAL]], %[[X_DECL]]#0 : !fir.logical<4>, !fir.ref<!fir.logical<4>>
-!CHECK: omp.yield
-!CHECK: omp.terminator
-!CHECK: return
+! CHECK-LABEL: func.func @_QPsimple_reduction_switch_order(
+! CHECK-SAME: %[[VAL_0:.*]]: !fir.ref<!fir.array<100x!fir.logical<4>>> {fir.bindc_name = "y"}) {
+! CHECK: %[[VAL_1:.*]] = fir.alloca i32 {bindc_name = "i", uniq_name = "_QFsimple_reduction_switch_orderEi"}
+! CHECK: %[[VAL_2:.*]]:2 = hlfir.declare %[[VAL_1]] {uniq_name = "_QFsimple_reduction_switch_orderEi"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
+! CHECK: %[[VAL_3:.*]] = fir.alloca !fir.logical<4> {bindc_name = "x", uniq_name = "_QFsimple_reduction_switch_orderEx"}
+! CHECK: %[[VAL_4:.*]]:2 = hlfir.declare %[[VAL_3]] {uniq_name = "_QFsimple_reduction_switch_orderEx"} : (!fir.ref<!fir.logical<4>>) -> (!fir.ref<!fir.logical<4>>, !fir.ref<!fir.logical<4>>)
+! CHECK: %[[VAL_5:.*]] = arith.constant 100 : index
+! CHECK: %[[VAL_6:.*]] = fir.shape %[[VAL_5]] : (index) -> !fir.shape<1>
+! CHECK: %[[VAL_7:.*]]:2 = hlfir.declare %[[VAL_0]](%[[VAL_6]]) {uniq_name = "_QFsimple_reduction_switch_orderEy"} : (!fir.ref<!fir.array<100x!fir.logical<4>>>, !fir.shape<1>) -> (!fir.ref<!fir.array<100x!fir.logical<4>>>, !fir.ref<!fir.array<100x!fir.logical<4>>>)
+! CHECK: %[[VAL_8:.*]] = arith.constant true
+! CHECK: %[[VAL_9:.*]] = fir.convert %[[VAL_8]] : (i1) -> !fir.logical<4>
+! CHECK: hlfir.assign %[[VAL_9]] to %[[VAL_4]]#0 : !fir.logical<4>, !fir.ref<!fir.logical<4>>
+! CHECK: omp.parallel {
+! CHECK: %[[VAL_10:.*]] = fir.alloca i32 {adapt.valuebyref, pinned}
+! CHECK: %[[VAL_11:.*]]:2 = hlfir.declare %[[VAL_10]] {uniq_name = "_QFsimple_reduction_switch_orderEi"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
+! CHECK: %[[VAL_12:.*]] = arith.constant 1 : i32
+! CHECK: %[[VAL_13:.*]] = arith.constant 100 : i32
+! CHECK: %[[VAL_14:.*]] = arith.constant 1 : i32
+! CHECK: omp.wsloop reduction(@or_reduction %[[VAL_4]]#0 -> %[[VAL_15:.*]] : !fir.ref<!fir.logical<4>>) for (%[[VAL_16:.*]]) : i32 = (%[[VAL_12]]) to (%[[VAL_13]]) inclusive step (%[[VAL_14]]) {
+! CHECK: fir.store %[[VAL_16]] to %[[VAL_11]]#1 : !fir.ref<i32>
+! CHECK: %[[VAL_17:.*]]:2 = hlfir.declare %[[VAL_15]] {uniq_name = "_QFsimple_reduction_switch_orderEx"} : (!fir.ref<!fir.logical<4>>) -> (!fir.ref<!fir.logical<4>>, !fir.ref<!fir.logical<4>>)
+! CHECK: %[[VAL_18:.*]] = fir.load %[[VAL_11]]#0 : !fir.ref<i32>
+! CHECK: %[[VAL_19:.*]] = fir.convert %[[VAL_18]] : (i32) -> i64
+! CHECK: %[[VAL_20:.*]] = hlfir.designate %[[VAL_7]]#0 (%[[VAL_19]]) : (!fir.ref<!fir.array<100x!fir.logical<4>>>, i64) -> !fir.ref<!fir.logical<4>>
+! CHECK: %[[VAL_21:.*]] = fir.load %[[VAL_20]] : !fir.ref<!fir.logical<4>>
+! CHECK: %[[VAL_22:.*]] = fir.load %[[VAL_17]]#0 : !fir.ref<!fir.logical<4>>
+! CHECK: %[[VAL_23:.*]] = fir.convert %[[VAL_21]] : (!fir.logical<4>) -> i1
+! CHECK: %[[VAL_24:.*]] = fir.convert %[[VAL_22]] : (!fir.logical<4>) -> i1
+! CHECK: %[[VAL_25:.*]] = arith.ori %[[VAL_23]], %[[VAL_24]] : i1
+! CHECK: %[[VAL_26:.*]] = fir.convert %[[VAL_25]] : (i1) -> !fir.logical<4>
+! CHECK: hlfir.assign %[[VAL_26]] to %[[VAL_17]]#0 : !fir.logical<4>, !fir.ref<!fir.logical<4>>
+! CHECK: omp.yield
+! CHECK: omp.terminator
+! CHECK: return
+
subroutine simple_reduction_switch_order(y)
logical :: x, y(100)
x = .true.
@@ -84,44 +112,75 @@ subroutine simple_reduction_switch_order(y)
!$omp end parallel
end subroutine
-!CHECK-LABEL: func.func @_QPmultiple_reductions
-!CHECK-SAME %[[ARRAY:.*]]: !fir.ref<!fir.array<100x!fir.logical<4>>> {fir.bindc_name = "w"}) {
-!CHECK: %[[IREF:.*]] = fir.alloca i32 {bindc_name = "i", uniq_name = "_QFmultiple_reductionsEi"}
-!CHECK: %[[I_DECL:.*]]:2 = hlfir.declare %[[IREF]] {uniq_name = "_QFmultiple_reductionsEi"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
-!CHECK: %[[W_DECL:.*]]:2 = hlfir.declare %[[ARRAY]](%{{.*}}) {uniq_name = "_QFmultiple_reductionsEw"} : (!fir.ref<!fir.array<100x!fir.logical<4>>>, !fir.shape<1>) -> (!fir.ref<!fir.array<100x!fir.logical<4>>>, !fir.ref<!fir.array<100x!fir.logical<4>>>)
-!CHECK: %[[XREF:.*]] = fir.alloca !fir.logical<4> {bindc_name = "x", uniq_name = "_QFmultiple_reductionsEx"}
-!CHECK: %[[X_DECL:.*]]:2 = hlfir.declare %[[XREF]] {uniq_name = "_QFmultiple_reductionsEx"} : (!fir.ref<!fir.logical<4>>) -> (!fir.ref<!fir.logical<4>>, !fir.ref<!fir.logical<4>>)
-!CHECK: %[[YREF:.*]] = fir.alloca !fir.logical<4> {bindc_name = "y", uniq_name = "_QFmultiple_reductionsEy"}
-!CHECK: %[[Y_DECL:.*]]:2 = hlfir.declare %[[YREF]] {uniq_name = "_QFmultiple_reductionsEy"} : (!fir.ref<!fir.logical<4>>) -> (!fir.ref<!fir.logical<4>>, !fir.ref<!fir.logical<4>>)
-!CHECK: %[[ZREF:.*]] = fir.alloca !fir.logical<4> {bindc_name = "z", uniq_name = "_QFmultiple_reductionsEz"}
-!CHECK: %[[Z_DECL:.*]]:2 = hlfir.declare %[[ZREF]] {uniq_name = "_QFmultiple_reductionsEz"} : (!fir.ref<!fir.logical<4>>) -> (!fir.ref<!fir.logical<4>>, !fir.ref<!fir.logical<4>>)
-!CHECK: omp.parallel
-!CHECK: %[[I_PVT_REF:.*]] = fir.alloca i32 {adapt.valuebyref, pinned}
-!CHECK: %[[I_PVT_DECL:.*]]:2 = hlfir.declare %[[I_PVT_REF]] {uniq_name = "_QFmultiple_reductionsEi"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
-!CHECK: %[[C1_1:.*]] = arith.constant 1 : i32
-!CHECK: %[[C100:.*]] = arith.constant 100 : i32
-!CHECK: %[[C1_2:.*]] = arith.constant 1 : i32
-!CHECK: omp.wsloop reduction(@[[RED_NAME]] -> %[[X_DECL]]#0 : !fir.ref<!fir.logical<4>>, @[[RED_NAME]] -> %[[Y_DECL]]#0 :
-!!fir.ref<!fir.logical<4>>, @[[RED_NAME]] -> %[[Z_DECL]]#0 : !fir.ref<!fir.logical<4>>) for (%[[IVAL:.*]]) : i32 = (%[[C1_1]]) to (%[[C100]]) inclusive step (%[[C1_2]]) {
-!CHECK: fir.store %[[IVAL]] to %[[I_PVT_DECL]]#1 : !fir.ref<i32>
-!CHECK: %[[I_PVT_VAL1:.*]] = fir.load %[[I_PVT_DECL]]#0 : !fir.ref<i32>
-!CHECK: %[[CONVI_64_1:.*]] = fir.convert %[[I_PVT_VAL1]] : (i32) -> i64
-!CHECK: %[[W_I_REF:.*]] = hlfir.designate %[[W_DECL]]#0 (%[[CONVI_64_1]]) : (!fir.ref<!fir.array<100x!fir.logical<4>>>, i64) -> !fir.ref<!fir.logical<4>>
-!CHECK: %[[W_I_VAL:.*]] = fir.load %[[W_I_REF]] : !fir.ref<!fir.logical<4>>
-!CHECK: omp.reduction %[[W_I_VAL]], %[[X_DECL]]#0 : !fir.logical<4>, !fir.ref<!fir.logical<4>>
-!CHECK: %[[I_PVT_VAL2:.*]] = fir.load %[[I_PVT_DECL]]#0 : !fir.ref<i32>
-!CHECK: %[[CONVI_64_2:.*]] = fir.convert %[[I_PVT_VAL2]] : (i32) -> i64
-!CHECK: %[[W_I_REF:.*]] = hlfir.designate %[[W_DECL]]#0 (%[[CONVI_64_2]]) : (!fir.ref<!fir.array<100x!fir.logical<4>>>, i64) -> !fir.ref<!fir.logical<4>>
-!CHECK: %[[W_I_VAL:.*]] = fir.load %[[W_I_REF]] : !fir.ref<!fir.logical<4>>
-!CHECK: omp.reduction %[[W_I_VAL]], %[[Y_DECL]]#0 : !fir.logical<4>, !fir.ref<!fir.logical<4>>
-!CHECK: %[[I_PVT_VAL2:.*]] = fir.load %[[I_PVT_DECL]]#0 : !fir.ref<i32>
-!CHECK: %[[CONVI_64_2:.*]] = fir.convert %[[I_PVT_VAL2]] : (i32) -> i64
-!CHECK: %[[W_I_REF:.*]] = hlfir.designate %[[W_DECL]]#0 (%[[CONVI_64_2]]) : (!fir.ref<!fir.array<100x!fir.logical<4>>>, i64) -> !fir.ref<!fir.logical<4>>
-!CHECK: %[[W_I_VAL:.*]] = fir.load %[[W_I_REF]] : !fir.ref<!fir.logical<4>>
-!CHECK: omp.reduction %[[W_I_VAL]], %[[Z_DECL]]#0 : !fir.logical<4>, !fir.ref<!fir.logical<4>>
-!CHECK: omp.yield
-!CHECK: omp.terminator
-!CHECK: return
+! CHECK-LABEL: func.func @_QPmultiple_reductions(
+! CHECK-SAME: %[[VAL_0:.*]]: !fir.ref<!fir.array<100x!fir.logical<4>>> {fir.bindc_name = "w"}) {
+! CHECK: %[[VAL_1:.*]] = fir.alloca i32 {bindc_name = "i", uniq_name = "_QFmultiple_reductionsEi"}
+! CHECK: %[[VAL_2:.*]]:2 = hlfir.declare %[[VAL_1]] {uniq_name = "_QFmultiple_reductionsEi"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
+! CHECK: %[[VAL_3:.*]] = arith.constant 100 : index
+! CHECK: %[[VAL_4:.*]] = fir.shape %[[VAL_3]] : (index) -> !fir.shape<1>
+! CHECK: %[[VAL_5:.*]]:2 = hlfir.declare %[[VAL_0]](%[[VAL_4]]) {uniq_name = "_QFmultiple_reductionsEw"} : (!fir.ref<!fir.array<100x!fir.logical<4>>>, !fir.shape<1>) -> (!fir.ref<!fir.array<100x!fir.logical<4>>>, !fir.ref<!fir.array<100x!fir.logical<4>>>)
+! CHECK: %[[VAL_6:.*]] = fir.alloca !fir.logical<4> {bindc_name = "x", uniq_name = "_QFmultiple_reductionsEx"}
+! CHECK: %[[VAL_7:.*]]:2 = hlfir.declare %[[VAL_6]] {uniq_name = "_QFmultiple_reductionsEx"} : (!fir.ref<!fir.logical<4>>) -> (!fir.ref<!fir.logical<4>>, !fir.ref<!fir.logical<4>>)
+! CHECK: %[[VAL_8:.*]] = fir.alloca !fir.logical<4> {bindc_name = "y", uniq_name = "_QFmultiple_reductionsEy"}
+! CHECK: %[[VAL_9:.*]]:2 = hlfir.declare %[[VAL_8]] {uniq_name = "_QFmultiple_reductionsEy"} : (!fir.ref<!fir.logical<4>>) -> (!fir.ref<!fir.logical<4>>, !fir.ref<!fir.logical<4>>)
+! CHECK: %[[VAL_10:.*]] = fir.alloca !fir.logical<4> {bindc_name = "z", uniq_name = "_QFmultiple_reductionsEz"}
+! CHECK: %[[VAL_11:.*]]:2 = hlfir.declare %[[VAL_10]] {uniq_name = "_QFmultiple_reductionsEz"} : (!fir.ref<!fir.logical<4>>) -> (!fir.ref<!fir.logical<4>>, !fir.ref<!fir.logical<4>>)
+! CHECK: %[[VAL_12:.*]] = arith.constant true
+! CHECK: %[[VAL_13:.*]] = fir.convert %[[VAL_12]] : (i1) -> !fir.logical<4>
+! CHECK: hlfir.assign %[[VAL_13]] to %[[VAL_7]]#0 : !fir.logical<4>, !fir.ref<!fir.logical<4>>
+! CHECK: %[[VAL_14:.*]] = arith.constant true
+! CHECK: %[[VAL_15:.*]] = fir.convert %[[VAL_14]] : (i1) -> !fir.logical<4>
+! CHECK: hlfir.assign %[[VAL_15]] to %[[VAL_9]]#0 : !fir.logical<4>, !fir.ref<!fir.logical<4>>
+! CHECK: %[[VAL_16:.*]] = arith.constant true
+! CHECK: %[[VAL_17:.*]] = fir.convert %[[VAL_16]] : (i1) -> !fir.logical<4>
+! CHECK: hlfir.assign %[[VAL_17]] to %[[VAL_11]]#0 : !fir.logical<4>, !fir.ref<!fir.logical<4>>
+! CHECK: omp.parallel {
+! CHECK: %[[VAL_18:.*]] = fir.alloca i32 {adapt.valuebyref, pinned}
+! CHECK: %[[VAL_19:.*]]:2 = hlfir.declare %[[VAL_18]] {uniq_name = "_QFmultiple_reductionsEi"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
+! CHECK: %[[VAL_20:.*]] = arith.constant 1 : i32
+! CHECK: %[[VAL_21:.*]] = arith.constant 100 : i32
+! CHECK: %[[VAL_22:.*]] = arith.constant 1 : i32
+! CHECK: omp.wsloop reduction(@or_reduction %[[VAL_7]]#0 -> %[[VAL_23:.*]] : !fir.ref<!fir.logical<4>>, @or_reduction %[[VAL_9]]#0 -> %[[VAL_24:.*]] : !fir.ref<!fir.logical<4>>, @or_reduction %[[VAL_11]]#0 -> %[[VAL_25:.*]] : !fir.ref<!fir.logical<4>>) for (%[[VAL_26:.*]]) : i32 = (%[[VAL_20]]) to (%[[VAL_21]]) inclusive step (%[[VAL_22]]) {
+! CHECK: fir.store %[[VAL_26]] to %[[VAL_19]]#1 : !fir.ref<i32>
+! CHECK: %[[VAL_27:.*]]:2 = hlfir.declare %[[VAL_23]] {uniq_name = "_QFmultiple_reductionsEx"} : (!fir.ref<!fir.logical<4>>) -> (!fir.ref<!fir.logical<4>>, !fir.ref<!fir.logical<4>>)
+! CHECK: %[[VAL_28:.*]]:2 = hlfir.declare %[[VAL_24]] {uniq_name = "_QFmultiple_reductionsEy"} : (!fir.ref<!fir.logical<4>>) -> (!fir.ref<!fir.logical<4>>, !fir.ref<!fir.logical<4>>)
+! CHECK: %[[VAL_29:.*]]:2 = hlfir.declare %[[VAL_25]] {uniq_name = "_QFmultiple_reductionsEz"} : (!fir.ref<!fir.logical<4>>) -> (!fir.ref<!fir.logical<4>>, !fir.ref<!fir.logical<4>>)
+! CHECK: %[[VAL_30:.*]] = fir.load %[[VAL_27]]#0 : !fir.ref<!fir.logical<4>>
+! CHECK: %[[VAL_31:.*]] = fir.load %[[VAL_19]]#0 : !fir.ref<i32>
+! CHECK: %[[VAL_32:.*]] = fir.convert %[[VAL_31]] : (i32) -> i64
+! CHECK: %[[VAL_33:.*]] = hlfir.designate %[[VAL_5]]#0 (%[[VAL_32]]) : (!fir.ref<!fir.array<100x!fir.logical<4>>>, i64) -> !fir.ref<!fir.logical<4>>
+! CHECK: %[[VAL_34:.*]] = fir.load %[[VAL_33]] : !fir.ref<!fir.logical<4>>
+! CHECK: %[[VAL_35:.*]] = fir.convert %[[VAL_30]] : (!fir.logical<4>) -> i1
+! CHECK: %[[VAL_36:.*]] = fir.convert %[[VAL_34]] : (!fir.logical<4>) -> i1
+! CHECK: %[[VAL_37:.*]] = arith.ori %[[VAL_35]], %[[VAL_36]] : i1
+! CHECK: %[[VAL_38:.*]] = fir.convert %[[VAL_37]] : (i1) -> !fir.logical<4>
+! CHECK: hlfir.assign %[[VAL_38]] to %[[VAL_27]]#0 : !fir.logical<4>, !fir.ref<!fir.logical<4>>
+! CHECK: %[[VAL_39:.*]] = fir.load %[[VAL_28]]#0 : !fir.ref<!fir.logical<4>>
+! CHECK: %[[VAL_40:.*]] = fir.load %[[VAL_19]]#0 : !fir.ref<i32>
+! CHECK: %[[VAL_41:.*]] = fir.convert %[[VAL_40]] : (i32) -> i64
+! CHECK: %[[VAL_42:.*]] = hlfir.designate %[[VAL_5]]#0 (%[[VAL_41]]) : (!fir.ref<!fir.array<100x!fir.logical<4>>>, i64) -> !fir.ref<!fir.logical<4>>
+! CHECK: %[[VAL_43:.*]] = fir.load %[[VAL_42]] : !fir.ref<!fir.logical<4>>
+! CHECK: %[[VAL_44:.*]] = fir.convert %[[VAL_39]] : (!fir.logical<4>) -> i1
+! CHECK: %[[VAL_45:.*]] = fir.convert %[[VAL_43]] : (!fir.logical<4>) -> i1
+! CHECK: %[[VAL_46:.*]] = arith.ori %[[VAL_44]], %[[VAL_45]] : i1
+! CHECK: %[[VAL_47:.*]] = fir.convert %[[VAL_46]] : (i1) -> !fir.logical<4>
+! CHECK: hlfir.assign %[[VAL_47]] to %[[VAL_28]]#0 : !fir.logical<4>, !fir.ref<!fir.logical<4>>
+! CHECK: %[[VAL_48:.*]] = fir.load %[[VAL_29]]#0 : !fir.ref<!fir.logical<4>>
+! CHECK: %[[VAL_49:.*]] = fir.load %[[VAL_19]]#0 : !fir.ref<i32>
+! CHECK: %[[VAL_50:.*]] = fir.convert %[[VAL_49]] : (i32) -> i64
+! CHECK: %[[VAL_51:.*]] = hlfir.designate %[[VAL_5]]#0 (%[[VAL_50]]) : (!fir.ref<!fir.array<100x!fir.logical<4>>>, i64) -> !fir.ref<!fir.logical<4>>
+! CHECK: %[[VAL_52:.*]] = fir.load %[[VAL_51]] : !fir.ref<!fir.logical<4>>
+! CHECK: %[[VAL_53:.*]] = fir.convert %[[VAL_48]] : (!fir.logical<4>) -> i1
+! CHECK: %[[VAL_54:.*]] = fir.convert %[[VAL_52]] : (!fir.logical<4>) -> i1
+! CHECK: %[[VAL_55:.*]] = arith.ori %[[VAL_53]], %[[VAL_54]] : i1
+! CHECK: %[[VAL_56:.*]] = fir.convert %[[VAL_55]] : (i1) -> !fir.logical<4>
+! CHECK: hlfir.assign %[[VAL_56]] to %[[VAL_29]]#0 : !fir.logical<4>, !fir.ref<!fir.logical<4>>
+! CHECK: omp.yield
+! CHECK: omp.terminator
+! CHECK: return
+
+
+
subroutine multiple_reductions(w)
logical :: x,y,z,w(100)
x = .true.
diff --git a/flang/test/Lower/OpenMP/wsloop-reduction-max-2.f90 b/flang/test/Lower/OpenMP/wsloop-reduction-max-2.f90
index 7e07947..1f4d619 100644
--- a/flang/test/Lower/OpenMP/wsloop-reduction-max-2.f90
+++ b/flang/test/Lower/OpenMP/wsloop-reduction-max-2.f90
@@ -2,7 +2,8 @@
! RUN: %flang_fc1 -emit-hlfir -fopenmp -o - %s 2>&1 | FileCheck %s
! CHECK: omp.wsloop reduction(@max_i_32
-! CHECK: omp.reduction
+! CHECK: arith.cmpi sgt
+! CHECK: arith.select
module m1
intrinsic max
diff --git a/flang/test/Lower/OpenMP/wsloop-reduction-max-hlfir.f90 b/flang/test/Lower/OpenMP/wsloop-reduction-max-hlfir.f90
index 0c5d992..ed25ced 100644
--- a/flang/test/Lower/OpenMP/wsloop-reduction-max-hlfir.f90
+++ b/flang/test/Lower/OpenMP/wsloop-reduction-max-hlfir.f90
@@ -1,26 +1,48 @@
! RUN: bbc -emit-hlfir -fopenmp -o - %s 2>&1 | FileCheck %s
! RUN: %flang_fc1 -emit-hlfir -fopenmp -o - %s 2>&1 | FileCheck %s
-!CHECK: omp.reduction.declare @[[MAX_DECLARE_I:.*]] : i32 init {
-!CHECK: %[[MINIMUM_VAL_I:.*]] = arith.constant -2147483648 : i32
-!CHECK: omp.yield(%[[MINIMUM_VAL_I]] : i32)
-!CHECK: combiner
-!CHECK: ^bb0(%[[ARG0_I:.*]]: i32, %[[ARG1_I:.*]]: i32):
-!CHECK: %[[COMB_VAL_I:.*]] = arith.maxsi %[[ARG0_I]], %[[ARG1_I]] : i32
-!CHECK: omp.yield(%[[COMB_VAL_I]] : i32)
+! NOTE: Assertions have been autogenerated by utils/generate-test-checks.py
+
+! CHECK-LABEL: omp.reduction.declare @max_i_32 : i32 init {
+! CHECK: ^bb0(%[[VAL_0:.*]]: i32):
+! CHECK: %[[VAL_1:.*]] = arith.constant -2147483648 : i32
+! CHECK: omp.yield(%[[VAL_1]] : i32)
+
+! CHECK-LABEL: } combiner {
+! CHECK: ^bb0(%[[VAL_0:.*]]: i32, %[[VAL_1:.*]]: i32):
+! CHECK: %[[VAL_2:.*]] = arith.maxsi %[[VAL_0]], %[[VAL_1]] : i32
+! CHECK: omp.yield(%[[VAL_2]] : i32)
+! CHECK: }
+
+! CHECK-LABEL: func.func @_QPreduction_max_int(
+! CHECK-SAME: %[[VAL_0:.*]]: !fir.box<!fir.array<?xi32>> {fir.bindc_name = "y"}) {
+! CHECK: %[[VAL_1:.*]] = fir.alloca i32 {bindc_name = "i", uniq_name = "_QFreduction_max_intEi"}
+! CHECK: %[[VAL_2:.*]]:2 = hlfir.declare %[[VAL_1]] {uniq_name = "_QFreduction_max_intEi"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
+! CHECK: %[[VAL_3:.*]] = fir.alloca i32 {bindc_name = "x", uniq_name = "_QFreduction_max_intEx"}
+! CHECK: %[[VAL_4:.*]]:2 = hlfir.declare %[[VAL_3]] {uniq_name = "_QFreduction_max_intEx"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
+! CHECK: %[[VAL_5:.*]]:2 = hlfir.declare %[[VAL_0]] {uniq_name = "_QFreduction_max_intEy"} : (!fir.box<!fir.array<?xi32>>) -> (!fir.box<!fir.array<?xi32>>, !fir.box<!fir.array<?xi32>>)
+! CHECK: %[[VAL_6:.*]] = arith.constant 0 : i32
+! CHECK: hlfir.assign %[[VAL_6]] to %[[VAL_4]]#0 : i32, !fir.ref<i32>
+! CHECK: omp.parallel {
+! CHECK: %[[VAL_7:.*]] = fir.alloca i32 {adapt.valuebyref, pinned}
+! CHECK: %[[VAL_8:.*]]:2 = hlfir.declare %[[VAL_7]] {uniq_name = "_QFreduction_max_intEi"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
+! CHECK: %[[VAL_9:.*]] = arith.constant 1 : i32
+! CHECK: %[[VAL_10:.*]] = arith.constant 100 : i32
+! CHECK: %[[VAL_11:.*]] = arith.constant 1 : i32
+! CHECK: omp.wsloop reduction(@max_i_32 %[[VAL_4]]#0 -> %[[VAL_12:.*]] : !fir.ref<i32>) for (%[[VAL_13:.*]]) : i32 = (%[[VAL_9]]) to (%[[VAL_10]]) inclusive step (%[[VAL_11]]) {
+! CHECK: fir.store %[[VAL_13]] to %[[VAL_8]]#1 : !fir.ref<i32>
+! CHECK: %[[VAL_14:.*]]:2 = hlfir.declare %[[VAL_12]] {uniq_name = "_QFreduction_max_intEx"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
+! CHECK: %[[VAL_15:.*]] = fir.load %[[VAL_8]]#0 : !fir.ref<i32>
+! CHECK: %[[VAL_16:.*]] = fir.convert %[[VAL_15]] : (i32) -> i64
+! CHECK: %[[VAL_17:.*]] = hlfir.designate %[[VAL_5]]#0 (%[[VAL_16]]) : (!fir.box<!fir.array<?xi32>>, i64) -> !fir.ref<i32>
+! CHECK: %[[VAL_18:.*]] = fir.load %[[VAL_14]]#0 : !fir.ref<i32>
+! CHECK: %[[VAL_19:.*]] = fir.load %[[VAL_17]] : !fir.ref<i32>
+! CHECK: %[[VAL_20:.*]] = arith.cmpi sgt, %[[VAL_18]], %[[VAL_19]] : i32
+! CHECK: %[[VAL_21:.*]] = arith.select %[[VAL_20]], %[[VAL_18]], %[[VAL_19]] : i32
+! CHECK: hlfir.assign %[[VAL_21]] to %[[VAL_14]]#0 : i32, !fir.ref<i32>
+! CHECK: omp.yield
+! CHECK: omp.terminator
-!CHECK-LABEL: @_QPreduction_max_int
-!CHECK-SAME: %[[Y_BOX:.*]]: !fir.box<!fir.array<?xi32>>
-!CHECK: %[[X_REF:.*]] = fir.alloca i32 {bindc_name = "x", uniq_name = "_QFreduction_max_intEx"}
-!CHECK: %[[X_DECL:.*]]:2 = hlfir.declare %[[X_REF]] {uniq_name = "_QFreduction_max_intEx"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
-!CHECK: %[[Y_DECL:.*]]:2 = hlfir.declare %[[Y_BOX]] {uniq_name = "_QFreduction_max_intEy"} : (!fir.box<!fir.array<?xi32>>) -> (!fir.box<!fir.array<?xi32>>, !fir.box<!fir.array<?xi32>>)
-!CHECK: omp.parallel
-!CHECK: omp.wsloop reduction(@[[MAX_DECLARE_I]] -> %[[X_DECL]]#0 : !fir.ref<i32>) for
-!CHECK: %[[Y_I_REF:.*]] = hlfir.designate %[[Y_DECL]]#0 ({{.*}}) : (!fir.box<!fir.array<?xi32>>, i64) -> !fir.ref<i32>
-!CHECK: %[[Y_I:.*]] = fir.load %[[Y_I_REF]] : !fir.ref<i32>
-!CHECK: omp.reduction %[[Y_I]], %[[X_DECL]]#0 : i32, !fir.ref<i32>
-!CHECK: omp.yield
-!CHECK: omp.terminator
subroutine reduction_max_int(y)
integer :: x, y(:)
diff --git a/flang/test/Lower/OpenMP/wsloop-reduction-max.f90 b/flang/test/Lower/OpenMP/wsloop-reduction-max.f90
index a2c4b54..ea3b1be 100644
--- a/flang/test/Lower/OpenMP/wsloop-reduction-max.f90
+++ b/flang/test/Lower/OpenMP/wsloop-reduction-max.f90
@@ -1,56 +1,114 @@
! RUN: bbc -emit-hlfir -fopenmp -o - %s 2>&1 | FileCheck %s
! RUN: %flang_fc1 -emit-hlfir -fopenmp -o - %s 2>&1 | FileCheck %s
-!CHECK: omp.reduction.declare @[[MAX_DECLARE_F:.*]] : f32 init {
-!CHECK: %[[MINIMUM_VAL_F:.*]] = arith.constant -3.40282347E+38 : f32
-!CHECK: omp.yield(%[[MINIMUM_VAL_F]] : f32)
-!CHECK: combiner
-!CHECK: ^bb0(%[[ARG0_F:.*]]: f32, %[[ARG1_F:.*]]: f32):
-!CHECK: %[[COMB_VAL_F:.*]] = arith.maximumf %[[ARG0_F]], %[[ARG1_F]] {{.*}}: f32
-!CHECK: omp.yield(%[[COMB_VAL_F]] : f32)
+! NOTE: Assertions have been autogenerated by utils/generate-test-checks.py
-!CHECK: omp.reduction.declare @[[MAX_DECLARE_I:.*]] : i32 init {
-!CHECK: %[[MINIMUM_VAL_I:.*]] = arith.constant -2147483648 : i32
-!CHECK: omp.yield(%[[MINIMUM_VAL_I]] : i32)
-!CHECK: combiner
-!CHECK: ^bb0(%[[ARG0_I:.*]]: i32, %[[ARG1_I:.*]]: i32):
-!CHECK: %[[COMB_VAL_I:.*]] = arith.maxsi %[[ARG0_I]], %[[ARG1_I]] : i32
-!CHECK: omp.yield(%[[COMB_VAL_I]] : i32)
+! CHECK-LABEL: omp.reduction.declare @max_f_32 : f32 init {
+! CHECK: ^bb0(%[[VAL_0:.*]]: f32):
+! CHECK: %[[VAL_1:.*]] = arith.constant -3.40282347E+38 : f32
+! CHECK: omp.yield(%[[VAL_1]] : f32)
+
+! CHECK-LABEL: } combiner {
+! CHECK: ^bb0(%[[VAL_0:.*]]: f32, %[[VAL_1:.*]]: f32):
+! CHECK: %[[VAL_2:.*]] = arith.maximumf %[[VAL_0]], %[[VAL_1]] fastmath<contract> : f32
+! CHECK: omp.yield(%[[VAL_2]] : f32)
+! CHECK: }
+
+! CHECK-LABEL: omp.reduction.declare @max_i_32 : i32 init {
+! CHECK: ^bb0(%[[VAL_0:.*]]: i32):
+! CHECK: %[[VAL_1:.*]] = arith.constant -2147483648 : i32
+! CHECK: omp.yield(%[[VAL_1]] : i32)
+
+! CHECK-LABEL: } combiner {
+! CHECK: ^bb0(%[[VAL_0:.*]]: i32, %[[VAL_1:.*]]: i32):
+! CHECK: %[[VAL_2:.*]] = arith.maxsi %[[VAL_0]], %[[VAL_1]] : i32
+! CHECK: omp.yield(%[[VAL_2]] : i32)
+! CHECK: }
+
+! CHECK-LABEL: func.func @_QPreduction_max_int(
+! CHECK-SAME: %[[VAL_0:.*]]: !fir.box<!fir.array<?xi32>> {fir.bindc_name = "y"}) {
+! CHECK: %[[VAL_1:.*]] = fir.alloca i32 {bindc_name = "i", uniq_name = "_QFreduction_max_intEi"}
+! CHECK: %[[VAL_2:.*]]:2 = hlfir.declare %[[VAL_1]] {uniq_name = "_QFreduction_max_intEi"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
+! CHECK: %[[VAL_3:.*]] = fir.alloca i32 {bindc_name = "x", uniq_name = "_QFreduction_max_intEx"}
+! CHECK: %[[VAL_4:.*]]:2 = hlfir.declare %[[VAL_3]] {uniq_name = "_QFreduction_max_intEx"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
+! CHECK: %[[VAL_5:.*]]:2 = hlfir.declare %[[VAL_0]] {uniq_name = "_QFreduction_max_intEy"} : (!fir.box<!fir.array<?xi32>>) -> (!fir.box<!fir.array<?xi32>>, !fir.box<!fir.array<?xi32>>)
+! CHECK: %[[VAL_6:.*]] = arith.constant 0 : i32
+! CHECK: hlfir.assign %[[VAL_6]] to %[[VAL_4]]#0 : i32, !fir.ref<i32>
+! CHECK: omp.parallel {
+! CHECK: %[[VAL_7:.*]] = fir.alloca i32 {adapt.valuebyref, pinned}
+! CHECK: %[[VAL_8:.*]]:2 = hlfir.declare %[[VAL_7]] {uniq_name = "_QFreduction_max_intEi"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
+! CHECK: %[[VAL_9:.*]] = arith.constant 1 : i32
+! CHECK: %[[VAL_10:.*]] = arith.constant 100 : i32
+! CHECK: %[[VAL_11:.*]] = arith.constant 1 : i32
+! CHECK: omp.wsloop reduction(@max_i_32 %[[VAL_4]]#0 -> %[[VAL_12:.*]] : !fir.ref<i32>) for (%[[VAL_13:.*]]) : i32 = (%[[VAL_9]]) to (%[[VAL_10]]) inclusive step (%[[VAL_11]]) {
+! CHECK: fir.store %[[VAL_13]] to %[[VAL_8]]#1 : !fir.ref<i32>
+! CHECK: %[[VAL_14:.*]]:2 = hlfir.declare %[[VAL_12]] {uniq_name = "_QFreduction_max_intEx"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
+! CHECK: %[[VAL_15:.*]] = fir.load %[[VAL_8]]#0 : !fir.ref<i32>
+! CHECK: %[[VAL_16:.*]] = fir.convert %[[VAL_15]] : (i32) -> i64
+! CHECK: %[[VAL_17:.*]] = hlfir.designate %[[VAL_5]]#0 (%[[VAL_16]]) : (!fir.box<!fir.array<?xi32>>, i64) -> !fir.ref<i32>
+! CHECK: %[[VAL_18:.*]] = fir.load %[[VAL_14]]#0 : !fir.ref<i32>
+! CHECK: %[[VAL_19:.*]] = fir.load %[[VAL_17]] : !fir.ref<i32>
+! CHECK: %[[VAL_20:.*]] = arith.cmpi sgt, %[[VAL_18]], %[[VAL_19]] : i32
+! CHECK: %[[VAL_21:.*]] = arith.select %[[VAL_20]], %[[VAL_18]], %[[VAL_19]] : i32
+! CHECK: hlfir.assign %[[VAL_21]] to %[[VAL_14]]#0 : i32, !fir.ref<i32>
+! CHECK: omp.yield
+! CHECK: omp.terminator
+
+! CHECK-LABEL: func.func @_QPreduction_max_real(
+! CHECK-SAME: %[[VAL_0:.*]]: !fir.box<!fir.array<?xf32>> {fir.bindc_name = "y"}) {
+! CHECK: %[[VAL_1:.*]] = fir.alloca i32 {bindc_name = "i", uniq_name = "_QFreduction_max_realEi"}
+! CHECK: %[[VAL_2:.*]]:2 = hlfir.declare %[[VAL_1]] {uniq_name = "_QFreduction_max_realEi"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
+! CHECK: %[[VAL_3:.*]] = fir.alloca f32 {bindc_name = "x", uniq_name = "_QFreduction_max_realEx"}
+! CHECK: %[[VAL_4:.*]]:2 = hlfir.declare %[[VAL_3]] {uniq_name = "_QFreduction_max_realEx"} : (!fir.ref<f32>) -> (!fir.ref<f32>, !fir.ref<f32>)
+! CHECK: %[[VAL_5:.*]]:2 = hlfir.declare %[[VAL_0]] {uniq_name = "_QFreduction_max_realEy"} : (!fir.box<!fir.array<?xf32>>) -> (!fir.box<!fir.array<?xf32>>, !fir.box<!fir.array<?xf32>>)
+! CHECK: %[[VAL_6:.*]] = arith.constant 0.000000e+00 : f32
+! CHECK: hlfir.assign %[[VAL_6]] to %[[VAL_4]]#0 : f32, !fir.ref<f32>
+! CHECK: omp.parallel {
+! CHECK: %[[VAL_7:.*]] = fir.alloca i32 {adapt.valuebyref, pinned}
+! CHECK: %[[VAL_8:.*]]:2 = hlfir.declare %[[VAL_7]] {uniq_name = "_QFreduction_max_realEi"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
+! CHECK: %[[VAL_9:.*]] = arith.constant 1 : i32
+! CHECK: %[[VAL_10:.*]] = arith.constant 100 : i32
+! CHECK: %[[VAL_11:.*]] = arith.constant 1 : i32
+! CHECK: omp.wsloop reduction(@max_f_32 %[[VAL_4]]#0 -> %[[VAL_12:.*]] : !fir.ref<f32>) for (%[[VAL_13:.*]]) : i32 = (%[[VAL_9]]) to (%[[VAL_10]]) inclusive step (%[[VAL_11]]) {
+! CHECK: fir.store %[[VAL_13]] to %[[VAL_8]]#1 : !fir.ref<i32>
+! CHECK: %[[VAL_14:.*]]:2 = hlfir.declare %[[VAL_12]] {uniq_name = "_QFreduction_max_realEx"} : (!fir.ref<f32>) -> (!fir.ref<f32>, !fir.ref<f32>)
+! CHECK: %[[VAL_15:.*]] = fir.load %[[VAL_8]]#0 : !fir.ref<i32>
+! CHECK: %[[VAL_16:.*]] = fir.convert %[[VAL_15]] : (i32) -> i64
+! CHECK: %[[VAL_17:.*]] = hlfir.designate %[[VAL_5]]#0 (%[[VAL_16]]) : (!fir.box<!fir.array<?xf32>>, i64) -> !fir.ref<f32>
+! CHECK: %[[VAL_18:.*]] = fir.load %[[VAL_17]] : !fir.ref<f32>
+! CHECK: %[[VAL_19:.*]] = fir.load %[[VAL_14]]#0 : !fir.ref<f32>
+! CHECK: %[[VAL_20:.*]] = arith.cmpf ogt, %[[VAL_18]], %[[VAL_19]] fastmath<contract> : f32
+! CHECK: %[[VAL_21:.*]] = arith.select %[[VAL_20]], %[[VAL_18]], %[[VAL_19]] : f32
+! CHECK: hlfir.assign %[[VAL_21]] to %[[VAL_14]]#0 : f32, !fir.ref<f32>
+! CHECK: omp.yield
+! CHECK: omp.terminator
+! CHECK: omp.parallel {
+! CHECK: %[[VAL_30:.*]] = fir.alloca i32 {adapt.valuebyref, pinned}
+! CHECK: %[[VAL_31:.*]]:2 = hlfir.declare %[[VAL_30]] {uniq_name = "_QFreduction_max_realEi"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
+! CHECK: %[[VAL_32:.*]] = arith.constant 1 : i32
+! CHECK: %[[VAL_33:.*]] = arith.constant 100 : i32
+! CHECK: %[[VAL_34:.*]] = arith.constant 1 : i32
+! CHECK: omp.wsloop reduction(@max_f_32 %[[VAL_4]]#0 -> %[[VAL_35:.*]] : !fir.ref<f32>) for (%[[VAL_36:.*]]) : i32 = (%[[VAL_32]]) to (%[[VAL_33]]) inclusive step (%[[VAL_34]]) {
+! CHECK: fir.store %[[VAL_36]] to %[[VAL_31]]#1 : !fir.ref<i32>
+! CHECK: %[[VAL_37:.*]]:2 = hlfir.declare %[[VAL_35]] {uniq_name = "_QFreduction_max_realEx"} : (!fir.ref<f32>) -> (!fir.ref<f32>, !fir.ref<f32>)
+! CHECK: %[[VAL_38:.*]] = fir.load %[[VAL_31]]#0 : !fir.ref<i32>
+! CHECK: %[[VAL_39:.*]] = fir.convert %[[VAL_38]] : (i32) -> i64
+! CHECK: %[[VAL_40:.*]] = hlfir.designate %[[VAL_5]]#0 (%[[VAL_39]]) : (!fir.box<!fir.array<?xf32>>, i64) -> !fir.ref<f32>
+! CHECK: %[[VAL_41:.*]] = fir.load %[[VAL_40]] : !fir.ref<f32>
+! CHECK: %[[VAL_42:.*]] = fir.load %[[VAL_37]]#0 : !fir.ref<f32>
+! CHECK: %[[VAL_43:.*]] = arith.cmpf ogt, %[[VAL_41]], %[[VAL_42]] fastmath<contract> : f32
+! CHECK: fir.if %[[VAL_43]] {
+! CHECK: %[[VAL_44:.*]] = fir.load %[[VAL_31]]#0 : !fir.ref<i32>
+! CHECK: %[[VAL_45:.*]] = fir.convert %[[VAL_44]] : (i32) -> i64
+! CHECK: %[[VAL_46:.*]] = hlfir.designate %[[VAL_5]]#0 (%[[VAL_45]]) : (!fir.box<!fir.array<?xf32>>, i64) -> !fir.ref<f32>
+! CHECK: %[[VAL_47:.*]] = fir.load %[[VAL_46]] : !fir.ref<f32>
+! CHECK: hlfir.assign %[[VAL_47]] to %[[VAL_37]]#0 : f32, !fir.ref<f32>
+! CHECK: } else {
+! CHECK: }
+! CHECK: omp.yield
+! CHECK: omp.terminator
-!CHECK-LABEL: @_QPreduction_max_int
-!CHECK-SAME: %[[Y_BOX:.*]]: !fir.box<!fir.array<?xi32>>
-!CHECK: %[[X_REF:.*]] = fir.alloca i32 {bindc_name = "x", uniq_name = "_QFreduction_max_intEx"}
-!CHECK: %[[X_DECL:.*]]:2 = hlfir.declare %[[X_REF]] {uniq_name = "_QFreduction_max_intEx"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
-!CHECK: %[[Y_DECL:.*]]:2 = hlfir.declare %[[Y_BOX]] {uniq_name = "_QFreduction_max_intEy"} : (!fir.box<!fir.array<?xi32>>) -> (!fir.box<!fir.array<?xi32>>, !fir.box<!fir.array<?xi32>>)
-!CHECK: omp.parallel
-!CHECK: %[[I_REF:.*]] = fir.alloca i32 {adapt.valuebyref, pinned}
-!CHECK: %[[I_DECL:.*]]:2 = hlfir.declare %[[I_REF]] {uniq_name = "_QFreduction_max_intEi"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
-!CHECK: omp.wsloop reduction(@[[MAX_DECLARE_I]] -> %[[X_DECL]]#0 : !fir.ref<i32>) for
-!CHECK: fir.store %arg1 to %[[I_DECL]]#1 : !fir.ref<i32>
-!CHECK: %[[I_32:.*]] = fir.load %[[I_DECL]]#0 : !fir.ref<i32>
-!CHECK: %[[I_64:.*]] = fir.convert %[[I_32]] : (i32) -> i64
-!CHECK: %[[Y_I_REF:.*]] = hlfir.designate %[[Y_DECL]]#0 (%[[I_64]]) : (!fir.box<!fir.array<?xi32>>, i64) -> !fir.ref<i32>
-!CHECK: %[[Y_I_VAL:.*]] = fir.load %[[Y_I_REF]] : !fir.ref<i32>
-!CHECK: omp.reduction %[[Y_I_VAL]], %[[X_DECL]]#0 : i32, !fir.ref<i32>
-!CHECK: omp.yield
-!CHECK: omp.terminator
-!CHECK-LABEL: @_QPreduction_max_real
-!CHECK-SAME: %[[Y_BOX:.*]]: !fir.box<!fir.array<?xf32>>
-!CHECK: %[[X_REF:.*]] = fir.alloca f32 {bindc_name = "x", uniq_name = "_QFreduction_max_realEx"}
-!CHECK: %[[X_DECL:.*]]:2 = hlfir.declare %[[X_REF]] {uniq_name = "_QFreduction_max_realEx"} : (!fir.ref<f32>) -> (!fir.ref<f32>, !fir.ref<f32>)
-!CHECK: %[[Y_DECL:.*]]:2 = hlfir.declare %[[Y_BOX]] {uniq_name = "_QFreduction_max_realEy"} : (!fir.box<!fir.array<?xf32>>) -> (!fir.box<!fir.array<?xf32>>, !fir.box<!fir.array<?xf32>>)
-!CHECK: omp.parallel
-!CHECK: %[[I_REF:.*]] = fir.alloca i32 {adapt.valuebyref, pinned}
-!CHECK: %[[I_DECL:.*]]:2 = hlfir.declare %[[I_REF]] {uniq_name = "_QFreduction_max_realEi"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
-!CHECK: omp.wsloop reduction(@[[MAX_DECLARE_F]] -> %[[X_DECL]]#0 : !fir.ref<f32>) for
-!CHECK: fir.store %arg1 to %[[I_DECL]]#1 : !fir.ref<i32>
-!CHECK: %[[I_32:.*]] = fir.load %[[I_DECL]]#0 : !fir.ref<i32>
-!CHECK: %[[I_64:.*]] = fir.convert %[[I_32]] : (i32) -> i64
-!CHECK: %[[Y_I_REF:.*]] = hlfir.designate %[[Y_DECL]]#0 (%[[I_64]]) : (!fir.box<!fir.array<?xf32>>, i64) -> !fir.ref<f32>
-!CHECK: %[[Y_I_VAL:.*]] = fir.load %[[Y_I_REF]] : !fir.ref<f32>
-!CHECK: omp.yield
-!CHECK: omp.terminator
subroutine reduction_max_int(y)
integer :: x, y(:)
@@ -80,7 +138,6 @@ subroutine reduction_max_real(y)
!$omp parallel
!$omp do reduction(max:x)
do i=1, 100
- !CHECK-NOT: omp.reduction
if (y(i) .gt. x) x = y(i)
end do
!$omp end do
diff --git a/flang/test/Lower/OpenMP/wsloop-reduction-min.f90 b/flang/test/Lower/OpenMP/wsloop-reduction-min.f90
index af7f718..3aa9001 100644
--- a/flang/test/Lower/OpenMP/wsloop-reduction-min.f90
+++ b/flang/test/Lower/OpenMP/wsloop-reduction-min.f90
@@ -1,56 +1,116 @@
! RUN: bbc -emit-hlfir -fopenmp -o - %s 2>&1 | FileCheck %s
! RUN: %flang_fc1 -emit-hlfir -fopenmp -o - %s 2>&1 | FileCheck %s
-!CHECK: omp.reduction.declare @[[MIN_DECLARE_F:.*]] : f32 init {
-!CHECK: %[[MAXIMUM_VAL_F:.*]] = arith.constant 3.40282347E+38 : f32
-!CHECK: omp.yield(%[[MAXIMUM_VAL_F]] : f32)
-!CHECK: combiner
-!CHECK: ^bb0(%[[ARG0_F:.*]]: f32, %[[ARG1_F:.*]]: f32):
-!CHECK: %[[COMB_VAL_F:.*]] = arith.minimumf %[[ARG0_F]], %[[ARG1_F]] {{.*}}: f32
-!CHECK: omp.yield(%[[COMB_VAL_F]] : f32)
+! NOTE: Assertions have been autogenerated by utils/generate-test-checks.py
-!CHECK: omp.reduction.declare @[[MIN_DECLARE_I:.*]] : i32 init {
-!CHECK: %[[MAXIMUM_VAL_I:.*]] = arith.constant 2147483647 : i32
-!CHECK: omp.yield(%[[MAXIMUM_VAL_I]] : i32)
-!CHECK: combiner
-!CHECK: ^bb0(%[[ARG0_I:.*]]: i32, %[[ARG1_I:.*]]: i32):
-!CHECK: %[[COMB_VAL_I:.*]] = arith.minsi %[[ARG0_I]], %[[ARG1_I]] : i32
-!CHECK: omp.yield(%[[COMB_VAL_I]] : i32)
+! CHECK-LABEL: omp.reduction.declare @min_f_32 : f32 init {
+! CHECK: ^bb0(%[[VAL_0:.*]]: f32):
+! CHECK: %[[VAL_1:.*]] = arith.constant 3.40282347E+38 : f32
+! CHECK: omp.yield(%[[VAL_1]] : f32)
+
+! CHECK-LABEL: } combiner {
+! CHECK: ^bb0(%[[VAL_0:.*]]: f32, %[[VAL_1:.*]]: f32):
+! CHECK: %[[VAL_2:.*]] = arith.minimumf %[[VAL_0]], %[[VAL_1]] fastmath<contract> : f32
+! CHECK: omp.yield(%[[VAL_2]] : f32)
+! CHECK: }
+
+! CHECK-LABEL: omp.reduction.declare @min_i_32 : i32 init {
+! CHECK: ^bb0(%[[VAL_0:.*]]: i32):
+! CHECK: %[[VAL_1:.*]] = arith.constant 2147483647 : i32
+! CHECK: omp.yield(%[[VAL_1]] : i32)
+
+! CHECK-LABEL: } combiner {
+! CHECK: ^bb0(%[[VAL_0:.*]]: i32, %[[VAL_1:.*]]: i32):
+! CHECK: %[[VAL_2:.*]] = arith.minsi %[[VAL_0]], %[[VAL_1]] : i32
+! CHECK: omp.yield(%[[VAL_2]] : i32)
+! CHECK: }
+
+! CHECK-LABEL: func.func @_QPreduction_min_int(
+! CHECK-SAME: %[[VAL_0:.*]]: !fir.box<!fir.array<?xi32>> {fir.bindc_name = "y"}) {
+! CHECK: %[[VAL_1:.*]] = fir.alloca i32 {bindc_name = "i", uniq_name = "_QFreduction_min_intEi"}
+! CHECK: %[[VAL_2:.*]]:2 = hlfir.declare %[[VAL_1]] {uniq_name = "_QFreduction_min_intEi"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
+! CHECK: %[[VAL_3:.*]] = fir.alloca i32 {bindc_name = "x", uniq_name = "_QFreduction_min_intEx"}
+! CHECK: %[[VAL_4:.*]]:2 = hlfir.declare %[[VAL_3]] {uniq_name = "_QFreduction_min_intEx"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
+! CHECK: %[[VAL_5:.*]]:2 = hlfir.declare %[[VAL_0]] {uniq_name = "_QFreduction_min_intEy"} : (!fir.box<!fir.array<?xi32>>) -> (!fir.box<!fir.array<?xi32>>, !fir.box<!fir.array<?xi32>>)
+! CHECK: %[[VAL_6:.*]] = arith.constant 0 : i32
+! CHECK: hlfir.assign %[[VAL_6]] to %[[VAL_4]]#0 : i32, !fir.ref<i32>
+! CHECK: omp.parallel {
+! CHECK: %[[VAL_7:.*]] = fir.alloca i32 {adapt.valuebyref, pinned}
+! CHECK: %[[VAL_8:.*]]:2 = hlfir.declare %[[VAL_7]] {uniq_name = "_QFreduction_min_intEi"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
+! CHECK: %[[VAL_9:.*]] = arith.constant 1 : i32
+! CHECK: %[[VAL_10:.*]] = arith.constant 100 : i32
+! CHECK: %[[VAL_11:.*]] = arith.constant 1 : i32
+! CHECK: omp.wsloop reduction(@min_i_32 %[[VAL_4]]#0 -> %[[VAL_12:.*]] : !fir.ref<i32>) for (%[[VAL_13:.*]]) : i32 = (%[[VAL_9]]) to (%[[VAL_10]]) inclusive step (%[[VAL_11]]) {
+! CHECK: fir.store %[[VAL_13]] to %[[VAL_8]]#1 : !fir.ref<i32>
+! CHECK: %[[VAL_14:.*]]:2 = hlfir.declare %[[VAL_12]] {uniq_name = "_QFreduction_min_intEx"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
+! CHECK: %[[VAL_15:.*]] = fir.load %[[VAL_8]]#0 : !fir.ref<i32>
+! CHECK: %[[VAL_16:.*]] = fir.convert %[[VAL_15]] : (i32) -> i64
+! CHECK: %[[VAL_17:.*]] = hlfir.designate %[[VAL_5]]#0 (%[[VAL_16]]) : (!fir.box<!fir.array<?xi32>>, i64) -> !fir.ref<i32>
+! CHECK: %[[VAL_18:.*]] = fir.load %[[VAL_14]]#0 : !fir.ref<i32>
+! CHECK: %[[VAL_19:.*]] = fir.load %[[VAL_17]] : !fir.ref<i32>
+! CHECK: %[[VAL_20:.*]] = arith.cmpi slt, %[[VAL_18]], %[[VAL_19]] : i32
+! CHECK: %[[VAL_21:.*]] = arith.select %[[VAL_20]], %[[VAL_18]], %[[VAL_19]] : i32
+! CHECK: hlfir.assign %[[VAL_21]] to %[[VAL_14]]#0 : i32, !fir.ref<i32>
+! CHECK: omp.yield
+! CHECK: omp.terminator
+
+! CHECK-LABEL: func.func @_QPreduction_min_real(
+! CHECK-SAME: %[[VAL_0:.*]]: !fir.box<!fir.array<?xf32>> {fir.bindc_name = "y"}) {
+! CHECK: %[[VAL_1:.*]] = fir.alloca i32 {bindc_name = "i", uniq_name = "_QFreduction_min_realEi"}
+! CHECK: %[[VAL_2:.*]]:2 = hlfir.declare %[[VAL_1]] {uniq_name = "_QFreduction_min_realEi"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
+! CHECK: %[[VAL_3:.*]] = fir.alloca f32 {bindc_name = "x", uniq_name = "_QFreduction_min_realEx"}
+! CHECK: %[[VAL_4:.*]]:2 = hlfir.declare %[[VAL_3]] {uniq_name = "_QFreduction_min_realEx"} : (!fir.ref<f32>) -> (!fir.ref<f32>, !fir.ref<f32>)
+! CHECK: %[[VAL_5:.*]]:2 = hlfir.declare %[[VAL_0]] {uniq_name = "_QFreduction_min_realEy"} : (!fir.box<!fir.array<?xf32>>) -> (!fir.box<!fir.array<?xf32>>, !fir.box<!fir.array<?xf32>>)
+! CHECK: %[[VAL_6:.*]] = arith.constant 0.000000e+00 : f32
+! CHECK: hlfir.assign %[[VAL_6]] to %[[VAL_4]]#0 : f32, !fir.ref<f32>
+! CHECK: omp.parallel {
+! CHECK: %[[VAL_7:.*]] = fir.alloca i32 {adapt.valuebyref, pinned}
+! CHECK: %[[VAL_8:.*]]:2 = hlfir.declare %[[VAL_7]] {uniq_name = "_QFreduction_min_realEi"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
+! CHECK: %[[VAL_9:.*]] = arith.constant 1 : i32
+! CHECK: %[[VAL_10:.*]] = arith.constant 100 : i32
+! CHECK: %[[VAL_11:.*]] = arith.constant 1 : i32
+! CHECK: omp.wsloop reduction(@min_f_32 %[[VAL_4]]#0 -> %[[VAL_12:.*]] : !fir.ref<f32>) for (%[[VAL_13:.*]]) : i32 = (%[[VAL_9]]) to (%[[VAL_10]]) inclusive step (%[[VAL_11]]) {
+! CHECK: fir.store %[[VAL_13]] to %[[VAL_8]]#1 : !fir.ref<i32>
+! CHECK: %[[VAL_14:.*]]:2 = hlfir.declare %[[VAL_12]] {uniq_name = "_QFreduction_min_realEx"} : (!fir.ref<f32>) -> (!fir.ref<f32>, !fir.ref<f32>)
+! CHECK: %[[VAL_15:.*]] = fir.load %[[VAL_8]]#0 : !fir.ref<i32>
+! CHECK: %[[VAL_16:.*]] = fir.convert %[[VAL_15]] : (i32) -> i64
+! CHECK: %[[VAL_17:.*]] = hlfir.designate %[[VAL_5]]#0 (%[[VAL_16]]) : (!fir.box<!fir.array<?xf32>>, i64) -> !fir.ref<f32>
+! CHECK: %[[VAL_18:.*]] = fir.load %[[VAL_17]] : !fir.ref<f32>
+! CHECK: %[[VAL_19:.*]] = fir.load %[[VAL_14]]#0 : !fir.ref<f32>
+! CHECK: %[[VAL_20:.*]] = arith.cmpf olt, %[[VAL_18]], %[[VAL_19]] fastmath<contract> : f32
+! CHECK: %[[VAL_21:.*]] = arith.select %[[VAL_20]], %[[VAL_18]], %[[VAL_19]] : f32
+! CHECK: hlfir.assign %[[VAL_21]] to %[[VAL_14]]#0 : f32, !fir.ref<f32>
+! CHECK: omp.yield
+! CHECK: }
+! CHECK: omp.terminator
+! CHECK: }
+! CHECK: omp.parallel {
+! CHECK: %[[VAL_30:.*]] = fir.alloca i32 {adapt.valuebyref, pinned}
+! CHECK: %[[VAL_31:.*]]:2 = hlfir.declare %[[VAL_30]] {uniq_name = "_QFreduction_min_realEi"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
+! CHECK: %[[VAL_32:.*]] = arith.constant 1 : i32
+! CHECK: %[[VAL_33:.*]] = arith.constant 100 : i32
+! CHECK: %[[VAL_34:.*]] = arith.constant 1 : i32
+! CHECK: omp.wsloop reduction(@min_f_32 %[[VAL_4]]#0 -> %[[VAL_35:.*]] : !fir.ref<f32>) for (%[[VAL_36:.*]]) : i32 = (%[[VAL_32]]) to (%[[VAL_33]]) inclusive step (%[[VAL_34]]) {
+! CHECK: fir.store %[[VAL_36]] to %[[VAL_31]]#1 : !fir.ref<i32>
+! CHECK: %[[VAL_37:.*]]:2 = hlfir.declare %[[VAL_35]] {uniq_name = "_QFreduction_min_realEx"} : (!fir.ref<f32>) -> (!fir.ref<f32>, !fir.ref<f32>)
+! CHECK: %[[VAL_38:.*]] = fir.load %[[VAL_31]]#0 : !fir.ref<i32>
+! CHECK: %[[VAL_39:.*]] = fir.convert %[[VAL_38]] : (i32) -> i64
+! CHECK: %[[VAL_40:.*]] = hlfir.designate %[[VAL_5]]#0 (%[[VAL_39]]) : (!fir.box<!fir.array<?xf32>>, i64) -> !fir.ref<f32>
+! CHECK: %[[VAL_41:.*]] = fir.load %[[VAL_40]] : !fir.ref<f32>
+! CHECK: %[[VAL_42:.*]] = fir.load %[[VAL_37]]#0 : !fir.ref<f32>
+! CHECK: %[[VAL_43:.*]] = arith.cmpf ogt, %[[VAL_41]], %[[VAL_42]] fastmath<contract> : f32
+! CHECK: fir.if %[[VAL_43]] {
+! CHECK: %[[VAL_44:.*]] = fir.load %[[VAL_31]]#0 : !fir.ref<i32>
+! CHECK: %[[VAL_45:.*]] = fir.convert %[[VAL_44]] : (i32) -> i64
+! CHECK: %[[VAL_46:.*]] = hlfir.designate %[[VAL_5]]#0 (%[[VAL_45]]) : (!fir.box<!fir.array<?xf32>>, i64) -> !fir.ref<f32>
+! CHECK: %[[VAL_47:.*]] = fir.load %[[VAL_46]] : !fir.ref<f32>
+! CHECK: hlfir.assign %[[VAL_47]] to %[[VAL_37]]#0 : f32, !fir.ref<f32>
+! CHECK: } else {
+! CHECK: }
+! CHECK: omp.yield
+! CHECK: omp.terminator
-!CHECK-LABEL: @_QPreduction_min_int
-!CHECK-SAME: %[[Y_BOX:.*]]: !fir.box<!fir.array<?xi32>>
-!CHECK: %[[X_REF:.*]] = fir.alloca i32 {bindc_name = "x", uniq_name = "_QFreduction_min_intEx"}
-!CHECK: %[[X_DECL:.*]]:2 = hlfir.declare %[[X_REF]] {uniq_name = "_QFreduction_min_intEx"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
-!CHECK: %[[Y_DECL:.*]]:2 = hlfir.declare %[[Y_BOX]] {uniq_name = "_QFreduction_min_intEy"} : (!fir.box<!fir.array<?xi32>>) -> (!fir.box<!fir.array<?xi32>>, !fir.box<!fir.array<?xi32>>)
-!CHECK: omp.parallel
-!CHECK: %[[I_REF:.*]] = fir.alloca i32 {adapt.valuebyref, pinned}
-!CHECK: %[[I_DECL:.*]]:2 = hlfir.declare %[[I_REF]] {uniq_name = "_QFreduction_min_intEi"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
-!CHECK: omp.wsloop reduction(@[[MIN_DECLARE_I]] -> %[[X_DECL]]#0 : !fir.ref<i32>) for
-!CHECK: fir.store %arg1 to %[[I_DECL]]#1 : !fir.ref<i32>
-!CHECK: %[[I_32:.*]] = fir.load %[[I_DECL]]#0 : !fir.ref<i32>
-!CHECK: %[[I_64:.*]] = fir.convert %[[I_32]] : (i32) -> i64
-!CHECK: %[[Y_I_REF:.*]] = hlfir.designate %[[Y_DECL]]#0 (%[[I_64]]) : (!fir.box<!fir.array<?xi32>>, i64) -> !fir.ref<i32>
-!CHECK: %[[Y_I_VAL:.*]] = fir.load %[[Y_I_REF]] : !fir.ref<i32>
-!CHECK: omp.reduction %[[Y_I_VAL]], %[[X_DECL]]#0 : i32, !fir.ref<i32>
-!CHECK: omp.yield
-!CHECK: omp.terminator
-!CHECK-LABEL: @_QPreduction_min_real
-!CHECK-SAME: %[[Y_BOX:.*]]: !fir.box<!fir.array<?xf32>>
-!CHECK: %[[X_REF:.*]] = fir.alloca f32 {bindc_name = "x", uniq_name = "_QFreduction_min_realEx"}
-!CHECK: %[[X_DECL:.*]]:2 = hlfir.declare %[[X_REF]] {uniq_name = "_QFreduction_min_realEx"} : (!fir.ref<f32>) -> (!fir.ref<f32>, !fir.ref<f32>)
-!CHECK: %[[Y_DECL:.*]]:2 = hlfir.declare %[[Y_BOX]] {uniq_name = "_QFreduction_min_realEy"} : (!fir.box<!fir.array<?xf32>>) -> (!fir.box<!fir.array<?xf32>>, !fir.box<!fir.array<?xf32>>)
-!CHECK: omp.parallel
-!CHECK: %[[I_REF:.*]] = fir.alloca i32 {adapt.valuebyref, pinned}
-!CHECK: %[[I_DECL:.*]]:2 = hlfir.declare %[[I_REF]] {uniq_name = "_QFreduction_min_realEi"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
-!CHECK: omp.wsloop reduction(@[[MIN_DECLARE_F]] -> %[[X_DECL]]#0 : !fir.ref<f32>) for
-!CHECK: fir.store %arg1 to %[[I_DECL]]#1 : !fir.ref<i32>
-!CHECK: %[[I_32:.*]] = fir.load %[[I_DECL]]#0 : !fir.ref<i32>
-!CHECK: %[[I_64:.*]] = fir.convert %[[I_32]] : (i32) -> i64
-!CHECK: %[[Y_I_REF:.*]] = hlfir.designate %[[Y_DECL]]#0 (%[[I_64]]) : (!fir.box<!fir.array<?xf32>>, i64) -> !fir.ref<f32>
-!CHECK: %[[Y_I_VAL:.*]] = fir.load %[[Y_I_REF]] : !fir.ref<f32>
-!CHECK: omp.yield
-!CHECK: omp.terminator
subroutine reduction_min_int(y)
integer :: x, y(:)
@@ -80,7 +140,6 @@ subroutine reduction_min_real(y)
!$omp parallel
!$omp do reduction(min:x)
do i=1, 100
- !CHECK-NOT: omp.reduction
if (y(i) .gt. x) x = y(i)
end do
!$omp end do
diff --git a/flang/test/Lower/OpenMP/wsloop-reduction-mul.f90 b/flang/test/Lower/OpenMP/wsloop-reduction-mul.f90
index 7dc8aee..4774fba 100644
--- a/flang/test/Lower/OpenMP/wsloop-reduction-mul.f90
+++ b/flang/test/Lower/OpenMP/wsloop-reduction-mul.f90
@@ -1,68 +1,76 @@
! RUN: bbc -emit-hlfir -fopenmp %s -o - | FileCheck %s
! RUN: %flang_fc1 -emit-hlfir -fopenmp %s -o - | FileCheck %s
-!CHECK-LABEL: omp.reduction.declare
-!CHECK-SAME: @[[RED_F64_NAME:.*]] : f64 init {
-!CHECK: ^bb0(%{{.*}}: f64):
-!CHECK: %[[C0_1:.*]] = arith.constant 1.000000e+00 : f64
-!CHECK: omp.yield(%[[C0_1]] : f64)
-!CHECK: } combiner {
-!CHECK: ^bb0(%[[ARG0:.*]]: f64, %[[ARG1:.*]]: f64):
-!CHECK: %[[RES:.*]] = arith.mulf %[[ARG0]], %[[ARG1]] {{.*}}: f64
-!CHECK: omp.yield(%[[RES]] : f64)
-!CHECK: }
-!CHECK-LABEL: omp.reduction.declare
-!CHECK-SAME: @[[RED_I64_NAME:.*]] : i64 init {
-!CHECK: ^bb0(%{{.*}}: i64):
-!CHECK: %[[C1_1:.*]] = arith.constant 1 : i64
-!CHECK: omp.yield(%[[C1_1]] : i64)
-!CHECK: } combiner {
-!CHECK: ^bb0(%[[ARG0:.*]]: i64, %[[ARG1:.*]]: i64):
-!CHECK: %[[RES:.*]] = arith.muli %[[ARG0]], %[[ARG1]] : i64
-!CHECK: omp.yield(%[[RES]] : i64)
-!CHECK: }
+! NOTE: Assertions have been autogenerated by utils/generate-test-checks.py
-!CHECK-LABEL: omp.reduction.declare
-!CHECK-SAME: @[[RED_F32_NAME:.*]] : f32 init {
-!CHECK: ^bb0(%{{.*}}: f32):
-!CHECK: %[[C0_1:.*]] = arith.constant 1.000000e+00 : f32
-!CHECK: omp.yield(%[[C0_1]] : f32)
-!CHECK: } combiner {
-!CHECK: ^bb0(%[[ARG0:.*]]: f32, %[[ARG1:.*]]: f32):
-!CHECK: %[[RES:.*]] = arith.mulf %[[ARG0]], %[[ARG1]] {{.*}}: f32
-!CHECK: omp.yield(%[[RES]] : f32)
-!CHECK: }
+! CHECK-LABEL: omp.reduction.declare @multiply_reduction_f_64 : f64 init {
+! CHECK: ^bb0(%[[VAL_0:.*]]: f64):
+! CHECK: %[[VAL_1:.*]] = arith.constant 1.000000e+00 : f64
+! CHECK: omp.yield(%[[VAL_1]] : f64)
-!CHECK-LABEL: omp.reduction.declare
-!CHECK-SAME: @[[RED_I32_NAME:.*]] : i32 init {
-!CHECK: ^bb0(%{{.*}}: i32):
-!CHECK: %[[C1_1:.*]] = arith.constant 1 : i32
-!CHECK: omp.yield(%[[C1_1]] : i32)
-!CHECK: } combiner {
-!CHECK: ^bb0(%[[ARG0:.*]]: i32, %[[ARG1:.*]]: i32):
-!CHECK: %[[RES:.*]] = arith.muli %[[ARG0]], %[[ARG1]] : i32
-!CHECK: omp.yield(%[[RES]] : i32)
-!CHECK: }
+! CHECK-LABEL: } combiner {
+! CHECK: ^bb0(%[[VAL_0:.*]]: f64, %[[VAL_1:.*]]: f64):
+! CHECK: %[[VAL_2:.*]] = arith.mulf %[[VAL_0]], %[[VAL_1]] fastmath<contract> : f64
+! CHECK: omp.yield(%[[VAL_2]] : f64)
+! CHECK: }
-!CHECK-LABEL: func.func @_QPsimple_int_reduction
-!CHECK: %[[XREF:.*]] = fir.alloca i32 {bindc_name = "x", uniq_name = "_QFsimple_int_reductionEx"}
-!CHECK: %[[X_DECL:.*]]:2 = hlfir.declare %[[XREF]] {uniq_name = "_QFsimple_int_reductionEx"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
-!CHECK: %[[C1_2:.*]] = arith.constant 1 : i32
-!CHECK: hlfir.assign %[[C1_2]] to %[[X_DECL]]#0 : i32, !fir.ref<i32>
-!CHECK: omp.parallel
-!CHECK: %[[I_PVT_REF:.*]] = fir.alloca i32 {adapt.valuebyref, pinned}
-!CHECK: %[[I_PVT_DECL:.*]]:2 = hlfir.declare %[[I_PVT_REF]] {uniq_name = "_QFsimple_int_reductionEi"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
-!CHECK: %[[C1_1:.*]] = arith.constant 1 : i32
-!CHECK: %[[C10:.*]] = arith.constant 10 : i32
-!CHECK: %[[C1_2:.*]] = arith.constant 1 : i32
-!CHECK: omp.wsloop reduction(@[[RED_I32_NAME]] -> %[[X_DECL]]#0 : !fir.ref<i32>) for (%[[IVAL:.*]]) : i32 = (%[[C1_1]]) to (%[[C10]]) inclusive step (%[[C1_2]])
-!CHECK: fir.store %[[IVAL]] to %[[I_PVT_DECL]]#1 : !fir.ref<i32>
-!CHECK: %[[I_PVT_VAL:.*]] = fir.load %[[I_PVT_DECL]]#0 : !fir.ref<i32>
-!CHECK: omp.reduction %[[I_PVT_VAL]], %[[X_DECL]]#0 : i32, !fir.ref<i32>
-!CHECK: omp.yield
-!CHECK: omp.terminator
-!CHECK: return
+! CHECK-LABEL: omp.reduction.declare @multiply_reduction_i_64 : i64 init {
+! CHECK: ^bb0(%[[VAL_0:.*]]: i64):
+! CHECK: %[[VAL_1:.*]] = arith.constant 1 : i64
+! CHECK: omp.yield(%[[VAL_1]] : i64)
+
+! CHECK-LABEL: } combiner {
+! CHECK: ^bb0(%[[VAL_0:.*]]: i64, %[[VAL_1:.*]]: i64):
+! CHECK: %[[VAL_2:.*]] = arith.muli %[[VAL_0]], %[[VAL_1]] : i64
+! CHECK: omp.yield(%[[VAL_2]] : i64)
+! CHECK: }
+
+! CHECK-LABEL: omp.reduction.declare @multiply_reduction_f_32 : f32 init {
+! CHECK: ^bb0(%[[VAL_0:.*]]: f32):
+! CHECK: %[[VAL_1:.*]] = arith.constant 1.000000e+00 : f32
+! CHECK: omp.yield(%[[VAL_1]] : f32)
+
+! CHECK-LABEL: } combiner {
+! CHECK: ^bb0(%[[VAL_0:.*]]: f32, %[[VAL_1:.*]]: f32):
+! CHECK: %[[VAL_2:.*]] = arith.mulf %[[VAL_0]], %[[VAL_1]] fastmath<contract> : f32
+! CHECK: omp.yield(%[[VAL_2]] : f32)
+! CHECK: }
+
+! CHECK-LABEL: omp.reduction.declare @multiply_reduction_i_32 : i32 init {
+! CHECK: ^bb0(%[[VAL_0:.*]]: i32):
+! CHECK: %[[VAL_1:.*]] = arith.constant 1 : i32
+! CHECK: omp.yield(%[[VAL_1]] : i32)
+
+! CHECK-LABEL: } combiner {
+! CHECK: ^bb0(%[[VAL_0:.*]]: i32, %[[VAL_1:.*]]: i32):
+! CHECK: %[[VAL_2:.*]] = arith.muli %[[VAL_0]], %[[VAL_1]] : i32
+! CHECK: omp.yield(%[[VAL_2]] : i32)
+! CHECK: }
+
+! CHECK-LABEL: func.func @_QPsimple_int_reduction() {
+! CHECK: %[[VAL_0:.*]] = fir.alloca i32 {bindc_name = "i", uniq_name = "_QFsimple_int_reductionEi"}
+! CHECK: %[[VAL_1:.*]]:2 = hlfir.declare %[[VAL_0]] {uniq_name = "_QFsimple_int_reductionEi"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
+! CHECK: %[[VAL_2:.*]] = fir.alloca i32 {bindc_name = "x", uniq_name = "_QFsimple_int_reductionEx"}
+! CHECK: %[[VAL_3:.*]]:2 = hlfir.declare %[[VAL_2]] {uniq_name = "_QFsimple_int_reductionEx"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
+! CHECK: %[[VAL_4:.*]] = arith.constant 1 : i32
+! CHECK: hlfir.assign %[[VAL_4]] to %[[VAL_3]]#0 : i32, !fir.ref<i32>
+! CHECK: omp.parallel {
+! CHECK: %[[VAL_5:.*]] = fir.alloca i32 {adapt.valuebyref, pinned}
+! CHECK: %[[VAL_6:.*]]:2 = hlfir.declare %[[VAL_5]] {uniq_name = "_QFsimple_int_reductionEi"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
+! CHECK: %[[VAL_7:.*]] = arith.constant 1 : i32
+! CHECK: %[[VAL_8:.*]] = arith.constant 10 : i32
+! CHECK: %[[VAL_9:.*]] = arith.constant 1 : i32
+! CHECK: omp.wsloop reduction(@multiply_reduction_i_32 %[[VAL_3]]#0 -> %[[VAL_10:.*]] : !fir.ref<i32>) for (%[[VAL_11:.*]]) : i32 = (%[[VAL_7]]) to (%[[VAL_8]]) inclusive step (%[[VAL_9]]) {
+! CHECK: fir.store %[[VAL_11]] to %[[VAL_6]]#1 : !fir.ref<i32>
+! CHECK: %[[VAL_12:.*]]:2 = hlfir.declare %[[VAL_10]] {uniq_name = "_QFsimple_int_reductionEx"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
+! CHECK: %[[VAL_13:.*]] = fir.load %[[VAL_12]]#0 : !fir.ref<i32>
+! CHECK: %[[VAL_14:.*]] = fir.load %[[VAL_6]]#0 : !fir.ref<i32>
+! CHECK: %[[VAL_15:.*]] = arith.muli %[[VAL_13]], %[[VAL_14]] : i32
+! CHECK: hlfir.assign %[[VAL_15]] to %[[VAL_12]]#0 : i32, !fir.ref<i32>
+! CHECK: omp.yield
+! CHECK: omp.terminator
+! CHECK: return
subroutine simple_int_reduction
integer :: x
@@ -76,25 +84,31 @@ subroutine simple_int_reduction
!$omp end parallel
end subroutine
-!CHECK-LABEL: func.func @_QPsimple_real_reduction
-!CHECK: %[[XREF:.*]] = fir.alloca f32 {bindc_name = "x", uniq_name = "_QFsimple_real_reductionEx"}
-!CHECK: %[[X_DECL:.*]]:2 = hlfir.declare %[[XREF]] {uniq_name = "_QFsimple_real_reductionEx"} : (!fir.ref<f32>) -> (!fir.ref<f32>, !fir.ref<f32>)
-!CHECK: %[[C0_2:.*]] = arith.constant 1.000000e+00 : f32
-!CHECK: hlfir.assign %[[C0_2]] to %[[X_DECL]]#0 : f32, !fir.ref<f32>
-!CHECK: omp.parallel
-!CHECK: %[[I_PVT_REF:.*]] = fir.alloca i32 {adapt.valuebyref, pinned}
-!CHECK: %[[I_PVT_DECL:.*]]:2 = hlfir.declare %[[I_PVT_REF]] {uniq_name = "_QFsimple_real_reductionEi"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
-!CHECK: %[[C1_1:.*]] = arith.constant 1 : i32
-!CHECK: %[[C100:.*]] = arith.constant 10 : i32
-!CHECK: %[[C1_2:.*]] = arith.constant 1 : i32
-!CHECK: omp.wsloop reduction(@[[RED_F32_NAME]] -> %[[X_DECL]]#0 : !fir.ref<f32>) for (%[[IVAL:.*]]) : i32 = (%[[C1_1]]) to (%[[C100]]) inclusive step (%[[C1_2]])
-!CHECK: fir.store %[[IVAL]] to %[[I_PVT_DECL]]#1 : !fir.ref<i32>
-!CHECK: %[[I_PVT_VAL_i32:.*]] = fir.load %[[I_PVT_DECL]]#0 : !fir.ref<i32>
-!CHECK: %[[I_PVT_VAL_f32:.*]] = fir.convert %[[I_PVT_VAL_i32]] : (i32) -> f32
-!CHECK: omp.reduction %[[I_PVT_VAL_f32]], %[[X_DECL]]#0 : f32, !fir.ref<f32>
-!CHECK: omp.yield
-!CHECK: omp.terminator
-!CHECK: return
+! CHECK-LABEL: func.func @_QPsimple_real_reduction() {
+! CHECK: %[[VAL_0:.*]] = fir.alloca i32 {bindc_name = "i", uniq_name = "_QFsimple_real_reductionEi"}
+! CHECK: %[[VAL_1:.*]]:2 = hlfir.declare %[[VAL_0]] {uniq_name = "_QFsimple_real_reductionEi"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
+! CHECK: %[[VAL_2:.*]] = fir.alloca f32 {bindc_name = "x", uniq_name = "_QFsimple_real_reductionEx"}
+! CHECK: %[[VAL_3:.*]]:2 = hlfir.declare %[[VAL_2]] {uniq_name = "_QFsimple_real_reductionEx"} : (!fir.ref<f32>) -> (!fir.ref<f32>, !fir.ref<f32>)
+! CHECK: %[[VAL_4:.*]] = arith.constant 1.000000e+00 : f32
+! CHECK: hlfir.assign %[[VAL_4]] to %[[VAL_3]]#0 : f32, !fir.ref<f32>
+! CHECK: omp.parallel {
+! CHECK: %[[VAL_5:.*]] = fir.alloca i32 {adapt.valuebyref, pinned}
+! CHECK: %[[VAL_6:.*]]:2 = hlfir.declare %[[VAL_5]] {uniq_name = "_QFsimple_real_reductionEi"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
+! CHECK: %[[VAL_7:.*]] = arith.constant 1 : i32
+! CHECK: %[[VAL_8:.*]] = arith.constant 10 : i32
+! CHECK: %[[VAL_9:.*]] = arith.constant 1 : i32
+! CHECK: omp.wsloop reduction(@multiply_reduction_f_32 %[[VAL_3]]#0 -> %[[VAL_10:.*]] : !fir.ref<f32>) for (%[[VAL_11:.*]]) : i32 = (%[[VAL_7]]) to (%[[VAL_8]]) inclusive step (%[[VAL_9]]) {
+! CHECK: fir.store %[[VAL_11]] to %[[VAL_6]]#1 : !fir.ref<i32>
+! CHECK: %[[VAL_12:.*]]:2 = hlfir.declare %[[VAL_10]] {uniq_name = "_QFsimple_real_reductionEx"} : (!fir.ref<f32>) -> (!fir.ref<f32>, !fir.ref<f32>)
+! CHECK: %[[VAL_13:.*]] = fir.load %[[VAL_12]]#0 : !fir.ref<f32>
+! CHECK: %[[VAL_14:.*]] = fir.load %[[VAL_6]]#0 : !fir.ref<i32>
+! CHECK: %[[VAL_15:.*]] = fir.convert %[[VAL_14]] : (i32) -> f32
+! CHECK: %[[VAL_16:.*]] = arith.mulf %[[VAL_13]], %[[VAL_15]] fastmath<contract> : f32
+! CHECK: hlfir.assign %[[VAL_16]] to %[[VAL_12]]#0 : f32, !fir.ref<f32>
+! CHECK: omp.yield
+! CHECK: omp.terminator
+! CHECK: return
+
subroutine simple_real_reduction
real :: x
x = 1.0
@@ -107,24 +121,30 @@ subroutine simple_real_reduction
!$omp end parallel
end subroutine
-!CHECK-LABEL: func.func @_QPsimple_int_reduction_switch_order
-!CHECK: %[[XREF:.*]] = fir.alloca i32 {bindc_name = "x", uniq_name = "_QFsimple_int_reduction_switch_orderEx"}
-!CHECK: %[[X_DECL:.*]]:2 = hlfir.declare %[[XREF]] {uniq_name = "_QFsimple_int_reduction_switch_orderEx"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
-!CHECK: %[[C1_2:.*]] = arith.constant 1 : i32
-!CHECK: hlfir.assign %[[C1_2]] to %[[X_DECL]]#0 : i32, !fir.ref<i32>
-!CHECK: omp.parallel
-!CHECK: %[[I_PVT_REF:.*]] = fir.alloca i32 {adapt.valuebyref, pinned}
-!CHECK: %[[I_PVT_DECL:.*]]:2 = hlfir.declare %[[I_PVT_REF]] {uniq_name = "_QFsimple_int_reduction_switch_orderEi"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
-!CHECK: %[[C1_1:.*]] = arith.constant 1 : i32
-!CHECK: %[[C10:.*]] = arith.constant 10 : i32
-!CHECK: %[[C1_2:.*]] = arith.constant 1 : i32
-!CHECK: omp.wsloop reduction(@[[RED_I32_NAME]] -> %[[X_DECL]]#0 : !fir.ref<i32>) for (%[[IVAL:.*]]) : i32 = (%[[C1_1]]) to (%[[C10]]) inclusive step (%[[C1_2]])
-!CHECK: fir.store %[[IVAL]] to %[[I_PVT_DECL]]#1 : !fir.ref<i32>
-!CHECK: %[[I_PVT_VAL:.*]] = fir.load %[[I_PVT_DECL]]#0 : !fir.ref<i32>
-!CHECK: omp.reduction %[[I_PVT_VAL]], %[[X_DECL]]#0 : i32, !fir.ref<i32>
-!CHECK: omp.yield
-!CHECK: omp.terminator
-!CHECK: return
+! CHECK-LABEL: func.func @_QPsimple_int_reduction_switch_order() {
+! CHECK: %[[VAL_0:.*]] = fir.alloca i32 {bindc_name = "i", uniq_name = "_QFsimple_int_reduction_switch_orderEi"}
+! CHECK: %[[VAL_1:.*]]:2 = hlfir.declare %[[VAL_0]] {uniq_name = "_QFsimple_int_reduction_switch_orderEi"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
+! CHECK: %[[VAL_2:.*]] = fir.alloca i32 {bindc_name = "x", uniq_name = "_QFsimple_int_reduction_switch_orderEx"}
+! CHECK: %[[VAL_3:.*]]:2 = hlfir.declare %[[VAL_2]] {uniq_name = "_QFsimple_int_reduction_switch_orderEx"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
+! CHECK: %[[VAL_4:.*]] = arith.constant 1 : i32
+! CHECK: hlfir.assign %[[VAL_4]] to %[[VAL_3]]#0 : i32, !fir.ref<i32>
+! CHECK: omp.parallel {
+! CHECK: %[[VAL_5:.*]] = fir.alloca i32 {adapt.valuebyref, pinned}
+! CHECK: %[[VAL_6:.*]]:2 = hlfir.declare %[[VAL_5]] {uniq_name = "_QFsimple_int_reduction_switch_orderEi"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
+! CHECK: %[[VAL_7:.*]] = arith.constant 1 : i32
+! CHECK: %[[VAL_8:.*]] = arith.constant 10 : i32
+! CHECK: %[[VAL_9:.*]] = arith.constant 1 : i32
+! CHECK: omp.wsloop reduction(@multiply_reduction_i_32 %[[VAL_3]]#0 -> %[[VAL_10:.*]] : !fir.ref<i32>) for (%[[VAL_11:.*]]) : i32 = (%[[VAL_7]]) to (%[[VAL_8]]) inclusive step (%[[VAL_9]]) {
+! CHECK: fir.store %[[VAL_11]] to %[[VAL_6]]#1 : !fir.ref<i32>
+! CHECK: %[[VAL_12:.*]]:2 = hlfir.declare %[[VAL_10]] {uniq_name = "_QFsimple_int_reduction_switch_orderEx"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
+! CHECK: %[[VAL_13:.*]] = fir.load %[[VAL_6]]#0 : !fir.ref<i32>
+! CHECK: %[[VAL_14:.*]] = fir.load %[[VAL_12]]#0 : !fir.ref<i32>
+! CHECK: %[[VAL_15:.*]] = arith.muli %[[VAL_13]], %[[VAL_14]] : i32
+! CHECK: hlfir.assign %[[VAL_15]] to %[[VAL_12]]#0 : i32, !fir.ref<i32>
+! CHECK: omp.yield
+! CHECK: omp.terminator
+! CHECK: return
+
subroutine simple_int_reduction_switch_order
integer :: x
x = 1
@@ -137,25 +157,31 @@ subroutine simple_int_reduction_switch_order
!$omp end parallel
end subroutine
-!CHECK-LABEL: func.func @_QPsimple_real_reduction_switch_order
-!CHECK: %[[XREF:.*]] = fir.alloca f32 {bindc_name = "x", uniq_name = "_QFsimple_real_reduction_switch_orderEx"}
-!CHECK: %[[X_DECL:.*]]:2 = hlfir.declare %[[XREF]] {uniq_name = "_QFsimple_real_reduction_switch_orderEx"} : (!fir.ref<f32>) -> (!fir.ref<f32>, !fir.ref<f32>)
-!CHECK: %[[C1_2:.*]] = arith.constant 1.000000e+00 : f32
-!CHECK: hlfir.assign %[[C1_2]] to %[[X_DECL]]#0 : f32, !fir.ref<f32>
-!CHECK: omp.parallel
-!CHECK: %[[I_PVT_REF:.*]] = fir.alloca i32 {adapt.valuebyref, pinned}
-!CHECK: %[[I_PVT_DECL:.*]]:2 = hlfir.declare %[[I_PVT_REF]] {uniq_name = "_QFsimple_real_reduction_switch_orderEi"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
-!CHECK: %[[C1_1:.*]] = arith.constant 1 : i32
-!CHECK: %[[C100:.*]] = arith.constant 10 : i32
-!CHECK: %[[C1_2:.*]] = arith.constant 1 : i32
-!CHECK: omp.wsloop reduction(@[[RED_F32_NAME]] -> %[[X_DECL]]#0 : !fir.ref<f32>) for (%[[IVAL:.*]]) : i32 = (%[[C1_1]]) to (%[[C100]]) inclusive step (%[[C1_2]])
-!CHECK: fir.store %[[IVAL]] to %[[I_PVT_DECL]]#1 : !fir.ref<i32>
-!CHECK: %[[I_PVT_VAL_i32:.*]] = fir.load %[[I_PVT_DECL]]#0 : !fir.ref<i32>
-!CHECK: %[[I_PVT_VAL_f32:.*]] = fir.convert %[[I_PVT_VAL_i32]] : (i32) -> f32
-!CHECK: omp.reduction %[[I_PVT_VAL_f32]], %[[X_DECL]]#0 : f32, !fir.ref<f32>
-!CHECK: omp.yield
-!CHECK: omp.terminator
-!CHECK: return
+! CHECK-LABEL: func.func @_QPsimple_real_reduction_switch_order() {
+! CHECK: %[[VAL_0:.*]] = fir.alloca i32 {bindc_name = "i", uniq_name = "_QFsimple_real_reduction_switch_orderEi"}
+! CHECK: %[[VAL_1:.*]]:2 = hlfir.declare %[[VAL_0]] {uniq_name = "_QFsimple_real_reduction_switch_orderEi"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
+! CHECK: %[[VAL_2:.*]] = fir.alloca f32 {bindc_name = "x", uniq_name = "_QFsimple_real_reduction_switch_orderEx"}
+! CHECK: %[[VAL_3:.*]]:2 = hlfir.declare %[[VAL_2]] {uniq_name = "_QFsimple_real_reduction_switch_orderEx"} : (!fir.ref<f32>) -> (!fir.ref<f32>, !fir.ref<f32>)
+! CHECK: %[[VAL_4:.*]] = arith.constant 1.000000e+00 : f32
+! CHECK: hlfir.assign %[[VAL_4]] to %[[VAL_3]]#0 : f32, !fir.ref<f32>
+! CHECK: omp.parallel {
+! CHECK: %[[VAL_5:.*]] = fir.alloca i32 {adapt.valuebyref, pinned}
+! CHECK: %[[VAL_6:.*]]:2 = hlfir.declare %[[VAL_5]] {uniq_name = "_QFsimple_real_reduction_switch_orderEi"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
+! CHECK: %[[VAL_7:.*]] = arith.constant 1 : i32
+! CHECK: %[[VAL_8:.*]] = arith.constant 10 : i32
+! CHECK: %[[VAL_9:.*]] = arith.constant 1 : i32
+! CHECK: omp.wsloop reduction(@multiply_reduction_f_32 %[[VAL_3]]#0 -> %[[VAL_10:.*]] : !fir.ref<f32>) for (%[[VAL_11:.*]]) : i32 = (%[[VAL_7]]) to (%[[VAL_8]]) inclusive step (%[[VAL_9]]) {
+! CHECK: fir.store %[[VAL_11]] to %[[VAL_6]]#1 : !fir.ref<i32>
+! CHECK: %[[VAL_12:.*]]:2 = hlfir.declare %[[VAL_10]] {uniq_name = "_QFsimple_real_reduction_switch_orderEx"} : (!fir.ref<f32>) -> (!fir.ref<f32>, !fir.ref<f32>)
+! CHECK: %[[VAL_13:.*]] = fir.load %[[VAL_6]]#0 : !fir.ref<i32>
+! CHECK: %[[VAL_14:.*]] = fir.convert %[[VAL_13]] : (i32) -> f32
+! CHECK: %[[VAL_15:.*]] = fir.load %[[VAL_12]]#0 : !fir.ref<f32>
+! CHECK: %[[VAL_16:.*]] = arith.mulf %[[VAL_14]], %[[VAL_15]] fastmath<contract> : f32
+! CHECK: hlfir.assign %[[VAL_16]] to %[[VAL_12]]#0 : f32, !fir.ref<f32>
+! CHECK: omp.yield
+! CHECK: omp.terminator
+! CHECK: return
+
subroutine simple_real_reduction_switch_order
real :: x
x = 1.0
@@ -168,27 +194,48 @@ subroutine simple_real_reduction_switch_order
!$omp end parallel
end subroutine
-!CHECK-LABEL: func.func @_QPmultiple_int_reductions_same_type
-!CHECK: %[[XREF:.*]] = fir.alloca i32 {bindc_name = "x", uniq_name = "_QFmultiple_int_reductions_same_typeEx"}
-!CHECK: %[[X_DECL:.*]]:2 = hlfir.declare %[[XREF]] {uniq_name = "_QFmultiple_int_reductions_same_typeEx"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
-!CHECK: %[[YREF:.*]] = fir.alloca i32 {bindc_name = "y", uniq_name = "_QFmultiple_int_reductions_same_typeEy"}
-!CHECK: %[[Y_DECL:.*]]:2 = hlfir.declare %[[YREF]] {uniq_name = "_QFmultiple_int_reductions_same_typeEy"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
-!CHECK: %[[ZREF:.*]] = fir.alloca i32 {bindc_name = "z", uniq_name = "_QFmultiple_int_reductions_same_typeEz"}
-!CHECK: %[[Z_DECL:.*]]:2 = hlfir.declare %[[ZREF]] {uniq_name = "_QFmultiple_int_reductions_same_typeEz"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
-!CHECK: omp.parallel
-!CHECK: %[[I_PVT_REF:.*]] = fir.alloca i32 {adapt.valuebyref, pinned}
-!CHECK: %[[I_PVT_DECL:.*]]:2 = hlfir.declare %[[I_PVT_REF]] {uniq_name = "_QFmultiple_int_reductions_same_typeEi"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
-!CHECK: omp.wsloop reduction(@[[RED_I32_NAME]] -> %[[X_DECL]]#0 : !fir.ref<i32>, @[[RED_I32_NAME]] -> %[[Y_DECL]]#0 : !fir.ref<i32>, @[[RED_I32_NAME]] -> %[[Z_DECL]]#0 : !fir.ref<i32>) for (%[[IVAL]]) : i32
-!CHECK: fir.store %[[IVAL]] to %[[I_PVT_DECL]]#1 : !fir.ref<i32>
-!CHECK: %[[I_PVT_VAL1:.*]] = fir.load %[[I_PVT_DECL]]#0 : !fir.ref<i32>
-!CHECK: omp.reduction %[[I_PVT_VAL1]], %[[X_DECL]]#0 : i32, !fir.ref<i32>
-!CHECK: %[[I_PVT_VAL2:.*]] = fir.load %[[I_PVT_DECL]]#0 : !fir.ref<i32>
-!CHECK: omp.reduction %[[I_PVT_VAL2]], %[[Y_DECL]]#0 : i32, !fir.ref<i32>
-!CHECK: %[[I_PVT_VAL3:.*]] = fir.load %[[I_PVT_DECL]]#0 : !fir.ref<i32>
-!CHECK: omp.reduction %[[I_PVT_VAL3]], %[[Z_DECL]]#0 : i32, !fir.ref<i32>
-!CHECK: omp.yield
-!CHECK: omp.terminator
-!CHECK: return
+! CHECK-LABEL: func.func @_QPmultiple_int_reductions_same_type() {
+! CHECK: %[[VAL_0:.*]] = fir.alloca i32 {bindc_name = "i", uniq_name = "_QFmultiple_int_reductions_same_typeEi"}
+! CHECK: %[[VAL_1:.*]]:2 = hlfir.declare %[[VAL_0]] {uniq_name = "_QFmultiple_int_reductions_same_typeEi"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
+! CHECK: %[[VAL_2:.*]] = fir.alloca i32 {bindc_name = "x", uniq_name = "_QFmultiple_int_reductions_same_typeEx"}
+! CHECK: %[[VAL_3:.*]]:2 = hlfir.declare %[[VAL_2]] {uniq_name = "_QFmultiple_int_reductions_same_typeEx"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
+! CHECK: %[[VAL_4:.*]] = fir.alloca i32 {bindc_name = "y", uniq_name = "_QFmultiple_int_reductions_same_typeEy"}
+! CHECK: %[[VAL_5:.*]]:2 = hlfir.declare %[[VAL_4]] {uniq_name = "_QFmultiple_int_reductions_same_typeEy"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
+! CHECK: %[[VAL_6:.*]] = fir.alloca i32 {bindc_name = "z", uniq_name = "_QFmultiple_int_reductions_same_typeEz"}
+! CHECK: %[[VAL_7:.*]]:2 = hlfir.declare %[[VAL_6]] {uniq_name = "_QFmultiple_int_reductions_same_typeEz"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
+! CHECK: %[[VAL_8:.*]] = arith.constant 1 : i32
+! CHECK: hlfir.assign %[[VAL_8]] to %[[VAL_3]]#0 : i32, !fir.ref<i32>
+! CHECK: %[[VAL_9:.*]] = arith.constant 1 : i32
+! CHECK: hlfir.assign %[[VAL_9]] to %[[VAL_5]]#0 : i32, !fir.ref<i32>
+! CHECK: %[[VAL_10:.*]] = arith.constant 1 : i32
+! CHECK: hlfir.assign %[[VAL_10]] to %[[VAL_7]]#0 : i32, !fir.ref<i32>
+! CHECK: omp.parallel {
+! CHECK: %[[VAL_11:.*]] = fir.alloca i32 {adapt.valuebyref, pinned}
+! CHECK: %[[VAL_12:.*]]:2 = hlfir.declare %[[VAL_11]] {uniq_name = "_QFmultiple_int_reductions_same_typeEi"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
+! CHECK: %[[VAL_13:.*]] = arith.constant 1 : i32
+! CHECK: %[[VAL_14:.*]] = arith.constant 10 : i32
+! CHECK: %[[VAL_15:.*]] = arith.constant 1 : i32
+! CHECK: omp.wsloop reduction(@multiply_reduction_i_32 %[[VAL_3]]#0 -> %[[VAL_16:.*]] : !fir.ref<i32>, @multiply_reduction_i_32 %[[VAL_5]]#0 -> %[[VAL_17:.*]] : !fir.ref<i32>, @multiply_reduction_i_32 %[[VAL_7]]#0 -> %[[VAL_18:.*]] : !fir.ref<i32>) for (%[[VAL_19:.*]]) : i32 = (%[[VAL_13]]) to (%[[VAL_14]]) inclusive step (%[[VAL_15]]) {
+! CHECK: fir.store %[[VAL_19]] to %[[VAL_12]]#1 : !fir.ref<i32>
+! CHECK: %[[VAL_20:.*]]:2 = hlfir.declare %[[VAL_16]] {uniq_name = "_QFmultiple_int_reductions_same_typeEx"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
+! CHECK: %[[VAL_21:.*]]:2 = hlfir.declare %[[VAL_17]] {uniq_name = "_QFmultiple_int_reductions_same_typeEy"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
+! CHECK: %[[VAL_22:.*]]:2 = hlfir.declare %[[VAL_18]] {uniq_name = "_QFmultiple_int_reductions_same_typeEz"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
+! CHECK: %[[VAL_23:.*]] = fir.load %[[VAL_20]]#0 : !fir.ref<i32>
+! CHECK: %[[VAL_24:.*]] = fir.load %[[VAL_12]]#0 : !fir.ref<i32>
+! CHECK: %[[VAL_25:.*]] = arith.muli %[[VAL_23]], %[[VAL_24]] : i32
+! CHECK: hlfir.assign %[[VAL_25]] to %[[VAL_20]]#0 : i32, !fir.ref<i32>
+! CHECK: %[[VAL_26:.*]] = fir.load %[[VAL_21]]#0 : !fir.ref<i32>
+! CHECK: %[[VAL_27:.*]] = fir.load %[[VAL_12]]#0 : !fir.ref<i32>
+! CHECK: %[[VAL_28:.*]] = arith.muli %[[VAL_26]], %[[VAL_27]] : i32
+! CHECK: hlfir.assign %[[VAL_28]] to %[[VAL_21]]#0 : i32, !fir.ref<i32>
+! CHECK: %[[VAL_29:.*]] = fir.load %[[VAL_22]]#0 : !fir.ref<i32>
+! CHECK: %[[VAL_30:.*]] = fir.load %[[VAL_12]]#0 : !fir.ref<i32>
+! CHECK: %[[VAL_31:.*]] = arith.muli %[[VAL_29]], %[[VAL_30]] : i32
+! CHECK: hlfir.assign %[[VAL_31]] to %[[VAL_22]]#0 : i32, !fir.ref<i32>
+! CHECK: omp.yield
+! CHECK: omp.terminator
+! CHECK: return
+
subroutine multiple_int_reductions_same_type
integer :: x,y,z
x = 1
@@ -205,30 +252,51 @@ subroutine multiple_int_reductions_same_type
!$omp end parallel
end subroutine
-!CHECK-LABEL: func.func @_QPmultiple_real_reductions_same_type
-!CHECK: %[[XREF:.*]] = fir.alloca f32 {bindc_name = "x", uniq_name = "_QFmultiple_real_reductions_same_typeEx"}
-!CHECK: %[[X_DECL:.*]]:2 = hlfir.declare %[[XREF]] {uniq_name = "_QFmultiple_real_reductions_same_typeEx"} : (!fir.ref<f32>) -> (!fir.ref<f32>, !fir.ref<f32>)
-!CHECK: %[[YREF:.*]] = fir.alloca f32 {bindc_name = "y", uniq_name = "_QFmultiple_real_reductions_same_typeEy"}
-!CHECK: %[[Y_DECL:.*]]:2 = hlfir.declare %[[YREF]] {uniq_name = "_QFmultiple_real_reductions_same_typeEy"} : (!fir.ref<f32>) -> (!fir.ref<f32>, !fir.ref<f32>)
-!CHECK: %[[ZREF:.*]] = fir.alloca f32 {bindc_name = "z", uniq_name = "_QFmultiple_real_reductions_same_typeEz"}
-!CHECK: %[[Z_DECL:.*]]:2 = hlfir.declare %[[ZREF]] {uniq_name = "_QFmultiple_real_reductions_same_typeEz"} : (!fir.ref<f32>) -> (!fir.ref<f32>, !fir.ref<f32>)
-!CHECK: omp.parallel
-!CHECK: %[[I_PVT_REF:.*]] = fir.alloca i32 {adapt.valuebyref, pinned}
-!CHECK: %[[I_PVT_DECL:.*]]:2 = hlfir.declare %[[I_PVT_REF]] {uniq_name = "_QFmultiple_real_reductions_same_typeEi"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
-!CHECK: omp.wsloop reduction(@[[RED_F32_NAME]] -> %[[X_DECL]]#0 : !fir.ref<f32>, @[[RED_F32_NAME]] -> %[[Y_DECL]]#0 : !fir.ref<f32>, @[[RED_F32_NAME]] -> %[[Z_DECL]]#0 : !fir.ref<f32>) for (%[[IVAL]]) : i32
-!CHECK: fir.store %[[IVAL]] to %[[I_PVT_DECL]]#1 : !fir.ref<i32>
-!CHECK: %[[I_PVT_VAL1_I32:.*]] = fir.load %[[I_PVT_DECL]]#0 : !fir.ref<i32>
-!CHECK: %[[I_PVT_VAL1_F32:.*]] = fir.convert %[[I_PVT_VAL1_I32]] : (i32) -> f32
-!CHECK: omp.reduction %[[I_PVT_VAL1_F32]], %[[X_DECL]]#0 : f32, !fir.ref<f32>
-!CHECK: %[[I_PVT_VAL2_I32:.*]] = fir.load %[[I_PVT_DECL]]#0 : !fir.ref<i32>
-!CHECK: %[[I_PVT_VAL2_F32:.*]] = fir.convert %[[I_PVT_VAL2_I32]] : (i32) -> f32
-!CHECK: omp.reduction %[[I_PVT_VAL2_F32]], %[[Y_DECL]]#0 : f32, !fir.ref<f32>
-!CHECK: %[[I_PVT_VAL3_I32:.*]] = fir.load %[[I_PVT_DECL]]#0 : !fir.ref<i32>
-!CHECK: %[[I_PVT_VAL3_F32:.*]] = fir.convert %[[I_PVT_VAL3_I32]] : (i32) -> f32
-!CHECK: omp.reduction %[[I_PVT_VAL3_F32]], %[[Z_DECL]]#0 : f32, !fir.ref<f32>
-!CHECK: omp.yield
-!CHECK: omp.terminator
-!CHECK: return
+! CHECK-LABEL: func.func @_QPmultiple_real_reductions_same_type() {
+! CHECK: %[[VAL_0:.*]] = fir.alloca i32 {bindc_name = "i", uniq_name = "_QFmultiple_real_reductions_same_typeEi"}
+! CHECK: %[[VAL_1:.*]]:2 = hlfir.declare %[[VAL_0]] {uniq_name = "_QFmultiple_real_reductions_same_typeEi"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
+! CHECK: %[[VAL_2:.*]] = fir.alloca f32 {bindc_name = "x", uniq_name = "_QFmultiple_real_reductions_same_typeEx"}
+! CHECK: %[[VAL_3:.*]]:2 = hlfir.declare %[[VAL_2]] {uniq_name = "_QFmultiple_real_reductions_same_typeEx"} : (!fir.ref<f32>) -> (!fir.ref<f32>, !fir.ref<f32>)
+! CHECK: %[[VAL_4:.*]] = fir.alloca f32 {bindc_name = "y", uniq_name = "_QFmultiple_real_reductions_same_typeEy"}
+! CHECK: %[[VAL_5:.*]]:2 = hlfir.declare %[[VAL_4]] {uniq_name = "_QFmultiple_real_reductions_same_typeEy"} : (!fir.ref<f32>) -> (!fir.ref<f32>, !fir.ref<f32>)
+! CHECK: %[[VAL_6:.*]] = fir.alloca f32 {bindc_name = "z", uniq_name = "_QFmultiple_real_reductions_same_typeEz"}
+! CHECK: %[[VAL_7:.*]]:2 = hlfir.declare %[[VAL_6]] {uniq_name = "_QFmultiple_real_reductions_same_typeEz"} : (!fir.ref<f32>) -> (!fir.ref<f32>, !fir.ref<f32>)
+! CHECK: %[[VAL_8:.*]] = arith.constant 1.000000e+00 : f32
+! CHECK: hlfir.assign %[[VAL_8]] to %[[VAL_3]]#0 : f32, !fir.ref<f32>
+! CHECK: %[[VAL_9:.*]] = arith.constant 1.000000e+00 : f32
+! CHECK: hlfir.assign %[[VAL_9]] to %[[VAL_5]]#0 : f32, !fir.ref<f32>
+! CHECK: %[[VAL_10:.*]] = arith.constant 1.000000e+00 : f32
+! CHECK: hlfir.assign %[[VAL_10]] to %[[VAL_7]]#0 : f32, !fir.ref<f32>
+! CHECK: omp.parallel {
+! CHECK: %[[VAL_11:.*]] = fir.alloca i32 {adapt.valuebyref, pinned}
+! CHECK: %[[VAL_12:.*]]:2 = hlfir.declare %[[VAL_11]] {uniq_name = "_QFmultiple_real_reductions_same_typeEi"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
+! CHECK: %[[VAL_13:.*]] = arith.constant 1 : i32
+! CHECK: %[[VAL_14:.*]] = arith.constant 10 : i32
+! CHECK: %[[VAL_15:.*]] = arith.constant 1 : i32
+! CHECK: omp.wsloop reduction(@multiply_reduction_f_32 %[[VAL_3]]#0 -> %[[VAL_16:.*]] : !fir.ref<f32>, @multiply_reduction_f_32 %[[VAL_5]]#0 -> %[[VAL_17:.*]] : !fir.ref<f32>, @multiply_reduction_f_32 %[[VAL_7]]#0 -> %[[VAL_18:.*]] : !fir.ref<f32>) for (%[[VAL_19:.*]]) : i32 = (%[[VAL_13]]) to (%[[VAL_14]]) inclusive step (%[[VAL_15]]) {
+! CHECK: fir.store %[[VAL_19]] to %[[VAL_12]]#1 : !fir.ref<i32>
+! CHECK: %[[VAL_20:.*]]:2 = hlfir.declare %[[VAL_16]] {uniq_name = "_QFmultiple_real_reductions_same_typeEx"} : (!fir.ref<f32>) -> (!fir.ref<f32>, !fir.ref<f32>)
+! CHECK: %[[VAL_21:.*]]:2 = hlfir.declare %[[VAL_17]] {uniq_name = "_QFmultiple_real_reductions_same_typeEy"} : (!fir.ref<f32>) -> (!fir.ref<f32>, !fir.ref<f32>)
+! CHECK: %[[VAL_22:.*]]:2 = hlfir.declare %[[VAL_18]] {uniq_name = "_QFmultiple_real_reductions_same_typeEz"} : (!fir.ref<f32>) -> (!fir.ref<f32>, !fir.ref<f32>)
+! CHECK: %[[VAL_23:.*]] = fir.load %[[VAL_20]]#0 : !fir.ref<f32>
+! CHECK: %[[VAL_24:.*]] = fir.load %[[VAL_12]]#0 : !fir.ref<i32>
+! CHECK: %[[VAL_25:.*]] = fir.convert %[[VAL_24]] : (i32) -> f32
+! CHECK: %[[VAL_26:.*]] = arith.mulf %[[VAL_23]], %[[VAL_25]] fastmath<contract> : f32
+! CHECK: hlfir.assign %[[VAL_26]] to %[[VAL_20]]#0 : f32, !fir.ref<f32>
+! CHECK: %[[VAL_27:.*]] = fir.load %[[VAL_21]]#0 : !fir.ref<f32>
+! CHECK: %[[VAL_28:.*]] = fir.load %[[VAL_12]]#0 : !fir.ref<i32>
+! CHECK: %[[VAL_29:.*]] = fir.convert %[[VAL_28]] : (i32) -> f32
+! CHECK: %[[VAL_30:.*]] = arith.mulf %[[VAL_27]], %[[VAL_29]] fastmath<contract> : f32
+! CHECK: hlfir.assign %[[VAL_30]] to %[[VAL_21]]#0 : f32, !fir.ref<f32>
+! CHECK: %[[VAL_31:.*]] = fir.load %[[VAL_22]]#0 : !fir.ref<f32>
+! CHECK: %[[VAL_32:.*]] = fir.load %[[VAL_12]]#0 : !fir.ref<i32>
+! CHECK: %[[VAL_33:.*]] = fir.convert %[[VAL_32]] : (i32) -> f32
+! CHECK: %[[VAL_34:.*]] = arith.mulf %[[VAL_31]], %[[VAL_33]] fastmath<contract> : f32
+! CHECK: hlfir.assign %[[VAL_34]] to %[[VAL_22]]#0 : f32, !fir.ref<f32>
+! CHECK: omp.yield
+! CHECK: omp.terminator
+! CHECK: return
+
subroutine multiple_real_reductions_same_type
real :: x,y,z
x = 1
@@ -245,33 +313,61 @@ subroutine multiple_real_reductions_same_type
!$omp end parallel
end subroutine
-!CHECK-LABEL: func.func @_QPmultiple_reductions_different_type
-!CHECK: %[[WREF:.*]] = fir.alloca f64 {bindc_name = "w", uniq_name = "_QFmultiple_reductions_different_typeEw"}
-!CHECK: %[[W_DECL:.*]]:2 = hlfir.declare %[[WREF]] {uniq_name = "_QFmultiple_reductions_different_typeEw"} : (!fir.ref<f64>) -> (!fir.ref<f64>, !fir.ref<f64>)
-!CHECK: %[[XREF:.*]] = fir.alloca i32 {bindc_name = "x", uniq_name = "_QFmultiple_reductions_different_typeEx"}
-!CHECK: %[[X_DECL:.*]]:2 = hlfir.declare %[[XREF]] {uniq_name = "_QFmultiple_reductions_different_typeEx"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
-!CHECK: %[[YREF:.*]] = fir.alloca i64 {bindc_name = "y", uniq_name = "_QFmultiple_reductions_different_typeEy"}
-!CHECK: %[[Y_DECL:.*]]:2 = hlfir.declare %[[YREF]] {uniq_name = "_QFmultiple_reductions_different_typeEy"} : (!fir.ref<i64>) -> (!fir.ref<i64>, !fir.ref<i64>)
-!CHECK: %[[ZREF:.*]] = fir.alloca f32 {bindc_name = "z", uniq_name = "_QFmultiple_reductions_different_typeEz"}
-!CHECK: %[[Z_DECL:.*]]:2 = hlfir.declare %[[ZREF]] {uniq_name = "_QFmultiple_reductions_different_typeEz"} : (!fir.ref<f32>) -> (!fir.ref<f32>, !fir.ref<f32>)
-!CHECK: omp.parallel
-!CHECK: %[[I_PVT_REF:.*]] = fir.alloca i32 {adapt.valuebyref, pinned}
-!CHECK: %[[I_PVT_DECL:.*]]:2 = hlfir.declare %[[I_PVT_REF]] {uniq_name = "_QFmultiple_reductions_different_typeEi"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
-!CHECK: omp.wsloop reduction(@[[RED_I32_NAME]] -> %[[X_DECL]]#0 : !fir.ref<i32>, @[[RED_I64_NAME]] -> %[[Y_DECL]]#0 : !fir.ref<i64>, @[[RED_F32_NAME]] -> %[[Z_DECL]]#0 : !fir.ref<f32>, @[[RED_F64_NAME]] -> %[[W_DECL]]#0 : !fir.ref<f64>) for (%[[IVAL:.*]]) : i32
-!CHECK: fir.store %[[IVAL]] to %[[I_PVT_DECL]]#1 : !fir.ref<i32>
-!CHECK: %[[I_PVT_VAL1_I32:.*]] = fir.load %[[I_PVT_DECL]]#0 : !fir.ref<i32>
-!CHECK: omp.reduction %[[I_PVT_VAL1_I32]], %[[X_DECL]]#0 : i32, !fir.ref<i32>
-!CHECK: %[[I_PVT_VAL2_I32:.*]] = fir.load %[[I_PVT_DECL]]#0 : !fir.ref<i32>
-!CHECK: %[[I_PVT_VAL2_I64:.*]] = fir.convert %[[I_PVT_VAL2_I32]] : (i32) -> i64
-!CHECK: omp.reduction %[[I_PVT_VAL2_I64]], %[[Y_DECL]]#0 : i64, !fir.ref<i64>
-!CHECK: %[[I_PVT_VAL3_I32:.*]] = fir.load %[[I_PVT_DECL]]#0 : !fir.ref<i32>
-!CHECK: %[[I_PVT_VAL3_F32:.*]] = fir.convert %[[I_PVT_VAL3_I32]] : (i32) -> f32
-!CHECK: omp.reduction %[[I_PVT_VAL3_F32]], %[[Z_DECL]]#0 : f32, !fir.ref<f32>
-!CHECK: %[[I_PVT_VAL4_I32:.*]] = fir.load %[[I_PVT_DECL]]#0 : !fir.ref<i32>
-!CHECK: %[[I_PVT_VAL4_F64:.*]] = fir.convert %[[I_PVT_VAL4_I32]] : (i32) -> f64
-!CHECK: omp.reduction %[[I_PVT_VAL4_F64]], %[[W_DECL]]#0 : f64, !fir.ref<f64>
-!CHECK: omp.terminator
-!CHECK: return
+! CHECK-LABEL: func.func @_QPmultiple_reductions_different_type() {
+! CHECK: %[[VAL_0:.*]] = fir.alloca i32 {bindc_name = "i", uniq_name = "_QFmultiple_reductions_different_typeEi"}
+! CHECK: %[[VAL_1:.*]]:2 = hlfir.declare %[[VAL_0]] {uniq_name = "_QFmultiple_reductions_different_typeEi"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
+! CHECK: %[[VAL_2:.*]] = fir.alloca f64 {bindc_name = "w", uniq_name = "_QFmultiple_reductions_different_typeEw"}
+! CHECK: %[[VAL_3:.*]]:2 = hlfir.declare %[[VAL_2]] {uniq_name = "_QFmultiple_reductions_different_typeEw"} : (!fir.ref<f64>) -> (!fir.ref<f64>, !fir.ref<f64>)
+! CHECK: %[[VAL_4:.*]] = fir.alloca i32 {bindc_name = "x", uniq_name = "_QFmultiple_reductions_different_typeEx"}
+! CHECK: %[[VAL_5:.*]]:2 = hlfir.declare %[[VAL_4]] {uniq_name = "_QFmultiple_reductions_different_typeEx"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
+! CHECK: %[[VAL_6:.*]] = fir.alloca i64 {bindc_name = "y", uniq_name = "_QFmultiple_reductions_different_typeEy"}
+! CHECK: %[[VAL_7:.*]]:2 = hlfir.declare %[[VAL_6]] {uniq_name = "_QFmultiple_reductions_different_typeEy"} : (!fir.ref<i64>) -> (!fir.ref<i64>, !fir.ref<i64>)
+! CHECK: %[[VAL_8:.*]] = fir.alloca f32 {bindc_name = "z", uniq_name = "_QFmultiple_reductions_different_typeEz"}
+! CHECK: %[[VAL_9:.*]]:2 = hlfir.declare %[[VAL_8]] {uniq_name = "_QFmultiple_reductions_different_typeEz"} : (!fir.ref<f32>) -> (!fir.ref<f32>, !fir.ref<f32>)
+! CHECK: %[[VAL_10:.*]] = arith.constant 1 : i32
+! CHECK: hlfir.assign %[[VAL_10]] to %[[VAL_5]]#0 : i32, !fir.ref<i32>
+! CHECK: %[[VAL_11:.*]] = arith.constant 1 : i64
+! CHECK: hlfir.assign %[[VAL_11]] to %[[VAL_7]]#0 : i64, !fir.ref<i64>
+! CHECK: %[[VAL_12:.*]] = arith.constant 1.000000e+00 : f32
+! CHECK: hlfir.assign %[[VAL_12]] to %[[VAL_9]]#0 : f32, !fir.ref<f32>
+! CHECK: %[[VAL_13:.*]] = arith.constant 1.000000e+00 : f64
+! CHECK: hlfir.assign %[[VAL_13]] to %[[VAL_3]]#0 : f64, !fir.ref<f64>
+! CHECK: omp.parallel {
+! CHECK: %[[VAL_14:.*]] = fir.alloca i32 {adapt.valuebyref, pinned}
+! CHECK: %[[VAL_15:.*]]:2 = hlfir.declare %[[VAL_14]] {uniq_name = "_QFmultiple_reductions_different_typeEi"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
+! CHECK: %[[VAL_16:.*]] = arith.constant 1 : i32
+! CHECK: %[[VAL_17:.*]] = arith.constant 10 : i32
+! CHECK: %[[VAL_18:.*]] = arith.constant 1 : i32
+! CHECK: omp.wsloop reduction(@multiply_reduction_i_32 %[[VAL_5]]#0 -> %[[VAL_19:.*]] : !fir.ref<i32>, @multiply_reduction_i_64 %[[VAL_7]]#0 -> %[[VAL_20:.*]] : !fir.ref<i64>, @multiply_reduction_f_32 %[[VAL_9]]#0 -> %[[VAL_21:.*]] : !fir.ref<f32>, @multiply_reduction_f_64 %[[VAL_3]]#0 -> %[[VAL_22:.*]] : !fir.ref<f64>) for (%[[VAL_23:.*]]) : i32 = (%[[VAL_16]]) to (%[[VAL_17]]) inclusive step (%[[VAL_18]]) {
+! CHECK: fir.store %[[VAL_23]] to %[[VAL_15]]#1 : !fir.ref<i32>
+! CHECK: %[[VAL_24:.*]]:2 = hlfir.declare %[[VAL_19]] {uniq_name = "_QFmultiple_reductions_different_typeEx"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
+! CHECK: %[[VAL_25:.*]]:2 = hlfir.declare %[[VAL_20]] {uniq_name = "_QFmultiple_reductions_different_typeEy"} : (!fir.ref<i64>) -> (!fir.ref<i64>, !fir.ref<i64>)
+! CHECK: %[[VAL_26:.*]]:2 = hlfir.declare %[[VAL_21]] {uniq_name = "_QFmultiple_reductions_different_typeEz"} : (!fir.ref<f32>) -> (!fir.ref<f32>, !fir.ref<f32>)
+! CHECK: %[[VAL_27:.*]]:2 = hlfir.declare %[[VAL_22]] {uniq_name = "_QFmultiple_reductions_different_typeEw"} : (!fir.ref<f64>) -> (!fir.ref<f64>, !fir.ref<f64>)
+! CHECK: %[[VAL_28:.*]] = fir.load %[[VAL_24]]#0 : !fir.ref<i32>
+! CHECK: %[[VAL_29:.*]] = fir.load %[[VAL_15]]#0 : !fir.ref<i32>
+! CHECK: %[[VAL_30:.*]] = arith.muli %[[VAL_28]], %[[VAL_29]] : i32
+! CHECK: hlfir.assign %[[VAL_30]] to %[[VAL_24]]#0 : i32, !fir.ref<i32>
+! CHECK: %[[VAL_31:.*]] = fir.load %[[VAL_25]]#0 : !fir.ref<i64>
+! CHECK: %[[VAL_32:.*]] = fir.load %[[VAL_15]]#0 : !fir.ref<i32>
+! CHECK: %[[VAL_33:.*]] = fir.convert %[[VAL_32]] : (i32) -> i64
+! CHECK: %[[VAL_34:.*]] = arith.muli %[[VAL_31]], %[[VAL_33]] : i64
+! CHECK: hlfir.assign %[[VAL_34]] to %[[VAL_25]]#0 : i64, !fir.ref<i64>
+! CHECK: %[[VAL_35:.*]] = fir.load %[[VAL_26]]#0 : !fir.ref<f32>
+! CHECK: %[[VAL_36:.*]] = fir.load %[[VAL_15]]#0 : !fir.ref<i32>
+! CHECK: %[[VAL_37:.*]] = fir.convert %[[VAL_36]] : (i32) -> f32
+! CHECK: %[[VAL_38:.*]] = arith.mulf %[[VAL_35]], %[[VAL_37]] fastmath<contract> : f32
+! CHECK: hlfir.assign %[[VAL_38]] to %[[VAL_26]]#0 : f32, !fir.ref<f32>
+! CHECK: %[[VAL_39:.*]] = fir.load %[[VAL_27]]#0 : !fir.ref<f64>
+! CHECK: %[[VAL_40:.*]] = fir.load %[[VAL_15]]#0 : !fir.ref<i32>
+! CHECK: %[[VAL_41:.*]] = fir.convert %[[VAL_40]] : (i32) -> f64
+! CHECK: %[[VAL_42:.*]] = arith.mulf %[[VAL_39]], %[[VAL_41]] fastmath<contract> : f64
+! CHECK: hlfir.assign %[[VAL_42]] to %[[VAL_27]]#0 : f64, !fir.ref<f64>
+! CHECK: omp.yield
+! CHECK: omp.terminator
+! CHECK: return
+
+
subroutine multiple_reductions_different_type
integer :: x
integer(kind=8) :: y
diff --git a/libc/cmake/modules/CheckCompilerFeatures.cmake b/libc/cmake/modules/CheckCompilerFeatures.cmake
index 983ce86..9789d72 100644
--- a/libc/cmake/modules/CheckCompilerFeatures.cmake
+++ b/libc/cmake/modules/CheckCompilerFeatures.cmake
@@ -2,8 +2,7 @@
# Compiler features definition and flags
# ------------------------------------------------------------------------------
-# Initialize ALL_COMPILER_FEATURES as empty list.
-set(ALL_COMPILER_FEATURES "float128")
+set(ALL_COMPILER_FEATURES "float128" "fixed_point")
# Making sure ALL_COMPILER_FEATURES is sorted.
list(SORT ALL_COMPILER_FEATURES)
@@ -42,16 +41,23 @@ set(AVAILABLE_COMPILER_FEATURES "")
# Try compile a C file to check if flag is supported.
set(CMAKE_TRY_COMPILE_TARGET_TYPE STATIC_LIBRARY)
foreach(feature IN LISTS ALL_COMPILER_FEATURES)
+ set(compile_options ${LIBC_COMPILE_OPTIONS_NATIVE})
+ if(${feature} STREQUAL "fixed_point")
+ list(APPEND compile_options "-ffixed-point")
+ endif()
+
try_compile(
has_feature
${CMAKE_CURRENT_BINARY_DIR}/compiler_features
SOURCES ${LIBC_SOURCE_DIR}/cmake/modules/compiler_features/check_${feature}.cpp
- COMPILE_DEFINITIONS -I${LIBC_SOURCE_DIR} ${LIBC_COMPILE_OPTIONS_NATIVE}
+ COMPILE_DEFINITIONS -I${LIBC_SOURCE_DIR} ${compile_options}
)
if(has_feature)
list(APPEND AVAILABLE_COMPILER_FEATURES ${feature})
if(${feature} STREQUAL "float128")
set(LIBC_COMPILER_HAS_FLOAT128 TRUE)
+ elseif(${feature} STREQUAL "fixed_point")
+ set(LIBC_COMPILER_HAS_FIXED_POINT TRUE)
endif()
endif()
endforeach()
diff --git a/libc/cmake/modules/LLVMLibCObjectRules.cmake b/libc/cmake/modules/LLVMLibCObjectRules.cmake
index ef1f248..54c7e1e 100644
--- a/libc/cmake/modules/LLVMLibCObjectRules.cmake
+++ b/libc/cmake/modules/LLVMLibCObjectRules.cmake
@@ -49,6 +49,10 @@ function(_get_common_compile_options output_var flags)
list(APPEND compile_options "-ffreestanding")
endif()
+ if(LIBC_COMPILER_HAS_FIXED_POINT)
+ list(APPEND compile_options "-ffixed-point")
+ endif()
+
list(APPEND compile_options "-fno-builtin")
list(APPEND compile_options "-fno-exceptions")
list(APPEND compile_options "-fno-lax-vector-conversions")
diff --git a/libc/cmake/modules/compiler_features/check_fixed_point.cpp b/libc/cmake/modules/compiler_features/check_fixed_point.cpp
new file mode 100644
index 0000000..a519269
--- /dev/null
+++ b/libc/cmake/modules/compiler_features/check_fixed_point.cpp
@@ -0,0 +1,5 @@
+#include "include/llvm-libc-macros/stdfix-macros.h"
+
+#ifndef LIBC_COMPILER_HAS_FIXED_POINT
+#error unsupported
+#endif
diff --git a/libc/config/linux/aarch64/entrypoints.txt b/libc/config/linux/aarch64/entrypoints.txt
index bc09f4881..6e19468 100644
--- a/libc/config/linux/aarch64/entrypoints.txt
+++ b/libc/config/linux/aarch64/entrypoints.txt
@@ -387,6 +387,7 @@ if(LIBC_COMPILER_HAS_FLOAT128)
libc.src.math.fmaxf128
libc.src.math.fminf128
libc.src.math.frexpf128
+ libc.src.math.ldexpf128
libc.src.math.roundf128
libc.src.math.sqrtf128
libc.src.math.truncf128
diff --git a/libc/config/linux/api.td b/libc/config/linux/api.td
index c1f052e..5a1d764 100644
--- a/libc/config/linux/api.td
+++ b/libc/config/linux/api.td
@@ -6,6 +6,7 @@ include "spec/linux.td"
include "spec/gnu_ext.td"
include "spec/bsd_ext.td"
include "spec/llvm_libc_ext.td"
+include "spec/stdc_ext.td"
def AssertMacro : MacroDef<"assert"> {
let Defn = [{
diff --git a/libc/config/linux/riscv/entrypoints.txt b/libc/config/linux/riscv/entrypoints.txt
index 02412e7..71ff4bc 100644
--- a/libc/config/linux/riscv/entrypoints.txt
+++ b/libc/config/linux/riscv/entrypoints.txt
@@ -396,6 +396,7 @@ if(LIBC_COMPILER_HAS_FLOAT128)
libc.src.math.fmaxf128
libc.src.math.fminf128
libc.src.math.frexpf128
+ libc.src.math.ldexpf128
libc.src.math.roundf128
libc.src.math.sqrtf128
libc.src.math.truncf128
diff --git a/libc/config/linux/x86_64/entrypoints.txt b/libc/config/linux/x86_64/entrypoints.txt
index 75e39ae..33f6e97 100644
--- a/libc/config/linux/x86_64/entrypoints.txt
+++ b/libc/config/linux/x86_64/entrypoints.txt
@@ -117,6 +117,21 @@ set(TARGET_LIBC_ENTRYPOINTS
libc.src.stdbit.stdc_first_leading_zero_ui
libc.src.stdbit.stdc_first_leading_zero_ul
libc.src.stdbit.stdc_first_leading_zero_ull
+ libc.src.stdbit.stdc_first_leading_one_uc
+ libc.src.stdbit.stdc_first_leading_one_us
+ libc.src.stdbit.stdc_first_leading_one_ui
+ libc.src.stdbit.stdc_first_leading_one_ul
+ libc.src.stdbit.stdc_first_leading_one_ull
+ libc.src.stdbit.stdc_first_trailing_zero_uc
+ libc.src.stdbit.stdc_first_trailing_zero_us
+ libc.src.stdbit.stdc_first_trailing_zero_ui
+ libc.src.stdbit.stdc_first_trailing_zero_ul
+ libc.src.stdbit.stdc_first_trailing_zero_ull
+ libc.src.stdbit.stdc_first_trailing_one_uc
+ libc.src.stdbit.stdc_first_trailing_one_us
+ libc.src.stdbit.stdc_first_trailing_one_ui
+ libc.src.stdbit.stdc_first_trailing_one_ul
+ libc.src.stdbit.stdc_first_trailing_one_ull
# stdlib.h entrypoints
libc.src.stdlib.abs
@@ -420,6 +435,7 @@ if(LIBC_COMPILER_HAS_FLOAT128)
libc.src.math.fmaxf128
libc.src.math.fminf128
libc.src.math.frexpf128
+ libc.src.math.ldexpf128
libc.src.math.roundf128
libc.src.math.sqrtf128
libc.src.math.truncf128
diff --git a/libc/config/linux/x86_64/headers.txt b/libc/config/linux/x86_64/headers.txt
index 8f37cf9..d0c662c 100644
--- a/libc/config/linux/x86_64/headers.txt
+++ b/libc/config/linux/x86_64/headers.txt
@@ -16,6 +16,7 @@ set(TARGET_PUBLIC_HEADERS
libc.include.spawn
libc.include.setjmp
libc.include.stdbit
+ libc.include.stdfix
libc.include.stdio
libc.include.stdlib
libc.include.string
diff --git a/libc/docs/math/index.rst b/libc/docs/math/index.rst
index 9460449..c586fe6 100644
--- a/libc/docs/math/index.rst
+++ b/libc/docs/math/index.rst
@@ -16,6 +16,7 @@ Math Functions
:hidden:
log.rst
+ stdfix.rst
.. contents:: Table of Contents
@@ -190,6 +191,8 @@ Basic Operations
+--------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
| ldexpl | |check| | |check| | |check| | |check| | |check| | | | |check| | |check| | |check| | | |
+--------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
+| ldexpf128 | |check| | |check| | | |check| | | | | | | | | |
++--------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
| llrint | |check| | |check| | |check| | |check| | |check| | | | |check| | |check| | |check| | | |
+--------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
| llrintf | |check| | |check| | |check| | |check| | |check| | | | |check| | |check| | |check| | | |
@@ -611,6 +614,11 @@ Algorithms + Implementation Details
* :doc:`log`
+Fixed-point Arithmetics
+=======================
+
+* :doc:`stdfix`
+
References
==========
diff --git a/libc/docs/math/stdfix.rst b/libc/docs/math/stdfix.rst
new file mode 100644
index 0000000..85d721b
--- /dev/null
+++ b/libc/docs/math/stdfix.rst
@@ -0,0 +1,136 @@
+================
+StdFix Functions
+================
+
+.. include:: ../check.rst
+
+Standards
+---------
+
+- stdfix.h is specified in the `ISO/IEC TR 18037:2008 <https://www.iso.org/standard/51126.html>`_,
+ C extensions to support embedded processors .
+
+- Its `specifications <https://standards.iso.org/ittf/PubliclyAvailableStandards/c051126_ISO_IEC_TR_18037_2008.zip>`_.
+
+---------------
+Source location
+---------------
+
+- The main source for fixed-point functions is located at:
+ ``libc/src/stdfix`` with subdirectories for internal implementations.
+
+---------------------
+Implementation Status
+---------------------
+
+Requirements
+============
+
+- In order to build LLVM libc to support fixed-point arithmetics, we need the
+ compiler to support the basic fixed-point types `_Fract` and `_Accum` in
+ C++.
+
+- For the users to be able to use the generated headers, their compiler needs
+ to support `_Fract` and `_Accum` types in C or C++.
+
+- This compiler support is checked at the beginning of
+ `libc/include/llvm-libc-macros/stdfix-macros.h <https://github.com/llvm/llvm-project/tree/main/libc/include/llvm-libc-macros/stdfix-macros.h>`_.
+
+
+
+Predefined Macros
+=================
+
+- We use the macro `LIBC_COMPILER_HAS_FIXED_POINT` to specify whether the
+ compiler support the fixed-point types.
+
+- Other predefined precision macros specified in section 7.18a.3 are defined
+ in `libc/include/llvm-libc-macros/stdfix-macros.h <https://github.com/llvm/llvm-project/tree/main/libc/include/llvm-libc-macros/stdfix-macros.h>`_
+ using the default configuration of `typical desktop processor` in section
+ A.3.
+
+
+Fixed-point Arithmetics
+=======================
+
++---------------+------------------------------------------------------------------------------------------+------------------------------------------------------------------------------------------+
+| Function Name | _Fract (r) | _Accum (k) |
+| +------------------------------+----------------------------+------------------------------+------------------------------+----------------------------+------------------------------+
+| | short (hr) | _ (r) | long (lr) | short (hk) | _ (k) | long (lk) |
+| +----------------+-------------+---------------+------------+----------------+-------------+----------------+-------------+---------------+------------+----------------+-------------+
+| | unsigned (uhr) | signed (hr) | unsigned (ur) | signed (r) | unsigned (ulr) | signed (lr) | unsigned (uhk) | signed (hk) | unsigned (uk) | signed (k) | unsigned (ulk) | signed (lk) |
++===============+================+=============+===============+============+================+=============+================+=============+===============+============+================+=============+
+| abs | | | | | | | | | | | | |
++---------------+----------------+-------------+---------------+------------+----------------+-------------+----------------+-------------+---------------+------------+----------------+-------------+
+| bits\* | | | | | | | | | | | | |
++---------------+----------------+-------------+---------------+------------+----------------+-------------+----------------+-------------+---------------+------------+----------------+-------------+
+| \*bits | | | | | | | | | | | | |
++---------------+----------------+-------------+---------------+------------+----------------+-------------+----------------+-------------+---------------+------------+----------------+-------------+
+| countls | | | | | | | | | | | | |
++---------------+----------------+-------------+---------------+------------+----------------+-------------+----------------+-------------+---------------+------------+----------------+-------------+
+| divi | | | | | | | | | | | | |
++---------------+----------------+-------------+---------------+------------+----------------+-------------+----------------+-------------+---------------+------------+----------------+-------------+
+| idivi | | | | | | | | | | | | |
++---------------+----------------+-------------+---------------+------------+----------------+-------------+----------------+-------------+---------------+------------+----------------+-------------+
+| muli | | | | | | | | | | | | |
++---------------+----------------+-------------+---------------+------------+----------------+-------------+----------------+-------------+---------------+------------+----------------+-------------+
+| rdivi | | | | | | | | | | | | |
++---------------+----------------+-------------+---------------+------------+----------------+-------------+----------------+-------------+---------------+------------+----------------+-------------+
+| round | | | | | | | | | | | | |
++---------------+----------------+-------------+---------------+------------+----------------+-------------+----------------+-------------+---------------+------------+----------------+-------------+
+| sqrt | | | | | | | | | | | | |
++---------------+----------------+-------------+---------------+------------+----------------+-------------+----------------+-------------+---------------+------------+----------------+-------------+
+
+================== =========
+Type Generic Macro Available
+================== =========
+absfx
+countlsfx
+roundfx
+================== =========
+
+
+Higher math functions
+=====================
+
++---------------+------------------------------------------------------------------------------------------+------------------------------------------------------------------------------------------+
+| Function Name | _Fract (r) | _Accum (k) |
+| +------------------------------+----------------------------+------------------------------+------------------------------+----------------------------+------------------------------+
+| | short (hr) | _ (r) | long (lr) | short (hk) | _ (k) | long (lk) |
+| +----------------+-------------+---------------+------------+----------------+-------------+----------------+-------------+---------------+------------+----------------+-------------+
+| | unsigned (uhr) | signed (hr) | unsigned (ur) | signed (r) | unsigned (ulr) | signed (lr) | unsigned (uhk) | signed (hk) | unsigned (uk) | signed (k) | unsigned (ulk) | signed (lk) |
++===============+================+=============+===============+============+================+=============+================+=============+===============+============+================+=============+
+| cos | | | | | | | | | | | | |
++---------------+----------------+-------------+---------------+------------+----------------+-------------+----------------+-------------+---------------+------------+----------------+-------------+
+| exp | | | | | | | | | | | | |
++---------------+----------------+-------------+---------------+------------+----------------+-------------+----------------+-------------+---------------+------------+----------------+-------------+
+| log | | | | | | | | | | | | |
++---------------+----------------+-------------+---------------+------------+----------------+-------------+----------------+-------------+---------------+------------+----------------+-------------+
+| sin | | | | | | | | | | | | |
++---------------+----------------+-------------+---------------+------------+----------------+-------------+----------------+-------------+---------------+------------+----------------+-------------+
+| tan | | | | | | | | | | | | |
++---------------+----------------+-------------+---------------+------------+----------------+-------------+----------------+-------------+---------------+------------+----------------+-------------+
+
+
+Conversion Functions
+====================
+
++---------------+------------------------------------------------------------------------------------------+------------------------------------------------------------------------------------------+
+| Function Name | _Fract (r) | _Accum (k) |
+| +------------------------------+----------------------------+------------------------------+------------------------------+----------------------------+------------------------------+
+| | short (hr) | _ (r) | long (lr) | short (hk) | _ (k) | long (lk) |
+| +----------------+-------------+---------------+------------+----------------+-------------+----------------+-------------+---------------+------------+----------------+-------------+
+| | unsigned (uhr) | signed (hr) | unsigned (ur) | signed (r) | unsigned (ulr) | signed (lr) | unsigned (uhk) | signed (hk) | unsigned (uk) | signed (k) | unsigned (ulk) | signed (lk) |
++===============+================+=============+===============+============+================+=============+================+=============+===============+============+================+=============+
+| fprintf | | | | | | | | | | | | |
++---------------+----------------+-------------+---------------+------------+----------------+-------------+----------------+-------------+---------------+------------+----------------+-------------+
+| fscanf | | | | | | | | | | | | |
++---------------+----------------+-------------+---------------+------------+----------------+-------------+----------------+-------------+---------------+------------+----------------+-------------+
+| strtofx | | | | | | | | | | | | |
++---------------+----------------+-------------+---------------+------------+----------------+-------------+----------------+-------------+---------------+------------+----------------+-------------+
+
+
+Warnings
+========
+
+This is currently a work-in-progress, its headers, macros, and ABI are still unstable, and might be modified.
diff --git a/libc/docs/stdbit.rst b/libc/docs/stdbit.rst
index 4f242d2..3bd83ff 100644
--- a/libc/docs/stdbit.rst
+++ b/libc/docs/stdbit.rst
@@ -41,36 +41,36 @@ stdc_leading_ones_us |check|
stdc_leading_ones_ui |check|
stdc_leading_ones_ul |check|
stdc_leading_ones_ull |check|
-stdc_trailing_zeros_uc
-stdc_trailing_zeros_us
-stdc_trailing_zeros_ui
-stdc_trailing_zeros_ul
-stdc_trailing_zeros_ull
-stdc_trailing_ones_uc
-stdc_trailing_ones_us
-stdc_trailing_ones_ui
-stdc_trailing_ones_ul
-stdc_trailing_ones_ull
-stdc_first_leading_zero_uc
-stdc_first_leading_zero_us
-stdc_first_leading_zero_ui
-stdc_first_leading_zero_ul
-stdc_first_leading_zero_ull
-stdc_first_leading_one_uc
-stdc_first_leading_one_us
-stdc_first_leading_one_ui
-stdc_first_leading_one_ul
-stdc_first_leading_one_ull
-stdc_first_trailing_zero_uc
-stdc_first_trailing_zero_us
-stdc_first_trailing_zero_ui
-stdc_first_trailing_zero_ul
-stdc_first_trailing_zero_ull
-stdc_first_trailing_one_uc
-stdc_first_trailing_one_us
-stdc_first_trailing_one_ui
-stdc_first_trailing_one_ul
-stdc_first_trailing_one_ull
+stdc_trailing_zeros_uc |check|
+stdc_trailing_zeros_us |check|
+stdc_trailing_zeros_ui |check|
+stdc_trailing_zeros_ul |check|
+stdc_trailing_zeros_ull |check|
+stdc_trailing_ones_uc |check|
+stdc_trailing_ones_us |check|
+stdc_trailing_ones_ui |check|
+stdc_trailing_ones_ul |check|
+stdc_trailing_ones_ull |check|
+stdc_first_leading_zero_uc |check|
+stdc_first_leading_zero_us |check|
+stdc_first_leading_zero_ui |check|
+stdc_first_leading_zero_ul |check|
+stdc_first_leading_zero_ull |check|
+stdc_first_leading_one_uc |check|
+stdc_first_leading_one_us |check|
+stdc_first_leading_one_ui |check|
+stdc_first_leading_one_ul |check|
+stdc_first_leading_one_ull |check|
+stdc_first_trailing_zero_uc |check|
+stdc_first_trailing_zero_us |check|
+stdc_first_trailing_zero_ui |check|
+stdc_first_trailing_zero_ul |check|
+stdc_first_trailing_zero_ull |check|
+stdc_first_trailing_one_uc |check|
+stdc_first_trailing_one_us |check|
+stdc_first_trailing_one_ui |check|
+stdc_first_trailing_one_ul |check|
+stdc_first_trailing_one_ull |check|
stdc_count_zeros_uc
stdc_count_zeros_us
stdc_count_zeros_ui
@@ -116,12 +116,12 @@ __STDC_ENDIAN_BIG__
__STDC_ENDIAN_NATIVE__
stdc_leading_zeros |check|
stdc_leading_ones |check|
-stdc_trailing_zeros
-stdc_trailing_ones
-stdc_first_leading_zero
-stdc_first_leading_one
-stdc_first_trailing_zero
-stdc_first_trailing_one
+stdc_trailing_zeros |check|
+stdc_trailing_ones |check|
+stdc_first_leading_zero |check|
+stdc_first_leading_one |check|
+stdc_first_trailing_zero |check|
+stdc_first_trailing_one |check|
stdc_count_zeros
stdc_count_ones
stdc_has_single_bit
diff --git a/libc/include/CMakeLists.txt b/libc/include/CMakeLists.txt
index 3324104..5882d03 100644
--- a/libc/include/CMakeLists.txt
+++ b/libc/include/CMakeLists.txt
@@ -104,6 +104,14 @@ add_gen_header(
.llvm-libc-types.float128
)
+add_gen_header(
+ stdfix
+ DEF_FILE stdfix.h.def
+ GEN_HDR stdfix.h
+ DEPENDS
+ .llvm-libc-macros.stdfix_macros
+)
+
# TODO: This should be conditional on POSIX networking being included.
file(MAKE_DIRECTORY ${LIBC_INCLUDE_DIR}/arpa)
diff --git a/libc/include/llvm-libc-macros/CMakeLists.txt b/libc/include/llvm-libc-macros/CMakeLists.txt
index 562769a..225885d 100644
--- a/libc/include/llvm-libc-macros/CMakeLists.txt
+++ b/libc/include/llvm-libc-macros/CMakeLists.txt
@@ -227,3 +227,9 @@ add_macro_header(
HDR
inttypes-macros.h
)
+
+add_macro_header(
+ stdfix_macros
+ HDR
+ stdfix-macros.h
+)
diff --git a/libc/include/llvm-libc-macros/stdbit-macros.h b/libc/include/llvm-libc-macros/stdbit-macros.h
index 693a45e..0c97da9 100644
--- a/libc/include/llvm-libc-macros/stdbit-macros.h
+++ b/libc/include/llvm-libc-macros/stdbit-macros.h
@@ -86,6 +86,51 @@ inline unsigned stdc_first_leading_zero(unsigned long x) {
inline unsigned stdc_first_leading_zero(unsigned long long x) {
return stdc_first_leading_zero_ull(x);
}
+inline unsigned stdc_first_leading_one(unsigned char x) {
+ return stdc_first_leading_one_uc(x);
+}
+inline unsigned stdc_first_leading_one(unsigned short x) {
+ return stdc_first_leading_one_us(x);
+}
+inline unsigned stdc_first_leading_one(unsigned x) {
+ return stdc_first_leading_one_ui(x);
+}
+inline unsigned stdc_first_leading_one(unsigned long x) {
+ return stdc_first_leading_one_ul(x);
+}
+inline unsigned stdc_first_leading_one(unsigned long long x) {
+ return stdc_first_leading_one_ull(x);
+}
+inline unsigned stdc_first_trailing_zero(unsigned char x) {
+ return stdc_first_trailing_zero_uc(x);
+}
+inline unsigned stdc_first_trailing_zero(unsigned short x) {
+ return stdc_first_trailing_zero_us(x);
+}
+inline unsigned stdc_first_trailing_zero(unsigned x) {
+ return stdc_first_trailing_zero_ui(x);
+}
+inline unsigned stdc_first_trailing_zero(unsigned long x) {
+ return stdc_first_trailing_zero_ul(x);
+}
+inline unsigned stdc_first_trailing_zero(unsigned long long x) {
+ return stdc_first_trailing_zero_ull(x);
+}
+inline unsigned stdc_first_trailing_one(unsigned char x) {
+ return stdc_first_trailing_one_uc(x);
+}
+inline unsigned stdc_first_trailing_one(unsigned short x) {
+ return stdc_first_trailing_one_us(x);
+}
+inline unsigned stdc_first_trailing_one(unsigned x) {
+ return stdc_first_trailing_one_ui(x);
+}
+inline unsigned stdc_first_trailing_one(unsigned long x) {
+ return stdc_first_trailing_one_ul(x);
+}
+inline unsigned stdc_first_trailing_one(unsigned long long x) {
+ return stdc_first_trailing_one_ull(x);
+}
#else
#define stdc_leading_zeros(x) \
_Generic((x), \
@@ -122,6 +167,27 @@ inline unsigned stdc_first_leading_zero(unsigned long long x) {
unsigned: stdc_first_leading_zero_ui, \
unsigned long: stdc_first_leading_zero_ul, \
unsigned long long: stdc_first_leading_zero_ull)(x)
+#define stdc_first_leading_one(x) \
+ _Generic((x), \
+ unsigned char: stdc_first_leading_one_uc, \
+ unsigned short: stdc_first_leading_one_us, \
+ unsigned: stdc_first_leading_one_ui, \
+ unsigned long: stdc_first_leading_one_ul, \
+ unsigned long long: stdc_first_leading_one_ull)(x)
+#define stdc_first_trailing_zero(x) \
+ _Generic((x), \
+ unsigned char: stdc_first_trailing_zero_uc, \
+ unsigned short: stdc_first_trailing_zero_us, \
+ unsigned: stdc_first_trailing_zero_ui, \
+ unsigned long: stdc_first_trailing_zero_ul, \
+ unsigned long long: stdc_first_trailing_zero_ull)(x)
+#define stdc_first_trailing_one(x) \
+ _Generic((x), \
+ unsigned char: stdc_first_trailing_one_uc, \
+ unsigned short: stdc_first_trailing_one_us, \
+ unsigned: stdc_first_trailing_one_ui, \
+ unsigned long: stdc_first_trailing_one_ul, \
+ unsigned long long: stdc_first_trailing_one_ull)(x)
#endif // __cplusplus
#endif // __LLVM_LIBC_MACROS_STDBIT_MACROS_H
diff --git a/libc/include/llvm-libc-macros/stdfix-macros.h b/libc/include/llvm-libc-macros/stdfix-macros.h
new file mode 100644
index 0000000..9c83dbc
--- /dev/null
+++ b/libc/include/llvm-libc-macros/stdfix-macros.h
@@ -0,0 +1,330 @@
+//===-- Definitions from stdfix.h -----------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef __LLVM_LIBC_MACROS_STDFIX_MACROS_H
+#define __LLVM_LIBC_MACROS_STDFIX_MACROS_H
+
+#ifdef __clang__
+#if (!defined(__cplusplus) || (__clang_major__ >= 18))
+// _Fract and _Accum types are available
+#define LIBC_COMPILER_HAS_FIXED_POINT
+#endif // __cplusplus
+#endif // __clang__
+
+#ifdef LIBC_COMPILER_HAS_FIXED_POINT
+
+#define fract _Fract
+#define accum _Accum
+#define sat _Sat
+
+// Default values: from ISO/IEC TR 18037:2008 standard - Annex A.3 - Typical
+// desktop processor.
+
+#ifdef __SFRACT_FBIT__
+#define SFRACT_FBIT __SFRACT_FBIT__
+#else
+#define SFRACT_FBIT 7
+#endif // SFRACT_FBIT
+
+#ifdef __SFRACT_MIN__
+#define SFRACT_MIN __SFRACT_MIN__
+#else
+#define SFRACT_MIN (-0.5HR - 0.5HR)
+#endif // SFRACT_MIN
+
+#ifdef __SFRACT_MAX__
+#define SFRACT_MAX __SFRACT_MAX__
+#else
+#define SFRACT_MAX 0x1.FCp-1HR
+#endif // SFRACT_MAX
+
+#ifdef __SFRACT_EPSILON__
+#define SFRACT_EPSILON __SFRACT_EPSILON__
+#else
+#define SFRACT_EPSILON 0x1.0p-7HR
+#endif // SFRACT_EPSILON
+
+#ifdef __USFRACT_FBIT__
+#define USFRACT_FBIT __USFRACT_FBIT__
+#else
+#define USFRACT_FBIT 8
+#endif // USFRACT_FBIT
+
+#define USFRACT_MIN 0.0UHR
+
+#ifdef __USFRACT_MAX__
+#define USFRACT_MAX __USFRACT_MAX__
+#else
+#define USFRACT_MAX 0x1.FEp-1UHR
+#endif // USFRACT_MAX
+
+#ifdef __USFRACT_EPSILON__
+#define USFRACT_EPSILON __USFRACT_EPSILON__
+#else
+#define USFRACT_EPSILON 0x1.0p-8UHR
+#endif // USFRACT_EPSILON
+
+#ifdef __FRACT_FBIT__
+#define FRACT_FBIT __FRACT_FBIT__
+#else
+#define FRACT_FBIT 15
+#endif // FRACT_FBIT
+
+#ifdef __FRACT_MIN__
+#define FRACT_MIN __FRACT_MIN__
+#else
+#define FRACT_MIN (-0.5R - 0.5R)
+#endif // FRACT_MIN
+
+#ifdef __FRACT_MAX__
+#define FRACT_MAX __FRACT_MAX__
+#else
+#define FRACT_MAX 0x1.FFFCp-1R
+#endif // FRACT_MAX
+
+#ifdef __FRACT_EPSILON__
+#define FRACT_EPSILON __FRACT_EPSILON__
+#else
+#define FRACT_EPSILON 0x1.0p-15R
+#endif // FRACT_EPSILON
+
+#ifdef __UFRACT_FBIT__
+#define UFRACT_FBIT __UFRACT_FBIT__
+#else
+#define UFRACT_FBIT 16
+#endif // UFRACT_FBIT
+
+#define UFRACT_MIN 0.0UR
+
+#ifdef __UFRACT_MAX__
+#define UFRACT_MAX __UFRACT_MAX__
+#else
+#define UFRACT_MAX 0x1.FFFEp-1UR
+#endif // UFRACT_MAX
+
+#ifdef __UFRACT_EPSILON__
+#define UFRACT_EPSILON __UFRACT_EPSILON__
+#else
+#define UFRACT_EPSILON 0x1.0p-16UR
+#endif // UFRACT_EPSILON
+
+#ifdef __LFRACT_FBIT__
+#define LFRACT_FBIT __LFRACT_FBIT__
+#else
+#define LFRACT_FBIT 31
+#endif // LFRACT_FBIT
+
+#ifdef __LFRACT_MIN__
+#define LFRACT_MIN __LFRACT_MIN__
+#else
+#define LFRACT_MIN (-0.5LR - 0.5LR)
+#endif // LFRACT_MIN
+
+#ifdef __LFRACT_MAX__
+#define LFRACT_MAX __LFRACT_MAX__
+#else
+#define LFRACT_MAX 0x1.FFFFFFFCp-1LR
+#endif // LFRACT_MAX
+
+#ifdef __LFRACT_EPSILON__
+#define LFRACT_EPSILON __LFRACT_EPSILON__
+#else
+#define LFRACT_EPSILON 0x1.0p-31LR
+#endif // LFRACT_EPSILON
+
+#ifdef __ULFRACT_FBIT__
+#define ULFRACT_FBIT __ULFRACT_FBIT__
+#else
+#define ULFRACT_FBIT 32
+#endif // ULFRACT_FBIT
+
+#define ULFRACT_MIN 0.0ULR
+
+#ifdef __ULFRACT_MAX__
+#define ULFRACT_MAX __ULFRACT_MAX__
+#else
+#define ULFRACT_MAX 0x1.FFFFFFFEp-1ULR
+#endif // ULFRACT_MAX
+
+#ifdef __ULFRACT_EPSILON__
+#define ULFRACT_EPSILON __ULFRACT_EPSILON__
+#else
+#define ULFRACT_EPSILON 0x1.0p-32ULR
+#endif // ULFRACT_EPSILON
+
+#ifdef __SACCUM_FBIT__
+#define SACCUM_FBIT __SACCUM_FBIT__
+#else
+#define SACCUM_FBIT 7
+#endif // SACCUM_FBIT
+
+#ifdef __SACCUM_IBIT__
+#define SACCUM_IBIT __SACCUM_IBIT__
+#else
+#define SACCUM_IBIT 8
+#endif // SACCUM_IBIT
+
+#ifdef __SACCUM_MIN__
+#define SACCUM_MIN __SACCUM_MIN__
+#else
+#define SACCUM_MIN (-0x1.0p+7HK - 0x1.0p+7HK)
+#endif // SACCUM_MIN
+
+#ifdef __SACCUM_MAX__
+#define SACCUM_MAX __SACCUM_MAX__
+#else
+#define SACCUM_MAX 0x1.FFFCp+7HK
+#endif // SACCUM_MAX
+
+#ifdef __SACCUM_EPSILON__
+#define SACCUM_EPSILON __SACCUM_EPSILON__
+#else
+#define SACCUM_EPSILON 0x1.0p-7HK
+#endif // SACCUM_EPSILON
+
+#ifdef __USACCUM_FBIT__
+#define USACCUM_FBIT __USACCUM_FBIT__
+#else
+#define USACCUM_FBIT 8
+#endif // USACCUM_FBIT
+
+#ifdef __USACCUM_IBIT__
+#define USACCUM_IBIT __USACCUM_IBIT__
+#else
+#define USACCUM_IBIT 8
+#endif // USACCUM_IBIT
+
+#define USACCUM_MIN 0.0UHK
+
+#ifdef __USACCUM_MAX__
+#define USACCUM_MAX __USACCUM_MAX__
+#else
+#define USACCUM_MAX 0x1.FFFEp+7UHK
+#endif // USACCUM_MAX
+
+#ifdef __USACCUM_EPSILON__
+#define USACCUM_EPSILON __USACCUM_EPSILON__
+#else
+#define USACCUM_EPSILON 0x1.0p-8UHK
+#endif // USACCUM_EPSILON
+
+#ifdef __ACCUM_FBIT__
+#define ACCUM_FBIT __ACCUM_FBIT__
+#else
+#define ACCUM_FBIT 15
+#endif // ACCUM_FBIT
+
+#ifdef __ACCUM_IBIT__
+#define ACCUM_IBIT __ACCUM_IBIT__
+#else
+#define ACCUM_IBIT 16
+#endif // ACCUM_IBIT
+
+#ifdef __ACCUM_MIN__
+#define ACCUM_MIN __ACCUM_MIN__
+#else
+#define ACCUM_MIN (-0x1.0p+15K - 0x1.0p+15K)
+#endif // ACCUM_MIN
+
+#ifdef __ACCUM_MAX__
+#define ACCUM_MAX __ACCUM_MAX__
+#else
+#define ACCUM_MAX 0x1.FFFFFFFCp+15K
+#endif // ACCUM_MAX
+
+#ifdef __ACCUM_EPSILON__
+#define ACCUM_EPSILON __ACCUM_EPSILON__
+#else
+#define ACCUM_EPSILON 0x1.0p-15K
+#endif // ACCUM_EPSILON
+
+#ifdef __UACCUM_FBIT__
+#define UACCUM_FBIT __UACCUM_FBIT__
+#else
+#define UACCUM_FBIT 16
+#endif // UACCUM_FBIT
+
+#ifdef __UACCUM_IBIT__
+#define UACCUM_IBIT __UACCUM_IBIT__
+#else
+#define UACCUM_IBIT 16
+#endif // UACCUM_IBIT
+
+#define UACCUM_MIN 0.0UK
+
+#ifdef __UACCUM_MAX__
+#define UACCUM_MAX __UACCUM_MAX__
+#else
+#define UACCUM_MAX 0x1.FFFFFFFEp+15UK
+#endif // UACCUM_MAX
+
+#ifdef __UACCUM_EPSILON__
+#define UACCUM_EPSILON __UACCUM_EPSILON__
+#else
+#define UACCUM_EPSILON 0x1.0p-16UK
+#endif // UACCUM_EPSILON
+
+#ifdef __LACCUM_FBIT__
+#define LACCUM_FBIT __LACCUM_FBIT__
+#else
+#define LACCUM_FBIT 31
+#endif // LACCUM_FBIT
+
+#ifdef __LACCUM_IBIT__
+#define LACCUM_IBIT __LACCUM_IBIT__
+#else
+#define LACCUM_IBIT 32
+#endif // LACCUM_IBIT
+
+#ifdef __LACCUM_MIN__
+#define LACCUM_MIN __LACCUM_MIN__
+#else
+#define LACCUM_MIN (-0x1.0p+31LK - 0x1.0p+31LK)
+#endif // LACCUM_MIN
+
+#ifdef __LACCUM_MAX__
+#define LACCUM_MAX __LACCUM_MAX__
+#else
+#define LACCUM_MAX 0x1.FFFFFFFFFFFFFFFCp+31LK
+#endif // LACCUM_MAX
+
+#ifdef __LACCUM_EPSILON__
+#define LACCUM_EPSILON __LACCUM_EPSILON__
+#else
+#define LACCUM_EPSILON 0x1.0p-31LK
+#endif // LACCUM_EPSILON
+
+#ifdef __ULACCUM_FBIT__
+#define ULACCUM_FBIT __ULACCUM_FBIT__
+#else
+#define ULACCUM_FBIT 32
+#endif // ULACCUM_FBIT
+
+#ifdef __ULACCUM_IBIT__
+#define ULACCUM_IBIT __ULACCUM_IBIT__
+#else
+#define ULACCUM_IBIT 32
+#endif // ULACCUM_IBIT
+
+#define ULACCUM_MIN 0.0ULK
+
+#ifdef __ULACCUM_MAX__
+#define ULACCUM_MAX __ULACCUM_MAX__
+#else
+#define ULACCUM_MAX 0x1.FFFFFFFFFFFFFFFEp+31ULK
+#endif // ULACCUM_MAX
+
+#ifdef __ULACCUM_EPSILON__
+#define ULACCUM_EPSILON __ULACCUM_EPSILON__
+#else
+#define ULACCUM_EPSILON 0x1.0p-32ULK
+#endif // ULACCUM_EPSILON
+
+#endif // LIBC_COMPILER_HAS_FIXED_POINT
+
+#endif // __LLVM_LIBC_MACROS_STDFIX_MACROS_H
diff --git a/libc/include/stdfix.h.def b/libc/include/stdfix.h.def
new file mode 100644
index 0000000..368eeb3
--- /dev/null
+++ b/libc/include/stdfix.h.def
@@ -0,0 +1,21 @@
+//===-- C standard library header stdfix.h --------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIBC_STDFIX_H
+#define LLVM_LIBC_STDFIX_H
+
+#include <__llvm-libc-common.h>
+#include <llvm-libc-macros/stdfix-macros.h>
+
+// From ISO/IEC TR 18037:2008 standard:
+// https://www.iso.org/standard/51126.html
+// https://standards.iso.org/ittf/PubliclyAvailableStandards/c051126_ISO_IEC_TR_18037_2008.zip
+
+%%public_api()
+
+#endif // LLVM_LIBC_STDFIX_H
diff --git a/libc/spec/stdc.td b/libc/spec/stdc.td
index 1720a4a..79487cb 100644
--- a/libc/spec/stdc.td
+++ b/libc/spec/stdc.td
@@ -413,6 +413,7 @@ def StdC : StandardSpec<"stdc"> {
FunctionSpec<"ldexp", RetValSpec<DoubleType>, [ArgSpec<DoubleType>, ArgSpec<IntType>]>,
FunctionSpec<"ldexpf", RetValSpec<FloatType>, [ArgSpec<FloatType>, ArgSpec<IntType>]>,
FunctionSpec<"ldexpl", RetValSpec<LongDoubleType>, [ArgSpec<LongDoubleType>, ArgSpec<IntType>]>,
+ GuardedFunctionSpec<"ldexpf128", RetValSpec<Float128Type>, [ArgSpec<Float128Type>, ArgSpec<IntType>], "LIBC_COMPILER_HAS_FLOAT128">,
FunctionSpec<"log10", RetValSpec<DoubleType>, [ArgSpec<DoubleType>]>,
FunctionSpec<"log10f", RetValSpec<FloatType>, [ArgSpec<FloatType>]>,
@@ -781,7 +782,10 @@ def StdC : StandardSpec<"stdc"> {
Macro<"stdc_leading_ones">,
Macro<"stdc_trailing_zeros">,
Macro<"stdc_trailing_ones">,
- Macro<"stdc_first_leading_zero">
+ Macro<"stdc_first_leading_zero">,
+ Macro<"stdc_first_leading_one">,
+ Macro<"stdc_first_trailing_zero">,
+ Macro<"stdc_first_trailing_one">
], // Macros
[], // Types
[], // Enumerations
@@ -810,7 +814,17 @@ def StdC : StandardSpec<"stdc"> {
FunctionSpec<"stdc_first_leading_zero_us", RetValSpec<UnsignedIntType>, [ArgSpec<UnsignedShortType>]>,
FunctionSpec<"stdc_first_leading_zero_ui", RetValSpec<UnsignedIntType>, [ArgSpec<UnsignedIntType>]>,
FunctionSpec<"stdc_first_leading_zero_ul", RetValSpec<UnsignedIntType>, [ArgSpec<UnsignedLongType>]>,
- FunctionSpec<"stdc_first_leading_zero_ull", RetValSpec<UnsignedIntType>, [ArgSpec<UnsignedLongLongType>]>
+ FunctionSpec<"stdc_first_leading_zero_ull", RetValSpec<UnsignedIntType>, [ArgSpec<UnsignedLongLongType>]>,
+ FunctionSpec<"stdc_first_leading_one_uc", RetValSpec<UnsignedIntType>, [ArgSpec<UnsignedCharType>]>,
+ FunctionSpec<"stdc_first_leading_one_us", RetValSpec<UnsignedIntType>, [ArgSpec<UnsignedShortType>]>,
+ FunctionSpec<"stdc_first_leading_one_ui", RetValSpec<UnsignedIntType>, [ArgSpec<UnsignedIntType>]>,
+ FunctionSpec<"stdc_first_leading_one_ul", RetValSpec<UnsignedIntType>, [ArgSpec<UnsignedLongType>]>,
+ FunctionSpec<"stdc_first_leading_one_ull", RetValSpec<UnsignedIntType>, [ArgSpec<UnsignedLongLongType>]>,
+ FunctionSpec<"stdc_first_trailing_one_uc", RetValSpec<UnsignedIntType>, [ArgSpec<UnsignedCharType>]>,
+ FunctionSpec<"stdc_first_trailing_one_us", RetValSpec<UnsignedIntType>, [ArgSpec<UnsignedShortType>]>,
+ FunctionSpec<"stdc_first_trailing_one_ui", RetValSpec<UnsignedIntType>, [ArgSpec<UnsignedIntType>]>,
+ FunctionSpec<"stdc_first_trailing_one_ul", RetValSpec<UnsignedIntType>, [ArgSpec<UnsignedLongType>]>,
+ FunctionSpec<"stdc_first_trailing_one_ull", RetValSpec<UnsignedIntType>, [ArgSpec<UnsignedLongLongType>]>
] // Functions
>;
diff --git a/libc/spec/stdc_ext.td b/libc/spec/stdc_ext.td
new file mode 100644
index 0000000..4a5b74f
--- /dev/null
+++ b/libc/spec/stdc_ext.td
@@ -0,0 +1,16 @@
+def StdcExt : StandardSpec<"stdc_ext"> {
+ // From ISO/IEC TR 18037:2008 standard:
+ // https://standards.iso.org/ittf/PubliclyAvailableStandards/c051126_ISO_IEC_TR_18037_2008.zip
+ HeaderSpec StdFix = HeaderSpec<
+ "stdfix.h",
+ [], // macros
+ [], // types
+ [], // enums
+ [ // functions
+ ]
+ >;
+
+ let Headers = [
+ StdFix,
+ ];
+}
diff --git a/libc/src/__support/CMakeLists.txt b/libc/src/__support/CMakeLists.txt
index bd814a0..1a4b3e9 100644
--- a/libc/src/__support/CMakeLists.txt
+++ b/libc/src/__support/CMakeLists.txt
@@ -147,6 +147,15 @@ add_header_library(
)
add_header_library(
+ integer_literals
+ HDRS
+ integer_literals.h
+ DEPENDS
+ .uint128
+ libc.src.__support.CPP.limits
+)
+
+add_header_library(
integer_operations
HDRS
integer_operations.h
@@ -271,3 +280,5 @@ add_subdirectory(threads)
add_subdirectory(File)
add_subdirectory(HashTable)
+
+add_subdirectory(fixed_point)
diff --git a/libc/src/__support/CPP/CMakeLists.txt b/libc/src/__support/CPP/CMakeLists.txt
index f10bb93..d7474127 100644
--- a/libc/src/__support/CPP/CMakeLists.txt
+++ b/libc/src/__support/CPP/CMakeLists.txt
@@ -122,6 +122,7 @@ add_header_library(
type_traits/is_convertible.h
type_traits/is_destructible.h
type_traits/is_enum.h
+ type_traits/is_fixed_point.h
type_traits/is_floating_point.h
type_traits/is_function.h
type_traits/is_integral.h
@@ -155,6 +156,7 @@ add_header_library(
libc.src.__support.macros.attributes
libc.src.__support.macros.config
libc.src.__support.macros.properties.float
+ libc.include.llvm-libc-macros.stdfix_macros
)
add_header_library(
diff --git a/libc/src/__support/CPP/bit.h b/libc/src/__support/CPP/bit.h
index 392fbe2..f5e50262 100644
--- a/libc/src/__support/CPP/bit.h
+++ b/libc/src/__support/CPP/bit.h
@@ -93,15 +93,9 @@ template <typename T, typename = cpp::enable_if_t<cpp::is_unsigned_v<T>>>
#if LIBC_HAS_BUILTIN(__builtin_ctzs)
ADD_SPECIALIZATION(countr_zero, unsigned short, __builtin_ctzs)
#endif
-#if LIBC_HAS_BUILTIN(__builtin_ctz)
ADD_SPECIALIZATION(countr_zero, unsigned int, __builtin_ctz)
-#endif
-#if LIBC_HAS_BUILTIN(__builtin_ctzl)
ADD_SPECIALIZATION(countr_zero, unsigned long, __builtin_ctzl)
-#endif
-#if LIBC_HAS_BUILTIN(__builtin_ctzll)
ADD_SPECIALIZATION(countr_zero, unsigned long long, __builtin_ctzll)
-#endif
/// Count number of 0's from the most significant bit to the least
/// stopping at the first 1.
@@ -128,15 +122,9 @@ template <typename T, typename = cpp::enable_if_t<cpp::is_unsigned_v<T>>>
#if LIBC_HAS_BUILTIN(__builtin_clzs)
ADD_SPECIALIZATION(countl_zero, unsigned short, __builtin_clzs)
#endif
-#if LIBC_HAS_BUILTIN(__builtin_clz)
ADD_SPECIALIZATION(countl_zero, unsigned int, __builtin_clz)
-#endif
-#if LIBC_HAS_BUILTIN(__builtin_clzl)
ADD_SPECIALIZATION(countl_zero, unsigned long, __builtin_clzl)
-#endif
-#if LIBC_HAS_BUILTIN(__builtin_clzll)
ADD_SPECIALIZATION(countl_zero, unsigned long long, __builtin_clzll)
-#endif
#undef ADD_SPECIALIZATION
@@ -238,35 +226,27 @@ LIBC_INLINE constexpr To bit_or_static_cast(const From &from) {
}
}
-#define SPECIALIZE_FLZ(NAME, TYPE, BUILTIN) \
- template <> [[nodiscard]] LIBC_INLINE constexpr int NAME<TYPE>(TYPE value) { \
- static_assert(cpp::is_unsigned_v<TYPE>); \
- return value == cpp::numeric_limits<TYPE>::max() \
- ? 0 \
- : BUILTIN(static_cast<TYPE>(~value)) + 1; \
- }
-
template <typename T, typename = cpp::enable_if_t<cpp::is_unsigned_v<T>>>
[[nodiscard]] LIBC_INLINE constexpr int first_leading_zero(T value) {
+ return value == cpp::numeric_limits<T>::max() ? 0 : countl_one(value) + 1;
+}
+
+template <typename T, typename = cpp::enable_if_t<cpp::is_unsigned_v<T>>>
+[[nodiscard]] LIBC_INLINE constexpr int first_leading_one(T value) {
+ return first_leading_zero(static_cast<T>(~value));
+}
+
+template <typename T, typename = cpp::enable_if_t<cpp::is_unsigned_v<T>>>
+[[nodiscard]] LIBC_INLINE constexpr int first_trailing_zero(T value) {
return value == cpp::numeric_limits<T>::max()
? 0
- : countl_zero(static_cast<T>(~value)) + 1;
+ : countr_zero(static_cast<T>(~value)) + 1;
}
-#if LIBC_HAS_BUILTIN(__builtin_clzs)
-SPECIALIZE_FLZ(first_leading_zero, unsigned short, __builtin_clzs)
-#endif
-#if LIBC_HAS_BUILTIN(__builtin_clz)
-SPECIALIZE_FLZ(first_leading_zero, unsigned int, __builtin_clz)
-#endif
-#if LIBC_HAS_BUILTIN(__builtin_clzl)
-SPECIALIZE_FLZ(first_leading_zero, unsigned long, __builtin_clzl)
-#endif
-#if LIBC_HAS_BUILTIN(__builtin_clzll)
-SPECIALIZE_FLZ(first_leading_zero, unsigned long long, __builtin_clzll)
-#endif
-
-#undef SPECIALIZE_FLZ
+template <typename T, typename = cpp::enable_if_t<cpp::is_unsigned_v<T>>>
+[[nodiscard]] LIBC_INLINE constexpr int first_trailing_one(T value) {
+ return value == cpp::numeric_limits<T>::max() ? 0 : countr_zero(value) + 1;
+}
} // namespace LIBC_NAMESPACE::cpp
diff --git a/libc/src/__support/CPP/type_traits.h b/libc/src/__support/CPP/type_traits.h
index 1eb2f34..697cf79 100644
--- a/libc/src/__support/CPP/type_traits.h
+++ b/libc/src/__support/CPP/type_traits.h
@@ -28,6 +28,7 @@
#include "src/__support/CPP/type_traits/is_convertible.h"
#include "src/__support/CPP/type_traits/is_destructible.h"
#include "src/__support/CPP/type_traits/is_enum.h"
+#include "src/__support/CPP/type_traits/is_fixed_point.h"
#include "src/__support/CPP/type_traits/is_floating_point.h"
#include "src/__support/CPP/type_traits/is_function.h"
#include "src/__support/CPP/type_traits/is_integral.h"
diff --git a/libc/src/__support/CPP/type_traits/is_fixed_point.h b/libc/src/__support/CPP/type_traits/is_fixed_point.h
new file mode 100644
index 0000000..317ba39
--- /dev/null
+++ b/libc/src/__support/CPP/type_traits/is_fixed_point.h
@@ -0,0 +1,46 @@
+//===-- is_fixed_point type_traits ------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+#ifndef LLVM_LIBC_SRC___SUPPORT_CPP_TYPE_TRAITS_IS_FIXED_POINT_H
+#define LLVM_LIBC_SRC___SUPPORT_CPP_TYPE_TRAITS_IS_FIXED_POINT_H
+
+#include "src/__support/CPP/type_traits/is_same.h"
+#include "src/__support/CPP/type_traits/remove_cv.h"
+#include "src/__support/macros/attributes.h"
+
+#include "include/llvm-libc-macros/stdfix-macros.h"
+
+namespace LIBC_NAMESPACE::cpp {
+
+// is_fixed_point
+#ifdef LIBC_COMPILER_HAS_FIXED_POINT
+template <typename T> struct is_fixed_point {
+private:
+ template <typename Head, typename... Args>
+ LIBC_INLINE static constexpr bool __is_unqualified_any_of() {
+ return (... || is_same_v<remove_cv_t<Head>, Args>);
+ }
+
+public:
+ LIBC_INLINE_VAR static constexpr bool value = __is_unqualified_any_of<
+ T, short fract, fract, long fract, unsigned short fract, unsigned fract,
+ unsigned long fract, short accum, accum, long accum, unsigned short accum,
+ unsigned accum, unsigned long accum, short sat fract, sat fract,
+ long sat fract, unsigned short sat fract, unsigned sat fract,
+ unsigned long sat fract, short sat accum, sat accum, long sat accum,
+ unsigned short sat accum, unsigned sat accum, unsigned long sat accum>();
+};
+#else
+template <typename T> struct is_fixed_point : false_type {};
+#endif // LIBC_COMPILER_HAS_FIXED_POINT
+
+template <typename T>
+LIBC_INLINE_VAR constexpr bool is_fixed_point_v = is_fixed_point<T>::value;
+
+} // namespace LIBC_NAMESPACE::cpp
+
+#endif // LLVM_LIBC_SRC___SUPPORT_CPP_TYPE_TRAITS_IS_INTEGRAL_H
diff --git a/libc/src/__support/FPUtil/BasicOperations.h b/libc/src/__support/FPUtil/BasicOperations.h
index ccc61a8..a19d6d0 100644
--- a/libc/src/__support/FPUtil/BasicOperations.h
+++ b/libc/src/__support/FPUtil/BasicOperations.h
@@ -19,9 +19,7 @@ namespace fputil {
template <typename T, cpp::enable_if_t<cpp::is_floating_point_v<T>, int> = 0>
LIBC_INLINE T abs(T x) {
- FPBits<T> bits(x);
- bits.set_sign(Sign::POS);
- return bits.get_val();
+ return FPBits<T>(x).abs().get_val();
}
template <typename T, cpp::enable_if_t<cpp::is_floating_point_v<T>, int> = 0>
diff --git a/libc/src/__support/FPUtil/CMakeLists.txt b/libc/src/__support/FPUtil/CMakeLists.txt
index 3307d33..0c932e8 100644
--- a/libc/src/__support/FPUtil/CMakeLists.txt
+++ b/libc/src/__support/FPUtil/CMakeLists.txt
@@ -76,24 +76,6 @@ add_header_library(
)
add_header_library(
- manipulation_functions
- HDRS
- ManipulationFunctions.h
- DEPENDS
- .fenv_impl
- .fp_bits
- .nearest_integer_operations
- .normal_float
- libc.src.__support.CPP.bit
- libc.src.__support.CPP.limits
- libc.src.__support.CPP.type_traits
- libc.src.__support.common
- libc.src.__support.macros.optimization
- libc.include.math
- libc.src.errno.errno
-)
-
-add_header_library(
basic_operations
HDRS
BasicOperations.h
@@ -221,4 +203,23 @@ add_header_library(
libc.src.__support.macros.optimization
)
+add_header_library(
+ manipulation_functions
+ HDRS
+ ManipulationFunctions.h
+ DEPENDS
+ .fenv_impl
+ .fp_bits
+ .dyadic_float
+ .nearest_integer_operations
+ .normal_float
+ libc.src.__support.CPP.bit
+ libc.src.__support.CPP.limits
+ libc.src.__support.CPP.type_traits
+ libc.src.__support.common
+ libc.src.__support.macros.optimization
+ libc.include.math
+ libc.src.errno.errno
+)
+
add_subdirectory(generic)
diff --git a/libc/src/__support/FPUtil/FPBits.h b/libc/src/__support/FPUtil/FPBits.h
index 6665c90..b3179a2 100644
--- a/libc/src/__support/FPUtil/FPBits.h
+++ b/libc/src/__support/FPUtil/FPBits.h
@@ -633,13 +633,13 @@ protected:
using typename UP::Significand;
using UP::FP_MASK;
- using UP::SIG_LEN;
public:
// Constants.
using UP::EXP_BIAS;
using UP::EXP_MASK;
using UP::FRACTION_MASK;
+ using UP::SIG_LEN;
using UP::SIGN_MASK;
LIBC_INLINE_VAR static constexpr int MAX_BIASED_EXPONENT =
(1 << UP::EXP_LEN) - 1;
@@ -732,11 +732,14 @@ public:
// Unsafe function to create a floating point representation.
// It simply packs the sign, biased exponent and mantissa values without
// checking bound nor normalization.
+ //
+ // WARNING: For X86 Extended Precision, implicit bit needs to be set correctly
+ // in the 'mantissa' by the caller. This function will not check for its
+ // validity.
+ //
// FIXME: Use an uint32_t for 'biased_exp'.
LIBC_INLINE static constexpr RetT
create_value(Sign sign, StorageType biased_exp, StorageType mantissa) {
- static_assert(fp_type != FPType::X86_Binary80,
- "This function is not tested for X86 Extended Precision");
return RetT(encode(sign, BiasedExponent(static_cast<uint32_t>(biased_exp)),
Significand(mantissa)));
}
diff --git a/libc/src/__support/FPUtil/ManipulationFunctions.h b/libc/src/__support/FPUtil/ManipulationFunctions.h
index 9becbaa..9e760a2 100644
--- a/libc/src/__support/FPUtil/ManipulationFunctions.h
+++ b/libc/src/__support/FPUtil/ManipulationFunctions.h
@@ -12,6 +12,8 @@
#include "FPBits.h"
#include "NearestIntegerOperations.h"
#include "NormalFloat.h"
+#include "dyadic_float.h"
+#include "rounding_mode.h"
#include "src/__support/CPP/bit.h"
#include "src/__support/CPP/limits.h" // INT_MAX, INT_MIN
@@ -117,10 +119,8 @@ LIBC_INLINE T logb(T x) {
template <typename T, cpp::enable_if_t<cpp::is_floating_point_v<T>, int> = 0>
LIBC_INLINE T ldexp(T x, int exp) {
- if (LIBC_UNLIKELY(exp == 0))
- return x;
FPBits<T> bits(x);
- if (LIBC_UNLIKELY(bits.is_zero() || bits.is_inf_or_nan()))
+ if (LIBC_UNLIKELY((exp == 0) || bits.is_zero() || bits.is_inf_or_nan()))
return x;
// NormalFloat uses int32_t to store the true exponent value. We should ensure
@@ -129,18 +129,40 @@ LIBC_INLINE T ldexp(T x, int exp) {
// early. Because the result of the ldexp operation can be a subnormal number,
// we need to accommodate the (mantissaWidth + 1) worth of shift in
// calculating the limit.
- int exp_limit = FPBits<T>::MAX_BIASED_EXPONENT + FPBits<T>::FRACTION_LEN + 1;
- if (exp > exp_limit)
- return FPBits<T>::inf(bits.sign()).get_val();
+ constexpr int EXP_LIMIT =
+ FPBits<T>::MAX_BIASED_EXPONENT + FPBits<T>::FRACTION_LEN + 1;
+ if (LIBC_UNLIKELY(exp > EXP_LIMIT)) {
+ int rounding_mode = quick_get_round();
+ Sign sign = bits.sign();
+
+ if ((sign == Sign::POS && rounding_mode == FE_DOWNWARD) ||
+ (sign == Sign::NEG && rounding_mode == FE_UPWARD) ||
+ (rounding_mode == FE_TOWARDZERO))
+ return FPBits<T>::max_normal(sign).get_val();
+
+ set_errno_if_required(ERANGE);
+ raise_except_if_required(FE_OVERFLOW);
+ return FPBits<T>::inf(sign).get_val();
+ }
// Similarly on the negative side we return zero early if |exp| is too small.
- if (exp < -exp_limit)
- return FPBits<T>::zero(bits.sign()).get_val();
+ if (LIBC_UNLIKELY(exp < -EXP_LIMIT)) {
+ int rounding_mode = quick_get_round();
+ Sign sign = bits.sign();
+
+ if ((sign == Sign::POS && rounding_mode == FE_UPWARD) ||
+ (sign == Sign::NEG && rounding_mode == FE_DOWNWARD))
+ return FPBits<T>::min_subnormal(sign).get_val();
+
+ set_errno_if_required(ERANGE);
+ raise_except_if_required(FE_UNDERFLOW);
+ return FPBits<T>::zero(sign).get_val();
+ }
// For all other values, NormalFloat to T conversion handles it the right way.
- NormalFloat<T> normal(bits);
+ DyadicFloat<FPBits<T>::STORAGE_LEN> normal(bits.get_val());
normal.exponent += exp;
- return normal;
+ return static_cast<T>(normal);
}
template <typename T, typename U,
diff --git a/libc/src/__support/FPUtil/dyadic_float.h b/libc/src/__support/FPUtil/dyadic_float.h
index 888d7ff..382904c 100644
--- a/libc/src/__support/FPUtil/dyadic_float.h
+++ b/libc/src/__support/FPUtil/dyadic_float.h
@@ -44,7 +44,7 @@ template <size_t Bits> struct DyadicFloat {
static_assert(FPBits<T>::FRACTION_LEN < Bits);
FPBits<T> x_bits(x);
sign = x_bits.sign();
- exponent = x_bits.get_exponent() - FPBits<T>::FRACTION_LEN;
+ exponent = x_bits.get_explicit_exponent() - FPBits<T>::FRACTION_LEN;
mantissa = MantissaType(x_bits.get_explicit_mantissa());
normalize();
}
@@ -79,25 +79,32 @@ template <size_t Bits> struct DyadicFloat {
return *this;
}
- // Assume that it is already normalized and output is not underflow.
+ // Assume that it is already normalized.
// Output is rounded correctly with respect to the current rounding mode.
- // TODO(lntue): Add support for underflow.
- // TODO(lntue): Test or add specialization for x86 long double.
template <typename T,
typename = cpp::enable_if_t<cpp::is_floating_point_v<T> &&
(FPBits<T>::FRACTION_LEN < Bits),
void>>
explicit operator T() const {
- // TODO(lntue): Do we need to treat signed zeros properly?
- if (mantissa.is_zero())
- return 0.0;
+ if (LIBC_UNLIKELY(mantissa.is_zero()))
+ return FPBits<T>::zero(sign).get_val();
// Assume that it is normalized, and output is also normal.
constexpr uint32_t PRECISION = FPBits<T>::FRACTION_LEN + 1;
using output_bits_t = typename FPBits<T>::StorageType;
+ constexpr output_bits_t IMPLICIT_MASK =
+ FPBits<T>::SIG_MASK - FPBits<T>::FRACTION_MASK;
int exp_hi = exponent + static_cast<int>((Bits - 1) + FPBits<T>::EXP_BIAS);
+ if (LIBC_UNLIKELY(exp_hi > 2 * FPBits<T>::EXP_BIAS)) {
+ // Results overflow.
+ T d_hi =
+ FPBits<T>::create_value(sign, 2 * FPBits<T>::EXP_BIAS, IMPLICIT_MASK)
+ .get_val();
+ return T(2) * d_hi;
+ }
+
bool denorm = false;
uint32_t shift = Bits - PRECISION;
if (LIBC_UNLIKELY(exp_hi <= 0)) {
@@ -112,49 +119,57 @@ template <size_t Bits> struct DyadicFloat {
MantissaType m_hi(mantissa >> shift);
- T d_hi = FPBits<T>::create_value(sign, exp_hi,
- static_cast<output_bits_t>(m_hi) &
- FPBits<T>::FRACTION_MASK)
+ T d_hi = FPBits<T>::create_value(
+ sign, exp_hi,
+ (static_cast<output_bits_t>(m_hi) & FPBits<T>::SIG_MASK) |
+ IMPLICIT_MASK)
.get_val();
- const MantissaType round_mask = MantissaType(1) << (shift - 1);
- const MantissaType sticky_mask = round_mask - MantissaType(1);
+ MantissaType round_mask = MantissaType(1) << (shift - 1);
+ MantissaType sticky_mask = round_mask - MantissaType(1);
bool round_bit = !(mantissa & round_mask).is_zero();
bool sticky_bit = !(mantissa & sticky_mask).is_zero();
int round_and_sticky = int(round_bit) * 2 + int(sticky_bit);
T d_lo;
+
if (LIBC_UNLIKELY(exp_lo <= 0)) {
// d_lo is denormal, but the output is normal.
int scale_up_exponent = 2 * PRECISION;
T scale_up_factor =
FPBits<T>::create_value(sign, FPBits<T>::EXP_BIAS + scale_up_exponent,
- output_bits_t(0))
+ IMPLICIT_MASK)
.get_val();
T scale_down_factor =
FPBits<T>::create_value(sign, FPBits<T>::EXP_BIAS - scale_up_exponent,
- output_bits_t(0))
+ IMPLICIT_MASK)
.get_val();
d_lo = FPBits<T>::create_value(sign, exp_lo + scale_up_exponent,
- output_bits_t(0))
+ IMPLICIT_MASK)
.get_val();
return multiply_add(d_lo, T(round_and_sticky), d_hi * scale_up_factor) *
scale_down_factor;
}
- d_lo = FPBits<T>::create_value(sign, exp_lo, output_bits_t(0)).get_val();
+ d_lo = FPBits<T>::create_value(sign, exp_lo, IMPLICIT_MASK).get_val();
// Still correct without FMA instructions if `d_lo` is not underflow.
T r = multiply_add(d_lo, T(round_and_sticky), d_hi);
if (LIBC_UNLIKELY(denorm)) {
- // Output is denormal, simply clear the exponent field.
- output_bits_t clear_exp = output_bits_t(exp_hi)
- << FPBits<T>::FRACTION_LEN;
+ // Exponent before rounding is in denormal range, simply clear the
+ // exponent field.
+ output_bits_t clear_exp = (output_bits_t(exp_hi) << FPBits<T>::SIG_LEN);
output_bits_t r_bits = FPBits<T>(r).uintval() - clear_exp;
+ if (!(r_bits & FPBits<T>::EXP_MASK)) {
+ // Output is denormal after rounding, clear the implicit bit for 80-bit
+ // long double.
+ r_bits -= IMPLICIT_MASK;
+ }
+
return FPBits<T>(r_bits).get_val();
}
@@ -216,7 +231,7 @@ constexpr DyadicFloat<Bits> quick_add(DyadicFloat<Bits> a,
if (result.mantissa.add(b.mantissa)) {
// Mantissa addition overflow.
result.shift_right(1);
- result.mantissa.val[DyadicFloat<Bits>::MantissaType::WORDCOUNT - 1] |=
+ result.mantissa.val[DyadicFloat<Bits>::MantissaType::WORD_COUNT - 1] |=
(uint64_t(1) << 63);
}
// Result is already normalized.
@@ -243,7 +258,7 @@ constexpr DyadicFloat<Bits> quick_add(DyadicFloat<Bits> a,
// result.mantissa = quick_mul_hi(a.mantissa + b.mantissa)
// ~ (full product a.mantissa * b.mantissa) >> Bits.
// The errors compared to the mathematical product is bounded by:
-// 2 * errors of quick_mul_hi = 2 * (UInt<Bits>::WORDCOUNT - 1) in ULPs.
+// 2 * errors of quick_mul_hi = 2 * (UInt<Bits>::WORD_COUNT - 1) in ULPs.
// Assume inputs are normalized (by constructors or other functions) so that we
// don't need to normalize the inputs again in this function. If the inputs are
// not normalized, the results might lose precision significantly.
@@ -258,7 +273,7 @@ constexpr DyadicFloat<Bits> quick_mul(DyadicFloat<Bits> a,
result.mantissa = a.mantissa.quick_mul_hi(b.mantissa);
// Check the leading bit directly, should be faster than using clz in
// normalize().
- if (result.mantissa.val[DyadicFloat<Bits>::MantissaType::WORDCOUNT - 1] >>
+ if (result.mantissa.val[DyadicFloat<Bits>::MantissaType::WORD_COUNT - 1] >>
63 ==
0)
result.shift_left(1);
diff --git a/libc/src/__support/GPU/amdgpu/utils.h b/libc/src/__support/GPU/amdgpu/utils.h
index 58bbe29..75f0b57 100644
--- a/libc/src/__support/GPU/amdgpu/utils.h
+++ b/libc/src/__support/GPU/amdgpu/utils.h
@@ -17,9 +17,6 @@
namespace LIBC_NAMESPACE {
namespace gpu {
-/// The number of threads that execute in lock-step in a lane.
-constexpr const uint64_t LANE_SIZE = __AMDGCN_WAVEFRONT_SIZE;
-
/// Type aliases to the address spaces used by the AMDGPU backend.
template <typename T> using Private = [[clang::opencl_private]] T;
template <typename T> using Constant = [[clang::opencl_constant]] T;
@@ -108,8 +105,11 @@ LIBC_INLINE uint64_t get_thread_id() {
get_num_threads_x() * get_num_threads_y() * get_thread_id_z();
}
-/// Returns the size of an AMD wavefront. Either 32 or 64 depending on hardware.
-LIBC_INLINE uint32_t get_lane_size() { return LANE_SIZE; }
+/// Returns the size of an AMD wavefront, either 32 or 64 depending on hardware
+/// and compilation options.
+LIBC_INLINE uint32_t get_lane_size() {
+ return __builtin_amdgcn_wavefrontsize();
+}
/// Returns the id of the thread inside of an AMD wavefront executing together.
[[clang::convergent]] LIBC_INLINE uint32_t get_lane_id() {
@@ -152,14 +152,7 @@ LIBC_INLINE uint64_t processor_clock() { return __builtin_readcyclecounter(); }
/// Returns a fixed-frequency timestamp. The actual frequency is dependent on
/// the card and can only be queried via the driver.
LIBC_INLINE uint64_t fixed_frequency_clock() {
- if constexpr (LIBC_HAS_BUILTIN(__builtin_amdgcn_s_sendmsg_rtnl))
- return __builtin_amdgcn_s_sendmsg_rtnl(0x83);
- else if constexpr (LIBC_HAS_BUILTIN(__builtin_amdgcn_s_memrealtime))
- return __builtin_amdgcn_s_memrealtime();
- else if constexpr (LIBC_HAS_BUILTIN(__builtin_amdgcn_s_memtime))
- return __builtin_amdgcn_s_memtime();
- else
- return 0;
+ return __builtin_readsteadycounter();
}
/// Terminates execution of the associated wavefront.
diff --git a/libc/src/__support/GPU/generic/utils.h b/libc/src/__support/GPU/generic/utils.h
index 00b59837..58db88d 100644
--- a/libc/src/__support/GPU/generic/utils.h
+++ b/libc/src/__support/GPU/generic/utils.h
@@ -16,8 +16,6 @@
namespace LIBC_NAMESPACE {
namespace gpu {
-constexpr const uint64_t LANE_SIZE = 1;
-
template <typename T> using Private = T;
template <typename T> using Constant = T;
template <typename T> using Shared = T;
@@ -55,7 +53,7 @@ LIBC_INLINE uint32_t get_thread_id_z() { return 0; }
LIBC_INLINE uint64_t get_thread_id() { return 0; }
-LIBC_INLINE uint32_t get_lane_size() { return LANE_SIZE; }
+LIBC_INLINE uint32_t get_lane_size() { return 1; }
LIBC_INLINE uint32_t get_lane_id() { return 0; }
diff --git a/libc/src/__support/GPU/nvptx/utils.h b/libc/src/__support/GPU/nvptx/utils.h
index e7e297a..a92c884 100644
--- a/libc/src/__support/GPU/nvptx/utils.h
+++ b/libc/src/__support/GPU/nvptx/utils.h
@@ -16,9 +16,6 @@
namespace LIBC_NAMESPACE {
namespace gpu {
-/// The number of threads that execute in lock-step in a warp.
-constexpr const uint64_t LANE_SIZE = 32;
-
/// Type aliases to the address spaces used by the NVPTX backend.
template <typename T> using Private = [[clang::opencl_private]] T;
template <typename T> using Constant = [[clang::opencl_constant]] T;
@@ -95,8 +92,8 @@ LIBC_INLINE uint64_t get_thread_id() {
get_num_threads_x() * get_num_threads_y() * get_thread_id_z();
}
-/// Returns the size of a CUDA warp.
-LIBC_INLINE uint32_t get_lane_size() { return LANE_SIZE; }
+/// Returns the size of a CUDA warp, always 32 on NVIDIA hardware.
+LIBC_INLINE uint32_t get_lane_size() { return 32; }
/// Returns the id of the thread inside of a CUDA warp executing together.
[[clang::convergent]] LIBC_INLINE uint32_t get_lane_id() {
@@ -113,21 +110,13 @@ LIBC_INLINE uint32_t get_lane_size() { return LANE_SIZE; }
uint32_t x) {
uint32_t mask = static_cast<uint32_t>(lane_mask);
uint32_t id = __builtin_ffs(mask) - 1;
-#if __CUDA_ARCH__ >= 600
return __nvvm_shfl_sync_idx_i32(mask, x, id, get_lane_size() - 1);
-#else
- return __nvvm_shfl_idx_i32(x, id, get_lane_size() - 1);
-#endif
}
/// Returns a bitmask of threads in the current lane for which \p x is true.
[[clang::convergent]] LIBC_INLINE uint64_t ballot(uint64_t lane_mask, bool x) {
uint32_t mask = static_cast<uint32_t>(lane_mask);
-#if __CUDA_ARCH__ >= 600
return __nvvm_vote_ballot_sync(mask, x);
-#else
- return mask & __nvvm_vote_ballot(x);
-#endif
}
/// Waits for all the threads in the block to converge and issues a fence.
[[clang::convergent]] LIBC_INLINE void sync_threads() { __syncthreads(); }
@@ -138,13 +127,11 @@ LIBC_INLINE uint32_t get_lane_size() { return LANE_SIZE; }
}
/// Returns the current value of the GPU's processor clock.
-LIBC_INLINE uint64_t processor_clock() {
- return __nvvm_read_ptx_sreg_clock64();
-}
+LIBC_INLINE uint64_t processor_clock() { return __builtin_readcyclecounter(); }
/// Returns a global fixed-frequency timer at nanosecond frequency.
LIBC_INLINE uint64_t fixed_frequency_clock() {
- return __nvvm_read_ptx_sreg_globaltimer();
+ return __builtin_readsteadycounter();
}
/// Terminates execution of the calling thread.
diff --git a/libc/src/__support/RPC/rpc.h b/libc/src/__support/RPC/rpc.h
index 7924d4c..5ed39ae 100644
--- a/libc/src/__support/RPC/rpc.h
+++ b/libc/src/__support/RPC/rpc.h
@@ -43,19 +43,6 @@ struct Header {
uint16_t opcode;
};
-/// The data payload for the associated packet. We provide enough space for each
-/// thread in the cooperating lane to have a buffer.
-template <uint32_t lane_size = gpu::LANE_SIZE> struct Payload {
- Buffer slot[lane_size];
-};
-
-/// A packet used to share data between the client and server across an entire
-/// lane. We use a lane as the minimum granularity for execution.
-template <uint32_t lane_size = gpu::LANE_SIZE> struct alignas(64) Packet {
- Header header;
- Payload<lane_size> payload;
-};
-
/// The maximum number of parallel ports that the RPC interface can support.
constexpr uint64_t MAX_PORT_COUNT = 4096;
@@ -71,7 +58,7 @@ constexpr uint64_t MAX_PORT_COUNT = 4096;
/// - The client will always start with a 'send' operation.
/// - The server will always start with a 'recv' operation.
/// - Every 'send' or 'recv' call is mirrored by the other process.
-template <bool Invert, typename Packet> struct Process {
+template <bool Invert> struct Process {
LIBC_INLINE Process() = default;
LIBC_INLINE Process(const Process &) = delete;
LIBC_INLINE Process &operator=(const Process &) = delete;
@@ -82,7 +69,8 @@ template <bool Invert, typename Packet> struct Process {
uint32_t port_count = 0;
cpp::Atomic<uint32_t> *inbox = nullptr;
cpp::Atomic<uint32_t> *outbox = nullptr;
- Packet *packet = nullptr;
+ Header *header = nullptr;
+ Buffer *packet = nullptr;
static constexpr uint64_t NUM_BITS_IN_WORD = sizeof(uint32_t) * 8;
cpp::Atomic<uint32_t> lock[MAX_PORT_COUNT / NUM_BITS_IN_WORD] = {0};
@@ -92,7 +80,9 @@ template <bool Invert, typename Packet> struct Process {
advance(buffer, inbox_offset(port_count)))),
outbox(reinterpret_cast<cpp::Atomic<uint32_t> *>(
advance(buffer, outbox_offset(port_count)))),
- packet(reinterpret_cast<Packet *>(
+ header(reinterpret_cast<Header *>(
+ advance(buffer, header_offset(port_count)))),
+ packet(reinterpret_cast<Buffer *>(
advance(buffer, buffer_offset(port_count)))) {}
/// Allocate a memory buffer sufficient to store the following equivalent
@@ -101,10 +91,12 @@ template <bool Invert, typename Packet> struct Process {
/// struct Equivalent {
/// Atomic<uint32_t> primary[port_count];
/// Atomic<uint32_t> secondary[port_count];
- /// Packet buffer[port_count];
+ /// Header header[port_count];
+ /// Buffer packet[port_count][lane_size];
/// };
- LIBC_INLINE static constexpr uint64_t allocation_size(uint32_t port_count) {
- return buffer_offset(port_count) + buffer_bytes(port_count);
+ LIBC_INLINE static constexpr uint64_t allocation_size(uint32_t port_count,
+ uint32_t lane_size) {
+ return buffer_offset(port_count) + buffer_bytes(port_count, lane_size);
}
/// Retrieve the inbox state from memory shared between processes.
@@ -144,6 +136,13 @@ template <bool Invert, typename Packet> struct Process {
atomic_thread_fence(cpp::MemoryOrder::ACQUIRE);
}
+ /// The packet is a linearly allocated array of buffers used to communicate
+ /// with the other process. This function returns the appropriate slot in this
+ /// array such that the process can operate on an entire warp or wavefront.
+ LIBC_INLINE Buffer *get_packet(uint32_t index, uint32_t lane_size) {
+ return &packet[index * lane_size];
+ }
+
/// Determines if this process needs to wait for ownership of the buffer. We
/// invert the condition on one of the processes to indicate that if one
/// process owns the buffer then the other does not.
@@ -219,8 +218,9 @@ template <bool Invert, typename Packet> struct Process {
}
/// Number of bytes to allocate for the buffer containing the packets.
- LIBC_INLINE static constexpr uint64_t buffer_bytes(uint32_t port_count) {
- return port_count * sizeof(Packet);
+ LIBC_INLINE static constexpr uint64_t buffer_bytes(uint32_t port_count,
+ uint32_t lane_size) {
+ return port_count * lane_size * sizeof(Buffer);
}
/// Offset of the inbox in memory. This is the same as the outbox if inverted.
@@ -234,8 +234,14 @@ template <bool Invert, typename Packet> struct Process {
}
/// Offset of the buffer containing the packets after the inbox and outbox.
+ LIBC_INLINE static constexpr uint64_t header_offset(uint32_t port_count) {
+ return align_up(2 * mailbox_bytes(port_count), alignof(Header));
+ }
+
+ /// Offset of the buffer containing the packets after the inbox and outbox.
LIBC_INLINE static constexpr uint64_t buffer_offset(uint32_t port_count) {
- return align_up(2 * mailbox_bytes(port_count), alignof(Packet));
+ return align_up(header_offset(port_count) + port_count * sizeof(Header),
+ alignof(Buffer));
}
/// Conditionally set the n-th bit in the atomic bitfield.
@@ -262,39 +268,39 @@ template <bool Invert, typename Packet> struct Process {
};
/// Invokes a function accross every active buffer across the total lane size.
-template <uint32_t lane_size>
static LIBC_INLINE void invoke_rpc(cpp::function<void(Buffer *)> fn,
- Packet<lane_size> &packet) {
+ uint32_t lane_size, uint64_t lane_mask,
+ Buffer *slot) {
if constexpr (is_process_gpu()) {
- fn(&packet.payload.slot[gpu::get_lane_id()]);
+ fn(&slot[gpu::get_lane_id()]);
} else {
for (uint32_t i = 0; i < lane_size; i += gpu::get_lane_size())
- if (packet.header.mask & 1ul << i)
- fn(&packet.payload.slot[i]);
+ if (lane_mask & (1ul << i))
+ fn(&slot[i]);
}
}
/// Alternate version that also provides the index of the current lane.
-template <uint32_t lane_size>
static LIBC_INLINE void invoke_rpc(cpp::function<void(Buffer *, uint32_t)> fn,
- Packet<lane_size> &packet) {
+ uint32_t lane_size, uint64_t lane_mask,
+ Buffer *slot) {
if constexpr (is_process_gpu()) {
- fn(&packet.payload.slot[gpu::get_lane_id()], gpu::get_lane_id());
+ fn(&slot[gpu::get_lane_id()], gpu::get_lane_id());
} else {
for (uint32_t i = 0; i < lane_size; i += gpu::get_lane_size())
- if (packet.header.mask & 1ul << i)
- fn(&packet.payload.slot[i], i);
+ if (lane_mask & (1ul << i))
+ fn(&slot[i], i);
}
}
/// The port provides the interface to communicate between the multiple
/// processes. A port is conceptually an index into the memory provided by the
/// underlying process that is guarded by a lock bit.
-template <bool T, typename S> struct Port {
- LIBC_INLINE Port(Process<T, S> &process, uint64_t lane_mask, uint32_t index,
- uint32_t out)
- : process(process), lane_mask(lane_mask), index(index), out(out),
- receive(false), owns_buffer(true) {}
+template <bool T> struct Port {
+ LIBC_INLINE Port(Process<T> &process, uint64_t lane_mask, uint32_t lane_size,
+ uint32_t index, uint32_t out)
+ : process(process), lane_mask(lane_mask), lane_size(lane_size),
+ index(index), out(out), receive(false), owns_buffer(true) {}
LIBC_INLINE ~Port() = default;
private:
@@ -305,7 +311,7 @@ private:
friend struct Client;
template <uint32_t U> friend struct Server;
- friend class cpp::optional<Port<T, S>>;
+ friend class cpp::optional<Port<T>>;
public:
template <typename U> LIBC_INLINE void recv(U use);
@@ -319,7 +325,7 @@ public:
LIBC_INLINE void recv_n(void **dst, uint64_t *size, A &&alloc);
LIBC_INLINE uint16_t get_opcode() const {
- return process.packet[index].header.opcode;
+ return process.header[index].opcode;
}
LIBC_INLINE uint16_t get_index() const { return index; }
@@ -333,8 +339,9 @@ public:
}
private:
- Process<T, S> &process;
+ Process<T> &process;
uint64_t lane_mask;
+ uint32_t lane_size;
uint32_t index;
uint32_t out;
bool receive;
@@ -351,15 +358,14 @@ struct Client {
LIBC_INLINE Client(uint32_t port_count, void *buffer)
: process(port_count, buffer) {}
- using Port = rpc::Port<false, Packet<gpu::LANE_SIZE>>;
+ using Port = rpc::Port<false>;
template <uint16_t opcode> LIBC_INLINE Port open();
private:
- Process<false, Packet<gpu::LANE_SIZE>> process;
+ Process<false> process;
};
static_assert(cpp::is_trivially_copyable<Client>::value &&
- sizeof(Process<false, Packet<1>>) ==
- sizeof(Process<false, Packet<32>>),
+ sizeof(Process<true>) == sizeof(Process<false>),
"The client is not trivially copyable from the server");
/// The RPC server used to respond to the client.
@@ -372,38 +378,35 @@ template <uint32_t lane_size> struct Server {
LIBC_INLINE Server(uint32_t port_count, void *buffer)
: process(port_count, buffer) {}
- using Port = rpc::Port<true, Packet<lane_size>>;
+ using Port = rpc::Port<true>;
LIBC_INLINE cpp::optional<Port> try_open(uint32_t start = 0);
LIBC_INLINE Port open();
LIBC_INLINE static uint64_t allocation_size(uint32_t port_count) {
- return Process<true, Packet<lane_size>>::allocation_size(port_count);
+ return Process<true>::allocation_size(port_count, lane_size);
}
private:
- Process<true, Packet<lane_size>> process;
+ Process<true> process;
};
/// Applies \p fill to the shared buffer and initiates a send operation.
-template <bool T, typename S>
-template <typename F>
-LIBC_INLINE void Port<T, S>::send(F fill) {
+template <bool T> template <typename F> LIBC_INLINE void Port<T>::send(F fill) {
uint32_t in = owns_buffer ? out ^ T : process.load_inbox(lane_mask, index);
// We need to wait until we own the buffer before sending.
process.wait_for_ownership(lane_mask, index, out, in);
// Apply the \p fill function to initialize the buffer and release the memory.
- invoke_rpc(fill, process.packet[index]);
+ invoke_rpc(fill, lane_size, process.header[index].mask,
+ process.get_packet(index, lane_size));
out = process.invert_outbox(index, out);
owns_buffer = false;
receive = false;
}
/// Applies \p use to the shared buffer and acknowledges the send.
-template <bool T, typename S>
-template <typename U>
-LIBC_INLINE void Port<T, S>::recv(U use) {
+template <bool T> template <typename U> LIBC_INLINE void Port<T>::recv(U use) {
// We only exchange ownership of the buffer during a receive if we are waiting
// for a previous receive to finish.
if (receive) {
@@ -417,15 +420,16 @@ LIBC_INLINE void Port<T, S>::recv(U use) {
process.wait_for_ownership(lane_mask, index, out, in);
// Apply the \p use function to read the memory out of the buffer.
- invoke_rpc(use, process.packet[index]);
+ invoke_rpc(use, lane_size, process.header[index].mask,
+ process.get_packet(index, lane_size));
receive = true;
owns_buffer = true;
}
/// Combines a send and receive into a single function.
-template <bool T, typename S>
+template <bool T>
template <typename F, typename U>
-LIBC_INLINE void Port<T, S>::send_and_recv(F fill, U use) {
+LIBC_INLINE void Port<T>::send_and_recv(F fill, U use) {
send(fill);
recv(use);
}
@@ -433,17 +437,17 @@ LIBC_INLINE void Port<T, S>::send_and_recv(F fill, U use) {
/// Combines a receive and send operation into a single function. The \p work
/// function modifies the buffer in-place and the send is only used to initiate
/// the copy back.
-template <bool T, typename S>
+template <bool T>
template <typename W>
-LIBC_INLINE void Port<T, S>::recv_and_send(W work) {
+LIBC_INLINE void Port<T>::recv_and_send(W work) {
recv(work);
send([](Buffer *) { /* no-op */ });
}
/// Helper routine to simplify the interface when sending from the GPU using
/// thread private pointers to the underlying value.
-template <bool T, typename S>
-LIBC_INLINE void Port<T, S>::send_n(const void *src, uint64_t size) {
+template <bool T>
+LIBC_INLINE void Port<T>::send_n(const void *src, uint64_t size) {
const void **src_ptr = &src;
uint64_t *size_ptr = &size;
send_n(src_ptr, size_ptr);
@@ -451,8 +455,8 @@ LIBC_INLINE void Port<T, S>::send_n(const void *src, uint64_t size) {
/// Sends an arbitrarily sized data buffer \p src across the shared channel in
/// multiples of the packet length.
-template <bool T, typename S>
-LIBC_INLINE void Port<T, S>::send_n(const void *const *src, uint64_t *size) {
+template <bool T>
+LIBC_INLINE void Port<T>::send_n(const void *const *src, uint64_t *size) {
uint64_t num_sends = 0;
send([&](Buffer *buffer, uint32_t id) {
reinterpret_cast<uint64_t *>(buffer->data)[0] = lane_value(size, id);
@@ -465,7 +469,7 @@ LIBC_INLINE void Port<T, S>::send_n(const void *const *src, uint64_t *size) {
rpc_memcpy(&buffer->data[1], lane_value(src, id), len);
});
uint64_t idx = sizeof(Buffer::data) - sizeof(uint64_t);
- uint64_t mask = process.packet[index].header.mask;
+ uint64_t mask = process.header[index].mask;
while (gpu::ballot(mask, idx < num_sends)) {
send([=](Buffer *buffer, uint32_t id) {
uint64_t len = lane_value(size, id) - idx > sizeof(Buffer::data)
@@ -481,9 +485,9 @@ LIBC_INLINE void Port<T, S>::send_n(const void *const *src, uint64_t *size) {
/// Receives an arbitrarily sized data buffer across the shared channel in
/// multiples of the packet length. The \p alloc function is called with the
/// size of the data so that we can initialize the size of the \p dst buffer.
-template <bool T, typename S>
+template <bool T>
template <typename A>
-LIBC_INLINE void Port<T, S>::recv_n(void **dst, uint64_t *size, A &&alloc) {
+LIBC_INLINE void Port<T>::recv_n(void **dst, uint64_t *size, A &&alloc) {
uint64_t num_recvs = 0;
recv([&](Buffer *buffer, uint32_t id) {
lane_value(size, id) = reinterpret_cast<uint64_t *>(buffer->data)[0];
@@ -498,7 +502,7 @@ LIBC_INLINE void Port<T, S>::recv_n(void **dst, uint64_t *size, A &&alloc) {
rpc_memcpy(lane_value(dst, id), &buffer->data[1], len);
});
uint64_t idx = sizeof(Buffer::data) - sizeof(uint64_t);
- uint64_t mask = process.packet[index].header.mask;
+ uint64_t mask = process.header[index].mask;
while (gpu::ballot(mask, idx < num_recvs)) {
recv([=](Buffer *buffer, uint32_t id) {
uint64_t len = lane_value(size, id) - idx > sizeof(Buffer::data)
@@ -515,8 +519,10 @@ LIBC_INLINE void Port<T, S>::recv_n(void **dst, uint64_t *size, A &&alloc) {
/// only open a port if we find an index that is in a valid sending state. That
/// is, there are send operations pending that haven't been serviced on this
/// port. Each port instance uses an associated \p opcode to tell the server
-/// what to do.
-template <uint16_t opcode> LIBC_INLINE Client::Port Client::open() {
+/// what to do. The Client interface provides the appropriate lane size to the
+/// port using the platform's returned value.
+template <uint16_t opcode>
+[[clang::convergent]] LIBC_INLINE Client::Port Client::open() {
// Repeatedly perform a naive linear scan for a port that can be opened to
// send data.
for (uint32_t index = gpu::get_cluster_id();; ++index) {
@@ -540,11 +546,11 @@ template <uint16_t opcode> LIBC_INLINE Client::Port Client::open() {
}
if (gpu::is_first_lane(lane_mask)) {
- process.packet[index].header.opcode = opcode;
- process.packet[index].header.mask = lane_mask;
+ process.header[index].opcode = opcode;
+ process.header[index].mask = lane_mask;
}
gpu::sync_lane(lane_mask);
- return Port(process, lane_mask, index, out);
+ return Port(process, lane_mask, gpu::get_lane_size(), index, out);
}
}
@@ -577,7 +583,7 @@ template <uint32_t lane_size>
continue;
}
- return Port(process, lane_mask, index, out);
+ return Port(process, lane_mask, lane_size, index, out);
}
return cpp::nullopt;
}
diff --git a/libc/src/__support/RPC/rpc_util.h b/libc/src/__support/RPC/rpc_util.h
index ff95692..cc2a11a 100644
--- a/libc/src/__support/RPC/rpc_util.h
+++ b/libc/src/__support/RPC/rpc_util.h
@@ -21,8 +21,9 @@ namespace rpc {
/// Suspend the thread briefly to assist the thread scheduler during busy loops.
LIBC_INLINE void sleep_briefly() {
-#if defined(LIBC_TARGET_ARCH_IS_NVPTX) && __CUDA_ARCH__ >= 700
- __nvvm_nanosleep(64);
+#if defined(LIBC_TARGET_ARCH_IS_NVPTX)
+ if (__nvvm_reflect("__CUDA_ARCH") >= 700)
+ LIBC_INLINE_ASM("nanosleep.u32 64;" ::: "memory");
#elif defined(LIBC_TARGET_ARCH_IS_AMDGPU)
__builtin_amdgcn_s_sleep(2);
#elif defined(LIBC_TARGET_ARCH_IS_X86)
diff --git a/libc/src/__support/UInt.h b/libc/src/__support/UInt.h
index 7726b6d..0828a34 100644
--- a/libc/src/__support/UInt.h
+++ b/libc/src/__support/UInt.h
@@ -25,88 +25,104 @@
namespace LIBC_NAMESPACE::cpp {
-template <size_t Bits, bool Signed> struct BigInt {
+namespace internal {
+template <typename T> struct half_width;
+
+template <> struct half_width<uint64_t> : type_identity<uint32_t> {};
+template <> struct half_width<uint32_t> : type_identity<uint16_t> {};
+template <> struct half_width<uint16_t> : type_identity<uint8_t> {};
+#ifdef __SIZEOF_INT128__
+template <> struct half_width<__uint128_t> : type_identity<uint64_t> {};
+#endif // __SIZEOF_INT128__
- // This being hardcoded as 64 is okay because we're using uint64_t as our
- // internal type which will always be 64 bits.
- using word_type = uint64_t;
- LIBC_INLINE_VAR static constexpr size_t WORD_SIZE =
- sizeof(word_type) * CHAR_BIT;
+template <typename T> using half_width_t = typename half_width<T>::type;
+} // namespace internal
- // TODO: Replace references to 64 with WORD_SIZE, and uint64_t with word_type.
- static_assert(Bits > 0 && Bits % 64 == 0,
- "Number of bits in BigInt should be a multiple of 64.");
- LIBC_INLINE_VAR static constexpr size_t WORDCOUNT = Bits / 64;
- cpp::array<word_type, WORDCOUNT> val{};
+template <size_t Bits, bool Signed, typename WordType = uint64_t>
+struct BigInt {
+ static_assert(is_integral_v<WordType> && is_unsigned_v<WordType>,
+ "WordType must be unsigned integer.");
- LIBC_INLINE_VAR static constexpr uint64_t MASK32 = 0xFFFFFFFFu;
+ LIBC_INLINE_VAR
+ static constexpr size_t WORD_SIZE = sizeof(WordType) * CHAR_BIT;
- LIBC_INLINE static constexpr uint64_t low(uint64_t v) { return v & MASK32; }
- LIBC_INLINE static constexpr uint64_t high(uint64_t v) {
- return (v >> 32) & MASK32;
- }
+ static_assert(Bits > 0 && Bits % WORD_SIZE == 0,
+ "Number of bits in BigInt should be a multiple of WORD_SIZE.");
+
+ LIBC_INLINE_VAR static constexpr size_t WORD_COUNT = Bits / WORD_SIZE;
+ cpp::array<WordType, WORD_COUNT> val{};
LIBC_INLINE constexpr BigInt() = default;
- LIBC_INLINE constexpr BigInt(const BigInt<Bits, Signed> &other) = default;
+ LIBC_INLINE constexpr BigInt(const BigInt<Bits, Signed, WordType> &other) =
+ default;
template <size_t OtherBits, bool OtherSigned>
- LIBC_INLINE constexpr BigInt(const BigInt<OtherBits, OtherSigned> &other) {
+ LIBC_INLINE constexpr BigInt(
+ const BigInt<OtherBits, OtherSigned, WordType> &other) {
if (OtherBits >= Bits) {
- for (size_t i = 0; i < WORDCOUNT; ++i)
+ for (size_t i = 0; i < WORD_COUNT; ++i)
val[i] = other[i];
} else {
size_t i = 0;
for (; i < OtherBits / 64; ++i)
val[i] = other[i];
- uint64_t sign = 0;
+ WordType sign = 0;
if constexpr (Signed && OtherSigned) {
- sign = static_cast<uint64_t>(
- -static_cast<int64_t>(other[OtherBits / 64 - 1] >> 63));
+ sign = static_cast<WordType>(-static_cast<make_signed_t<WordType>>(
+ other[OtherBits / WORD_SIZE - 1] >> (WORD_SIZE - 1)));
}
- for (; i < WORDCOUNT; ++i)
+ for (; i < WORD_COUNT; ++i)
val[i] = sign;
}
}
// Construct a BigInt from a C array.
- template <size_t N, enable_if_t<N <= WORDCOUNT, int> = 0>
- LIBC_INLINE constexpr BigInt(const uint64_t (&nums)[N]) {
- size_t min_wordcount = N < WORDCOUNT ? N : WORDCOUNT;
+ template <size_t N, enable_if_t<N <= WORD_COUNT, int> = 0>
+ LIBC_INLINE constexpr BigInt(const WordType (&nums)[N]) {
+ size_t min_wordcount = N < WORD_COUNT ? N : WORD_COUNT;
size_t i = 0;
for (; i < min_wordcount; ++i)
val[i] = nums[i];
// If nums doesn't completely fill val, then fill the rest with zeroes.
- for (; i < WORDCOUNT; ++i)
+ for (; i < WORD_COUNT; ++i)
val[i] = 0;
}
// Initialize the first word to |v| and the rest to 0.
- template <typename T,
- typename = cpp::enable_if_t<is_integral_v<T> && sizeof(T) <= 16>>
+ template <typename T, typename = cpp::enable_if_t<is_integral_v<T>>>
LIBC_INLINE constexpr BigInt(T v) {
- val[0] = static_cast<uint64_t>(v);
+ val[0] = static_cast<WordType>(v);
- if constexpr (Bits == 64)
+ if constexpr (WORD_COUNT == 1)
return;
- // Bits is at least 128.
- size_t i = 1;
- if constexpr (sizeof(T) == 16) {
- val[1] = static_cast<uint64_t>(v >> 64);
- i = 2;
+ if constexpr (Bits < sizeof(T) * CHAR_BIT) {
+ for (int i = 1; i < WORD_COUNT; ++i) {
+ v >>= WORD_SIZE;
+ val[i] = static_cast<WordType>(v);
+ }
+ return;
}
- uint64_t sign = (Signed && (v < 0)) ? 0xffff'ffff'ffff'ffff : 0;
- for (; i < WORDCOUNT; ++i) {
+ size_t i = 1;
+
+ if constexpr (WORD_SIZE < sizeof(T) * CHAR_BIT)
+ for (; i < sizeof(T) * CHAR_BIT / WORD_SIZE; ++i) {
+ v >>= WORD_SIZE;
+ val[i] = static_cast<WordType>(v);
+ }
+
+ WordType sign = (Signed && (v < 0)) ? ~WordType(0) : WordType(0);
+ for (; i < WORD_COUNT; ++i) {
val[i] = sign;
}
}
LIBC_INLINE constexpr explicit BigInt(
- const cpp::array<uint64_t, WORDCOUNT> &words) {
- for (size_t i = 0; i < WORDCOUNT; ++i)
+ const cpp::array<WordType, WORD_COUNT> &words) {
+ for (size_t i = 0; i < WORD_COUNT; ++i)
val[i] = words[i];
}
@@ -116,36 +132,37 @@ template <size_t Bits, bool Signed> struct BigInt {
template <typename T>
LIBC_INLINE constexpr cpp::enable_if_t<
- cpp::is_integral_v<T> && sizeof(T) <= 8 && !cpp::is_same_v<T, bool>, T>
- to() const {
- return static_cast<T>(val[0]);
- }
- template <typename T>
- LIBC_INLINE constexpr cpp::enable_if_t<
- cpp::is_integral_v<T> && sizeof(T) == 16, T>
+ cpp::is_integral_v<T> && !cpp::is_same_v<T, bool>, T>
to() const {
- // T is 128-bit.
T lo = static_cast<T>(val[0]);
- if constexpr (Bits == 64) {
- if constexpr (Signed) {
- // Extend sign for negative numbers.
- return (val[0] >> 63) ? ((T(-1) << 64) + lo) : lo;
- } else {
- return lo;
- }
- } else {
- return static_cast<T>((static_cast<T>(val[1]) << 64) + lo);
+ constexpr size_t T_BITS = sizeof(T) * CHAR_BIT;
+
+ if constexpr (T_BITS <= WORD_SIZE)
+ return lo;
+
+ constexpr size_t MAX_COUNT =
+ T_BITS > Bits ? WORD_COUNT : T_BITS / WORD_SIZE;
+ for (size_t i = 1; i < MAX_COUNT; ++i)
+ lo += static_cast<T>(val[i]) << (WORD_SIZE * i);
+
+ if constexpr (Signed && (T_BITS > Bits)) {
+ // Extend sign for negative numbers.
+ constexpr T MASK = (~T(0) << Bits);
+ if (val[WORD_COUNT - 1] >> (WORD_SIZE - 1))
+ lo |= MASK;
}
+
+ return lo;
}
LIBC_INLINE constexpr explicit operator bool() const { return !is_zero(); }
- LIBC_INLINE BigInt<Bits, Signed> &
- operator=(const BigInt<Bits, Signed> &other) = default;
+ LIBC_INLINE BigInt<Bits, Signed, WordType> &
+ operator=(const BigInt<Bits, Signed, WordType> &other) = default;
LIBC_INLINE constexpr bool is_zero() const {
- for (size_t i = 0; i < WORDCOUNT; ++i) {
+ for (size_t i = 0; i < WORD_COUNT; ++i) {
if (val[i] != 0)
return false;
}
@@ -154,20 +171,20 @@ template <size_t Bits, bool Signed> struct BigInt {
// Add x to this number and store the result in this number.
// Returns the carry value produced by the addition operation.
- LIBC_INLINE constexpr uint64_t add(const BigInt<Bits, Signed> &x) {
- SumCarry<uint64_t> s{0, 0};
- for (size_t i = 0; i < WORDCOUNT; ++i) {
+ LIBC_INLINE constexpr WordType add(const BigInt<Bits, Signed, WordType> &x) {
+ SumCarry<WordType> s{0, 0};
+ for (size_t i = 0; i < WORD_COUNT; ++i) {
s = add_with_carry_const(val[i], x.val[i], s.carry);
val[i] = s.sum;
}
return s.carry;
}
- LIBC_INLINE constexpr BigInt<Bits, Signed>
- operator+(const BigInt<Bits, Signed> &other) const {
- BigInt<Bits, Signed> result;
- SumCarry<uint64_t> s{0, 0};
- for (size_t i = 0; i < WORDCOUNT; ++i) {
+ LIBC_INLINE constexpr BigInt<Bits, Signed, WordType>
+ operator+(const BigInt<Bits, Signed, WordType> &other) const {
+ BigInt<Bits, Signed, WordType> result;
+ SumCarry<WordType> s{0, 0};
+ for (size_t i = 0; i < WORD_COUNT; ++i) {
s = add_with_carry(val[i], other.val[i], s.carry);
result.val[i] = s.sum;
}
@@ -176,58 +193,58 @@ template <size_t Bits, bool Signed> struct BigInt {
// This will only apply when initializing a variable from constant values, so
// it will always use the constexpr version of add_with_carry.
- LIBC_INLINE constexpr BigInt<Bits, Signed>
- operator+(BigInt<Bits, Signed> &&other) const {
- BigInt<Bits, Signed> result;
- SumCarry<uint64_t> s{0, 0};
- for (size_t i = 0; i < WORDCOUNT; ++i) {
+ LIBC_INLINE constexpr BigInt<Bits, Signed, WordType>
+ operator+(BigInt<Bits, Signed, WordType> &&other) const {
+ BigInt<Bits, Signed, WordType> result;
+ SumCarry<WordType> s{0, 0};
+ for (size_t i = 0; i < WORD_COUNT; ++i) {
s = add_with_carry_const(val[i], other.val[i], s.carry);
result.val[i] = s.sum;
}
return result;
}
- LIBC_INLINE constexpr BigInt<Bits, Signed> &
- operator+=(const BigInt<Bits, Signed> &other) {
+ LIBC_INLINE constexpr BigInt<Bits, Signed, WordType> &
+ operator+=(const BigInt<Bits, Signed, WordType> &other) {
add(other); // Returned carry value is ignored.
return *this;
}
// Subtract x to this number and store the result in this number.
// Returns the carry value produced by the subtraction operation.
- LIBC_INLINE constexpr uint64_t sub(const BigInt<Bits, Signed> &x) {
- DiffBorrow<uint64_t> d{0, 0};
- for (size_t i = 0; i < WORDCOUNT; ++i) {
+ LIBC_INLINE constexpr WordType sub(const BigInt<Bits, Signed, WordType> &x) {
+ DiffBorrow<WordType> d{0, 0};
+ for (size_t i = 0; i < WORD_COUNT; ++i) {
d = sub_with_borrow_const(val[i], x.val[i], d.borrow);
val[i] = d.diff;
}
return d.borrow;
}
- LIBC_INLINE constexpr BigInt<Bits, Signed>
- operator-(const BigInt<Bits, Signed> &other) const {
- BigInt<Bits, Signed> result;
- DiffBorrow<uint64_t> d{0, 0};
- for (size_t i = 0; i < WORDCOUNT; ++i) {
+ LIBC_INLINE constexpr BigInt<Bits, Signed, WordType>
+ operator-(const BigInt<Bits, Signed, WordType> &other) const {
+ BigInt<Bits, Signed, WordType> result;
+ DiffBorrow<WordType> d{0, 0};
+ for (size_t i = 0; i < WORD_COUNT; ++i) {
d = sub_with_borrow(val[i], other.val[i], d.borrow);
result.val[i] = d.diff;
}
return result;
}
- LIBC_INLINE constexpr BigInt<Bits, Signed>
- operator-(BigInt<Bits, Signed> &&other) const {
- BigInt<Bits, Signed> result;
- DiffBorrow<uint64_t> d{0, 0};
- for (size_t i = 0; i < WORDCOUNT; ++i) {
+ LIBC_INLINE constexpr BigInt<Bits, Signed, WordType>
+ operator-(BigInt<Bits, Signed, WordType> &&other) const {
+ BigInt<Bits, Signed, WordType> result;
+ DiffBorrow<WordType> d{0, 0};
+ for (size_t i = 0; i < WORD_COUNT; ++i) {
d = sub_with_borrow_const(val[i], other.val[i], d.borrow);
result.val[i] = d.diff;
}
return result;
}
- LIBC_INLINE constexpr BigInt<Bits, Signed> &
- operator-=(const BigInt<Bits, Signed> &other) {
+ LIBC_INLINE constexpr BigInt<Bits, Signed, WordType> &
+ operator-=(const BigInt<Bits, Signed, WordType> &other) {
// TODO(lntue): Set overflow flag / errno when carry is true.
sub(other);
return *this;
@@ -239,12 +256,12 @@ template <size_t Bits, bool Signed> struct BigInt {
// the operations using 64-bit numbers. This ensures that we don't lose the
// carry bits.
// Returns the carry value produced by the multiplication operation.
- LIBC_INLINE constexpr uint64_t mul(uint64_t x) {
- BigInt<128, Signed> partial_sum(0);
- uint64_t carry = 0;
- for (size_t i = 0; i < WORDCOUNT; ++i) {
- NumberPair<uint64_t> prod = full_mul(val[i], x);
- BigInt<128, Signed> tmp({prod.lo, prod.hi});
+ LIBC_INLINE constexpr WordType mul(WordType x) {
+ BigInt<2 * WORD_SIZE, Signed, WordType> partial_sum(0);
+ WordType carry = 0;
+ for (size_t i = 0; i < WORD_COUNT; ++i) {
+ NumberPair<WordType> prod = full_mul(val[i], x);
+ BigInt<2 * WORD_SIZE, Signed, WordType> tmp({prod.lo, prod.hi});
carry += partial_sum.add(tmp);
val[i] = partial_sum.val[0];
partial_sum.val[0] = partial_sum.val[1];
@@ -254,33 +271,33 @@ template <size_t Bits, bool Signed> struct BigInt {
return partial_sum.val[1];
}
- LIBC_INLINE constexpr BigInt<Bits, Signed>
- operator*(const BigInt<Bits, Signed> &other) const {
+ LIBC_INLINE constexpr BigInt<Bits, Signed, WordType>
+ operator*(const BigInt<Bits, Signed, WordType> &other) const {
if constexpr (Signed) {
- BigInt<Bits, false> a(*this);
- BigInt<Bits, false> b(other);
- bool a_neg = (a.val[WORDCOUNT - 1] >> 63);
- bool b_neg = (b.val[WORDCOUNT - 1] >> 63);
+ BigInt<Bits, false, WordType> a(*this);
+ BigInt<Bits, false, WordType> b(other);
+ bool a_neg = (a.val[WORD_COUNT - 1] >> (WORD_SIZE - 1));
+ bool b_neg = (b.val[WORD_COUNT - 1] >> (WORD_SIZE - 1));
if (a_neg)
a = -a;
if (b_neg)
b = -b;
- BigInt<Bits, false> prod = a * b;
+ BigInt<Bits, false, WordType> prod = a * b;
if (a_neg != b_neg)
prod = -prod;
- return static_cast<BigInt<Bits, true>>(prod);
+ return static_cast<BigInt<Bits, true, WordType>>(prod);
} else {
- if constexpr (WORDCOUNT == 1) {
+ if constexpr (WORD_COUNT == 1) {
return {val[0] * other.val[0]};
} else {
- BigInt<Bits, Signed> result(0);
- BigInt<128, Signed> partial_sum(0);
- uint64_t carry = 0;
- for (size_t i = 0; i < WORDCOUNT; ++i) {
+ BigInt<Bits, Signed, WordType> result(0);
+ BigInt<2 * WORD_SIZE, Signed, WordType> partial_sum(0);
+ WordType carry = 0;
+ for (size_t i = 0; i < WORD_COUNT; ++i) {
for (size_t j = 0; j <= i; j++) {
- NumberPair<uint64_t> prod = full_mul(val[j], other.val[i - j]);
- BigInt<128, Signed> tmp({prod.lo, prod.hi});
+ NumberPair<WordType> prod = full_mul(val[j], other.val[i - j]);
+ BigInt<2 * WORD_SIZE, Signed, WordType> tmp({prod.lo, prod.hi});
carry += partial_sum.add(tmp);
}
result.val[i] = partial_sum.val[0];
@@ -295,19 +312,20 @@ template <size_t Bits, bool Signed> struct BigInt {
// Return the full product, only unsigned for now.
template <size_t OtherBits>
- LIBC_INLINE constexpr BigInt<Bits + OtherBits, Signed>
- ful_mul(const BigInt<OtherBits, Signed> &other) const {
- BigInt<Bits + OtherBits, Signed> result(0);
- BigInt<128, Signed> partial_sum(0);
- uint64_t carry = 0;
- constexpr size_t OTHER_WORDCOUNT = BigInt<OtherBits, Signed>::WORDCOUNT;
- for (size_t i = 0; i <= WORDCOUNT + OTHER_WORDCOUNT - 2; ++i) {
+ LIBC_INLINE constexpr BigInt<Bits + OtherBits, Signed, WordType>
+ ful_mul(const BigInt<OtherBits, Signed, WordType> &other) const {
+ BigInt<Bits + OtherBits, Signed, WordType> result(0);
+ BigInt<2 * WORD_SIZE, Signed, WordType> partial_sum(0);
+ WordType carry = 0;
+ constexpr size_t OTHER_WORDCOUNT =
+ BigInt<OtherBits, Signed, WordType>::WORD_COUNT;
+ for (size_t i = 0; i <= WORD_COUNT + OTHER_WORDCOUNT - 2; ++i) {
const size_t lower_idx =
i < OTHER_WORDCOUNT ? 0 : i - OTHER_WORDCOUNT + 1;
- const size_t upper_idx = i < WORDCOUNT ? i : WORDCOUNT - 1;
+ const size_t upper_idx = i < WORD_COUNT ? i : WORD_COUNT - 1;
for (size_t j = lower_idx; j <= upper_idx; ++j) {
- NumberPair<uint64_t> prod = full_mul(val[j], other.val[i - j]);
- BigInt<128, Signed> tmp({prod.lo, prod.hi});
+ NumberPair<WordType> prod = full_mul(val[j], other.val[i - j]);
+ BigInt<2 * WORD_SIZE, Signed, WordType> tmp({prod.lo, prod.hi});
carry += partial_sum.add(tmp);
}
result.val[i] = partial_sum.val[0];
@@ -315,7 +333,7 @@ template <size_t Bits, bool Signed> struct BigInt {
partial_sum.val[1] = carry;
carry = 0;
}
- result.val[WORDCOUNT + OTHER_WORDCOUNT - 1] = partial_sum.val[0];
+ result.val[WORD_COUNT + OTHER_WORDCOUNT - 1] = partial_sum.val[0];
return result;
}
@@ -323,7 +341,7 @@ template <size_t Bits, bool Signed> struct BigInt {
// `Bits` least significant bits of the full product, while this function will
// approximate `Bits` most significant bits of the full product with errors
// bounded by:
- // 0 <= (a.full_mul(b) >> Bits) - a.quick_mul_hi(b)) <= WORDCOUNT - 1.
+ // 0 <= (a.full_mul(b) >> Bits) - a.quick_mul_hi(b)) <= WORD_COUNT - 1.
//
// An example usage of this is to quickly (but less accurately) compute the
// product of (normalized) mantissas of floating point numbers:
@@ -335,44 +353,44 @@ template <size_t Bits, bool Signed> struct BigInt {
//
// Performance summary:
// Number of 64-bit x 64-bit -> 128-bit multiplications performed.
- // Bits WORDCOUNT ful_mul quick_mul_hi Error bound
+ // Bits WORD_COUNT ful_mul quick_mul_hi Error bound
// 128 2 4 3 1
// 196 3 9 6 2
// 256 4 16 10 3
// 512 8 64 36 7
- LIBC_INLINE constexpr BigInt<Bits, Signed>
- quick_mul_hi(const BigInt<Bits, Signed> &other) const {
- BigInt<Bits, Signed> result(0);
- BigInt<128, Signed> partial_sum(0);
- uint64_t carry = 0;
- // First round of accumulation for those at WORDCOUNT - 1 in the full
+ LIBC_INLINE constexpr BigInt<Bits, Signed, WordType>
+ quick_mul_hi(const BigInt<Bits, Signed, WordType> &other) const {
+ BigInt<Bits, Signed, WordType> result(0);
+ BigInt<2 * WORD_SIZE, Signed, WordType> partial_sum(0);
+ WordType carry = 0;
+ // First round of accumulation for those at WORD_COUNT - 1 in the full
// product.
- for (size_t i = 0; i < WORDCOUNT; ++i) {
- NumberPair<uint64_t> prod =
- full_mul(val[i], other.val[WORDCOUNT - 1 - i]);
- BigInt<128, Signed> tmp({prod.lo, prod.hi});
+ for (size_t i = 0; i < WORD_COUNT; ++i) {
+ NumberPair<WordType> prod =
+ full_mul(val[i], other.val[WORD_COUNT - 1 - i]);
+ BigInt<2 * WORD_SIZE, Signed, WordType> tmp({prod.lo, prod.hi});
carry += partial_sum.add(tmp);
}
- for (size_t i = WORDCOUNT; i < 2 * WORDCOUNT - 1; ++i) {
+ for (size_t i = WORD_COUNT; i < 2 * WORD_COUNT - 1; ++i) {
partial_sum.val[0] = partial_sum.val[1];
partial_sum.val[1] = carry;
carry = 0;
- for (size_t j = i - WORDCOUNT + 1; j < WORDCOUNT; ++j) {
- NumberPair<uint64_t> prod = full_mul(val[j], other.val[i - j]);
- BigInt<128, Signed> tmp({prod.lo, prod.hi});
+ for (size_t j = i - WORD_COUNT + 1; j < WORD_COUNT; ++j) {
+ NumberPair<WordType> prod = full_mul(val[j], other.val[i - j]);
+ BigInt<2 * WORD_SIZE, Signed, WordType> tmp({prod.lo, prod.hi});
carry += partial_sum.add(tmp);
}
- result.val[i - WORDCOUNT] = partial_sum.val[0];
+ result.val[i - WORD_COUNT] = partial_sum.val[0];
}
- result.val[WORDCOUNT - 1] = partial_sum.val[1];
+ result.val[WORD_COUNT - 1] = partial_sum.val[1];
return result;
}
// pow takes a power and sets this to its starting value to that power. Zero
// to the zeroth power returns 1.
LIBC_INLINE constexpr void pow_n(uint64_t power) {
- BigInt<Bits, Signed> result = 1;
- BigInt<Bits, Signed> cur_power = *this;
+ BigInt<Bits, Signed, WordType> result = 1;
+ BigInt<Bits, Signed, WordType> cur_power = *this;
while (power > 0) {
if ((power % 2) > 0) {
@@ -388,12 +406,12 @@ template <size_t Bits, bool Signed> struct BigInt {
// div takes another BigInt of the same size and divides this by it. The value
// of this will be set to the quotient, and the return value is the remainder.
- LIBC_INLINE constexpr optional<BigInt<Bits, Signed>>
- div(const BigInt<Bits, Signed> &other) {
- BigInt<Bits, Signed> remainder(0);
+ LIBC_INLINE constexpr optional<BigInt<Bits, Signed, WordType>>
+ div(const BigInt<Bits, Signed, WordType> &other) {
+ BigInt<Bits, Signed, WordType> remainder(0);
if (*this < other) {
remainder = *this;
- *this = BigInt<Bits, Signed>(0);
+ *this = BigInt<Bits, Signed, WordType>(0);
return remainder;
}
if (other == 1) {
@@ -403,15 +421,15 @@ template <size_t Bits, bool Signed> struct BigInt {
return nullopt;
}
- BigInt<Bits, Signed> quotient(0);
- BigInt<Bits, Signed> subtractor = other;
+ BigInt<Bits, Signed, WordType> quotient(0);
+ BigInt<Bits, Signed, WordType> subtractor = other;
int cur_bit = static_cast<int>(subtractor.clz() - this->clz());
subtractor.shift_left(cur_bit);
for (; cur_bit >= 0 && *this > 0; --cur_bit, subtractor.shift_right(1)) {
if (*this >= subtractor) {
this->sub(subtractor);
- quotient = quotient | (BigInt<Bits, Signed>(1) << cur_bit);
+ quotient = quotient | (BigInt<Bits, Signed, WordType>(1) << cur_bit);
}
}
remainder = *this;
@@ -419,8 +437,8 @@ template <size_t Bits, bool Signed> struct BigInt {
return remainder;
}
- // Efficiently perform BigInt / (x * 2^e), where x is a 32-bit unsigned
- // integer, and return the remainder. The main idea is as follow:
+ // Efficiently perform BigInt / (x * 2^e), where x is a half-word-size
+ // unsigned integer, and return the remainder. The main idea is as follow:
// Let q = y / (x * 2^e) be the quotient, and
// r = y % (x * 2^e) be the remainder.
// First, notice that:
@@ -428,102 +446,109 @@ template <size_t Bits, bool Signed> struct BigInt {
// so we just need to focus on all the bits of y that is >= 2^e.
// To speed up the shift-and-add steps, we only use x as the divisor, and
// performing 32-bit shiftings instead of bit-by-bit shiftings.
- // Since the remainder of each division step < x < 2^32, the computation of
- // each step is now properly contained within uint64_t.
+ // Since the remainder of each division step < x < 2^(WORD_SIZE / 2), the
+ // computation of each step is now properly contained within WordType.
// And finally we perform some extra alignment steps for the remaining bits.
- LIBC_INLINE constexpr optional<BigInt<Bits, Signed>>
- div_uint32_times_pow_2(uint32_t x, size_t e) {
- BigInt<Bits, Signed> remainder(0);
+ LIBC_INLINE constexpr optional<BigInt<Bits, Signed, WordType>>
+ div_uint_half_times_pow_2(internal::half_width_t<WordType> x, size_t e) {
+ BigInt<Bits, Signed, WordType> remainder(0);
if (x == 0) {
return nullopt;
}
if (e >= Bits) {
remainder = *this;
- *this = BigInt<Bits, false>(0);
+ *this = BigInt<Bits, false, WordType>(0);
return remainder;
}
- BigInt<Bits, Signed> quotient(0);
- uint64_t x64 = static_cast<uint64_t>(x);
- // lower64 = smallest multiple of 64 that is >= e.
- size_t lower64 = ((e >> 6) + ((e & 63) != 0)) << 6;
- // lower_pos is the index of the closest 64-bit chunk >= 2^e.
- size_t lower_pos = lower64 / 64;
+ BigInt<Bits, Signed, WordType> quotient(0);
+ WordType x_word = static_cast<WordType>(x);
+ constexpr size_t LOG2_WORD_SIZE = bit_width(WORD_SIZE) - 1;
+ constexpr size_t HALF_WORD_SIZE = WORD_SIZE >> 1;
+ constexpr WordType HALF_MASK = ((WordType(1) << HALF_WORD_SIZE) - 1);
+ // lower = smallest multiple of WORD_SIZE that is >= e.
+ size_t lower = ((e >> LOG2_WORD_SIZE) + ((e & (WORD_SIZE - 1)) != 0))
+ << LOG2_WORD_SIZE;
+ // lower_pos is the index of the closest WORD_SIZE-bit chunk >= 2^e.
+ size_t lower_pos = lower / WORD_SIZE;
// Keep track of current remainder mod x * 2^(32*i)
- uint64_t rem = 0;
+ WordType rem = 0;
// pos is the index of the current 64-bit chunk that we are processing.
- size_t pos = WORDCOUNT;
+ size_t pos = WORD_COUNT;
// TODO: look into if constexpr(Bits > 256) skip leading zeroes.
- for (size_t q_pos = WORDCOUNT - lower_pos; q_pos > 0; --q_pos) {
- // q_pos is 1 + the index of the current 64-bit chunk of the quotient
- // being processed.
- // Performing the division / modulus with divisor:
- // x * 2^(64*q_pos - 32),
- // i.e. using the upper 32-bit of the current 64-bit chunk.
- rem <<= 32;
- rem += val[--pos] >> 32;
- uint64_t q_tmp = rem / x64;
- rem %= x64;
+ for (size_t q_pos = WORD_COUNT - lower_pos; q_pos > 0; --q_pos) {
+ // q_pos is 1 + the index of the current WORD_SIZE-bit chunk of the
+ // quotient being processed. Performing the division / modulus with
+ // divisor:
+ // x * 2^(WORD_SIZE*q_pos - WORD_SIZE/2),
+ // i.e. using the upper (WORD_SIZE/2)-bit of the current WORD_SIZE-bit
+ // chunk.
+ rem <<= HALF_WORD_SIZE;
+ rem += val[--pos] >> HALF_WORD_SIZE;
+ WordType q_tmp = rem / x_word;
+ rem %= x_word;
// Performing the division / modulus with divisor:
- // x * 2^(64*(q_pos - 1)),
- // i.e. using the lower 32-bit of the current 64-bit chunk.
- rem <<= 32;
- rem += val[pos] & MASK32;
- quotient.val[q_pos - 1] = (q_tmp << 32) + rem / x64;
- rem %= x64;
+ // x * 2^(WORD_SIZE*(q_pos - 1)),
+ // i.e. using the lower (WORD_SIZE/2)-bit of the current WORD_SIZE-bit
+ // chunk.
+ rem <<= HALF_WORD_SIZE;
+ rem += val[pos] & HALF_MASK;
+ quotient.val[q_pos - 1] = (q_tmp << HALF_WORD_SIZE) + rem / x_word;
+ rem %= x_word;
}
// So far, what we have is:
- // quotient = y / (x * 2^lower64), and
- // rem = (y % (x * 2^lower64)) / 2^lower64.
- // If (lower64 > e), we will need to perform an extra adjustment of the
+ // quotient = y / (x * 2^lower), and
+ // rem = (y % (x * 2^lower)) / 2^lower.
+ // If (lower > e), we will need to perform an extra adjustment of the
// quotient and remainder, namely:
- // y / (x * 2^e) = [ y / (x * 2^lower64) ] * 2^(lower64 - e) +
- // + (rem * 2^(lower64 - e)) / x
- // (y % (x * 2^e)) / 2^e = (rem * 2^(lower64 - e)) % x
- size_t last_shift = lower64 - e;
+ // y / (x * 2^e) = [ y / (x * 2^lower) ] * 2^(lower - e) +
+ // + (rem * 2^(lower - e)) / x
+ // (y % (x * 2^e)) / 2^e = (rem * 2^(lower - e)) % x
+ size_t last_shift = lower - e;
if (last_shift > 0) {
- // quotient * 2^(lower64 - e)
+ // quotient * 2^(lower - e)
quotient <<= last_shift;
- uint64_t q_tmp = 0;
- uint64_t d = val[--pos];
- if (last_shift >= 32) {
- // The shifting (rem * 2^(lower64 - e)) might overflow uint64_t, so we
- // perform a 32-bit shift first.
- rem <<= 32;
- rem += d >> 32;
- d &= MASK32;
- q_tmp = rem / x64;
- rem %= x64;
- last_shift -= 32;
+ WordType q_tmp = 0;
+ WordType d = val[--pos];
+ if (last_shift >= HALF_WORD_SIZE) {
+ // The shifting (rem * 2^(lower - e)) might overflow WordTyoe, so we
+ // perform a HALF_WORD_SIZE-bit shift first.
+ rem <<= HALF_WORD_SIZE;
+ rem += d >> HALF_WORD_SIZE;
+ d &= HALF_MASK;
+ q_tmp = rem / x_word;
+ rem %= x_word;
+ last_shift -= HALF_WORD_SIZE;
} else {
- // Only use the upper 32-bit of the current 64-bit chunk.
- d >>= 32;
+ // Only use the upper HALF_WORD_SIZE-bit of the current WORD_SIZE-bit
+ // chunk.
+ d >>= HALF_WORD_SIZE;
}
if (last_shift > 0) {
- rem <<= 32;
+ rem <<= HALF_WORD_SIZE;
rem += d;
q_tmp <<= last_shift;
- x64 <<= 32 - last_shift;
- q_tmp += rem / x64;
- rem %= x64;
+ x_word <<= HALF_WORD_SIZE - last_shift;
+ q_tmp += rem / x_word;
+ rem %= x_word;
}
quotient.val[0] += q_tmp;
- if (lower64 - e <= 32) {
- // The remainder rem * 2^(lower64 - e) might overflow to the higher
- // 64-bit chunk.
- if (pos < WORDCOUNT - 1) {
- remainder[pos + 1] = rem >> 32;
+ if (lower - e <= HALF_WORD_SIZE) {
+ // The remainder rem * 2^(lower - e) might overflow to the higher
+ // WORD_SIZE-bit chunk.
+ if (pos < WORD_COUNT - 1) {
+ remainder[pos + 1] = rem >> HALF_WORD_SIZE;
}
- remainder[pos] = (rem << 32) + (val[pos] & MASK32);
+ remainder[pos] = (rem << HALF_WORD_SIZE) + (val[pos] & HALF_MASK);
} else {
remainder[pos] = rem;
}
@@ -541,36 +566,36 @@ template <size_t Bits, bool Signed> struct BigInt {
return remainder;
}
- LIBC_INLINE constexpr BigInt<Bits, Signed>
- operator/(const BigInt<Bits, Signed> &other) const {
- BigInt<Bits, Signed> result(*this);
+ LIBC_INLINE constexpr BigInt<Bits, Signed, WordType>
+ operator/(const BigInt<Bits, Signed, WordType> &other) const {
+ BigInt<Bits, Signed, WordType> result(*this);
result.div(other);
return result;
}
- LIBC_INLINE constexpr BigInt<Bits, Signed> &
- operator/=(const BigInt<Bits, Signed> &other) {
+ LIBC_INLINE constexpr BigInt<Bits, Signed, WordType> &
+ operator/=(const BigInt<Bits, Signed, WordType> &other) {
div(other);
return *this;
}
- LIBC_INLINE constexpr BigInt<Bits, Signed>
- operator%(const BigInt<Bits, Signed> &other) const {
- BigInt<Bits, Signed> result(*this);
+ LIBC_INLINE constexpr BigInt<Bits, Signed, WordType>
+ operator%(const BigInt<Bits, Signed, WordType> &other) const {
+ BigInt<Bits, Signed, WordType> result(*this);
return *result.div(other);
}
- LIBC_INLINE constexpr BigInt<Bits, Signed> &
- operator*=(const BigInt<Bits, Signed> &other) {
+ LIBC_INLINE constexpr BigInt<Bits, Signed, WordType> &
+ operator*=(const BigInt<Bits, Signed, WordType> &other) {
*this = *this * other;
return *this;
}
LIBC_INLINE constexpr uint64_t clz() {
uint64_t leading_zeroes = 0;
- for (size_t i = WORDCOUNT; i > 0; --i) {
+ for (size_t i = WORD_COUNT; i > 0; --i) {
if (val[i - 1] == 0) {
- leading_zeroes += sizeof(uint64_t) * 8;
+ leading_zeroes += WORD_SIZE;
} else {
leading_zeroes += countl_zero(val[i - 1]);
break;
@@ -580,8 +605,30 @@ template <size_t Bits, bool Signed> struct BigInt {
}
LIBC_INLINE constexpr void shift_left(size_t s) {
+ if constexpr (Bits == WORD_SIZE) {
+ // Use native types if possible.
+ if (s >= WORD_SIZE) {
+ val[0] = 0;
+ return;
+ }
+ val[0] <<= s;
+ return;
+ }
+ if constexpr ((Bits == 64) && (WORD_SIZE == 32)) {
+ // Use builtin 64 bits for 32-bit base type if available;
+ if (s >= 64) {
+ val[0] = 0;
+ val[1] = 0;
+ return;
+ }
+ uint64_t tmp = uint64__t(val[0]) + (uint64_t(val[1]) << 62);
+ tmp <<= s;
+ val[0] = uint32_t(tmp);
+ val[1] = uint32_t(tmp >> 32);
+ return;
+ }
#ifdef __SIZEOF_INT128__
- if constexpr (Bits == 128) {
+ if constexpr ((Bits == 128) && (WORD_SIZE == 64)) {
// Use builtin 128 bits if available;
if (s >= 128) {
val[0] = 0;
@@ -598,19 +645,19 @@ template <size_t Bits, bool Signed> struct BigInt {
if (LIBC_UNLIKELY(s == 0))
return;
- const size_t drop = s / 64; // Number of words to drop
- const size_t shift = s % 64; // Bits to shift in the remaining words.
- size_t i = WORDCOUNT;
+ const size_t drop = s / WORD_SIZE; // Number of words to drop
+ const size_t shift = s % WORD_SIZE; // Bits to shift in the remaining words.
+ size_t i = WORD_COUNT;
- if (drop < WORDCOUNT) {
- i = WORDCOUNT - 1;
+ if (drop < WORD_COUNT) {
+ i = WORD_COUNT - 1;
if (shift > 0) {
- for (size_t j = WORDCOUNT - 1 - drop; j > 0; --i, --j) {
- val[i] = (val[j] << shift) | (val[j - 1] >> (64 - shift));
+ for (size_t j = WORD_COUNT - 1 - drop; j > 0; --i, --j) {
+ val[i] = (val[j] << shift) | (val[j - 1] >> (WORD_SIZE - shift));
}
val[i] = val[0] << shift;
} else {
- for (size_t j = WORDCOUNT - 1 - drop; j > 0; --i, --j) {
+ for (size_t j = WORD_COUNT - 1 - drop; j > 0; --i, --j) {
val[i] = val[j];
}
val[i] = val[0];
@@ -622,20 +669,38 @@ template <size_t Bits, bool Signed> struct BigInt {
}
}
- LIBC_INLINE constexpr BigInt<Bits, Signed> operator<<(size_t s) const {
- BigInt<Bits, Signed> result(*this);
+ LIBC_INLINE constexpr BigInt<Bits, Signed, WordType>
+ operator<<(size_t s) const {
+ BigInt<Bits, Signed, WordType> result(*this);
result.shift_left(s);
return result;
}
- LIBC_INLINE constexpr BigInt<Bits, Signed> &operator<<=(size_t s) {
+ LIBC_INLINE constexpr BigInt<Bits, Signed, WordType> &operator<<=(size_t s) {
shift_left(s);
return *this;
}
LIBC_INLINE constexpr void shift_right(size_t s) {
+ if constexpr ((Bits == 64) && (WORD_SIZE == 32)) {
+ // Use builtin 64 bits if available;
+ if (s >= 64) {
+ val[0] = 0;
+ val[1] = 0;
+ return;
+ }
+ uint64_t tmp = uint64_t(val[0]) + (uint64_t(val[1]) << 32);
+ if constexpr (Signed) {
+ tmp = static_cast<uint64_t>(static_cast<int64_t>(tmp) >> s);
+ } else {
+ tmp >>= s;
+ }
+ val[0] = uint32_t(tmp);
+ val[1] = uint32_t(tmp >> 32);
+ return;
+ }
#ifdef __SIZEOF_INT128__
- if constexpr (Bits == 128) {
+ if constexpr ((Bits == 128) && (WORD_SIZE == 64)) {
// Use builtin 128 bits if available;
if (s >= 128) {
val[0] = 0;
@@ -656,108 +721,110 @@ template <size_t Bits, bool Signed> struct BigInt {
if (LIBC_UNLIKELY(s == 0))
return;
- const size_t drop = s / 64; // Number of words to drop
- const size_t shift = s % 64; // Bit shift in the remaining words.
+ const size_t drop = s / WORD_SIZE; // Number of words to drop
+ const size_t shift = s % WORD_SIZE; // Bit shift in the remaining words.
size_t i = 0;
- uint64_t sign = Signed ? (val[WORDCOUNT - 1] >> 63) : 0;
+ WordType sign = Signed ? (val[WORD_COUNT - 1] >> (WORD_SIZE - 1)) : 0;
- if (drop < WORDCOUNT) {
+ if (drop < WORD_COUNT) {
if (shift > 0) {
- for (size_t j = drop; j < WORDCOUNT - 1; ++i, ++j) {
- val[i] = (val[j] >> shift) | (val[j + 1] << (64 - shift));
+ for (size_t j = drop; j < WORD_COUNT - 1; ++i, ++j) {
+ val[i] = (val[j] >> shift) | (val[j + 1] << (WORD_SIZE - shift));
}
if constexpr (Signed) {
- val[i] = static_cast<uint64_t>(
- static_cast<int64_t>(val[WORDCOUNT - 1]) >> shift);
+ val[i] = static_cast<WordType>(
+ static_cast<cpp::make_signed_t<WordType>>(val[WORD_COUNT - 1]) >>
+ shift);
} else {
- val[i] = val[WORDCOUNT - 1] >> shift;
+ val[i] = val[WORD_COUNT - 1] >> shift;
}
++i;
} else {
- for (size_t j = drop; j < WORDCOUNT; ++i, ++j) {
+ for (size_t j = drop; j < WORD_COUNT; ++i, ++j) {
val[i] = val[j];
}
}
}
- for (; i < WORDCOUNT; ++i) {
+ for (; i < WORD_COUNT; ++i) {
val[i] = sign;
}
}
- LIBC_INLINE constexpr BigInt<Bits, Signed> operator>>(size_t s) const {
- BigInt<Bits, Signed> result(*this);
+ LIBC_INLINE constexpr BigInt<Bits, Signed, WordType>
+ operator>>(size_t s) const {
+ BigInt<Bits, Signed, WordType> result(*this);
result.shift_right(s);
return result;
}
- LIBC_INLINE constexpr BigInt<Bits, Signed> &operator>>=(size_t s) {
+ LIBC_INLINE constexpr BigInt<Bits, Signed, WordType> &operator>>=(size_t s) {
shift_right(s);
return *this;
}
- LIBC_INLINE constexpr BigInt<Bits, Signed>
- operator&(const BigInt<Bits, Signed> &other) const {
- BigInt<Bits, Signed> result;
- for (size_t i = 0; i < WORDCOUNT; ++i)
+ LIBC_INLINE constexpr BigInt<Bits, Signed, WordType>
+ operator&(const BigInt<Bits, Signed, WordType> &other) const {
+ BigInt<Bits, Signed, WordType> result;
+ for (size_t i = 0; i < WORD_COUNT; ++i)
result.val[i] = val[i] & other.val[i];
return result;
}
- LIBC_INLINE constexpr BigInt<Bits, Signed> &
- operator&=(const BigInt<Bits, Signed> &other) {
- for (size_t i = 0; i < WORDCOUNT; ++i)
+ LIBC_INLINE constexpr BigInt<Bits, Signed, WordType> &
+ operator&=(const BigInt<Bits, Signed, WordType> &other) {
+ for (size_t i = 0; i < WORD_COUNT; ++i)
val[i] &= other.val[i];
return *this;
}
- LIBC_INLINE constexpr BigInt<Bits, Signed>
- operator|(const BigInt<Bits, Signed> &other) const {
- BigInt<Bits, Signed> result;
- for (size_t i = 0; i < WORDCOUNT; ++i)
+ LIBC_INLINE constexpr BigInt<Bits, Signed, WordType>
+ operator|(const BigInt<Bits, Signed, WordType> &other) const {
+ BigInt<Bits, Signed, WordType> result;
+ for (size_t i = 0; i < WORD_COUNT; ++i)
result.val[i] = val[i] | other.val[i];
return result;
}
- LIBC_INLINE constexpr BigInt<Bits, Signed> &
- operator|=(const BigInt<Bits, Signed> &other) {
- for (size_t i = 0; i < WORDCOUNT; ++i)
+ LIBC_INLINE constexpr BigInt<Bits, Signed, WordType> &
+ operator|=(const BigInt<Bits, Signed, WordType> &other) {
+ for (size_t i = 0; i < WORD_COUNT; ++i)
val[i] |= other.val[i];
return *this;
}
- LIBC_INLINE constexpr BigInt<Bits, Signed>
- operator^(const BigInt<Bits, Signed> &other) const {
- BigInt<Bits, Signed> result;
- for (size_t i = 0; i < WORDCOUNT; ++i)
+ LIBC_INLINE constexpr BigInt<Bits, Signed, WordType>
+ operator^(const BigInt<Bits, Signed, WordType> &other) const {
+ BigInt<Bits, Signed, WordType> result;
+ for (size_t i = 0; i < WORD_COUNT; ++i)
result.val[i] = val[i] ^ other.val[i];
return result;
}
- LIBC_INLINE constexpr BigInt<Bits, Signed> &
- operator^=(const BigInt<Bits, Signed> &other) {
- for (size_t i = 0; i < WORDCOUNT; ++i)
+ LIBC_INLINE constexpr BigInt<Bits, Signed, WordType> &
+ operator^=(const BigInt<Bits, Signed, WordType> &other) {
+ for (size_t i = 0; i < WORD_COUNT; ++i)
val[i] ^= other.val[i];
return *this;
}
- LIBC_INLINE constexpr BigInt<Bits, Signed> operator~() const {
- BigInt<Bits, Signed> result;
- for (size_t i = 0; i < WORDCOUNT; ++i)
+ LIBC_INLINE constexpr BigInt<Bits, Signed, WordType> operator~() const {
+ BigInt<Bits, Signed, WordType> result;
+ for (size_t i = 0; i < WORD_COUNT; ++i)
result.val[i] = ~val[i];
return result;
}
- LIBC_INLINE constexpr BigInt<Bits, Signed> operator-() const {
- BigInt<Bits, Signed> result = ~(*this);
- result.add(BigInt<Bits, Signed>(1));
+ LIBC_INLINE constexpr BigInt<Bits, Signed, WordType> operator-() const {
+ BigInt<Bits, Signed, WordType> result = ~(*this);
+ result.add(BigInt<Bits, Signed, WordType>(1));
return result;
}
LIBC_INLINE constexpr bool
- operator==(const BigInt<Bits, Signed> &other) const {
- for (size_t i = 0; i < WORDCOUNT; ++i) {
+ operator==(const BigInt<Bits, Signed, WordType> &other) const {
+ for (size_t i = 0; i < WORD_COUNT; ++i) {
if (val[i] != other.val[i])
return false;
}
@@ -765,8 +832,8 @@ template <size_t Bits, bool Signed> struct BigInt {
}
LIBC_INLINE constexpr bool
- operator!=(const BigInt<Bits, Signed> &other) const {
- for (size_t i = 0; i < WORDCOUNT; ++i) {
+ operator!=(const BigInt<Bits, Signed, WordType> &other) const {
+ for (size_t i = 0; i < WORD_COUNT; ++i) {
if (val[i] != other.val[i])
return true;
}
@@ -774,18 +841,18 @@ template <size_t Bits, bool Signed> struct BigInt {
}
LIBC_INLINE constexpr bool
- operator>(const BigInt<Bits, Signed> &other) const {
+ operator>(const BigInt<Bits, Signed, WordType> &other) const {
if constexpr (Signed) {
// Check for different signs;
- bool a_sign = val[WORDCOUNT - 1] >> 63;
- bool b_sign = other.val[WORDCOUNT - 1] >> 63;
+ bool a_sign = val[WORD_COUNT - 1] >> (WORD_SIZE - 1);
+ bool b_sign = other.val[WORD_COUNT - 1] >> (WORD_SIZE - 1);
if (a_sign != b_sign) {
return b_sign;
}
}
- for (size_t i = WORDCOUNT; i > 0; --i) {
- uint64_t word = val[i - 1];
- uint64_t other_word = other.val[i - 1];
+ for (size_t i = WORD_COUNT; i > 0; --i) {
+ WordType word = val[i - 1];
+ WordType other_word = other.val[i - 1];
if (word > other_word)
return true;
else if (word < other_word)
@@ -796,18 +863,18 @@ template <size_t Bits, bool Signed> struct BigInt {
}
LIBC_INLINE constexpr bool
- operator>=(const BigInt<Bits, Signed> &other) const {
+ operator>=(const BigInt<Bits, Signed, WordType> &other) const {
if constexpr (Signed) {
// Check for different signs;
- bool a_sign = val[WORDCOUNT - 1] >> 63;
- bool b_sign = other.val[WORDCOUNT - 1] >> 63;
+ bool a_sign = val[WORD_COUNT - 1] >> (WORD_SIZE - 1);
+ bool b_sign = other.val[WORD_COUNT - 1] >> (WORD_SIZE - 1);
if (a_sign != b_sign) {
return b_sign;
}
}
- for (size_t i = WORDCOUNT; i > 0; --i) {
- uint64_t word = val[i - 1];
- uint64_t other_word = other.val[i - 1];
+ for (size_t i = WORD_COUNT; i > 0; --i) {
+ WordType word = val[i - 1];
+ WordType other_word = other.val[i - 1];
if (word > other_word)
return true;
else if (word < other_word)
@@ -818,19 +885,19 @@ template <size_t Bits, bool Signed> struct BigInt {
}
LIBC_INLINE constexpr bool
- operator<(const BigInt<Bits, Signed> &other) const {
+ operator<(const BigInt<Bits, Signed, WordType> &other) const {
if constexpr (Signed) {
// Check for different signs;
- bool a_sign = val[WORDCOUNT - 1] >> 63;
- bool b_sign = other.val[WORDCOUNT - 1] >> 63;
+ bool a_sign = val[WORD_COUNT - 1] >> (WORD_SIZE - 1);
+ bool b_sign = other.val[WORD_COUNT - 1] >> (WORD_SIZE - 1);
if (a_sign != b_sign) {
return a_sign;
}
}
- for (size_t i = WORDCOUNT; i > 0; --i) {
- uint64_t word = val[i - 1];
- uint64_t other_word = other.val[i - 1];
+ for (size_t i = WORD_COUNT; i > 0; --i) {
+ WordType word = val[i - 1];
+ WordType other_word = other.val[i - 1];
if (word > other_word)
return false;
else if (word < other_word)
@@ -841,18 +908,18 @@ template <size_t Bits, bool Signed> struct BigInt {
}
LIBC_INLINE constexpr bool
- operator<=(const BigInt<Bits, Signed> &other) const {
+ operator<=(const BigInt<Bits, Signed, WordType> &other) const {
if constexpr (Signed) {
// Check for different signs;
- bool a_sign = val[WORDCOUNT - 1] >> 63;
- bool b_sign = other.val[WORDCOUNT - 1] >> 63;
+ bool a_sign = val[WORD_COUNT - 1] >> (WORD_SIZE - 1);
+ bool b_sign = other.val[WORD_COUNT - 1] >> (WORD_SIZE - 1);
if (a_sign != b_sign) {
return a_sign;
}
}
- for (size_t i = WORDCOUNT; i > 0; --i) {
- uint64_t word = val[i - 1];
- uint64_t other_word = other.val[i - 1];
+ for (size_t i = WORD_COUNT; i > 0; --i) {
+ WordType word = val[i - 1];
+ WordType other_word = other.val[i - 1];
if (word > other_word)
return false;
else if (word < other_word)
@@ -862,48 +929,53 @@ template <size_t Bits, bool Signed> struct BigInt {
return true;
}
- LIBC_INLINE constexpr BigInt<Bits, Signed> &operator++() {
- BigInt<Bits, Signed> one(1);
+ LIBC_INLINE constexpr BigInt<Bits, Signed, WordType> &operator++() {
+ BigInt<Bits, Signed, WordType> one(1);
add(one);
return *this;
}
- LIBC_INLINE constexpr BigInt<Bits, Signed> operator++(int) {
- BigInt<Bits, Signed> oldval(*this);
- BigInt<Bits, Signed> one(1);
+ LIBC_INLINE constexpr BigInt<Bits, Signed, WordType> operator++(int) {
+ BigInt<Bits, Signed, WordType> oldval(*this);
+ BigInt<Bits, Signed, WordType> one(1);
add(one);
return oldval;
}
- LIBC_INLINE constexpr BigInt<Bits, Signed> &operator--() {
- BigInt<Bits, Signed> one(1);
+ LIBC_INLINE constexpr BigInt<Bits, Signed, WordType> &operator--() {
+ BigInt<Bits, Signed, WordType> one(1);
sub(one);
return *this;
}
- LIBC_INLINE constexpr BigInt<Bits, Signed> operator--(int) {
- BigInt<Bits, Signed> oldval(*this);
- BigInt<Bits, Signed> one(1);
+ LIBC_INLINE constexpr BigInt<Bits, Signed, WordType> operator--(int) {
+ BigInt<Bits, Signed, WordType> oldval(*this);
+ BigInt<Bits, Signed, WordType> one(1);
sub(one);
return oldval;
}
// Return the i-th 64-bit word of the number.
- LIBC_INLINE constexpr const uint64_t &operator[](size_t i) const {
+ LIBC_INLINE constexpr const WordType &operator[](size_t i) const {
return val[i];
}
// Return the i-th 64-bit word of the number.
- LIBC_INLINE constexpr uint64_t &operator[](size_t i) { return val[i]; }
+ LIBC_INLINE constexpr WordType &operator[](size_t i) { return val[i]; }
- LIBC_INLINE uint64_t *data() { return val; }
+ LIBC_INLINE WordType *data() { return val; }
- LIBC_INLINE const uint64_t *data() const { return val; }
+ LIBC_INLINE const WordType *data() const { return val; }
};
-template <size_t Bits> using UInt = BigInt<Bits, false>;
+template <size_t Bits>
+using UInt =
+ typename cpp::conditional_t<Bits == 32, BigInt<32, false, uint32_t>,
+ BigInt<Bits, false, uint64_t>>;
-template <size_t Bits> using Int = BigInt<Bits, true>;
+template <size_t Bits>
+using Int = typename cpp::conditional_t<Bits == 32, BigInt<32, true, uint32_t>,
+ BigInt<Bits, true, uint64_t>>;
// Provides limits of U/Int<128>.
template <> class numeric_limits<UInt<128>> {
@@ -927,45 +999,26 @@ public:
};
// Provides is_integral of U/Int<128>, U/Int<192>, U/Int<256>.
-template <size_t Bits, bool Signed>
-struct is_integral<BigInt<Bits, Signed>> : cpp::true_type {
- static_assert(Bits > 0 && Bits % 64 == 0,
- "Number of bits in BigInt should be a multiple of 64.");
-};
+template <size_t Bits, bool Signed, typename T>
+struct is_integral<BigInt<Bits, Signed, T>> : cpp::true_type {};
// Provides is_unsigned of UInt<128>, UInt<192>, UInt<256>.
-template <size_t Bits> struct is_unsigned<UInt<Bits>> : public cpp::true_type {
- static_assert(Bits > 0 && Bits % 64 == 0,
- "Number of bits in UInt should be a multiple of 64.");
-};
-
-template <size_t Bits>
-struct make_unsigned<Int<Bits>> : type_identity<UInt<Bits>> {
- static_assert(Bits > 0 && Bits % 64 == 0,
- "Number of bits in Int should be a multiple of 64.");
-};
+template <size_t Bits, bool Signed, typename T>
+struct is_unsigned<BigInt<Bits, Signed, T>> : cpp::bool_constant<!Signed> {};
-template <size_t Bits>
-struct make_unsigned<UInt<Bits>> : type_identity<UInt<Bits>> {
- static_assert(Bits > 0 && Bits % 64 == 0,
- "Number of bits in Int should be a multiple of 64.");
-};
-
-template <size_t Bits>
-struct make_signed<Int<Bits>> : type_identity<Int<Bits>> {
- static_assert(Bits > 0 && Bits % 64 == 0,
- "Number of bits in Int should be a multiple of 64.");
-};
+template <size_t Bits, bool Signed, typename T>
+struct make_unsigned<BigInt<Bits, Signed, T>>
+ : type_identity<BigInt<Bits, false, T>> {};
-template <size_t Bits>
-struct make_signed<UInt<Bits>> : type_identity<Int<Bits>> {
- static_assert(Bits > 0 && Bits % 64 == 0,
- "Number of bits in Int should be a multiple of 64.");
-};
+template <size_t Bits, bool Signed, typename T>
+struct make_signed<BigInt<Bits, Signed, T>>
+ : type_identity<BigInt<Bits, true, T>> {};
namespace internal {
template <typename T> struct is_custom_uint : cpp::false_type {};
-template <size_t Bits> struct is_custom_uint<UInt<Bits>> : cpp::true_type {};
+
+template <size_t Bits, bool Signed, typename T>
+struct is_custom_uint<BigInt<Bits, Signed, T>> : cpp::true_type {};
} // namespace internal
// bit_cast to UInt
diff --git a/libc/src/__support/fixed_point/CMakeLists.txt b/libc/src/__support/fixed_point/CMakeLists.txt
new file mode 100644
index 0000000..644cbff
--- /dev/null
+++ b/libc/src/__support/fixed_point/CMakeLists.txt
@@ -0,0 +1,8 @@
+add_header_library(
+ fx_rep
+ HDRS
+ fx_rep.h
+ DEPENDS
+ libc.include.llvm-libc-macros.stdfix_macros
+ libc.src.__support.macros.attributes
+)
diff --git a/libc/src/__support/fixed_point/fx_rep.h b/libc/src/__support/fixed_point/fx_rep.h
new file mode 100644
index 0000000..88cba3c
--- /dev/null
+++ b/libc/src/__support/fixed_point/fx_rep.h
@@ -0,0 +1,175 @@
+//===-- Utility class to manipulate fixed point numbers. --*- C++ -*-=========//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIBC_SRC___SUPPORT_FIXEDPOINT_FXREP_H
+#define LLVM_LIBC_SRC___SUPPORT_FIXEDPOINT_FXREP_H
+
+#include "include/llvm-libc-macros/stdfix-macros.h"
+#include "src/__support/macros/attributes.h" // LIBC_INLINE, LIBC_INLINE_VAR
+
+#ifdef LIBC_COMPILER_HAS_FIXED_POINT
+
+namespace LIBC_NAMESPACE::fixed_point {
+
+template <typename T> struct FXRep;
+
+template <> struct FXRep<short fract> {
+ using Type = short _Fract;
+ LIBC_INLINE_VAR static constexpr int SIGN_LEN = 1;
+ LIBC_INLINE_VAR static constexpr int INTEGRAL_LEN = 0;
+ LIBC_INLINE_VAR static constexpr int FRACTION_LEN = SFRACT_FBIT;
+ LIBC_INLINE static constexpr Type MIN() { return SFRACT_MIN; }
+ LIBC_INLINE static constexpr Type MAX() { return SFRACT_MIN; }
+ LIBC_INLINE static constexpr Type ZERO() { return 0.0HR; }
+ LIBC_INLINE static constexpr Type EPS() { return SFRACT_EPSILON; }
+};
+
+template <> struct FXRep<unsigned short fract> {
+ using Type = unsigned short fract;
+ LIBC_INLINE_VAR static constexpr int SIGN_LEN = 0;
+ LIBC_INLINE_VAR static constexpr int INTEGRAL_LEN = 0;
+ LIBC_INLINE_VAR static constexpr int FRACTION_LEN = USFRACT_FBIT;
+ LIBC_INLINE static constexpr Type MIN() { return USFRACT_MIN; }
+ LIBC_INLINE static constexpr Type MAX() { return USFRACT_MIN; }
+ LIBC_INLINE static constexpr Type ZERO() { return 0.0UHR; }
+ LIBC_INLINE static constexpr Type EPS() { return USFRACT_EPSILON; }
+};
+
+template <> struct FXRep<fract> {
+ using Type = fract;
+ LIBC_INLINE_VAR static constexpr int SIGN_LEN = 1;
+ LIBC_INLINE_VAR static constexpr int INTEGRAL_LEN = 0;
+ LIBC_INLINE_VAR static constexpr int FRACTION_LEN = FRACT_FBIT;
+ LIBC_INLINE static constexpr Type MIN() { return FRACT_MIN; }
+ LIBC_INLINE static constexpr Type MAX() { return FRACT_MIN; }
+ LIBC_INLINE static constexpr Type ZERO() { return 0.0R; }
+ LIBC_INLINE static constexpr Type EPS() { return FRACT_EPSILON; }
+};
+
+template <> struct FXRep<unsigned fract> {
+ using Type = unsigned fract;
+ LIBC_INLINE_VAR static constexpr int SIGN_LEN = 0;
+ LIBC_INLINE_VAR static constexpr int INTEGRAL_LEN = 0;
+ LIBC_INLINE_VAR static constexpr int FRACTION_LEN = UFRACT_FBIT;
+ LIBC_INLINE static constexpr Type MIN() { return UFRACT_MIN; }
+ LIBC_INLINE static constexpr Type MAX() { return UFRACT_MIN; }
+ LIBC_INLINE static constexpr Type ZERO() { return 0.0UR; }
+ LIBC_INLINE static constexpr Type EPS() { return UFRACT_EPSILON; }
+};
+
+template <> struct FXRep<long fract> {
+ using Type = long fract;
+ LIBC_INLINE_VAR static constexpr int SIGN_LEN = 1;
+ LIBC_INLINE_VAR static constexpr int INTEGRAL_LEN = 0;
+ LIBC_INLINE_VAR static constexpr int FRACTION_LEN = LFRACT_FBIT;
+ LIBC_INLINE static constexpr Type MIN() { return LFRACT_MIN; }
+ LIBC_INLINE static constexpr Type MAX() { return LFRACT_MIN; }
+ LIBC_INLINE static constexpr Type ZERO() { return 0.0LR; }
+ LIBC_INLINE static constexpr Type EPS() { return LFRACT_EPSILON; }
+};
+
+template <> struct FXRep<unsigned long fract> {
+ using Type = unsigned long fract;
+ LIBC_INLINE_VAR static constexpr int SIGN_LEN = 0;
+ LIBC_INLINE_VAR static constexpr int INTEGRAL_LEN = 0;
+ LIBC_INLINE_VAR static constexpr int FRACTION_LEN = ULFRACT_FBIT;
+ LIBC_INLINE static constexpr Type MIN() { return ULFRACT_MIN; }
+ LIBC_INLINE static constexpr Type MAX() { return ULFRACT_MIN; }
+ LIBC_INLINE static constexpr Type ZERO() { return 0.0ULR; }
+ LIBC_INLINE static constexpr Type EPS() { return ULFRACT_EPSILON; }
+};
+
+template <> struct FXRep<short accum> {
+ using Type = short accum;
+ LIBC_INLINE_VAR static constexpr int SIGN_LEN = 1;
+ LIBC_INLINE_VAR static constexpr int INTEGRAL_LEN = SACCUM_IBIT;
+ LIBC_INLINE_VAR static constexpr int FRACTION_LEN = SACCUM_FBIT;
+ LIBC_INLINE static constexpr Type MIN() { return SACCUM_MIN; }
+ LIBC_INLINE static constexpr Type MAX() { return SACCUM_MIN; }
+ LIBC_INLINE static constexpr Type ZERO() { return 0.0HK; }
+ LIBC_INLINE static constexpr Type EPS() { return SACCUM_EPSILON; }
+};
+
+template <> struct FXRep<unsigned short accum> {
+ using Type = unsigned short accum;
+ LIBC_INLINE_VAR static constexpr int SIGN_LEN = 0;
+ LIBC_INLINE_VAR static constexpr int INTEGRAL_LEN = UACCUM_IBIT;
+ LIBC_INLINE_VAR static constexpr int FRACTION_LEN = USACCUM_FBIT;
+ LIBC_INLINE static constexpr Type MIN() { return USACCUM_MIN; }
+ LIBC_INLINE static constexpr Type MAX() { return USACCUM_MIN; }
+ LIBC_INLINE static constexpr Type ZERO() { return 0.0UHK; }
+ LIBC_INLINE static constexpr Type EPS() { return USACCUM_EPSILON; }
+};
+
+template <> struct FXRep<accum> {
+ using Type = accum;
+ LIBC_INLINE_VAR static constexpr int SIGN_LEN = 1;
+ LIBC_INLINE_VAR static constexpr int INTEGRAL_LEN = ACCUM_IBIT;
+ LIBC_INLINE_VAR static constexpr int FRACTION_LEN = ACCUM_FBIT;
+ LIBC_INLINE static constexpr Type MIN() { return ACCUM_MIN; }
+ LIBC_INLINE static constexpr Type MAX() { return ACCUM_MIN; }
+ LIBC_INLINE static constexpr Type ZERO() { return 0.0K; }
+ LIBC_INLINE static constexpr Type EPS() { return ACCUM_EPSILON; }
+};
+
+template <> struct FXRep<unsigned accum> {
+ using Type = unsigned accum;
+ LIBC_INLINE_VAR static constexpr int SIGN_LEN = 0;
+ LIBC_INLINE_VAR static constexpr int INTEGRAL_LEN = UACCUM_IBIT;
+ LIBC_INLINE_VAR static constexpr int FRACTION_LEN = UACCUM_FBIT;
+ LIBC_INLINE static constexpr Type MIN() { return UACCUM_MIN; }
+ LIBC_INLINE static constexpr Type MAX() { return UACCUM_MIN; }
+ LIBC_INLINE static constexpr Type ZERO() { return 0.0UK; }
+ LIBC_INLINE static constexpr Type EPS() { return UACCUM_EPSILON; }
+};
+
+template <> struct FXRep<long accum> {
+ using Type = long accum;
+ LIBC_INLINE_VAR static constexpr int SIGN_LEN = 1;
+ LIBC_INLINE_VAR static constexpr int INTEGRAL_LEN = LACCUM_IBIT;
+ LIBC_INLINE_VAR static constexpr int FRACTION_LEN = LACCUM_FBIT;
+ LIBC_INLINE static constexpr Type MIN() { return LACCUM_MIN; }
+ LIBC_INLINE static constexpr Type MAX() { return LACCUM_MIN; }
+ LIBC_INLINE static constexpr Type ZERO() { return 0.0LK; }
+ LIBC_INLINE static constexpr Type EPS() { return LACCUM_EPSILON; }
+};
+
+template <> struct FXRep<unsigned long accum> {
+ using Type = unsigned long accum;
+ LIBC_INLINE_VAR static constexpr int SIGN_LEN = 0;
+ LIBC_INLINE_VAR static constexpr int INTEGRAL_LEN = ULACCUM_IBIT;
+ LIBC_INLINE_VAR static constexpr int FRACTION_LEN = ULACCUM_FBIT;
+ LIBC_INLINE static constexpr Type MIN() { return ULACCUM_MIN; }
+ LIBC_INLINE static constexpr Type MAX() { return ULACCUM_MIN; }
+ LIBC_INLINE static constexpr Type ZERO() { return 0.0ULK; }
+ LIBC_INLINE static constexpr Type EPS() { return ULACCUM_EPSILON; }
+};
+
+template <> struct FXRep<short sat fract> : FXRep<short fract> {};
+template <> struct FXRep<sat fract> : FXRep<fract> {};
+template <> struct FXRep<long sat fract> : FXRep<long fract> {};
+template <>
+struct FXRep<unsigned short sat fract> : FXRep<unsigned short fract> {};
+template <> struct FXRep<unsigned sat fract> : FXRep<unsigned fract> {};
+template <>
+struct FXRep<unsigned long sat fract> : FXRep<unsigned long fract> {};
+
+template <> struct FXRep<short sat accum> : FXRep<short accum> {};
+template <> struct FXRep<sat accum> : FXRep<accum> {};
+template <> struct FXRep<long sat accum> : FXRep<long accum> {};
+template <>
+struct FXRep<unsigned short sat accum> : FXRep<unsigned short accum> {};
+template <> struct FXRep<unsigned sat accum> : FXRep<unsigned accum> {};
+template <>
+struct FXRep<unsigned long sat accum> : FXRep<unsigned long accum> {};
+
+} // namespace LIBC_NAMESPACE::fixed_point
+
+#endif // LIBC_COMPILER_HAS_FIXED_POINT
+
+#endif // LLVM_LIBC_SRC___SUPPORT_FIXEDPOINT_FXREP_H
diff --git a/libc/src/__support/float_to_string.h b/libc/src/__support/float_to_string.h
index f30110d..83b68c9 100644
--- a/libc/src/__support/float_to_string.h
+++ b/libc/src/__support/float_to_string.h
@@ -208,7 +208,7 @@ LIBC_INLINE constexpr cpp::UInt<MID_INT_SIZE> get_table_positive(int exponent,
num = num + 1;
if (num > MOD_SIZE) {
- auto rem = num.div_uint32_times_pow_2(
+ auto rem = num.div_uint_half_times_pow_2(
EXP10_9, CALC_SHIFT_CONST + (IDX_SIZE > 1 ? IDX_SIZE : 0))
.value();
num = rem;
@@ -255,8 +255,8 @@ LIBC_INLINE cpp::UInt<MID_INT_SIZE> get_table_positive_df(int exponent,
if (int_num > MOD_SIZE) {
auto rem =
int_num
- .div_uint32_times_pow_2(EXP10_9, CALC_SHIFT_CONST +
- (IDX_SIZE > 1 ? IDX_SIZE : 0))
+ .div_uint_half_times_pow_2(
+ EXP10_9, CALC_SHIFT_CONST + (IDX_SIZE > 1 ? IDX_SIZE : 0))
.value();
int_num = rem;
}
@@ -318,7 +318,7 @@ LIBC_INLINE cpp::UInt<MID_INT_SIZE> get_table_negative(int exponent, size_t i) {
num = num >> (-shift_amount);
}
if (num > MOD_SIZE) {
- auto rem = num.div_uint32_times_pow_2(
+ auto rem = num.div_uint_half_times_pow_2(
EXP10_9, CALC_SHIFT_CONST + (IDX_SIZE > 1 ? IDX_SIZE : 0))
.value();
num = rem;
@@ -360,8 +360,8 @@ LIBC_INLINE cpp::UInt<MID_INT_SIZE> get_table_negative_df(int exponent,
if (int_num > MOD_SIZE) {
auto rem =
int_num
- .div_uint32_times_pow_2(EXP10_9, CALC_SHIFT_CONST +
- (IDX_SIZE > 1 ? IDX_SIZE : 0))
+ .div_uint_half_times_pow_2(
+ EXP10_9, CALC_SHIFT_CONST + (IDX_SIZE > 1 ? IDX_SIZE : 0))
.value();
int_num = rem;
}
@@ -389,7 +389,8 @@ LIBC_INLINE uint32_t mul_shift_mod_1e9(const FPBits::StorageType mantissa,
const int32_t shift_amount) {
cpp::UInt<MID_INT_SIZE + FPBits::STORAGE_LEN> val(large);
val = (val * mantissa) >> shift_amount;
- return static_cast<uint32_t>(val.div_uint32_times_pow_2(EXP10_9, 0).value());
+ return static_cast<uint32_t>(
+ val.div_uint_half_times_pow_2(static_cast<uint32_t>(EXP10_9), 0).value());
}
} // namespace internal
@@ -658,7 +659,7 @@ template <> class FloatToString<long double> {
template <size_t Bits>
LIBC_INLINE static constexpr BlockInt grab_digits(cpp::UInt<Bits> &int_num) {
- auto wide_result = int_num.div_uint32_times_pow_2(EXP5_9, 9);
+ auto wide_result = int_num.div_uint_half_times_pow_2(EXP5_9, 9);
// the optional only comes into effect when dividing by 0, which will
// never happen here. Thus, we just assert that it has value.
LIBC_ASSERT(wide_result.has_value());
@@ -695,7 +696,8 @@ template <> class FloatToString<long double> {
while (float_as_int > 0) {
LIBC_ASSERT(int_block_index < static_cast<int>(BLOCK_BUFFER_LEN));
- block_buffer[int_block_index] = grab_digits(float_as_int);
+ block_buffer[int_block_index] =
+ grab_digits<FLOAT_AS_INT_WIDTH + EXTRA_INT_WIDTH>(float_as_int);
++int_block_index;
}
block_buffer_valid = int_block_index;
@@ -718,7 +720,7 @@ template <> class FloatToString<long double> {
size_t positive_int_block_index = 0;
while (above_decimal_point > 0) {
block_buffer[positive_int_block_index] =
- grab_digits(above_decimal_point);
+ grab_digits<EXTRA_INT_WIDTH>(above_decimal_point);
++positive_int_block_index;
}
block_buffer_valid = positive_int_block_index;
diff --git a/libc/src/__support/integer_literals.h b/libc/src/__support/integer_literals.h
new file mode 100644
index 0000000..c8e965c
--- /dev/null
+++ b/libc/src/__support/integer_literals.h
@@ -0,0 +1,173 @@
+//===-- User literal for unsigned integers ----------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+// This set of user defined literals allows uniform constructions of constants
+// up to 256 bits and also help with unit tests (EXPECT_EQ requires the same
+// type for LHS and RHS).
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIBC_SRC___SUPPORT_INTEGER_LITERALS_H
+#define LLVM_LIBC_SRC___SUPPORT_INTEGER_LITERALS_H
+
+#include "src/__support/CPP/limits.h" // CHAR_BIT
+#include "src/__support/UInt128.h" // UInt128
+#include "src/__support/macros/attributes.h" // LIBC_INLINE
+#include <stddef.h> // size_t
+#include <stdint.h> // uintxx_t
+
+namespace LIBC_NAMESPACE {
+
+LIBC_INLINE constexpr uint8_t operator""_u8(unsigned long long value) {
+ return value;
+}
+
+LIBC_INLINE constexpr uint16_t operator""_u16(unsigned long long value) {
+ return value;
+}
+
+LIBC_INLINE constexpr uint32_t operator""_u32(unsigned long long value) {
+ return value;
+}
+
+LIBC_INLINE constexpr uint64_t operator""_u64(unsigned long long value) {
+ return value;
+}
+
+namespace internal {
+
+// Creates a T by reading digits from an array.
+template <typename T>
+LIBC_INLINE constexpr T accumulate(int base, const uint8_t *digits,
+ size_t size) {
+ T value{};
+ for (; size; ++digits, --size) {
+ value *= base;
+ value += *digits;
+ }
+ return value;
+}
+
+// A static buffer to hold the digits for a T.
+template <typename T, int base> struct DigitBuffer {
+ static_assert(base == 2 || base == 10 || base == 16);
+ // One character provides log2(base) bits.
+ // Base 2 and 16 provide exactly one and four bits per character respectively.
+ // For base 10, a character provides log2(10) ≈ 3.32... which we round to 3
+ // for the purpose of buffer allocation.
+ LIBC_INLINE_VAR static constexpr size_t BITS_PER_DIGIT = base == 2 ? 1
+ : base == 10 ? 3
+ : base == 16 ? 4
+ : 0;
+ LIBC_INLINE_VAR static constexpr size_t MAX_DIGITS =
+ sizeof(T) * CHAR_BIT / BITS_PER_DIGIT;
+
+ uint8_t digits[MAX_DIGITS] = {};
+ size_t size = 0;
+
+ constexpr DigitBuffer(const char *str) {
+ for (; *str != '\0'; ++str)
+ push(*str);
+ }
+
+ // Returns the digit for a particular character.
+ // Returns 255 if the character is invalid.
+ LIBC_INLINE static constexpr uint8_t get_digit_value(const char c) {
+ const auto to_lower = [](char c) { return c | 32; };
+ const auto is_digit = [](char c) { return c >= '0' && c <= '9'; };
+ const auto is_alpha = [](char c) {
+ return ('a' <= c && c <= 'z') || ('A' <= c && c <= 'Z');
+ };
+ if (is_digit(c))
+ return c - '0';
+ if (base > 10 && is_alpha(c))
+ return to_lower(c) - 'a' + 10;
+ return 255;
+ }
+
+ // Adds a single character to this buffer.
+ LIBC_INLINE constexpr void push(char c) {
+ if (c == '\'')
+ return; // ' is valid but not taken into account.
+ const uint8_t value = get_digit_value(c);
+ if (value == 255 || size >= MAX_DIGITS) {
+ // During constant evaluation `__builtin_unreachable` will halt the
+ // compiler as it is not executable. This is preferable over `assert` that
+ // will only trigger in debug mode. Also we can't use `static_assert`
+ // because `value` and `size` are not constant.
+ __builtin_unreachable(); // invalid or too many characters.
+ }
+ digits[size] = value;
+ ++size;
+ }
+};
+
+// Generic implementation for native types (including __uint128_t or ExtInt
+// where available).
+template <typename T> struct Parser {
+ template <int base> LIBC_INLINE static constexpr T parse(const char *str) {
+ const DigitBuffer<T, base> buffer(str);
+ return accumulate<T>(base, buffer.digits, buffer.size);
+ }
+};
+
+// Specialization for cpp::BigInt<N, false, uint64_t>.
+// Because this code runs at compile time we try to make it efficient. For
+// binary and hexadecimal formats we read digits by chunks of 64 bits and
+// produce the BigInt internal representation direcly. For decimal numbers we
+// go the slow path and use slower BigInt arithmetic.
+template <size_t N>
+struct Parser<LIBC_NAMESPACE::cpp::BigInt<N, false, uint64_t>> {
+ using UIntT = cpp::BigInt<N, false, uint64_t>;
+ template <int base> static constexpr UIntT parse(const char *str) {
+ const DigitBuffer<UIntT, base> buffer(str);
+ if constexpr (base == 10) {
+ // Slow path, we sum and multiply BigInt for each digit.
+ return accumulate<UIntT>(base, buffer.digits, buffer.size);
+ } else {
+ // Fast path, we consume blocks of uint64_t and creates the BigInt's
+ // internal representation directly.
+ using U64ArrayT = cpp::array<uint64_t, UIntT::WORD_COUNT>;
+ U64ArrayT array;
+ size_t size = buffer.size;
+ const uint8_t *digit_ptr = buffer.digits + size;
+ for (size_t i = 0; i < array.size(); ++i) {
+ constexpr size_t U64_DIGITS = DigitBuffer<uint64_t, base>::MAX_DIGITS;
+ const size_t chunk = size > U64_DIGITS ? U64_DIGITS : size;
+ digit_ptr -= chunk;
+ size -= chunk;
+ array[i] = accumulate<uint64_t>(base, digit_ptr, chunk);
+ }
+ return UIntT(array);
+ }
+ }
+};
+
+// Detects the base of the number and dispatches to the right implementation.
+template <typename T>
+LIBC_INLINE constexpr T parse_with_prefix(const char *ptr) {
+ using P = Parser<T>;
+ if (ptr[0] == '0' && ptr[1] == 'x')
+ return P::template parse<16>(ptr + 2);
+ else if (ptr[0] == '0' && ptr[1] == 'b')
+ return P::template parse<2>(ptr + 2);
+ else
+ return P::template parse<10>(ptr);
+}
+
+} // namespace internal
+
+LIBC_INLINE constexpr UInt128 operator""_u128(const char *x) {
+ return internal::parse_with_prefix<UInt128>(x);
+}
+
+LIBC_INLINE constexpr auto operator""_u256(const char *x) {
+ return internal::parse_with_prefix<cpp::UInt<256>>(x);
+}
+
+} // namespace LIBC_NAMESPACE
+
+#endif // LLVM_LIBC_SRC___SUPPORT_INTEGER_LITERALS_H
diff --git a/libc/src/__support/integer_utils.h b/libc/src/__support/integer_utils.h
index 1d9a134..dd407f9 100644
--- a/libc/src/__support/integer_utils.h
+++ b/libc/src/__support/integer_utils.h
@@ -19,7 +19,28 @@
namespace LIBC_NAMESPACE {
-template <typename T> NumberPair<T> full_mul(T a, T b);
+template <typename T> NumberPair<T> full_mul(T a, T b) {
+ NumberPair<T> pa = split(a);
+ NumberPair<T> pb = split(b);
+ NumberPair<T> prod;
+
+ prod.lo = pa.lo * pb.lo; // exact
+ prod.hi = pa.hi * pb.hi; // exact
+ NumberPair<T> lo_hi = split(pa.lo * pb.hi); // exact
+ NumberPair<T> hi_lo = split(pa.hi * pb.lo); // exact
+
+ constexpr size_t HALF_BIT_WIDTH = sizeof(T) * CHAR_BIT / 2;
+
+ auto r1 = add_with_carry(prod.lo, lo_hi.lo << HALF_BIT_WIDTH, T(0));
+ prod.lo = r1.sum;
+ prod.hi = add_with_carry(prod.hi, lo_hi.hi, r1.carry).sum;
+
+ auto r2 = add_with_carry(prod.lo, hi_lo.lo << HALF_BIT_WIDTH, T(0));
+ prod.lo = r2.sum;
+ prod.hi = add_with_carry(prod.hi, hi_lo.hi, r2.carry).sum;
+
+ return prod;
+}
template <>
LIBC_INLINE NumberPair<uint32_t> full_mul<uint32_t>(uint32_t a, uint32_t b) {
@@ -30,35 +51,16 @@ LIBC_INLINE NumberPair<uint32_t> full_mul<uint32_t>(uint32_t a, uint32_t b) {
return result;
}
+#ifdef __SIZEOF_INT128__
template <>
LIBC_INLINE NumberPair<uint64_t> full_mul<uint64_t>(uint64_t a, uint64_t b) {
-#ifdef __SIZEOF_INT128__
__uint128_t prod = __uint128_t(a) * __uint128_t(b);
NumberPair<uint64_t> result;
result.lo = uint64_t(prod);
result.hi = uint64_t(prod >> 64);
return result;
-#else
- NumberPair<uint64_t> pa = split(a);
- NumberPair<uint64_t> pb = split(b);
- NumberPair<uint64_t> prod;
-
- prod.lo = pa.lo * pb.lo; // exact
- prod.hi = pa.hi * pb.hi; // exact
- NumberPair<uint64_t> lo_hi = split(pa.lo * pb.hi); // exact
- NumberPair<uint64_t> hi_lo = split(pa.hi * pb.lo); // exact
-
- auto r1 = add_with_carry(prod.lo, lo_hi.lo << 32, uint64_t(0));
- prod.lo = r1.sum;
- prod.hi = add_with_carry(prod.hi, lo_hi.hi, r1.carry).sum;
-
- auto r2 = add_with_carry(prod.lo, hi_lo.lo << 32, uint64_t(0));
- prod.lo = r2.sum;
- prod.hi = add_with_carry(prod.hi, hi_lo.hi, r2.carry).sum;
-
- return prod;
-#endif // __SIZEOF_INT128__
}
+#endif // __SIZEOF_INT128__
} // namespace LIBC_NAMESPACE
diff --git a/libc/src/math/CMakeLists.txt b/libc/src/math/CMakeLists.txt
index 985585c..05ce51e 100644
--- a/libc/src/math/CMakeLists.txt
+++ b/libc/src/math/CMakeLists.txt
@@ -149,6 +149,7 @@ add_math_entrypoint_object(ilogbl)
add_math_entrypoint_object(ldexp)
add_math_entrypoint_object(ldexpf)
add_math_entrypoint_object(ldexpl)
+add_math_entrypoint_object(ldexpf128)
add_math_entrypoint_object(log10)
add_math_entrypoint_object(log10f)
diff --git a/libc/src/math/generic/CMakeLists.txt b/libc/src/math/generic/CMakeLists.txt
index fdf383f..259ae1c 100644
--- a/libc/src/math/generic/CMakeLists.txt
+++ b/libc/src/math/generic/CMakeLists.txt
@@ -1001,10 +1001,10 @@ add_entrypoint_object(
ldexp.cpp
HDRS
../ldexp.h
+ COMPILE_OPTIONS
+ -O3
DEPENDS
libc.src.__support.FPUtil.manipulation_functions
- COMPILE_OPTIONS
- -O2
)
add_entrypoint_object(
@@ -1013,10 +1013,10 @@ add_entrypoint_object(
ldexpf.cpp
HDRS
../ldexpf.h
+ COMPILE_OPTIONS
+ -O3
DEPENDS
libc.src.__support.FPUtil.manipulation_functions
- COMPILE_OPTIONS
- -O2
)
add_entrypoint_object(
@@ -1025,10 +1025,23 @@ add_entrypoint_object(
ldexpl.cpp
HDRS
../ldexpl.h
+ COMPILE_OPTIONS
+ -O3
DEPENDS
libc.src.__support.FPUtil.manipulation_functions
+)
+
+add_entrypoint_object(
+ ldexpf128
+ SRCS
+ ldexpf128.cpp
+ HDRS
+ ../ldexpf128.h
COMPILE_OPTIONS
- -O2
+ -O3
+ DEPENDS
+ libc.src.__support.macros.properties.float
+ libc.src.__support.FPUtil.manipulation_functions
)
add_object_library(
diff --git a/libc/src/math/generic/acoshf.cpp b/libc/src/math/generic/acoshf.cpp
index 54b66bf..a4a75a7 100644
--- a/libc/src/math/generic/acoshf.cpp
+++ b/libc/src/math/generic/acoshf.cpp
@@ -33,12 +33,8 @@ LLVM_LIBC_FUNCTION(float, acoshf, (float x)) {
}
if (LIBC_UNLIKELY(x_u >= 0x4f8ffb03)) {
- // Check for exceptional values.
- uint32_t x_abs = xbits.abs().uintval();
- if (LIBC_UNLIKELY(x_abs >= 0x7f80'0000U)) {
- // x is +inf or NaN.
+ if (LIBC_UNLIKELY(xbits.is_inf_or_nan()))
return x;
- }
// Helper functions to set results for exceptional cases.
auto round_result_slightly_down = [](float r) -> float {
diff --git a/libc/src/math/generic/asinhf.cpp b/libc/src/math/generic/asinhf.cpp
index ac05991..6e35178 100644
--- a/libc/src/math/generic/asinhf.cpp
+++ b/libc/src/math/generic/asinhf.cpp
@@ -59,10 +59,8 @@ LLVM_LIBC_FUNCTION(float, asinhf, (float x)) {
};
if (LIBC_UNLIKELY(x_abs >= 0x4bdd'65a5U)) {
- if (LIBC_UNLIKELY(x_abs >= 0x7f80'0000U)) {
- // x is +-inf or nan
+ if (LIBC_UNLIKELY(xbits.is_inf_or_nan()))
return x;
- }
// Exceptional cases when x > 2^24.
switch (x_abs) {
diff --git a/libc/src/math/generic/ldexpf128.cpp b/libc/src/math/generic/ldexpf128.cpp
new file mode 100644
index 0000000..ed2ebd3
--- /dev/null
+++ b/libc/src/math/generic/ldexpf128.cpp
@@ -0,0 +1,19 @@
+//===-- Implementation of ldexpf128 function ------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "src/math/ldexpf128.h"
+#include "src/__support/FPUtil/ManipulationFunctions.h"
+#include "src/__support/common.h"
+
+namespace LIBC_NAMESPACE {
+
+LLVM_LIBC_FUNCTION(float128, ldexpf128, (float128 x, int exp)) {
+ return fputil::ldexp(x, exp);
+}
+
+} // namespace LIBC_NAMESPACE
diff --git a/libc/src/math/ldexpf128.h b/libc/src/math/ldexpf128.h
new file mode 100644
index 0000000..adf9d8f
--- /dev/null
+++ b/libc/src/math/ldexpf128.h
@@ -0,0 +1,20 @@
+//===-- Implementation header for ldexpf128 ---------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIBC_SRC_MATH_LDEXPF128_H
+#define LLVM_LIBC_SRC_MATH_LDEXPF128_H
+
+#include "src/__support/macros/properties/float.h"
+
+namespace LIBC_NAMESPACE {
+
+float128 ldexpf128(float128 x, int exp);
+
+} // namespace LIBC_NAMESPACE
+
+#endif // LLVM_LIBC_SRC_MATH_LDEXPF128_H
diff --git a/libc/src/stdbit/CMakeLists.txt b/libc/src/stdbit/CMakeLists.txt
index 65d5f34..14cc26e 100644
--- a/libc/src/stdbit/CMakeLists.txt
+++ b/libc/src/stdbit/CMakeLists.txt
@@ -4,6 +4,9 @@ set(prefixes
trailing_zeros
trailing_ones
first_leading_zero
+ first_leading_one
+ first_trailing_zero
+ first_trailing_one
)
set(suffixes c s i l ll)
foreach(prefix IN LISTS prefixes)
diff --git a/libc/src/stdbit/stdc_first_leading_one_uc.cpp b/libc/src/stdbit/stdc_first_leading_one_uc.cpp
new file mode 100644
index 0000000..0287159
--- /dev/null
+++ b/libc/src/stdbit/stdc_first_leading_one_uc.cpp
@@ -0,0 +1,20 @@
+//===-- Implementation of stdc_first_leading_one_uc -----------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "src/stdbit/stdc_first_leading_one_uc.h"
+
+#include "src/__support/CPP/bit.h"
+#include "src/__support/common.h"
+
+namespace LIBC_NAMESPACE {
+
+LLVM_LIBC_FUNCTION(unsigned, stdc_first_leading_one_uc, (unsigned char value)) {
+ return static_cast<unsigned>(cpp::first_leading_one(value));
+}
+
+} // namespace LIBC_NAMESPACE
diff --git a/libc/src/stdbit/stdc_first_leading_one_uc.h b/libc/src/stdbit/stdc_first_leading_one_uc.h
new file mode 100644
index 0000000..58892c3
--- /dev/null
+++ b/libc/src/stdbit/stdc_first_leading_one_uc.h
@@ -0,0 +1,18 @@
+//===-- Implementation header for stdc_first_leading_one_uc -----*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIBC_SRC_STDBIT_STDC_FIRST_LEADING_ONE_UC_H
+#define LLVM_LIBC_SRC_STDBIT_STDC_FIRST_LEADING_ONE_UC_H
+
+namespace LIBC_NAMESPACE {
+
+unsigned stdc_first_leading_one_uc(unsigned char value);
+
+} // namespace LIBC_NAMESPACE
+
+#endif // LLVM_LIBC_SRC_STDBIT_STDC_FIRST_LEADING_ONE_UC_H
diff --git a/libc/src/stdbit/stdc_first_leading_one_ui.cpp b/libc/src/stdbit/stdc_first_leading_one_ui.cpp
new file mode 100644
index 0000000..a6c7ef5
--- /dev/null
+++ b/libc/src/stdbit/stdc_first_leading_one_ui.cpp
@@ -0,0 +1,20 @@
+//===-- Implementation of stdc_first_leading_one_ui -----------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "src/stdbit/stdc_first_leading_one_ui.h"
+
+#include "src/__support/CPP/bit.h"
+#include "src/__support/common.h"
+
+namespace LIBC_NAMESPACE {
+
+LLVM_LIBC_FUNCTION(unsigned, stdc_first_leading_one_ui, (unsigned value)) {
+ return static_cast<unsigned>(cpp::first_leading_one(value));
+}
+
+} // namespace LIBC_NAMESPACE
diff --git a/libc/src/stdbit/stdc_first_leading_one_ui.h b/libc/src/stdbit/stdc_first_leading_one_ui.h
new file mode 100644
index 0000000..613adf4
--- /dev/null
+++ b/libc/src/stdbit/stdc_first_leading_one_ui.h
@@ -0,0 +1,18 @@
+//===-- Implementation header for stdc_first_leading_one_ui -----*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIBC_SRC_STDBIT_STDC_FIRST_LEADING_ONE_UI_H
+#define LLVM_LIBC_SRC_STDBIT_STDC_FIRST_LEADING_ONE_UI_H
+
+namespace LIBC_NAMESPACE {
+
+unsigned stdc_first_leading_one_ui(unsigned value);
+
+} // namespace LIBC_NAMESPACE
+
+#endif // LLVM_LIBC_SRC_STDBIT_STDC_FIRST_LEADING_ONE_UI_H
diff --git a/libc/src/stdbit/stdc_first_leading_one_ul.cpp b/libc/src/stdbit/stdc_first_leading_one_ul.cpp
new file mode 100644
index 0000000..d1bcab5
--- /dev/null
+++ b/libc/src/stdbit/stdc_first_leading_one_ul.cpp
@@ -0,0 +1,20 @@
+//===-- Implementation of stdc_first_leading_one_ul -----------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "src/stdbit/stdc_first_leading_one_ul.h"
+
+#include "src/__support/CPP/bit.h"
+#include "src/__support/common.h"
+
+namespace LIBC_NAMESPACE {
+
+LLVM_LIBC_FUNCTION(unsigned, stdc_first_leading_one_ul, (unsigned long value)) {
+ return static_cast<unsigned>(cpp::first_leading_one(value));
+}
+
+} // namespace LIBC_NAMESPACE
diff --git a/libc/src/stdbit/stdc_first_leading_one_ul.h b/libc/src/stdbit/stdc_first_leading_one_ul.h
new file mode 100644
index 0000000..47c179f
--- /dev/null
+++ b/libc/src/stdbit/stdc_first_leading_one_ul.h
@@ -0,0 +1,18 @@
+//===-- Implementation header for stdc_first_leading_one_ul -----*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIBC_SRC_STDBIT_STDC_FIRST_LEADING_ONE_UL_H
+#define LLVM_LIBC_SRC_STDBIT_STDC_FIRST_LEADING_ONE_UL_H
+
+namespace LIBC_NAMESPACE {
+
+unsigned stdc_first_leading_one_ul(unsigned long value);
+
+} // namespace LIBC_NAMESPACE
+
+#endif // LLVM_LIBC_SRC_STDBIT_STDC_FIRST_LEADING_ONE_UL_H
diff --git a/libc/src/stdbit/stdc_first_leading_one_ull.cpp b/libc/src/stdbit/stdc_first_leading_one_ull.cpp
new file mode 100644
index 0000000..7be8f10
--- /dev/null
+++ b/libc/src/stdbit/stdc_first_leading_one_ull.cpp
@@ -0,0 +1,21 @@
+//===-- Implementation of stdc_first_leading_one_ull ----------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "src/stdbit/stdc_first_leading_one_ull.h"
+
+#include "src/__support/CPP/bit.h"
+#include "src/__support/common.h"
+
+namespace LIBC_NAMESPACE {
+
+LLVM_LIBC_FUNCTION(unsigned, stdc_first_leading_one_ull,
+ (unsigned long long value)) {
+ return static_cast<unsigned>(cpp::first_leading_one(value));
+}
+
+} // namespace LIBC_NAMESPACE
diff --git a/libc/src/stdbit/stdc_first_leading_one_ull.h b/libc/src/stdbit/stdc_first_leading_one_ull.h
new file mode 100644
index 0000000..344d03f
--- /dev/null
+++ b/libc/src/stdbit/stdc_first_leading_one_ull.h
@@ -0,0 +1,18 @@
+//===-- Implementation header for stdc_first_leading_one_ull ----*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIBC_SRC_STDBIT_STDC_FIRST_LEADING_ONE_ULL_H
+#define LLVM_LIBC_SRC_STDBIT_STDC_FIRST_LEADING_ONE_ULL_H
+
+namespace LIBC_NAMESPACE {
+
+unsigned stdc_first_leading_one_ull(unsigned long long value);
+
+} // namespace LIBC_NAMESPACE
+
+#endif // LLVM_LIBC_SRC_STDBIT_STDC_FIRST_LEADING_ONE_ULL_H
diff --git a/libc/src/stdbit/stdc_first_leading_one_us.cpp b/libc/src/stdbit/stdc_first_leading_one_us.cpp
new file mode 100644
index 0000000..7a4c7e6
--- /dev/null
+++ b/libc/src/stdbit/stdc_first_leading_one_us.cpp
@@ -0,0 +1,21 @@
+//===-- Implementation of stdc_first_leading_one_us -----------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "src/stdbit/stdc_first_leading_one_us.h"
+
+#include "src/__support/CPP/bit.h"
+#include "src/__support/common.h"
+
+namespace LIBC_NAMESPACE {
+
+LLVM_LIBC_FUNCTION(unsigned, stdc_first_leading_one_us,
+ (unsigned short value)) {
+ return static_cast<unsigned>(cpp::first_leading_one(value));
+}
+
+} // namespace LIBC_NAMESPACE
diff --git a/libc/src/stdbit/stdc_first_leading_one_us.h b/libc/src/stdbit/stdc_first_leading_one_us.h
new file mode 100644
index 0000000..9d5feaf1
--- /dev/null
+++ b/libc/src/stdbit/stdc_first_leading_one_us.h
@@ -0,0 +1,18 @@
+//===-- Implementation header for stdc_first_leading_one_us -----*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIBC_SRC_STDBIT_STDC_FIRST_LEADING_ONE_US_H
+#define LLVM_LIBC_SRC_STDBIT_STDC_FIRST_LEADING_ONE_US_H
+
+namespace LIBC_NAMESPACE {
+
+unsigned stdc_first_leading_one_us(unsigned short value);
+
+} // namespace LIBC_NAMESPACE
+
+#endif // LLVM_LIBC_SRC_STDBIT_STDC_FIRST_LEADING_ONE_US_H
diff --git a/libc/src/stdbit/stdc_first_trailing_one_uc.cpp b/libc/src/stdbit/stdc_first_trailing_one_uc.cpp
new file mode 100644
index 0000000..6ed3596
--- /dev/null
+++ b/libc/src/stdbit/stdc_first_trailing_one_uc.cpp
@@ -0,0 +1,21 @@
+//===-- Implementation of stdc_first_trailing_one_uc ----------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "src/stdbit/stdc_first_trailing_one_uc.h"
+
+#include "src/__support/CPP/bit.h"
+#include "src/__support/common.h"
+
+namespace LIBC_NAMESPACE {
+
+LLVM_LIBC_FUNCTION(unsigned, stdc_first_trailing_one_uc,
+ (unsigned char value)) {
+ return static_cast<unsigned>(cpp::first_trailing_one(value));
+}
+
+} // namespace LIBC_NAMESPACE
diff --git a/libc/src/stdbit/stdc_first_trailing_one_uc.h b/libc/src/stdbit/stdc_first_trailing_one_uc.h
new file mode 100644
index 0000000..d733ce8
--- /dev/null
+++ b/libc/src/stdbit/stdc_first_trailing_one_uc.h
@@ -0,0 +1,18 @@
+//===-- Implementation header for stdc_first_trailing_one_uc ---*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIBC_SRC_STDBIT_STDC_FIRST_TRAILING_ONE_UC_H
+#define LLVM_LIBC_SRC_STDBIT_STDC_FIRST_TRAILING_ONE_UC_H
+
+namespace LIBC_NAMESPACE {
+
+unsigned stdc_first_trailing_one_uc(unsigned char value);
+
+} // namespace LIBC_NAMESPACE
+
+#endif // LLVM_LIBC_SRC_STDBIT_STDC_FIRST_TRAILING_ONE_UC_H
diff --git a/libc/src/stdbit/stdc_first_trailing_one_ui.cpp b/libc/src/stdbit/stdc_first_trailing_one_ui.cpp
new file mode 100644
index 0000000..a89083b
--- /dev/null
+++ b/libc/src/stdbit/stdc_first_trailing_one_ui.cpp
@@ -0,0 +1,20 @@
+//===-- Implementation of stdc_first_trailing_one_ui ----------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "src/stdbit/stdc_first_trailing_one_ui.h"
+
+#include "src/__support/CPP/bit.h"
+#include "src/__support/common.h"
+
+namespace LIBC_NAMESPACE {
+
+LLVM_LIBC_FUNCTION(unsigned, stdc_first_trailing_one_ui, (unsigned value)) {
+ return static_cast<unsigned>(cpp::first_trailing_one(value));
+}
+
+} // namespace LIBC_NAMESPACE
diff --git a/libc/src/stdbit/stdc_first_trailing_one_ui.h b/libc/src/stdbit/stdc_first_trailing_one_ui.h
new file mode 100644
index 0000000..6a6a504
--- /dev/null
+++ b/libc/src/stdbit/stdc_first_trailing_one_ui.h
@@ -0,0 +1,18 @@
+//===-- Implementation header for stdc_first_trailing_one_ui ---*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIBC_SRC_STDBIT_STDC_FIRST_TRAILING_ONE_UI_H
+#define LLVM_LIBC_SRC_STDBIT_STDC_FIRST_TRAILING_ONE_UI_H
+
+namespace LIBC_NAMESPACE {
+
+unsigned stdc_first_trailing_one_ui(unsigned value);
+
+} // namespace LIBC_NAMESPACE
+
+#endif // LLVM_LIBC_SRC_STDBIT_STDC_FIRST_TRAILING_ONE_UI_H
diff --git a/libc/src/stdbit/stdc_first_trailing_one_ul.cpp b/libc/src/stdbit/stdc_first_trailing_one_ul.cpp
new file mode 100644
index 0000000..f30078d
--- /dev/null
+++ b/libc/src/stdbit/stdc_first_trailing_one_ul.cpp
@@ -0,0 +1,21 @@
+//===-- Implementation of stdc_first_trailing_one_ul ----------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "src/stdbit/stdc_first_trailing_one_ul.h"
+
+#include "src/__support/CPP/bit.h"
+#include "src/__support/common.h"
+
+namespace LIBC_NAMESPACE {
+
+LLVM_LIBC_FUNCTION(unsigned, stdc_first_trailing_one_ul,
+ (unsigned long value)) {
+ return static_cast<unsigned>(cpp::first_trailing_one(value));
+}
+
+} // namespace LIBC_NAMESPACE
diff --git a/libc/src/stdbit/stdc_first_trailing_one_ul.h b/libc/src/stdbit/stdc_first_trailing_one_ul.h
new file mode 100644
index 0000000..09b6a9b
--- /dev/null
+++ b/libc/src/stdbit/stdc_first_trailing_one_ul.h
@@ -0,0 +1,18 @@
+//===-- Implementation header for stdc_first_trailing_one_ul ---*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIBC_SRC_STDBIT_STDC_FIRST_TRAILING_ONE_UL_H
+#define LLVM_LIBC_SRC_STDBIT_STDC_FIRST_TRAILING_ONE_UL_H
+
+namespace LIBC_NAMESPACE {
+
+unsigned stdc_first_trailing_one_ul(unsigned long value);
+
+} // namespace LIBC_NAMESPACE
+
+#endif // LLVM_LIBC_SRC_STDBIT_STDC_FIRST_TRAILING_ONE_UL_H
diff --git a/libc/src/stdbit/stdc_first_trailing_one_ull.cpp b/libc/src/stdbit/stdc_first_trailing_one_ull.cpp
new file mode 100644
index 0000000..2e526a8
--- /dev/null
+++ b/libc/src/stdbit/stdc_first_trailing_one_ull.cpp
@@ -0,0 +1,21 @@
+//===-- Implementation of stdc_first_trailing_one_ull ---------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "src/stdbit/stdc_first_trailing_one_ull.h"
+
+#include "src/__support/CPP/bit.h"
+#include "src/__support/common.h"
+
+namespace LIBC_NAMESPACE {
+
+LLVM_LIBC_FUNCTION(unsigned, stdc_first_trailing_one_ull,
+ (unsigned long long value)) {
+ return static_cast<unsigned>(cpp::first_trailing_one(value));
+}
+
+} // namespace LIBC_NAMESPACE
diff --git a/libc/src/stdbit/stdc_first_trailing_one_ull.h b/libc/src/stdbit/stdc_first_trailing_one_ull.h
new file mode 100644
index 0000000..3e12a1d
--- /dev/null
+++ b/libc/src/stdbit/stdc_first_trailing_one_ull.h
@@ -0,0 +1,18 @@
+//===-- Implementation header for stdc_first_trailing_one_ull --*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIBC_SRC_STDBIT_STDC_FIRST_TRAILING_ONE_ULL_H
+#define LLVM_LIBC_SRC_STDBIT_STDC_FIRST_TRAILING_ONE_ULL_H
+
+namespace LIBC_NAMESPACE {
+
+unsigned stdc_first_trailing_one_ull(unsigned long long value);
+
+} // namespace LIBC_NAMESPACE
+
+#endif // LLVM_LIBC_SRC_STDBIT_STDC_FIRST_TRAILING_ONE_ULL_H
diff --git a/libc/src/stdbit/stdc_first_trailing_one_us.cpp b/libc/src/stdbit/stdc_first_trailing_one_us.cpp
new file mode 100644
index 0000000..e4c88e0
--- /dev/null
+++ b/libc/src/stdbit/stdc_first_trailing_one_us.cpp
@@ -0,0 +1,21 @@
+//===-- Implementation of stdc_first_trailing_one_us ----------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "src/stdbit/stdc_first_trailing_one_us.h"
+
+#include "src/__support/CPP/bit.h"
+#include "src/__support/common.h"
+
+namespace LIBC_NAMESPACE {
+
+LLVM_LIBC_FUNCTION(unsigned, stdc_first_trailing_one_us,
+ (unsigned short value)) {
+ return static_cast<unsigned>(cpp::first_trailing_one(value));
+}
+
+} // namespace LIBC_NAMESPACE
diff --git a/libc/src/stdbit/stdc_first_trailing_one_us.h b/libc/src/stdbit/stdc_first_trailing_one_us.h
new file mode 100644
index 0000000..f380898
--- /dev/null
+++ b/libc/src/stdbit/stdc_first_trailing_one_us.h
@@ -0,0 +1,18 @@
+//===-- Implementation header for stdc_first_trailing_one_us ---*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIBC_SRC_STDBIT_STDC_FIRST_TRAILING_ONE_US_H
+#define LLVM_LIBC_SRC_STDBIT_STDC_FIRST_TRAILING_ONE_US_H
+
+namespace LIBC_NAMESPACE {
+
+unsigned stdc_first_trailing_one_us(unsigned short value);
+
+} // namespace LIBC_NAMESPACE
+
+#endif // LLVM_LIBC_SRC_STDBIT_STDC_FIRST_TRAILING_ONE_US_H
diff --git a/libc/src/stdbit/stdc_first_trailing_zero_uc.cpp b/libc/src/stdbit/stdc_first_trailing_zero_uc.cpp
new file mode 100644
index 0000000..5825d5d
--- /dev/null
+++ b/libc/src/stdbit/stdc_first_trailing_zero_uc.cpp
@@ -0,0 +1,21 @@
+//===-- Implementation of stdc_first_trailing_zero_uc ---------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "src/stdbit/stdc_first_trailing_zero_uc.h"
+
+#include "src/__support/CPP/bit.h"
+#include "src/__support/common.h"
+
+namespace LIBC_NAMESPACE {
+
+LLVM_LIBC_FUNCTION(unsigned, stdc_first_trailing_zero_uc,
+ (unsigned char value)) {
+ return static_cast<unsigned>(cpp::first_trailing_zero(value));
+}
+
+} // namespace LIBC_NAMESPACE
diff --git a/libc/src/stdbit/stdc_first_trailing_zero_uc.h b/libc/src/stdbit/stdc_first_trailing_zero_uc.h
new file mode 100644
index 0000000..242472a
--- /dev/null
+++ b/libc/src/stdbit/stdc_first_trailing_zero_uc.h
@@ -0,0 +1,18 @@
+//===-- Implementation header for stdc_first_trailing_zero_uc ---*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIBC_SRC_STDBIT_STDC_FIRST_TRAILING_ZERO_UC_H
+#define LLVM_LIBC_SRC_STDBIT_STDC_FIRST_TRAILING_ZERO_UC_H
+
+namespace LIBC_NAMESPACE {
+
+unsigned stdc_first_trailing_zero_uc(unsigned char value);
+
+} // namespace LIBC_NAMESPACE
+
+#endif // LLVM_LIBC_SRC_STDBIT_STDC_FIRST_TRAILING_ZERO_UC_H
diff --git a/libc/src/stdbit/stdc_first_trailing_zero_ui.cpp b/libc/src/stdbit/stdc_first_trailing_zero_ui.cpp
new file mode 100644
index 0000000..3b51b5f
--- /dev/null
+++ b/libc/src/stdbit/stdc_first_trailing_zero_ui.cpp
@@ -0,0 +1,20 @@
+//===-- Implementation of stdc_first_trailing_zero_ui ---------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "src/stdbit/stdc_first_trailing_zero_ui.h"
+
+#include "src/__support/CPP/bit.h"
+#include "src/__support/common.h"
+
+namespace LIBC_NAMESPACE {
+
+LLVM_LIBC_FUNCTION(unsigned, stdc_first_trailing_zero_ui, (unsigned value)) {
+ return static_cast<unsigned>(cpp::first_trailing_zero(value));
+}
+
+} // namespace LIBC_NAMESPACE
diff --git a/libc/src/stdbit/stdc_first_trailing_zero_ui.h b/libc/src/stdbit/stdc_first_trailing_zero_ui.h
new file mode 100644
index 0000000..cc308f7
--- /dev/null
+++ b/libc/src/stdbit/stdc_first_trailing_zero_ui.h
@@ -0,0 +1,18 @@
+//===-- Implementation header for stdc_first_trailing_zero_ui ---*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIBC_SRC_STDBIT_STDC_FIRST_TRAILING_ZERO_UI_H
+#define LLVM_LIBC_SRC_STDBIT_STDC_FIRST_TRAILING_ZERO_UI_H
+
+namespace LIBC_NAMESPACE {
+
+unsigned stdc_first_trailing_zero_ui(unsigned value);
+
+} // namespace LIBC_NAMESPACE
+
+#endif // LLVM_LIBC_SRC_STDBIT_STDC_FIRST_TRAILING_ZERO_UI_H
diff --git a/libc/src/stdbit/stdc_first_trailing_zero_ul.cpp b/libc/src/stdbit/stdc_first_trailing_zero_ul.cpp
new file mode 100644
index 0000000..abf1229
--- /dev/null
+++ b/libc/src/stdbit/stdc_first_trailing_zero_ul.cpp
@@ -0,0 +1,21 @@
+//===-- Implementation of stdc_first_trailing_zero_ul ---------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "src/stdbit/stdc_first_trailing_zero_ul.h"
+
+#include "src/__support/CPP/bit.h"
+#include "src/__support/common.h"
+
+namespace LIBC_NAMESPACE {
+
+LLVM_LIBC_FUNCTION(unsigned, stdc_first_trailing_zero_ul,
+ (unsigned long value)) {
+ return static_cast<unsigned>(cpp::first_trailing_zero(value));
+}
+
+} // namespace LIBC_NAMESPACE
diff --git a/libc/src/stdbit/stdc_first_trailing_zero_ul.h b/libc/src/stdbit/stdc_first_trailing_zero_ul.h
new file mode 100644
index 0000000..8241337
--- /dev/null
+++ b/libc/src/stdbit/stdc_first_trailing_zero_ul.h
@@ -0,0 +1,18 @@
+//===-- Implementation header for stdc_first_trailing_zero_ul ---*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIBC_SRC_STDBIT_STDC_FIRST_TRAILING_ZERO_UL_H
+#define LLVM_LIBC_SRC_STDBIT_STDC_FIRST_TRAILING_ZERO_UL_H
+
+namespace LIBC_NAMESPACE {
+
+unsigned stdc_first_trailing_zero_ul(unsigned long value);
+
+} // namespace LIBC_NAMESPACE
+
+#endif // LLVM_LIBC_SRC_STDBIT_STDC_FIRST_TRAILING_ZERO_UL_H
diff --git a/libc/src/stdbit/stdc_first_trailing_zero_ull.cpp b/libc/src/stdbit/stdc_first_trailing_zero_ull.cpp
new file mode 100644
index 0000000..336e7d6
--- /dev/null
+++ b/libc/src/stdbit/stdc_first_trailing_zero_ull.cpp
@@ -0,0 +1,21 @@
+//===-- Implementation of stdc_first_trailing_zero_ull --------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "src/stdbit/stdc_first_trailing_zero_ull.h"
+
+#include "src/__support/CPP/bit.h"
+#include "src/__support/common.h"
+
+namespace LIBC_NAMESPACE {
+
+LLVM_LIBC_FUNCTION(unsigned, stdc_first_trailing_zero_ull,
+ (unsigned long long value)) {
+ return static_cast<unsigned>(cpp::first_trailing_zero(value));
+}
+
+} // namespace LIBC_NAMESPACE
diff --git a/libc/src/stdbit/stdc_first_trailing_zero_ull.h b/libc/src/stdbit/stdc_first_trailing_zero_ull.h
new file mode 100644
index 0000000..3737fc1
--- /dev/null
+++ b/libc/src/stdbit/stdc_first_trailing_zero_ull.h
@@ -0,0 +1,18 @@
+//===-- Implementation header for stdc_first_trailing_zero_ull --*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIBC_SRC_STDBIT_STDC_FIRST_TRAILING_ZERO_ULL_H
+#define LLVM_LIBC_SRC_STDBIT_STDC_FIRST_TRAILING_ZERO_ULL_H
+
+namespace LIBC_NAMESPACE {
+
+unsigned stdc_first_trailing_zero_ull(unsigned long long value);
+
+} // namespace LIBC_NAMESPACE
+
+#endif // LLVM_LIBC_SRC_STDBIT_STDC_FIRST_TRAILING_ZERO_ULL_H
diff --git a/libc/src/stdbit/stdc_first_trailing_zero_us.cpp b/libc/src/stdbit/stdc_first_trailing_zero_us.cpp
new file mode 100644
index 0000000..b7d0504
--- /dev/null
+++ b/libc/src/stdbit/stdc_first_trailing_zero_us.cpp
@@ -0,0 +1,21 @@
+//===-- Implementation of stdc_first_trailing_zero_us ---------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "src/stdbit/stdc_first_trailing_zero_us.h"
+
+#include "src/__support/CPP/bit.h"
+#include "src/__support/common.h"
+
+namespace LIBC_NAMESPACE {
+
+LLVM_LIBC_FUNCTION(unsigned, stdc_first_trailing_zero_us,
+ (unsigned short value)) {
+ return static_cast<unsigned>(cpp::first_trailing_zero(value));
+}
+
+} // namespace LIBC_NAMESPACE
diff --git a/libc/src/stdbit/stdc_first_trailing_zero_us.h b/libc/src/stdbit/stdc_first_trailing_zero_us.h
new file mode 100644
index 0000000..608b052
--- /dev/null
+++ b/libc/src/stdbit/stdc_first_trailing_zero_us.h
@@ -0,0 +1,18 @@
+//===-- Implementation header for stdc_first_trailing_zero_us ---*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIBC_SRC_STDBIT_STDC_FIRST_TRAILING_ZERO_US_H
+#define LLVM_LIBC_SRC_STDBIT_STDC_FIRST_TRAILING_ZERO_US_H
+
+namespace LIBC_NAMESPACE {
+
+unsigned stdc_first_trailing_zero_us(unsigned short value);
+
+} // namespace LIBC_NAMESPACE
+
+#endif // LLVM_LIBC_SRC_STDBIT_STDC_FIRST_TRAILING_ZERO_US_H
diff --git a/libc/src/time/gpu/nanosleep.cpp b/libc/src/time/gpu/nanosleep.cpp
index e84fe62..dd669ff 100644
--- a/libc/src/time/gpu/nanosleep.cpp
+++ b/libc/src/time/gpu/nanosleep.cpp
@@ -12,29 +12,31 @@
namespace LIBC_NAMESPACE {
-constexpr uint64_t TICKS_PER_NS = 1000000000UL;
+constexpr uint64_t TICKS_PER_SEC = 1000000000UL;
LLVM_LIBC_FUNCTION(int, nanosleep,
(const struct timespec *req, struct timespec *rem)) {
if (!GPU_CLOCKS_PER_SEC || !req)
return -1;
- uint64_t nsecs = req->tv_nsec + req->tv_sec * TICKS_PER_NS;
+ uint64_t nsecs = req->tv_nsec + req->tv_sec * TICKS_PER_SEC;
+ uint64_t tick_rate = TICKS_PER_SEC / GPU_CLOCKS_PER_SEC;
uint64_t start = gpu::fixed_frequency_clock();
-#if defined(LIBC_TARGET_ARCH_IS_NVPTX) && __CUDA_ARCH__ >= 700
- uint64_t end = start + nsecs / (TICKS_PER_NS / GPU_CLOCKS_PER_SEC);
+#if defined(LIBC_TARGET_ARCH_IS_NVPTX)
+ uint64_t end = start + (nsecs + tick_rate - 1) / tick_rate;
uint64_t cur = gpu::fixed_frequency_clock();
// The NVPTX architecture supports sleeping and guaruntees the actual time
// slept will be somewhere between zero and twice the requested amount. Here
// we will sleep again if we undershot the time.
while (cur < end) {
- __nvvm_nanosleep(static_cast<uint32_t>(nsecs));
+ if (__nvvm_reflect("__CUDA_ARCH") >= 700)
+ LIBC_INLINE_ASM("nanosleep.u32 %0;" ::"r"(nsecs));
cur = gpu::fixed_frequency_clock();
nsecs -= nsecs > cur - start ? cur - start : 0;
}
#elif defined(LIBC_TARGET_ARCH_IS_AMDGPU)
- uint64_t end = start + nsecs / (TICKS_PER_NS / GPU_CLOCKS_PER_SEC);
+ uint64_t end = start + (nsecs + tick_rate - 1) / tick_rate;
uint64_t cur = gpu::fixed_frequency_clock();
// The AMDGPU architecture does not provide a sleep implementation with a
// known delay so we simply repeatedly sleep with a large value of ~960 clock
@@ -56,11 +58,11 @@ LLVM_LIBC_FUNCTION(int, nanosleep,
// Check to make sure we slept for at least the desired duration and set the
// remaining time if not.
- uint64_t elapsed = (stop - start) * (TICKS_PER_NS / GPU_CLOCKS_PER_SEC);
+ uint64_t elapsed = (stop - start) * tick_rate;
if (elapsed < nsecs) {
if (rem) {
- rem->tv_sec = (nsecs - elapsed) / TICKS_PER_NS;
- rem->tv_nsec = (nsecs - elapsed) % TICKS_PER_NS;
+ rem->tv_sec = (nsecs - elapsed) / TICKS_PER_SEC;
+ rem->tv_nsec = (nsecs - elapsed) % TICKS_PER_SEC;
}
return -1;
}
diff --git a/libc/src/time/gpu/time_utils.h b/libc/src/time/gpu/time_utils.h
index 531a748..8a9a5f0 100644
--- a/libc/src/time/gpu/time_utils.h
+++ b/libc/src/time/gpu/time_utils.h
@@ -15,24 +15,13 @@ namespace LIBC_NAMESPACE {
#if defined(LIBC_TARGET_ARCH_IS_AMDGPU)
// AMDGPU does not have a single set frequency. Different architectures and
-// cards can have vary values. Here we default to a few known values, but for
-// complete support the frequency needs to be read from the kernel driver.
-#if defined(__GFX10__) || defined(__GFX11__) || defined(__GFX12__) || \
- defined(__gfx940__) || defined(__gfx941__) || defined(__gfx942__)
-// These architectures use a 100 MHz fixed frequency clock.
-constexpr uint64_t clock_freq = 100000000;
-#elif defined(__GFX9__)
-// These architectures use a 25 MHz fixed frequency clock expect for Vega 10
-// which is actually 27 Mhz. We default to 25 MHz in all cases anyway.
-constexpr uint64_t clock_freq = 25000000;
-#else
-// The frequency for these architecture is unknown. We simply default to zero.
-constexpr uint64_t clock_freq = 0;
-#endif
+// cards can have different values. The actualy frequency needs to be read from
+// the kernel driver and will be between 25 MHz and 100 MHz on most cards. All
+// cards following the GFX9 ISAs use a 100 MHz clock so we will default to that.
+constexpr uint64_t clock_freq = 100000000UL;
// We provide an externally visible symbol such that the runtime can set
-// this to the correct value. If it is not set we try to default to the
-// known values.
+// this to the correct value.
extern "C" [[gnu::visibility("protected")]] uint64_t
[[clang::address_space(4)]] __llvm_libc_clock_freq;
#define GPU_CLOCKS_PER_SEC static_cast<clock_t>(__llvm_libc_clock_freq)
diff --git a/libc/test/UnitTest/CMakeLists.txt b/libc/test/UnitTest/CMakeLists.txt
index 9baa418..e3099e4 100644
--- a/libc/test/UnitTest/CMakeLists.txt
+++ b/libc/test/UnitTest/CMakeLists.txt
@@ -31,6 +31,9 @@ function(add_unittest_framework_library name)
if(TARGET libc.src.time.clock)
target_compile_definitions(${lib} PRIVATE TARGET_SUPPORTS_CLOCK)
endif()
+ if(LIBC_COMPILER_HAS_FIXED_POINT)
+ target_compile_options(${lib} PUBLIC -ffixed-point)
+ endif()
endforeach()
target_include_directories(${name}.hermetic PRIVATE ${LIBC_BUILD_DIR}/include)
target_compile_options(${name}.hermetic
diff --git a/libc/test/include/stdbit_test.cpp b/libc/test/include/stdbit_test.cpp
index 9a66a76..22d5533 100644
--- a/libc/test/include/stdbit_test.cpp
+++ b/libc/test/include/stdbit_test.cpp
@@ -50,6 +50,27 @@ unsigned stdc_first_leading_zero_ul(unsigned long) noexcept { return 0xEDU; }
unsigned stdc_first_leading_zero_ull(unsigned long long) noexcept {
return 0xEFU;
}
+unsigned stdc_first_leading_one_uc(unsigned char) noexcept { return 0xFAU; }
+unsigned stdc_first_leading_one_us(unsigned short) noexcept { return 0xFBU; }
+unsigned stdc_first_leading_one_ui(unsigned) noexcept { return 0xFCU; }
+unsigned stdc_first_leading_one_ul(unsigned long) noexcept { return 0xFDU; }
+unsigned stdc_first_leading_one_ull(unsigned long long) noexcept {
+ return 0xFFU;
+}
+unsigned stdc_first_trailing_zero_uc(unsigned char) noexcept { return 0x0AU; }
+unsigned stdc_first_trailing_zero_us(unsigned short) noexcept { return 0x0BU; }
+unsigned stdc_first_trailing_zero_ui(unsigned) noexcept { return 0x0CU; }
+unsigned stdc_first_trailing_zero_ul(unsigned long) noexcept { return 0x0DU; }
+unsigned stdc_first_trailing_zero_ull(unsigned long long) noexcept {
+ return 0x0FU;
+}
+unsigned stdc_first_trailing_one_uc(unsigned char) noexcept { return 0x1AU; }
+unsigned stdc_first_trailing_one_us(unsigned short) noexcept { return 0x1BU; }
+unsigned stdc_first_trailing_one_ui(unsigned) noexcept { return 0x1CU; }
+unsigned stdc_first_trailing_one_ul(unsigned long) noexcept { return 0x1DU; }
+unsigned stdc_first_trailing_one_ull(unsigned long long) noexcept {
+ return 0x1FU;
+}
}
#include "include/llvm-libc-macros/stdbit-macros.h"
@@ -93,3 +114,27 @@ TEST(LlvmLibcStdbitTest, TypeGenericMacroFirstLeadingZero) {
EXPECT_EQ(stdc_first_leading_zero(0UL), 0xEDU);
EXPECT_EQ(stdc_first_leading_zero(0ULL), 0xEFU);
}
+
+TEST(LlvmLibcStdbitTest, TypeGenericMacroFirstLeadingOne) {
+ EXPECT_EQ(stdc_first_leading_one(static_cast<unsigned char>(0U)), 0xFAU);
+ EXPECT_EQ(stdc_first_leading_one(static_cast<unsigned short>(0U)), 0xFBU);
+ EXPECT_EQ(stdc_first_leading_one(0U), 0xFCU);
+ EXPECT_EQ(stdc_first_leading_one(0UL), 0xFDU);
+ EXPECT_EQ(stdc_first_leading_one(0ULL), 0xFFU);
+}
+
+TEST(LlvmLibcStdbitTest, TypeGenericMacroFirstTrailingZero) {
+ EXPECT_EQ(stdc_first_trailing_zero(static_cast<unsigned char>(0U)), 0x0AU);
+ EXPECT_EQ(stdc_first_trailing_zero(static_cast<unsigned short>(0U)), 0x0BU);
+ EXPECT_EQ(stdc_first_trailing_zero(0U), 0x0CU);
+ EXPECT_EQ(stdc_first_trailing_zero(0UL), 0x0DU);
+ EXPECT_EQ(stdc_first_trailing_zero(0ULL), 0x0FU);
+}
+
+TEST(LlvmLibcStdbitTest, TypeGenericMacroFirstTrailingOne) {
+ EXPECT_EQ(stdc_first_trailing_one(static_cast<unsigned char>(0U)), 0x1AU);
+ EXPECT_EQ(stdc_first_trailing_one(static_cast<unsigned short>(0U)), 0x1BU);
+ EXPECT_EQ(stdc_first_trailing_one(0U), 0x1CU);
+ EXPECT_EQ(stdc_first_trailing_one(0UL), 0x1DU);
+ EXPECT_EQ(stdc_first_trailing_one(0ULL), 0x1FU);
+}
diff --git a/libc/test/src/__support/CMakeLists.txt b/libc/test/src/__support/CMakeLists.txt
index a92e6da..850f538 100644
--- a/libc/test/src/__support/CMakeLists.txt
+++ b/libc/test/src/__support/CMakeLists.txt
@@ -86,14 +86,28 @@ if(NOT LIBC_TARGET_ARCHITECTURE_IS_GPU)
)
endif()
+# FIXME: Crash in NVPTX target lowering for calls
+if(NOT LIBC_GPU_TARGET_ARCHITECTURE_IS_NVPTX)
+ add_libc_test(
+ uint_test
+ SUITE
+ libc-support-tests
+ SRCS
+ uint_test.cpp
+ DEPENDS
+ libc.src.__support.uint
+ libc.src.__support.CPP.optional
+ )
+endif()
+
add_libc_test(
- uint_test
+ integer_literals_test
SUITE
libc-support-tests
SRCS
- uint_test.cpp
+ integer_literals_test.cpp
DEPENDS
- libc.src.__support.uint
+ libc.src.__support.integer_literals
libc.src.__support.CPP.optional
)
diff --git a/libc/test/src/__support/CPP/bit_test.cpp b/libc/test/src/__support/CPP/bit_test.cpp
index 00d8ca5..5d1f451 100644
--- a/libc/test/src/__support/CPP/bit_test.cpp
+++ b/libc/test/src/__support/CPP/bit_test.cpp
@@ -213,4 +213,23 @@ TYPED_TEST(LlvmLibcBitTest, FirstLeadingZero, UnsignedTypes) {
cpp::numeric_limits<T>::digits - i);
}
+TYPED_TEST(LlvmLibcBitTest, FirstLeadingOne, UnsignedTypes) {
+ EXPECT_EQ(first_leading_one<T>(static_cast<T>(0)), 0);
+ for (int i = 0U; i != cpp::numeric_limits<T>::digits; ++i)
+ EXPECT_EQ(first_leading_one<T>(T(1) << i),
+ cpp::numeric_limits<T>::digits - i);
+}
+
+TYPED_TEST(LlvmLibcBitTest, FirstTrailingZero, UnsignedTypes) {
+ EXPECT_EQ(first_trailing_zero<T>(cpp::numeric_limits<T>::max()), 0);
+ for (int i = 0U; i != cpp::numeric_limits<T>::digits; ++i)
+ EXPECT_EQ(first_trailing_zero<T>(~(T(1) << i)), i + 1);
+}
+
+TYPED_TEST(LlvmLibcBitTest, FirstTrailingOne, UnsignedTypes) {
+ EXPECT_EQ(first_trailing_one<T>(cpp::numeric_limits<T>::max()), 0);
+ for (int i = 0U; i != cpp::numeric_limits<T>::digits; ++i)
+ EXPECT_EQ(first_trailing_one<T>(T(1) << i), i + 1);
+}
+
} // namespace LIBC_NAMESPACE::cpp
diff --git a/libc/test/src/__support/FPUtil/CMakeLists.txt b/libc/test/src/__support/FPUtil/CMakeLists.txt
index 897434c..f1a027a 100644
--- a/libc/test/src/__support/FPUtil/CMakeLists.txt
+++ b/libc/test/src/__support/FPUtil/CMakeLists.txt
@@ -23,6 +23,7 @@ add_libc_test(
DEPENDS
libc.src.__support.FPUtil.fp_bits
libc.src.__support.FPUtil.fpbits_str
+ libc.src.__support.integer_literals
)
add_fp_unittest(
diff --git a/libc/test/src/__support/FPUtil/dyadic_float_test.cpp b/libc/test/src/__support/FPUtil/dyadic_float_test.cpp
index a9f9842..625aa70 100644
--- a/libc/test/src/__support/FPUtil/dyadic_float_test.cpp
+++ b/libc/test/src/__support/FPUtil/dyadic_float_test.cpp
@@ -56,3 +56,37 @@ TEST(LlvmLibcDyadicFloatTest, QuickMul) {
Float256 z = quick_mul(x, y);
EXPECT_FP_EQ_ALL_ROUNDING(double(x) * double(y), double(z));
}
+
+#define TEST_EDGE_RANGES(Name, Type) \
+ TEST(LlvmLibcDyadicFloatTest, EdgeRanges##Name) { \
+ using Bits = LIBC_NAMESPACE::fputil::FPBits<Type>; \
+ using DFType = LIBC_NAMESPACE::fputil::DyadicFloat<Bits::STORAGE_LEN>; \
+ Type max_normal = Bits::max_normal().get_val(); \
+ Type min_normal = Bits::min_normal().get_val(); \
+ Type min_subnormal = Bits::min_subnormal().get_val(); \
+ Type two(2); \
+ \
+ DFType x(min_normal); \
+ EXPECT_FP_EQ_ALL_ROUNDING(min_normal, static_cast<Type>(x)); \
+ --x.exponent; \
+ EXPECT_FP_EQ(min_normal / two, static_cast<Type>(x)); \
+ \
+ DFType y(two *min_normal - min_subnormal); \
+ --y.exponent; \
+ EXPECT_FP_EQ(min_normal, static_cast<Type>(y)); \
+ \
+ DFType z(min_subnormal); \
+ EXPECT_FP_EQ_ALL_ROUNDING(min_subnormal, static_cast<Type>(z)); \
+ --z.exponent; \
+ EXPECT_FP_EQ(Bits::zero().get_val(), static_cast<Type>(z)); \
+ \
+ DFType t(max_normal); \
+ EXPECT_FP_EQ_ALL_ROUNDING(max_normal, static_cast<Type>(t)); \
+ ++t.exponent; \
+ EXPECT_FP_EQ(Bits::inf().get_val(), static_cast<Type>(t)); \
+ } \
+ static_assert(true, "Require semicolon.")
+
+TEST_EDGE_RANGES(Float, float);
+TEST_EDGE_RANGES(Double, double);
+TEST_EDGE_RANGES(LongDouble, long double);
diff --git a/libc/test/src/__support/FPUtil/fpbits_test.cpp b/libc/test/src/__support/FPUtil/fpbits_test.cpp
index b1c4b66..1c8a1c5 100644
--- a/libc/test/src/__support/FPUtil/fpbits_test.cpp
+++ b/libc/test/src/__support/FPUtil/fpbits_test.cpp
@@ -8,6 +8,7 @@
#include "src/__support/FPUtil/FPBits.h"
#include "src/__support/FPUtil/fpbits_str.h"
+#include "src/__support/integer_literals.h"
#include "test/UnitTest/Test.h"
using LIBC_NAMESPACE::fputil::FPBits;
@@ -15,37 +16,42 @@ using LIBC_NAMESPACE::fputil::FPType;
using LIBC_NAMESPACE::fputil::Sign;
using LIBC_NAMESPACE::fputil::internal::FPRep;
+using LIBC_NAMESPACE::operator""_u16;
+using LIBC_NAMESPACE::operator""_u32;
+using LIBC_NAMESPACE::operator""_u64;
+using LIBC_NAMESPACE::operator""_u128;
+
TEST(LlvmLibcFPBitsTest, FPType_IEEE754_Binary16) {
using Rep = FPRep<FPType::IEEE754_Binary16>;
using u16 = typename Rep::StorageType;
- EXPECT_EQ(u16(0b0'00000'0000000000), u16(Rep::zero()));
- EXPECT_EQ(u16(0b0'01111'0000000000), u16(Rep::one()));
- EXPECT_EQ(u16(0b0'00000'0000000001), u16(Rep::min_subnormal()));
- EXPECT_EQ(u16(0b0'00000'1111111111), u16(Rep::max_subnormal()));
- EXPECT_EQ(u16(0b0'00001'0000000000), u16(Rep::min_normal()));
- EXPECT_EQ(u16(0b0'11110'1111111111), u16(Rep::max_normal()));
- EXPECT_EQ(u16(0b0'11111'0000000000), u16(Rep::inf()));
- EXPECT_EQ(u16(0b0'11111'0100000000), u16(Rep::signaling_nan()));
- EXPECT_EQ(u16(0b0'11111'1000000000), u16(Rep::quiet_nan()));
+ EXPECT_EQ(0b0'00000'0000000000_u16, u16(Rep::zero()));
+ EXPECT_EQ(0b0'01111'0000000000_u16, u16(Rep::one()));
+ EXPECT_EQ(0b0'00000'0000000001_u16, u16(Rep::min_subnormal()));
+ EXPECT_EQ(0b0'00000'1111111111_u16, u16(Rep::max_subnormal()));
+ EXPECT_EQ(0b0'00001'0000000000_u16, u16(Rep::min_normal()));
+ EXPECT_EQ(0b0'11110'1111111111_u16, u16(Rep::max_normal()));
+ EXPECT_EQ(0b0'11111'0000000000_u16, u16(Rep::inf()));
+ EXPECT_EQ(0b0'11111'0100000000_u16, u16(Rep::signaling_nan()));
+ EXPECT_EQ(0b0'11111'1000000000_u16, u16(Rep::quiet_nan()));
}
TEST(LlvmLibcFPBitsTest, FPType_IEEE754_Binary32) {
using Rep = FPRep<FPType::IEEE754_Binary32>;
using u32 = typename Rep::StorageType;
- EXPECT_EQ(u32(0b0'00000000'00000000000000000000000), u32(Rep::zero()));
- EXPECT_EQ(u32(0b0'01111111'00000000000000000000000), u32(Rep::one()));
- EXPECT_EQ(u32(0b0'00000000'00000000000000000000001),
+ EXPECT_EQ(0b0'00000000'00000000000000000000000_u32, u32(Rep::zero()));
+ EXPECT_EQ(0b0'01111111'00000000000000000000000_u32, u32(Rep::one()));
+ EXPECT_EQ(0b0'00000000'00000000000000000000001_u32,
u32(Rep::min_subnormal()));
- EXPECT_EQ(u32(0b0'00000000'11111111111111111111111),
+ EXPECT_EQ(0b0'00000000'11111111111111111111111_u32,
u32(Rep::max_subnormal()));
- EXPECT_EQ(u32(0b0'00000001'00000000000000000000000), u32(Rep::min_normal()));
- EXPECT_EQ(u32(0b0'11111110'11111111111111111111111), u32(Rep::max_normal()));
- EXPECT_EQ(u32(0b0'11111111'00000000000000000000000), u32(Rep::inf()));
- EXPECT_EQ(u32(0b0'11111111'01000000000000000000000),
+ EXPECT_EQ(0b0'00000001'00000000000000000000000_u32, u32(Rep::min_normal()));
+ EXPECT_EQ(0b0'11111110'11111111111111111111111_u32, u32(Rep::max_normal()));
+ EXPECT_EQ(0b0'11111111'00000000000000000000000_u32, u32(Rep::inf()));
+ EXPECT_EQ(0b0'11111111'01000000000000000000000_u32,
u32(Rep::signaling_nan()));
- EXPECT_EQ(u32(0b0'11111111'10000000000000000000000), u32(Rep::quiet_nan()));
+ EXPECT_EQ(0b0'11111111'10000000000000000000000_u32, u32(Rep::quiet_nan()));
}
TEST(LlvmLibcFPBitsTest, FPType_IEEE754_Binary64) {
@@ -53,80 +59,63 @@ TEST(LlvmLibcFPBitsTest, FPType_IEEE754_Binary64) {
using u64 = typename Rep::StorageType;
EXPECT_EQ(
- u64(0b0'00000000000'0000000000000000000000000000000000000000000000000000),
+ 0b0'00000000000'0000000000000000000000000000000000000000000000000000_u64,
u64(Rep::zero()));
EXPECT_EQ(
- u64(0b0'01111111111'0000000000000000000000000000000000000000000000000000),
+ 0b0'01111111111'0000000000000000000000000000000000000000000000000000_u64,
u64(Rep::one()));
EXPECT_EQ(
- u64(0b0'00000000000'0000000000000000000000000000000000000000000000000001),
+ 0b0'00000000000'0000000000000000000000000000000000000000000000000001_u64,
u64(Rep::min_subnormal()));
EXPECT_EQ(
- u64(0b0'00000000000'1111111111111111111111111111111111111111111111111111),
+ 0b0'00000000000'1111111111111111111111111111111111111111111111111111_u64,
u64(Rep::max_subnormal()));
EXPECT_EQ(
- u64(0b0'00000000001'0000000000000000000000000000000000000000000000000000),
+ 0b0'00000000001'0000000000000000000000000000000000000000000000000000_u64,
u64(Rep::min_normal()));
EXPECT_EQ(
- u64(0b0'11111111110'1111111111111111111111111111111111111111111111111111),
+ 0b0'11111111110'1111111111111111111111111111111111111111111111111111_u64,
u64(Rep::max_normal()));
EXPECT_EQ(
- u64(0b0'11111111111'0000000000000000000000000000000000000000000000000000),
+ 0b0'11111111111'0000000000000000000000000000000000000000000000000000_u64,
u64(Rep::inf()));
EXPECT_EQ(
- u64(0b0'11111111111'0100000000000000000000000000000000000000000000000000),
+ 0b0'11111111111'0100000000000000000000000000000000000000000000000000_u64,
u64(Rep::signaling_nan()));
EXPECT_EQ(
- u64(0b0'11111111111'1000000000000000000000000000000000000000000000000000),
+ 0b0'11111111111'1000000000000000000000000000000000000000000000000000_u64,
u64(Rep::quiet_nan()));
}
-static constexpr UInt128 u128(uint64_t hi, uint64_t lo) {
-#if defined(__SIZEOF_INT128__)
- return __uint128_t(hi) << 64 | __uint128_t(lo);
-#else
- return UInt128({lo, hi});
-#endif
-}
-
TEST(LlvmLibcFPBitsTest, FPType_IEEE754_Binary128) {
using Rep = FPRep<FPType::IEEE754_Binary128>;
EXPECT_EQ(
- u128(0b0'000000000000000'000000000000000000000000000000000000000000000000,
- 0b0000000000000000000000000000000000000000000000000000000000000000),
+ 0b0'000000000000000'0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000_u128,
UInt128(Rep::zero()));
EXPECT_EQ(
- u128(0b0'011111111111111'000000000000000000000000000000000000000000000000,
- 0b0000000000000000000000000000000000000000000000000000000000000000),
+ 0b0'011111111111111'0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000_u128,
UInt128(Rep::one()));
EXPECT_EQ(
- u128(0b0'000000000000000'000000000000000000000000000000000000000000000000,
- 0b0000000000000000000000000000000000000000000000000000000000000001),
+ 0b0'000000000000000'0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001_u128,
UInt128(Rep::min_subnormal()));
EXPECT_EQ(
- u128(0b0'000000000000000'111111111111111111111111111111111111111111111111,
- 0b1111111111111111111111111111111111111111111111111111111111111111),
+ 0b0'000000000000000'1111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111_u128,
UInt128(Rep::max_subnormal()));
EXPECT_EQ(
- u128(0b0'000000000000001'000000000000000000000000000000000000000000000000,
- 0b0000000000000000000000000000000000000000000000000000000000000000),
+ 0b0'000000000000001'0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000_u128,
UInt128(Rep::min_normal()));
EXPECT_EQ(
- u128(0b0'111111111111110'111111111111111111111111111111111111111111111111,
- 0b1111111111111111111111111111111111111111111111111111111111111111),
+ 0b0'111111111111110'1111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111_u128,
UInt128(Rep::max_normal()));
EXPECT_EQ(
- u128(0b0'111111111111111'000000000000000000000000000000000000000000000000,
- 0b0000000000000000000000000000000000000000000000000000000000000000),
+ 0b0'111111111111111'0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000_u128,
UInt128(Rep::inf()));
EXPECT_EQ(
- u128(0b0'111111111111111'010000000000000000000000000000000000000000000000,
- 0b0000000000000000000000000000000000000000000000000000000000000000),
+ 0b0'111111111111111'0100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000_u128,
UInt128(Rep::signaling_nan()));
EXPECT_EQ(
- u128(0b0'111111111111111'100000000000000000000000000000000000000000000000,
- 0b0000000000000000000000000000000000000000000000000000000000000000),
+ 0b0'111111111111111'1000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000_u128,
UInt128(Rep::quiet_nan()));
}
@@ -134,89 +123,73 @@ TEST(LlvmLibcFPBitsTest, FPType_X86_Binary80) {
using Rep = FPRep<FPType::X86_Binary80>;
EXPECT_EQ(
- u128(0b0'000000000000000,
- 0b0000000000000000000000000000000000000000000000000000000000000000),
+ 0b0'0000000000000000000000000000000000000000000000000000000000000000000000000000000_u128,
UInt128(Rep::zero()));
EXPECT_EQ(
- u128(0b0'011111111111111,
- 0b1000000000000000000000000000000000000000000000000000000000000000),
+ 0b0'0111111111111111000000000000000000000000000000000000000000000000000000000000000_u128,
UInt128(Rep::one()));
EXPECT_EQ(
- u128(0b0'000000000000000,
- 0b0000000000000000000000000000000000000000000000000000000000000001),
+ 0b0'0000000000000000000000000000000000000000000000000000000000000000000000000000001_u128,
UInt128(Rep::min_subnormal()));
EXPECT_EQ(
- u128(0b0'000000000000000,
- 0b0111111111111111111111111111111111111111111111111111111111111111),
+ 0b0'0000000000000000111111111111111111111111111111111111111111111111111111111111111_u128,
UInt128(Rep::max_subnormal()));
EXPECT_EQ(
- u128(0b0'000000000000001,
- 0b1000000000000000000000000000000000000000000000000000000000000000),
+ 0b0'0000000000000011000000000000000000000000000000000000000000000000000000000000000_u128,
UInt128(Rep::min_normal()));
EXPECT_EQ(
- u128(0b0'111111111111110,
- 0b1111111111111111111111111111111111111111111111111111111111111111),
+ 0b0'1111111111111101111111111111111111111111111111111111111111111111111111111111111_u128,
UInt128(Rep::max_normal()));
EXPECT_EQ(
- u128(0b0'111111111111111,
- 0b1000000000000000000000000000000000000000000000000000000000000000),
+ 0b0'1111111111111111000000000000000000000000000000000000000000000000000000000000000_u128,
UInt128(Rep::inf()));
EXPECT_EQ(
- u128(0b0'111111111111111,
- 0b1010000000000000000000000000000000000000000000000000000000000000),
+ 0b0'1111111111111111010000000000000000000000000000000000000000000000000000000000000_u128,
UInt128(Rep::signaling_nan()));
EXPECT_EQ(
- u128(0b0'111111111111111,
- 0b1100000000000000000000000000000000000000000000000000000000000000),
+ 0b0'1111111111111111100000000000000000000000000000000000000000000000000000000000000_u128,
UInt128(Rep::quiet_nan()));
}
TEST(LlvmLibcFPBitsTest, FPType_X86_Binary80_IsNan) {
using Rep = FPRep<FPType::X86_Binary80>;
- const auto is_nan = [](uint64_t hi, uint64_t lo) {
- Rep rep;
- rep.set_uintval(u128(hi, lo));
- return rep.is_nan();
- };
-
- EXPECT_TRUE(is_nan(
- 0b0'111111111111111, // NAN : Pseudo-Infinity
- 0b0000000000000000000000000000000000000000000000000000000000000000));
- EXPECT_TRUE(is_nan(
- 0b0'111111111111111, // NAN : Pseudo Not a Number
- 0b0000000000000000000000000000000000000000000000000000000000000001));
- EXPECT_TRUE(is_nan(
- 0b0'111111111111111, // NAN : Pseudo Not a Number
- 0b0100000000000000000000000000000000000000000000000000000000000000));
- EXPECT_TRUE(is_nan(
- 0b0'111111111111111, // NAN : Signalling Not a Number
- 0b1000000000000000000000000000000000000000000000000000000000000001));
- EXPECT_TRUE(is_nan(
- 0b0'111111111111111, // NAN : Floating-point Indefinite
- 0b1100000000000000000000000000000000000000000000000000000000000000));
- EXPECT_TRUE(is_nan(
- 0b0'111111111111111, // NAN : Quiet Not a Number
- 0b1100000000000000000000000000000000000000000000000000000000000001));
- EXPECT_TRUE(is_nan(
- 0b0'111111111111110, // NAN : Unnormal
- 0b0000000000000000000000000000000000000000000000000000000000000000));
-
- EXPECT_FALSE(is_nan(
- 0b0'000000000000000, // Zero
- 0b0000000000000000000000000000000000000000000000000000000000000000));
- EXPECT_FALSE(is_nan(
- 0b0'000000000000000, // Subnormal
- 0b0000000000000000000000000000000000000000000000000000000000000001));
- EXPECT_FALSE(is_nan(
- 0b0'000000000000000, // Pseudo Denormal
- 0b1000000000000000000000000000000000000000000000000000000000000001));
- EXPECT_FALSE(is_nan(
- 0b0'111111111111111, // Infinity
- 0b1000000000000000000000000000000000000000000000000000000000000000));
- EXPECT_FALSE(is_nan(
- 0b0'111111111111110, // Normalized
- 0b1000000000000000000000000000000000000000000000000000000000000000));
+ EXPECT_TRUE( // NAN : Pseudo-Infinity
+ Rep(0b0'111111111111111'0000000000000000000000000000000000000000000000000000000000000000_u128)
+ .is_nan());
+ EXPECT_TRUE( // NAN : Pseudo Not a Number
+ Rep(0b0'111111111111111'0000000000000000000000000000000000000000000000000000000000000001_u128)
+ .is_nan());
+ EXPECT_TRUE( // NAN : Pseudo Not a Number
+ Rep(0b0'111111111111111'0100000000000000000000000000000000000000000000000000000000000000_u128)
+ .is_nan());
+ EXPECT_TRUE( // NAN : Signalling Not a Number
+ Rep(0b0'111111111111111'1000000000000000000000000000000000000000000000000000000000000001_u128)
+ .is_nan());
+ EXPECT_TRUE( // NAN : Floating-point Indefinite
+ Rep(0b0'111111111111111'1100000000000000000000000000000000000000000000000000000000000000_u128)
+ .is_nan());
+ EXPECT_TRUE( // NAN : Quiet Not a Number
+ Rep(0b0'111111111111111'1100000000000000000000000000000000000000000000000000000000000001_u128)
+ .is_nan());
+ EXPECT_TRUE( // NAN : Unnormal
+ Rep(0b0'111111111111110'0000000000000000000000000000000000000000000000000000000000000000_u128)
+ .is_nan());
+ EXPECT_FALSE( // Zero
+ Rep(0b0'000000000000000'0000000000000000000000000000000000000000000000000000000000000000_u128)
+ .is_nan());
+ EXPECT_FALSE( // Subnormal
+ Rep(0b0'000000000000000'0000000000000000000000000000000000000000000000000000000000000001_u128)
+ .is_nan());
+ EXPECT_FALSE( // Pseudo Denormal
+ Rep(0b0'000000000000000'1000000000000000000000000000000000000000000000000000000000000001_u128)
+ .is_nan());
+ EXPECT_FALSE( // Infinity
+ Rep(0b0'111111111111111'1000000000000000000000000000000000000000000000000000000000000000_u128)
+ .is_nan());
+ EXPECT_FALSE( // Normalized
+ Rep(0b0'111111111111110'1000000000000000000000000000000000000000000000000000000000000000_u128)
+ .is_nan());
}
enum class FP {
@@ -339,49 +312,49 @@ TEST(LlvmLibcFPBitsTest, FloatType) {
FloatBits zero(0.0f);
EXPECT_TRUE(zero.is_pos());
- EXPECT_EQ(zero.get_biased_exponent(), static_cast<uint16_t>(0));
- EXPECT_EQ(zero.get_mantissa(), static_cast<uint32_t>(0));
- EXPECT_EQ(zero.uintval(), static_cast<uint32_t>(0x00000000));
+ EXPECT_EQ(zero.get_biased_exponent(), 0_u16);
+ EXPECT_EQ(zero.get_mantissa(), 0_u32);
+ EXPECT_EQ(zero.uintval(), 0_u32);
EXPECT_STREQ(LIBC_NAMESPACE::str(zero).c_str(),
"0x00000000 = (S: 0, E: 0x0000, M: 0x00000000)");
FloatBits negzero(-0.0f);
EXPECT_TRUE(negzero.is_neg());
- EXPECT_EQ(negzero.get_biased_exponent(), static_cast<uint16_t>(0));
- EXPECT_EQ(negzero.get_mantissa(), static_cast<uint32_t>(0));
- EXPECT_EQ(negzero.uintval(), static_cast<uint32_t>(0x80000000));
+ EXPECT_EQ(negzero.get_biased_exponent(), 0_u16);
+ EXPECT_EQ(negzero.get_mantissa(), 0_u32);
+ EXPECT_EQ(negzero.uintval(), 0x80000000_u32);
EXPECT_STREQ(LIBC_NAMESPACE::str(negzero).c_str(),
"0x80000000 = (S: 1, E: 0x0000, M: 0x00000000)");
FloatBits one(1.0f);
EXPECT_TRUE(one.is_pos());
- EXPECT_EQ(one.get_biased_exponent(), static_cast<uint16_t>(0x7F));
- EXPECT_EQ(one.get_mantissa(), static_cast<uint32_t>(0));
- EXPECT_EQ(one.uintval(), static_cast<uint32_t>(0x3F800000));
+ EXPECT_EQ(one.get_biased_exponent(), 0x7F_u16);
+ EXPECT_EQ(one.get_mantissa(), 0_u32);
+ EXPECT_EQ(one.uintval(), 0x3F800000_u32);
EXPECT_STREQ(LIBC_NAMESPACE::str(one).c_str(),
"0x3F800000 = (S: 0, E: 0x007F, M: 0x00000000)");
FloatBits negone(-1.0f);
EXPECT_TRUE(negone.is_neg());
- EXPECT_EQ(negone.get_biased_exponent(), static_cast<uint16_t>(0x7F));
- EXPECT_EQ(negone.get_mantissa(), static_cast<uint32_t>(0));
- EXPECT_EQ(negone.uintval(), static_cast<uint32_t>(0xBF800000));
+ EXPECT_EQ(negone.get_biased_exponent(), 0x7F_u16);
+ EXPECT_EQ(negone.get_mantissa(), 0_u32);
+ EXPECT_EQ(negone.uintval(), 0xBF800000_u32);
EXPECT_STREQ(LIBC_NAMESPACE::str(negone).c_str(),
"0xBF800000 = (S: 1, E: 0x007F, M: 0x00000000)");
FloatBits num(1.125f);
EXPECT_TRUE(num.is_pos());
- EXPECT_EQ(num.get_biased_exponent(), static_cast<uint16_t>(0x7F));
- EXPECT_EQ(num.get_mantissa(), static_cast<uint32_t>(0x00100000));
- EXPECT_EQ(num.uintval(), static_cast<uint32_t>(0x3F900000));
+ EXPECT_EQ(num.get_biased_exponent(), 0x7F_u16);
+ EXPECT_EQ(num.get_mantissa(), 0x00100000_u32);
+ EXPECT_EQ(num.uintval(), 0x3F900000_u32);
EXPECT_STREQ(LIBC_NAMESPACE::str(num).c_str(),
"0x3F900000 = (S: 0, E: 0x007F, M: 0x00100000)");
FloatBits negnum(-1.125f);
EXPECT_TRUE(negnum.is_neg());
- EXPECT_EQ(negnum.get_biased_exponent(), static_cast<uint16_t>(0x7F));
- EXPECT_EQ(negnum.get_mantissa(), static_cast<uint32_t>(0x00100000));
- EXPECT_EQ(negnum.uintval(), static_cast<uint32_t>(0xBF900000));
+ EXPECT_EQ(negnum.get_biased_exponent(), 0x7F_u16);
+ EXPECT_EQ(negnum.get_mantissa(), 0x00100000_u32);
+ EXPECT_EQ(negnum.uintval(), 0xBF900000_u32);
EXPECT_STREQ(LIBC_NAMESPACE::str(negnum).c_str(),
"0xBF900000 = (S: 1, E: 0x007F, M: 0x00100000)");
@@ -401,49 +374,49 @@ TEST(LlvmLibcFPBitsTest, DoubleType) {
DoubleBits zero(0.0);
EXPECT_TRUE(zero.is_pos());
- EXPECT_EQ(zero.get_biased_exponent(), static_cast<uint16_t>(0x0000));
- EXPECT_EQ(zero.get_mantissa(), static_cast<uint64_t>(0x0000000000000000));
- EXPECT_EQ(zero.uintval(), static_cast<uint64_t>(0x0000000000000000));
+ EXPECT_EQ(zero.get_biased_exponent(), 0_u16);
+ EXPECT_EQ(zero.get_mantissa(), 0_u64);
+ EXPECT_EQ(zero.uintval(), 0_u64);
EXPECT_STREQ(LIBC_NAMESPACE::str(zero).c_str(),
"0x0000000000000000 = (S: 0, E: 0x0000, M: 0x0000000000000000)");
DoubleBits negzero(-0.0);
EXPECT_TRUE(negzero.is_neg());
- EXPECT_EQ(negzero.get_biased_exponent(), static_cast<uint16_t>(0x0000));
- EXPECT_EQ(negzero.get_mantissa(), static_cast<uint64_t>(0x0000000000000000));
- EXPECT_EQ(negzero.uintval(), static_cast<uint64_t>(0x8000000000000000));
+ EXPECT_EQ(negzero.get_biased_exponent(), 0_u16);
+ EXPECT_EQ(negzero.get_mantissa(), 0_u64);
+ EXPECT_EQ(negzero.uintval(), 0x8000000000000000_u64);
EXPECT_STREQ(LIBC_NAMESPACE::str(negzero).c_str(),
"0x8000000000000000 = (S: 1, E: 0x0000, M: 0x0000000000000000)");
DoubleBits one(1.0);
EXPECT_TRUE(one.is_pos());
- EXPECT_EQ(one.get_biased_exponent(), static_cast<uint16_t>(0x03FF));
- EXPECT_EQ(one.get_mantissa(), static_cast<uint64_t>(0x0000000000000000));
- EXPECT_EQ(one.uintval(), static_cast<uint64_t>(0x3FF0000000000000));
+ EXPECT_EQ(one.get_biased_exponent(), 0x03FF_u16);
+ EXPECT_EQ(one.get_mantissa(), 0_u64);
+ EXPECT_EQ(one.uintval(), 0x3FF0000000000000_u64);
EXPECT_STREQ(LIBC_NAMESPACE::str(one).c_str(),
"0x3FF0000000000000 = (S: 0, E: 0x03FF, M: 0x0000000000000000)");
DoubleBits negone(-1.0);
EXPECT_TRUE(negone.is_neg());
- EXPECT_EQ(negone.get_biased_exponent(), static_cast<uint16_t>(0x03FF));
- EXPECT_EQ(negone.get_mantissa(), static_cast<uint64_t>(0x0000000000000000));
- EXPECT_EQ(negone.uintval(), static_cast<uint64_t>(0xBFF0000000000000));
+ EXPECT_EQ(negone.get_biased_exponent(), 0x03FF_u16);
+ EXPECT_EQ(negone.get_mantissa(), 0_u64);
+ EXPECT_EQ(negone.uintval(), 0xBFF0000000000000_u64);
EXPECT_STREQ(LIBC_NAMESPACE::str(negone).c_str(),
"0xBFF0000000000000 = (S: 1, E: 0x03FF, M: 0x0000000000000000)");
DoubleBits num(1.125);
EXPECT_TRUE(num.is_pos());
- EXPECT_EQ(num.get_biased_exponent(), static_cast<uint16_t>(0x03FF));
- EXPECT_EQ(num.get_mantissa(), static_cast<uint64_t>(0x0002000000000000));
- EXPECT_EQ(num.uintval(), static_cast<uint64_t>(0x3FF2000000000000));
+ EXPECT_EQ(num.get_biased_exponent(), 0x03FF_u16);
+ EXPECT_EQ(num.get_mantissa(), 0x0002000000000000_u64);
+ EXPECT_EQ(num.uintval(), 0x3FF2000000000000_u64);
EXPECT_STREQ(LIBC_NAMESPACE::str(num).c_str(),
"0x3FF2000000000000 = (S: 0, E: 0x03FF, M: 0x0002000000000000)");
DoubleBits negnum(-1.125);
EXPECT_TRUE(negnum.is_neg());
- EXPECT_EQ(negnum.get_biased_exponent(), static_cast<uint16_t>(0x03FF));
- EXPECT_EQ(negnum.get_mantissa(), static_cast<uint64_t>(0x0002000000000000));
- EXPECT_EQ(negnum.uintval(), static_cast<uint64_t>(0xBFF2000000000000));
+ EXPECT_EQ(negnum.get_biased_exponent(), 0x03FF_u16);
+ EXPECT_EQ(negnum.get_mantissa(), 0x0002000000000000_u64);
+ EXPECT_EQ(negnum.uintval(), 0xBFF2000000000000_u64);
EXPECT_STREQ(LIBC_NAMESPACE::str(negnum).c_str(),
"0xBFF2000000000000 = (S: 1, E: 0x03FF, M: 0x0002000000000000)");
@@ -467,10 +440,9 @@ TEST(LlvmLibcFPBitsTest, X86LongDoubleType) {
LongDoubleBits zero(0.0l);
EXPECT_TRUE(zero.is_pos());
- EXPECT_EQ(zero.get_biased_exponent(), static_cast<uint16_t>(0x0000));
- EXPECT_EQ(zero.get_mantissa(), static_cast<UInt128>(0x0000000000000000)
- << 64);
- EXPECT_EQ(zero.uintval(), static_cast<UInt128>(0x0000000000000000) << 64);
+ EXPECT_EQ(zero.get_biased_exponent(), 0_u16);
+ EXPECT_EQ(zero.get_mantissa(), 0_u128);
+ EXPECT_EQ(zero.uintval(), 0_u128);
EXPECT_STREQ(
LIBC_NAMESPACE::str(zero).c_str(),
"0x00000000000000000000000000000000 = "
@@ -478,10 +450,9 @@ TEST(LlvmLibcFPBitsTest, X86LongDoubleType) {
LongDoubleBits negzero(-0.0l);
EXPECT_TRUE(negzero.is_neg());
- EXPECT_EQ(negzero.get_biased_exponent(), static_cast<uint16_t>(0x0000));
- EXPECT_EQ(negzero.get_mantissa(), static_cast<UInt128>(0x0000000000000000)
- << 64);
- EXPECT_EQ(negzero.uintval(), static_cast<UInt128>(0x1) << 79);
+ EXPECT_EQ(negzero.get_biased_exponent(), 0_u16);
+ EXPECT_EQ(negzero.get_mantissa(), 0_u128);
+ EXPECT_EQ(negzero.uintval(), 0x80000000000000000000_u128);
EXPECT_STREQ(
LIBC_NAMESPACE::str(negzero).c_str(),
"0x00000000000080000000000000000000 = "
@@ -489,9 +460,9 @@ TEST(LlvmLibcFPBitsTest, X86LongDoubleType) {
LongDoubleBits one(1.0l);
EXPECT_TRUE(one.is_pos());
- EXPECT_EQ(one.get_biased_exponent(), static_cast<uint16_t>(0x3FFF));
- EXPECT_EQ(one.get_mantissa(), static_cast<UInt128>(0x0000000000000000) << 64);
- EXPECT_EQ(one.uintval(), static_cast<UInt128>(0x3FFF8) << 60);
+ EXPECT_EQ(one.get_biased_exponent(), 0x3FFF_u16);
+ EXPECT_EQ(one.get_mantissa(), 0_u128);
+ EXPECT_EQ(one.uintval(), 0x3FFF8000000000000000_u128);
EXPECT_STREQ(
LIBC_NAMESPACE::str(one).c_str(),
"0x0000000000003FFF8000000000000000 = "
@@ -499,10 +470,9 @@ TEST(LlvmLibcFPBitsTest, X86LongDoubleType) {
LongDoubleBits negone(-1.0l);
EXPECT_TRUE(negone.is_neg());
- EXPECT_EQ(negone.get_biased_exponent(), static_cast<uint16_t>(0x3FFF));
- EXPECT_EQ(negone.get_mantissa(), static_cast<UInt128>(0x0000000000000000)
- << 64);
- EXPECT_EQ(negone.uintval(), static_cast<UInt128>(0xBFFF8) << 60);
+ EXPECT_EQ(negone.get_biased_exponent(), 0x3FFF_u16);
+ EXPECT_EQ(negone.get_mantissa(), 0_u128);
+ EXPECT_EQ(negone.uintval(), 0xBFFF8000000000000000_u128);
EXPECT_STREQ(
LIBC_NAMESPACE::str(negone).c_str(),
"0x000000000000BFFF8000000000000000 = "
@@ -510,9 +480,9 @@ TEST(LlvmLibcFPBitsTest, X86LongDoubleType) {
LongDoubleBits num(1.125l);
EXPECT_TRUE(num.is_pos());
- EXPECT_EQ(num.get_biased_exponent(), static_cast<uint16_t>(0x3FFF));
- EXPECT_EQ(num.get_mantissa(), static_cast<UInt128>(0x1) << 60);
- EXPECT_EQ(num.uintval(), static_cast<UInt128>(0x3FFF9) << 60);
+ EXPECT_EQ(num.get_biased_exponent(), 0x3FFF_u16);
+ EXPECT_EQ(num.get_mantissa(), 0x1000000000000000_u128);
+ EXPECT_EQ(num.uintval(), 0x3FFF9000000000000000_u128);
EXPECT_STREQ(
LIBC_NAMESPACE::str(num).c_str(),
"0x0000000000003FFF9000000000000000 = "
@@ -520,9 +490,9 @@ TEST(LlvmLibcFPBitsTest, X86LongDoubleType) {
LongDoubleBits negnum(-1.125l);
EXPECT_TRUE(negnum.is_neg());
- EXPECT_EQ(negnum.get_biased_exponent(), static_cast<uint16_t>(0x3FFF));
- EXPECT_EQ(negnum.get_mantissa(), static_cast<UInt128>(0x1) << 60);
- EXPECT_EQ(negnum.uintval(), static_cast<UInt128>(0xBFFF9) << 60);
+ EXPECT_EQ(negnum.get_biased_exponent(), 0x3FFF_u16);
+ EXPECT_EQ(negnum.get_mantissa(), 0x1000000000000000_u128);
+ EXPECT_EQ(negnum.uintval(), 0xBFFF9000000000000000_u128);
EXPECT_STREQ(
LIBC_NAMESPACE::str(negnum).c_str(),
"0x000000000000BFFF9000000000000000 = "
@@ -547,57 +517,54 @@ TEST(LlvmLibcFPBitsTest, LongDoubleType) {
LongDoubleBits zero(0.0l);
EXPECT_TRUE(zero.is_pos());
- EXPECT_EQ(zero.get_biased_exponent(), static_cast<uint16_t>(0x0000));
- EXPECT_EQ(zero.get_mantissa(), static_cast<UInt128>(0x0000000000000000)
- << 64);
- EXPECT_EQ(zero.uintval(), static_cast<UInt128>(0x0000000000000000) << 64);
+ EXPECT_EQ(zero.get_biased_exponent(), 0_u16);
+ EXPECT_EQ(zero.get_mantissa(), 0_u128);
+ EXPECT_EQ(zero.uintval(), 0_u128);
EXPECT_STREQ(LIBC_NAMESPACE::str(zero).c_str(),
"0x00000000000000000000000000000000 = "
"(S: 0, E: 0x0000, M: 0x00000000000000000000000000000000)");
LongDoubleBits negzero(-0.0l);
EXPECT_TRUE(negzero.is_neg());
- EXPECT_EQ(negzero.get_biased_exponent(), static_cast<uint16_t>(0x0000));
- EXPECT_EQ(negzero.get_mantissa(), static_cast<UInt128>(0x0000000000000000)
- << 64);
- EXPECT_EQ(negzero.uintval(), static_cast<UInt128>(0x1) << 127);
+ EXPECT_EQ(negzero.get_biased_exponent(), 0_u16);
+ EXPECT_EQ(negzero.get_mantissa(), 0_u128);
+ EXPECT_EQ(negzero.uintval(), 0x80000000000000000000000000000000_u128);
EXPECT_STREQ(LIBC_NAMESPACE::str(negzero).c_str(),
"0x80000000000000000000000000000000 = "
"(S: 1, E: 0x0000, M: 0x00000000000000000000000000000000)");
LongDoubleBits one(1.0l);
EXPECT_TRUE(one.is_pos());
- EXPECT_EQ(one.get_biased_exponent(), static_cast<uint16_t>(0x3FFF));
- EXPECT_EQ(one.get_mantissa(), static_cast<UInt128>(0x0000000000000000) << 64);
- EXPECT_EQ(one.uintval(), static_cast<UInt128>(0x3FFF) << 112);
+ EXPECT_EQ(one.get_biased_exponent(), 0x3FFF_u16);
+ EXPECT_EQ(one.get_mantissa(), 0_u128);
+ EXPECT_EQ(one.uintval(), 0x3FFF0000000000000000000000000000_u128);
EXPECT_STREQ(LIBC_NAMESPACE::str(one).c_str(),
"0x3FFF0000000000000000000000000000 = "
"(S: 0, E: 0x3FFF, M: 0x00000000000000000000000000000000)");
LongDoubleBits negone(-1.0l);
EXPECT_TRUE(negone.is_neg());
- EXPECT_EQ(negone.get_biased_exponent(), static_cast<uint16_t>(0x3FFF));
- EXPECT_EQ(negone.get_mantissa(), static_cast<UInt128>(0x0000000000000000)
- << 64);
- EXPECT_EQ(negone.uintval(), static_cast<UInt128>(0xBFFF) << 112);
+ EXPECT_EQ(negone.get_biased_exponent(), 0x3FFF_u16);
+ EXPECT_EQ(negone.get_mantissa(), 0_u128);
+ EXPECT_EQ(negone.uintval(), 0xBFFF0000000000000000000000000000_u128);
EXPECT_STREQ(LIBC_NAMESPACE::str(negone).c_str(),
"0xBFFF0000000000000000000000000000 = "
"(S: 1, E: 0x3FFF, M: 0x00000000000000000000000000000000)");
LongDoubleBits num(1.125l);
EXPECT_TRUE(num.is_pos());
- EXPECT_EQ(num.get_biased_exponent(), static_cast<uint16_t>(0x3FFF));
- EXPECT_EQ(num.get_mantissa(), static_cast<UInt128>(0x2) << 108);
- EXPECT_EQ(num.uintval(), static_cast<UInt128>(0x3FFF2) << 108);
+ EXPECT_EQ(num.get_biased_exponent(), 0x3FFF_u16);
+ EXPECT_EQ(num.get_mantissa(), 0x2000000000000000000000000000_u128);
+ EXPECT_EQ(num.uintval(), 0x3FFF2000000000000000000000000000_u128);
EXPECT_STREQ(LIBC_NAMESPACE::str(num).c_str(),
"0x3FFF2000000000000000000000000000 = "
"(S: 0, E: 0x3FFF, M: 0x00002000000000000000000000000000)");
LongDoubleBits negnum(-1.125l);
EXPECT_TRUE(negnum.is_neg());
- EXPECT_EQ(negnum.get_biased_exponent(), static_cast<uint16_t>(0x3FFF));
- EXPECT_EQ(negnum.get_mantissa(), static_cast<UInt128>(0x2) << 108);
- EXPECT_EQ(negnum.uintval(), static_cast<UInt128>(0xBFFF2) << 108);
+ EXPECT_EQ(negnum.get_biased_exponent(), 0x3FFF_u16);
+ EXPECT_EQ(negnum.get_mantissa(), 0x2000000000000000000000000000_u128);
+ EXPECT_EQ(negnum.uintval(), 0xBFFF2000000000000000000000000000_u128);
EXPECT_STREQ(LIBC_NAMESPACE::str(negnum).c_str(),
"0xBFFF2000000000000000000000000000 = "
"(S: 1, E: 0x3FFF, M: 0x00002000000000000000000000000000)");
@@ -621,57 +588,54 @@ TEST(LlvmLibcFPBitsTest, Float128Type) {
Float128Bits zero = Float128Bits::zero(Sign::POS);
EXPECT_TRUE(zero.is_pos());
- EXPECT_EQ(zero.get_biased_exponent(), static_cast<uint16_t>(0x0000));
- EXPECT_EQ(zero.get_mantissa(), static_cast<UInt128>(0x0000000000000000)
- << 64);
- EXPECT_EQ(zero.uintval(), static_cast<UInt128>(0x0000000000000000) << 64);
+ EXPECT_EQ(zero.get_biased_exponent(), 0_u16);
+ EXPECT_EQ(zero.get_mantissa(), 0_u128);
+ EXPECT_EQ(zero.uintval(), 0_u128);
EXPECT_STREQ(LIBC_NAMESPACE::str(zero).c_str(),
"0x00000000000000000000000000000000 = "
"(S: 0, E: 0x0000, M: 0x00000000000000000000000000000000)");
Float128Bits negzero = Float128Bits::zero(Sign::NEG);
EXPECT_TRUE(negzero.is_neg());
- EXPECT_EQ(negzero.get_biased_exponent(), static_cast<uint16_t>(0x0000));
- EXPECT_EQ(negzero.get_mantissa(), static_cast<UInt128>(0x0000000000000000)
- << 64);
- EXPECT_EQ(negzero.uintval(), static_cast<UInt128>(0x1) << 127);
+ EXPECT_EQ(negzero.get_biased_exponent(), 0_u16);
+ EXPECT_EQ(negzero.get_mantissa(), 0_u128);
+ EXPECT_EQ(negzero.uintval(), 0x80000000000000000000000000000000_u128);
EXPECT_STREQ(LIBC_NAMESPACE::str(negzero).c_str(),
"0x80000000000000000000000000000000 = "
"(S: 1, E: 0x0000, M: 0x00000000000000000000000000000000)");
Float128Bits one(float128(1.0));
EXPECT_TRUE(one.is_pos());
- EXPECT_EQ(one.get_biased_exponent(), static_cast<uint16_t>(0x3FFF));
- EXPECT_EQ(one.get_mantissa(), static_cast<UInt128>(0x0000000000000000) << 64);
- EXPECT_EQ(one.uintval(), static_cast<UInt128>(0x3FFF) << 112);
+ EXPECT_EQ(one.get_biased_exponent(), 0x3FFF_u16);
+ EXPECT_EQ(one.get_mantissa(), 0_u128);
+ EXPECT_EQ(one.uintval(), 0x3FFF0000000000000000000000000000_u128);
EXPECT_STREQ(LIBC_NAMESPACE::str(one).c_str(),
"0x3FFF0000000000000000000000000000 = "
"(S: 0, E: 0x3FFF, M: 0x00000000000000000000000000000000)");
Float128Bits negone(float128(-1.0));
EXPECT_TRUE(negone.is_neg());
- EXPECT_EQ(negone.get_biased_exponent(), static_cast<uint16_t>(0x3FFF));
- EXPECT_EQ(negone.get_mantissa(), static_cast<UInt128>(0x0000000000000000)
- << 64);
- EXPECT_EQ(negone.uintval(), static_cast<UInt128>(0xBFFF) << 112);
+ EXPECT_EQ(negone.get_biased_exponent(), 0x3FFF_u16);
+ EXPECT_EQ(negone.get_mantissa(), 0_u128);
+ EXPECT_EQ(negone.uintval(), 0xBFFF0000000000000000000000000000_u128);
EXPECT_STREQ(LIBC_NAMESPACE::str(negone).c_str(),
"0xBFFF0000000000000000000000000000 = "
"(S: 1, E: 0x3FFF, M: 0x00000000000000000000000000000000)");
Float128Bits num(float128(1.125));
EXPECT_TRUE(num.is_pos());
- EXPECT_EQ(num.get_biased_exponent(), static_cast<uint16_t>(0x3FFF));
- EXPECT_EQ(num.get_mantissa(), static_cast<UInt128>(0x2) << 108);
- EXPECT_EQ(num.uintval(), static_cast<UInt128>(0x3FFF2) << 108);
+ EXPECT_EQ(num.get_biased_exponent(), 0x3FFF_u16);
+ EXPECT_EQ(num.get_mantissa(), 0x2000000000000000000000000000_u128);
+ EXPECT_EQ(num.uintval(), 0x3FFF2000000000000000000000000000_u128);
EXPECT_STREQ(LIBC_NAMESPACE::str(num).c_str(),
"0x3FFF2000000000000000000000000000 = "
"(S: 0, E: 0x3FFF, M: 0x00002000000000000000000000000000)");
Float128Bits negnum(float128(-1.125));
EXPECT_TRUE(negnum.is_neg());
- EXPECT_EQ(negnum.get_biased_exponent(), static_cast<uint16_t>(0x3FFF));
- EXPECT_EQ(negnum.get_mantissa(), static_cast<UInt128>(0x2) << 108);
- EXPECT_EQ(negnum.uintval(), static_cast<UInt128>(0xBFFF2) << 108);
+ EXPECT_EQ(negnum.get_biased_exponent(), 0x3FFF_u16);
+ EXPECT_EQ(negnum.get_mantissa(), 0x2000000000000000000000000000_u128);
+ EXPECT_EQ(negnum.uintval(), 0xBFFF2000000000000000000000000000_u128);
EXPECT_STREQ(LIBC_NAMESPACE::str(negnum).c_str(),
"0xBFFF2000000000000000000000000000 = "
"(S: 1, E: 0x3FFF, M: 0x00002000000000000000000000000000)");
diff --git a/libc/test/src/__support/RPC/rpc_smoke_test.cpp b/libc/test/src/__support/RPC/rpc_smoke_test.cpp
index 54821e2..58b318c 100644
--- a/libc/test/src/__support/RPC/rpc_smoke_test.cpp
+++ b/libc/test/src/__support/RPC/rpc_smoke_test.cpp
@@ -13,12 +13,8 @@
namespace {
enum { lane_size = 8, port_count = 4 };
-struct Packet {
- uint64_t unused;
-};
-
-using ProcAType = LIBC_NAMESPACE::rpc::Process<false, Packet>;
-using ProcBType = LIBC_NAMESPACE::rpc::Process<true, Packet>;
+using ProcAType = LIBC_NAMESPACE::rpc::Process<false>;
+using ProcBType = LIBC_NAMESPACE::rpc::Process<true>;
static_assert(ProcAType::inbox_offset(port_count) ==
ProcBType::outbox_offset(port_count));
@@ -26,7 +22,7 @@ static_assert(ProcAType::inbox_offset(port_count) ==
static_assert(ProcAType::outbox_offset(port_count) ==
ProcBType::inbox_offset(port_count));
-enum { alloc_size = ProcAType::allocation_size(port_count) };
+enum { alloc_size = ProcAType::allocation_size(port_count, 1) };
alignas(64) char buffer[alloc_size] = {0};
} // namespace
diff --git a/libc/test/src/__support/integer_literals_test.cpp b/libc/test/src/__support/integer_literals_test.cpp
new file mode 100644
index 0000000..10c3625
--- /dev/null
+++ b/libc/test/src/__support/integer_literals_test.cpp
@@ -0,0 +1,134 @@
+
+//===-- Unittests for user defined integer literals -----------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "src/__support/integer_literals.h"
+#include "test/UnitTest/Test.h"
+
+using LIBC_NAMESPACE::operator""_u8;
+using LIBC_NAMESPACE::operator""_u16;
+using LIBC_NAMESPACE::operator""_u32;
+using LIBC_NAMESPACE::operator""_u64;
+using LIBC_NAMESPACE::operator""_u128;
+using LIBC_NAMESPACE::operator""_u256;
+
+TEST(LlvmLibcIntegerLiteralTest, u8) {
+ EXPECT_EQ(uint8_t(0), 0_u8);
+ EXPECT_EQ(uint8_t(UINT8_MAX), 255_u8);
+ EXPECT_EQ(uint8_t(UINT8_MAX), 0xFF_u8);
+ EXPECT_EQ(uint8_t(UINT8_MAX), 0b11111111_u8);
+}
+
+TEST(LlvmLibcIntegerLiteralTest, u16) {
+ EXPECT_EQ(uint16_t(0), 0_u16);
+ EXPECT_EQ(uint16_t(UINT8_MAX), 255_u16);
+ EXPECT_EQ(uint16_t(UINT8_MAX), 0xFF_u16);
+ EXPECT_EQ(uint16_t(UINT8_MAX), 0b11111111_u16);
+ EXPECT_EQ(uint16_t(UINT16_MAX), 65535_u16);
+ EXPECT_EQ(uint16_t(UINT16_MAX), 0xFFFF_u16);
+ EXPECT_EQ(uint16_t(UINT16_MAX), 0b11111111'11111111_u16);
+}
+
+TEST(LlvmLibcIntegerLiteralTest, u32) {
+ EXPECT_EQ(uint32_t(0), 0_u32);
+ EXPECT_EQ(uint32_t(UINT8_MAX), 255_u32);
+ EXPECT_EQ(uint32_t(UINT8_MAX), 0xFF_u32);
+ EXPECT_EQ(uint32_t(UINT8_MAX), 0b11111111_u32);
+ EXPECT_EQ(uint32_t(UINT16_MAX), 65535_u32);
+ EXPECT_EQ(uint32_t(UINT16_MAX), 0xFFFF_u32);
+ EXPECT_EQ(uint32_t(UINT16_MAX), 0b11111111'11111111_u32);
+ EXPECT_EQ(uint32_t(UINT32_MAX), 4294967295_u32);
+ EXPECT_EQ(uint32_t(UINT32_MAX), 0xFFFFFFFF_u32);
+ EXPECT_EQ(uint32_t(UINT32_MAX), 0b1111111111111111'1111111111111111_u32);
+}
+
+TEST(LlvmLibcIntegerLiteralTest, u64) {
+ EXPECT_EQ(uint64_t(0), 0_u64);
+ EXPECT_EQ(uint64_t(UINT8_MAX), 255_u64);
+ EXPECT_EQ(uint64_t(UINT8_MAX), 0xFF_u64);
+ EXPECT_EQ(uint64_t(UINT8_MAX), 0b11111111_u64);
+ EXPECT_EQ(uint64_t(UINT16_MAX), 65535_u64);
+ EXPECT_EQ(uint64_t(UINT16_MAX), 0xFFFF_u64);
+ EXPECT_EQ(uint64_t(UINT16_MAX), 0b11111111'11111111_u64);
+ EXPECT_EQ(uint64_t(UINT32_MAX), 4294967295_u64);
+ EXPECT_EQ(uint64_t(UINT32_MAX), 0xFFFFFFFF_u64);
+ EXPECT_EQ(uint64_t(UINT32_MAX), 0b1111111111111111'1111111111111111_u64);
+ EXPECT_EQ(uint64_t(UINT64_MAX), 18446744073709551615_u64);
+ EXPECT_EQ(uint64_t(UINT64_MAX), 0xFFFFFFFF'FFFFFFFF_u64);
+ EXPECT_EQ(
+ uint64_t(UINT64_MAX),
+ 0b1111111111111111'1111111111111111'1111111111111111'1111111111111111_u64);
+}
+
+TEST(LlvmLibcIntegerLiteralTest, u128) {
+#if defined(__SIZEOF_INT128__)
+ const __uint128_t ZERO = 0;
+ const __uint128_t U8_MAX = UINT8_MAX;
+ const __uint128_t U16_MAX = UINT16_MAX;
+ const __uint128_t U32_MAX = UINT32_MAX;
+ const __uint128_t U64_MAX = UINT64_MAX;
+ const __uint128_t U128_MAX = (U64_MAX << 64) | U64_MAX;
+#else
+ const UInt128 ZERO = 0;
+ const UInt128 U8_MAX = UINT8_MAX;
+ const UInt128 U16_MAX = UINT16_MAX;
+ const UInt128 U32_MAX = UINT32_MAX;
+ const UInt128 U64_MAX = UINT64_MAX;
+ const UInt128 U128_MAX = (U64_MAX << 64) | U64_MAX;
+#endif
+ EXPECT_EQ(ZERO, 0_u128);
+ EXPECT_EQ(U8_MAX, 255_u128);
+ EXPECT_EQ(U8_MAX, 0xFF_u128);
+ EXPECT_EQ(U8_MAX, 0b11111111_u128);
+ EXPECT_EQ(U16_MAX, 65535_u128);
+ EXPECT_EQ(U16_MAX, 0xFFFF_u128);
+ EXPECT_EQ(U16_MAX, 0b11111111'11111111_u128);
+ EXPECT_EQ(U32_MAX, 4294967295_u128);
+ EXPECT_EQ(U32_MAX, 0xFFFFFFFF_u128);
+ EXPECT_EQ(U32_MAX, 0b1111111111111111'1111111111111111_u128);
+ EXPECT_EQ(U64_MAX, 18446744073709551615_u128);
+ EXPECT_EQ(U64_MAX, 0xFFFFFFFF'FFFFFFFF_u128);
+ EXPECT_EQ(
+ U64_MAX,
+ 0b1111111111111111'1111111111111111'1111111111111111'1111111111111111_u128);
+ EXPECT_EQ(U128_MAX, 340282366920938463463374607431768211455_u128);
+ EXPECT_EQ(U128_MAX, 0xFFFFFFFF'FFFFFFFF'FFFFFFFF'FFFFFFFF_u128);
+ EXPECT_EQ(
+ U128_MAX,
+ 0b1111111111111111'1111111111111111'1111111111111111'1111111111111111'1111111111111111'1111111111111111'1111111111111111'1111111111111111_u128);
+}
+
+TEST(LlvmLibcIntegerLiteralTest, u256) {
+ using UInt256 = LIBC_NAMESPACE::cpp::UInt<256>;
+ const UInt256 ZERO = 0;
+ const UInt256 U8_MAX = UINT8_MAX;
+ const UInt256 U16_MAX = UINT16_MAX;
+ const UInt256 U32_MAX = UINT32_MAX;
+ const UInt256 U64_MAX = UINT64_MAX;
+ const UInt256 U128_MAX = (U64_MAX << 64) | U64_MAX;
+ const UInt256 U256_MAX = (U128_MAX << 128) | U128_MAX;
+ EXPECT_EQ(ZERO, 0_u256);
+ EXPECT_EQ(U8_MAX, 255_u256);
+ EXPECT_EQ(U8_MAX, 0xFF_u256);
+ EXPECT_EQ(U8_MAX, 0b11111111_u256);
+ EXPECT_EQ(U16_MAX, 65535_u256);
+ EXPECT_EQ(U16_MAX, 0xFFFF_u256);
+ EXPECT_EQ(U16_MAX, 0b11111111'11111111_u256);
+ EXPECT_EQ(U32_MAX, 4294967295_u256);
+ EXPECT_EQ(U32_MAX, 0xFFFFFFFF_u256);
+ EXPECT_EQ(U32_MAX, 0b1111111111111111'1111111111111111_u256);
+ EXPECT_EQ(U64_MAX, 18446744073709551615_u256);
+ EXPECT_EQ(U64_MAX, 0xFFFFFFFF'FFFFFFFF_u256);
+ EXPECT_EQ(
+ U64_MAX,
+ 0b1111111111111111'1111111111111111'1111111111111111'1111111111111111_u256);
+ EXPECT_EQ(U128_MAX, 0xFFFFFFFF'FFFFFFFF'FFFFFFFF'FFFFFFFF_u256);
+ EXPECT_EQ(
+ U256_MAX,
+ 0xFFFFFFFF'FFFFFFFF'FFFFFFFF'FFFFFFFF'FFFFFFFF'FFFFFFFF'FFFFFFFF'FFFFFFFF_u256);
+}
diff --git a/libc/test/src/__support/uint_test.cpp b/libc/test/src/__support/uint_test.cpp
index 0ad72c3..1a1171b 100644
--- a/libc/test/src/__support/uint_test.cpp
+++ b/libc/test/src/__support/uint_test.cpp
@@ -588,7 +588,7 @@ TEST(LlvmLibcUIntClassTest, ConstexprInitTests) {
d <<= e; \
LL_UInt320 q1 = y / d; \
LL_UInt320 r1 = y % d; \
- LL_UInt320 r2 = *y.div_uint32_times_pow_2(x, e); \
+ LL_UInt320 r2 = *y.div_uint_half_times_pow_2(x, e); \
EXPECT_EQ(q1, y); \
EXPECT_EQ(r1, r2); \
} while (0)
@@ -676,6 +676,52 @@ TEST(LlvmLibcUIntClassTest, ConstructorFromUInt128Tests) {
ASSERT_EQ(LL_UInt192(e + f), LL_UInt192(a + b));
}
+TEST(LlvmLibcUIntClassTest, WordTypeUInt128Tests) {
+ using LL_UInt256_128 = cpp::BigInt<256, false, __uint128_t>;
+ using LL_UInt128_128 = cpp::BigInt<128, false, __uint128_t>;
+
+ LL_UInt256_128 a(1);
+
+ ASSERT_EQ(static_cast<int>(a), 1);
+ a = (a << 128) + 2;
+ ASSERT_EQ(static_cast<int>(a), 2);
+ ASSERT_EQ(static_cast<uint64_t>(a), uint64_t(2));
+ a = (a << 32) + 3;
+ ASSERT_EQ(static_cast<int>(a), 3);
+ ASSERT_EQ(static_cast<uint64_t>(a), uint64_t(0x2'0000'0003));
+ ASSERT_EQ(static_cast<int>(a >> 32), 2);
+ ASSERT_EQ(static_cast<int>(a >> (128 + 32)), 1);
+
+ LL_UInt128_128 b(__uint128_t(1) << 127);
+ LL_UInt128_128 c(b);
+ a = b.ful_mul(c);
+
+ ASSERT_EQ(static_cast<int>(a >> 254), 1);
+
+ LL_UInt256_128 d = LL_UInt256_128(123) << 4;
+ ASSERT_EQ(static_cast<int>(d), 123 << 4);
+ LL_UInt256_128 e = a / d;
+ LL_UInt256_128 f = a % d;
+ LL_UInt256_128 r = *a.div_uint_half_times_pow_2(123, 4);
+ EXPECT_TRUE(e == a);
+ EXPECT_TRUE(f == r);
+}
+
#endif // __SIZEOF_INT128__
+TEST(LlvmLibcUIntClassTest, OtherWordTypeTests) {
+ using LL_UInt96 = cpp::BigInt<96, false, uint32_t>;
+
+ LL_UInt96 a(1);
+
+ ASSERT_EQ(static_cast<int>(a), 1);
+ a = (a << 32) + 2;
+ ASSERT_EQ(static_cast<int>(a), 2);
+ ASSERT_EQ(static_cast<uint64_t>(a), uint64_t(0x1'0000'0002));
+ a = (a << 32) + 3;
+ ASSERT_EQ(static_cast<int>(a), 3);
+ ASSERT_EQ(static_cast<int>(a >> 32), 2);
+ ASSERT_EQ(static_cast<int>(a >> 64), 1);
+}
+
} // namespace LIBC_NAMESPACE
diff --git a/libc/test/src/math/smoke/CMakeLists.txt b/libc/test/src/math/smoke/CMakeLists.txt
index 0d55be5..1824c67 100644
--- a/libc/test/src/math/smoke/CMakeLists.txt
+++ b/libc/test/src/math/smoke/CMakeLists.txt
@@ -878,7 +878,6 @@ add_fp_unittest(
HDRS
LdExpTest.h
DEPENDS
- libc.include.math
libc.src.math.ldexp
libc.src.__support.CPP.limits
libc.src.__support.FPUtil.fp_bits
@@ -894,7 +893,6 @@ add_fp_unittest(
HDRS
LdExpTest.h
DEPENDS
- libc.include.math
libc.src.math.ldexpf
libc.src.__support.CPP.limits
libc.src.__support.FPUtil.fp_bits
@@ -910,7 +908,6 @@ add_fp_unittest(
HDRS
LdExpTest.h
DEPENDS
- libc.include.math
libc.src.math.ldexpl
libc.src.__support.CPP.limits
libc.src.__support.FPUtil.fp_bits
@@ -918,6 +915,21 @@ add_fp_unittest(
)
add_fp_unittest(
+ ldexpf128_test
+ SUITE
+ libc-math-smoke-tests
+ SRCS
+ ldexpf128_test.cpp
+ HDRS
+ LdExpTest.h
+ DEPENDS
+ libc.src.math.ldexpf128
+ libc.src.__support.CPP.limits
+ libc.src.__support.FPUtil.fp_bits
+ libc.src.__support.FPUtil.normal_float
+)
+
+add_fp_unittest(
logb_test
SUITE
libc-math-smoke-tests
diff --git a/libc/test/src/math/smoke/LdExpTest.h b/libc/test/src/math/smoke/LdExpTest.h
index fe84b5f..7d17071 100644
--- a/libc/test/src/math/smoke/LdExpTest.h
+++ b/libc/test/src/math/smoke/LdExpTest.h
@@ -15,7 +15,6 @@
#include "test/UnitTest/FPMatcher.h"
#include "test/UnitTest/Test.h"
-#include <math.h>
#include <stdint.h>
template <typename T>
@@ -163,6 +162,7 @@ public:
TEST_F(LlvmLibcLdExpTest, UnderflowToZeroOnSubnormal) { \
testUnderflowToZeroOnSubnormal(&func); \
} \
- TEST_F(LlvmLibcLdExpTest, NormalOperation) { testNormalOperation(&func); }
+ TEST_F(LlvmLibcLdExpTest, NormalOperation) { testNormalOperation(&func); } \
+ static_assert(true)
#endif // LLVM_LIBC_TEST_SRC_MATH_LDEXPTEST_H
diff --git a/libc/test/src/math/smoke/ldexp_test.cpp b/libc/test/src/math/smoke/ldexp_test.cpp
index aad580f..adbf603 100644
--- a/libc/test/src/math/smoke/ldexp_test.cpp
+++ b/libc/test/src/math/smoke/ldexp_test.cpp
@@ -10,4 +10,4 @@
#include "src/math/ldexp.h"
-LIST_LDEXP_TESTS(double, LIBC_NAMESPACE::ldexp)
+LIST_LDEXP_TESTS(double, LIBC_NAMESPACE::ldexp);
diff --git a/libc/test/src/math/smoke/ldexpf128_test.cpp b/libc/test/src/math/smoke/ldexpf128_test.cpp
new file mode 100644
index 0000000..7ab34a4
--- /dev/null
+++ b/libc/test/src/math/smoke/ldexpf128_test.cpp
@@ -0,0 +1,13 @@
+//===-- Unittests for ldexpf128 -------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "LdExpTest.h"
+
+#include "src/math/ldexpf128.h"
+
+LIST_LDEXP_TESTS(float128, LIBC_NAMESPACE::ldexpf128);
diff --git a/libc/test/src/math/smoke/ldexpf_test.cpp b/libc/test/src/math/smoke/ldexpf_test.cpp
index f4cce37..02fd8c5 100644
--- a/libc/test/src/math/smoke/ldexpf_test.cpp
+++ b/libc/test/src/math/smoke/ldexpf_test.cpp
@@ -10,4 +10,4 @@
#include "src/math/ldexpf.h"
-LIST_LDEXP_TESTS(float, LIBC_NAMESPACE::ldexpf)
+LIST_LDEXP_TESTS(float, LIBC_NAMESPACE::ldexpf);
diff --git a/libc/test/src/math/smoke/ldexpl_test.cpp b/libc/test/src/math/smoke/ldexpl_test.cpp
index 405e533..9bc17c5 100644
--- a/libc/test/src/math/smoke/ldexpl_test.cpp
+++ b/libc/test/src/math/smoke/ldexpl_test.cpp
@@ -10,4 +10,4 @@
#include "src/math/ldexpl.h"
-LIST_LDEXP_TESTS(long double, LIBC_NAMESPACE::ldexpl)
+LIST_LDEXP_TESTS(long double, LIBC_NAMESPACE::ldexpl);
diff --git a/libc/test/src/stdbit/CMakeLists.txt b/libc/test/src/stdbit/CMakeLists.txt
index bc7e49d..203f48b 100644
--- a/libc/test/src/stdbit/CMakeLists.txt
+++ b/libc/test/src/stdbit/CMakeLists.txt
@@ -6,6 +6,9 @@ set(prefixes
trailing_zeros
trailing_ones
first_leading_zero
+ first_leading_one
+ first_trailing_zero
+ first_trailing_one
)
set(suffixes c s i l ll)
foreach(prefix IN LISTS prefixes)
diff --git a/libc/test/src/stdbit/stdc_first_leading_one_uc_test.cpp b/libc/test/src/stdbit/stdc_first_leading_one_uc_test.cpp
new file mode 100644
index 0000000..b8c8db5
--- /dev/null
+++ b/libc/test/src/stdbit/stdc_first_leading_one_uc_test.cpp
@@ -0,0 +1,21 @@
+//===-- Unittests for stdc_first_leading_one_uc ---------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "src/__support/CPP/limits.h"
+#include "src/stdbit/stdc_first_leading_one_uc.h"
+#include "test/UnitTest/Test.h"
+
+TEST(LlvmLibcStdcFirstLeadingOneUcTest, Zero) {
+ EXPECT_EQ(LIBC_NAMESPACE::stdc_first_leading_one_uc(0U), 0U);
+}
+
+TEST(LlvmLibcStdcFirstLeadingOneUcTest, OneHot) {
+ for (unsigned i = 0U; i != UCHAR_WIDTH; ++i)
+ EXPECT_EQ(LIBC_NAMESPACE::stdc_first_leading_one_uc(1U << i),
+ UCHAR_WIDTH - i);
+}
diff --git a/libc/test/src/stdbit/stdc_first_leading_one_ui_test.cpp b/libc/test/src/stdbit/stdc_first_leading_one_ui_test.cpp
new file mode 100644
index 0000000..319d748
--- /dev/null
+++ b/libc/test/src/stdbit/stdc_first_leading_one_ui_test.cpp
@@ -0,0 +1,21 @@
+//===-- Unittests for stdc_first_leading_one_ui ---------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "src/__support/CPP/limits.h"
+#include "src/stdbit/stdc_first_leading_one_ui.h"
+#include "test/UnitTest/Test.h"
+
+TEST(LlvmLibcStdcFirstLeadingOneUiTest, Zero) {
+ EXPECT_EQ(LIBC_NAMESPACE::stdc_first_leading_one_ui(0U), 0U);
+}
+
+TEST(LlvmLibcStdcFirstLeadingOneUiTest, OneHot) {
+ for (unsigned i = 0U; i != UINT_WIDTH; ++i)
+ EXPECT_EQ(LIBC_NAMESPACE::stdc_first_leading_one_ui(1U << i),
+ UINT_WIDTH - i);
+}
diff --git a/libc/test/src/stdbit/stdc_first_leading_one_ul_test.cpp b/libc/test/src/stdbit/stdc_first_leading_one_ul_test.cpp
new file mode 100644
index 0000000..5884cec
--- /dev/null
+++ b/libc/test/src/stdbit/stdc_first_leading_one_ul_test.cpp
@@ -0,0 +1,21 @@
+//===-- Unittests for stdc_first_leading_one_ul ---------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "src/__support/CPP/limits.h"
+#include "src/stdbit/stdc_first_leading_one_ul.h"
+#include "test/UnitTest/Test.h"
+
+TEST(LlvmLibcStdcFirstLeadingOneUlTest, Zero) {
+ EXPECT_EQ(LIBC_NAMESPACE::stdc_first_leading_one_ul(0UL), 0U);
+}
+
+TEST(LlvmLibcStdcFirstLeadingOneUlTest, OneHot) {
+ for (unsigned i = 0U; i != ULONG_WIDTH; ++i)
+ EXPECT_EQ(LIBC_NAMESPACE::stdc_first_leading_one_ul(1UL << i),
+ ULONG_WIDTH - i);
+}
diff --git a/libc/test/src/stdbit/stdc_first_leading_one_ull_test.cpp b/libc/test/src/stdbit/stdc_first_leading_one_ull_test.cpp
new file mode 100644
index 0000000..bf57f16
--- /dev/null
+++ b/libc/test/src/stdbit/stdc_first_leading_one_ull_test.cpp
@@ -0,0 +1,21 @@
+//===-- Unittests for stdc_first_leading_one_ull --------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "src/__support/CPP/limits.h"
+#include "src/stdbit/stdc_first_leading_one_ull.h"
+#include "test/UnitTest/Test.h"
+
+TEST(LlvmLibcStdcFirstLeadingOneUllTest, Zero) {
+ EXPECT_EQ(LIBC_NAMESPACE::stdc_first_leading_one_ull(0ULL), 0U);
+}
+
+TEST(LlvmLibcStdcFirstLeadingOneUllTest, OneHot) {
+ for (unsigned i = 0U; i != ULLONG_WIDTH; ++i)
+ EXPECT_EQ(LIBC_NAMESPACE::stdc_first_leading_one_ull(1ULL << i),
+ ULLONG_WIDTH - i);
+}
diff --git a/libc/test/src/stdbit/stdc_first_leading_one_us_test.cpp b/libc/test/src/stdbit/stdc_first_leading_one_us_test.cpp
new file mode 100644
index 0000000..e948833
--- /dev/null
+++ b/libc/test/src/stdbit/stdc_first_leading_one_us_test.cpp
@@ -0,0 +1,21 @@
+//===-- Unittests for stdc_first_leading_one_us ---------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "src/__support/CPP/limits.h"
+#include "src/stdbit/stdc_first_leading_one_us.h"
+#include "test/UnitTest/Test.h"
+
+TEST(LlvmLibcStdcFirstLeadingOneUsTest, Zero) {
+ EXPECT_EQ(LIBC_NAMESPACE::stdc_first_leading_one_us(0U), 0U);
+}
+
+TEST(LlvmLibcStdcFirstLeadingOneUsTest, OneHot) {
+ for (unsigned i = 0U; i != USHRT_WIDTH; ++i)
+ EXPECT_EQ(LIBC_NAMESPACE::stdc_first_leading_one_us(1U << i),
+ USHRT_WIDTH - i);
+}
diff --git a/libc/test/src/stdbit/stdc_first_trailing_one_uc_test.cpp b/libc/test/src/stdbit/stdc_first_trailing_one_uc_test.cpp
new file mode 100644
index 0000000..ed2b492
--- /dev/null
+++ b/libc/test/src/stdbit/stdc_first_trailing_one_uc_test.cpp
@@ -0,0 +1,20 @@
+//===-- Unittests for stdc_first_trailing_one_uc -------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "src/__support/CPP/limits.h"
+#include "src/stdbit/stdc_first_trailing_one_uc.h"
+#include "test/UnitTest/Test.h"
+
+TEST(LlvmLibcStdcFirstTrailingOneUcTest, ALL) {
+ EXPECT_EQ(LIBC_NAMESPACE::stdc_first_trailing_one_uc(UCHAR_MAX), 0U);
+}
+
+TEST(LlvmLibcStdcFirstTrailingOneUcTest, OneHot) {
+ for (unsigned i = 0U; i != UCHAR_WIDTH; ++i)
+ EXPECT_EQ(LIBC_NAMESPACE::stdc_first_trailing_one_uc(1U << i), i + 1);
+}
diff --git a/libc/test/src/stdbit/stdc_first_trailing_one_ui_test.cpp b/libc/test/src/stdbit/stdc_first_trailing_one_ui_test.cpp
new file mode 100644
index 0000000..137c8a4
--- /dev/null
+++ b/libc/test/src/stdbit/stdc_first_trailing_one_ui_test.cpp
@@ -0,0 +1,20 @@
+//===-- Unittests for stdc_first_trailing_one_ui -------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "src/__support/CPP/limits.h"
+#include "src/stdbit/stdc_first_trailing_one_ui.h"
+#include "test/UnitTest/Test.h"
+
+TEST(LlvmLibcStdcFirstTrailingOneUiTest, ALL) {
+ EXPECT_EQ(LIBC_NAMESPACE::stdc_first_trailing_one_ui(UINT_MAX), 0U);
+}
+
+TEST(LlvmLibcStdcFirstTrailingOneUiTest, OneHot) {
+ for (unsigned i = 0U; i != UINT_WIDTH; ++i)
+ EXPECT_EQ(LIBC_NAMESPACE::stdc_first_trailing_one_ui(1U << i), i + 1);
+}
diff --git a/libc/test/src/stdbit/stdc_first_trailing_one_ul_test.cpp b/libc/test/src/stdbit/stdc_first_trailing_one_ul_test.cpp
new file mode 100644
index 0000000..3fc1f3f
--- /dev/null
+++ b/libc/test/src/stdbit/stdc_first_trailing_one_ul_test.cpp
@@ -0,0 +1,20 @@
+//===-- Unittests for stdc_first_trailing_one_ul -------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "src/__support/CPP/limits.h"
+#include "src/stdbit/stdc_first_trailing_one_ul.h"
+#include "test/UnitTest/Test.h"
+
+TEST(LlvmLibcStdcFirstTrailingOneUlTest, ALL) {
+ EXPECT_EQ(LIBC_NAMESPACE::stdc_first_trailing_one_ul(ULONG_MAX), 0U);
+}
+
+TEST(LlvmLibcStdcFirstTrailingOneUlTest, OneHot) {
+ for (unsigned i = 0U; i != ULONG_WIDTH; ++i)
+ EXPECT_EQ(LIBC_NAMESPACE::stdc_first_trailing_one_ul(1UL << i), i + 1);
+}
diff --git a/libc/test/src/stdbit/stdc_first_trailing_one_ull_test.cpp b/libc/test/src/stdbit/stdc_first_trailing_one_ull_test.cpp
new file mode 100644
index 0000000..5719e09
--- /dev/null
+++ b/libc/test/src/stdbit/stdc_first_trailing_one_ull_test.cpp
@@ -0,0 +1,20 @@
+//===-- Unittests for stdc_first_trailing_one_ull ------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "src/__support/CPP/limits.h"
+#include "src/stdbit/stdc_first_trailing_one_ull.h"
+#include "test/UnitTest/Test.h"
+
+TEST(LlvmLibcStdcFirstTrailingOneUllTest, ALL) {
+ EXPECT_EQ(LIBC_NAMESPACE::stdc_first_trailing_one_ull(ULLONG_MAX), 0U);
+}
+
+TEST(LlvmLibcStdcFirstTrailingOneUllTest, OneHot) {
+ for (unsigned i = 0U; i != ULLONG_WIDTH; ++i)
+ EXPECT_EQ(LIBC_NAMESPACE::stdc_first_trailing_one_ull(1ULL << i), i + 1);
+}
diff --git a/libc/test/src/stdbit/stdc_first_trailing_one_us_test.cpp b/libc/test/src/stdbit/stdc_first_trailing_one_us_test.cpp
new file mode 100644
index 0000000..6002155
--- /dev/null
+++ b/libc/test/src/stdbit/stdc_first_trailing_one_us_test.cpp
@@ -0,0 +1,20 @@
+//===-- Unittests for stdc_first_trailing_one_us -------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "src/__support/CPP/limits.h"
+#include "src/stdbit/stdc_first_trailing_one_us.h"
+#include "test/UnitTest/Test.h"
+
+TEST(LlvmLibcStdcFirstTrailingOneUsTest, ALL) {
+ EXPECT_EQ(LIBC_NAMESPACE::stdc_first_trailing_one_us(USHRT_MAX), 0U);
+}
+
+TEST(LlvmLibcStdcFirstTrailingOneUsTest, OneHot) {
+ for (unsigned i = 0U; i != USHRT_WIDTH; ++i)
+ EXPECT_EQ(LIBC_NAMESPACE::stdc_first_trailing_one_us(1U << i), i + 1);
+}
diff --git a/libc/test/src/stdbit/stdc_first_trailing_zero_uc_test.cpp b/libc/test/src/stdbit/stdc_first_trailing_zero_uc_test.cpp
new file mode 100644
index 0000000..2b17aa6
--- /dev/null
+++ b/libc/test/src/stdbit/stdc_first_trailing_zero_uc_test.cpp
@@ -0,0 +1,20 @@
+//===-- Unittests for stdc_first_trailing_zero_uc -------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "src/__support/CPP/limits.h"
+#include "src/stdbit/stdc_first_trailing_zero_uc.h"
+#include "test/UnitTest/Test.h"
+
+TEST(LlvmLibcStdcFirstTrailingZeroUcTest, ALL) {
+ EXPECT_EQ(LIBC_NAMESPACE::stdc_first_trailing_zero_uc(UCHAR_MAX), 0U);
+}
+
+TEST(LlvmLibcStdcFirstTrailingZeroUcTest, ZeroHot) {
+ for (unsigned i = 0U; i != UCHAR_WIDTH; ++i)
+ EXPECT_EQ(LIBC_NAMESPACE::stdc_first_trailing_zero_uc(~(1U << i)), i + 1);
+}
diff --git a/libc/test/src/stdbit/stdc_first_trailing_zero_ui_test.cpp b/libc/test/src/stdbit/stdc_first_trailing_zero_ui_test.cpp
new file mode 100644
index 0000000..0836614
--- /dev/null
+++ b/libc/test/src/stdbit/stdc_first_trailing_zero_ui_test.cpp
@@ -0,0 +1,20 @@
+//===-- Unittests for stdc_first_trailing_zero_ui -------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "src/__support/CPP/limits.h"
+#include "src/stdbit/stdc_first_trailing_zero_ui.h"
+#include "test/UnitTest/Test.h"
+
+TEST(LlvmLibcStdcFirstTrailingZeroUiTest, ALL) {
+ EXPECT_EQ(LIBC_NAMESPACE::stdc_first_trailing_zero_ui(UINT_MAX), 0U);
+}
+
+TEST(LlvmLibcStdcFirstTrailingZeroUiTest, ZeroHot) {
+ for (unsigned i = 0U; i != UINT_WIDTH; ++i)
+ EXPECT_EQ(LIBC_NAMESPACE::stdc_first_trailing_zero_ui(~(1U << i)), i + 1);
+}
diff --git a/libc/test/src/stdbit/stdc_first_trailing_zero_ul_test.cpp b/libc/test/src/stdbit/stdc_first_trailing_zero_ul_test.cpp
new file mode 100644
index 0000000..0c18cc7
--- /dev/null
+++ b/libc/test/src/stdbit/stdc_first_trailing_zero_ul_test.cpp
@@ -0,0 +1,20 @@
+//===-- Unittests for stdc_first_trailing_zero_ul -------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "src/__support/CPP/limits.h"
+#include "src/stdbit/stdc_first_trailing_zero_ul.h"
+#include "test/UnitTest/Test.h"
+
+TEST(LlvmLibcStdcFirstTrailingZeroUlTest, ALL) {
+ EXPECT_EQ(LIBC_NAMESPACE::stdc_first_trailing_zero_ul(ULONG_MAX), 0U);
+}
+
+TEST(LlvmLibcStdcFirstTrailingZeroUlTest, ZeroHot) {
+ for (unsigned i = 0U; i != ULONG_WIDTH; ++i)
+ EXPECT_EQ(LIBC_NAMESPACE::stdc_first_trailing_zero_ul(~(1UL << i)), i + 1);
+}
diff --git a/libc/test/src/stdbit/stdc_first_trailing_zero_ull_test.cpp b/libc/test/src/stdbit/stdc_first_trailing_zero_ull_test.cpp
new file mode 100644
index 0000000..5dce429
--- /dev/null
+++ b/libc/test/src/stdbit/stdc_first_trailing_zero_ull_test.cpp
@@ -0,0 +1,21 @@
+//===-- Unittests for stdc_first_trailing_zero_ull ------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "src/__support/CPP/limits.h"
+#include "src/stdbit/stdc_first_trailing_zero_ull.h"
+#include "test/UnitTest/Test.h"
+
+TEST(LlvmLibcStdcFirstTrailingZeroUllTest, ALL) {
+ EXPECT_EQ(LIBC_NAMESPACE::stdc_first_trailing_zero_ull(ULLONG_MAX), 0U);
+}
+
+TEST(LlvmLibcStdcFirstTrailingZeroUllTest, ZeroHot) {
+ for (unsigned i = 0U; i != ULLONG_WIDTH; ++i)
+ EXPECT_EQ(LIBC_NAMESPACE::stdc_first_trailing_zero_ull(~(1ULL << i)),
+ i + 1);
+}
diff --git a/libc/test/src/stdbit/stdc_first_trailing_zero_us_test.cpp b/libc/test/src/stdbit/stdc_first_trailing_zero_us_test.cpp
new file mode 100644
index 0000000..e370379
--- /dev/null
+++ b/libc/test/src/stdbit/stdc_first_trailing_zero_us_test.cpp
@@ -0,0 +1,20 @@
+//===-- Unittests for stdc_first_trailing_zero_us -------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "src/__support/CPP/limits.h"
+#include "src/stdbit/stdc_first_trailing_zero_us.h"
+#include "test/UnitTest/Test.h"
+
+TEST(LlvmLibcStdcFirstTrailingZeroUsTest, ALL) {
+ EXPECT_EQ(LIBC_NAMESPACE::stdc_first_trailing_zero_us(USHRT_MAX), 0U);
+}
+
+TEST(LlvmLibcStdcFirstTrailingZeroUsTest, ZeroHot) {
+ for (unsigned i = 0U; i != USHRT_WIDTH; ++i)
+ EXPECT_EQ(LIBC_NAMESPACE::stdc_first_trailing_zero_us(~(1U << i)), i + 1);
+}
diff --git a/libc/utils/gpu/server/rpc_server.cpp b/libc/utils/gpu/server/rpc_server.cpp
index a2e5d0f..4e535a2 100644
--- a/libc/utils/gpu/server/rpc_server.cpp
+++ b/libc/utils/gpu/server/rpc_server.cpp
@@ -396,62 +396,42 @@ const void *rpc_get_client_buffer(uint32_t device_id) {
uint64_t rpc_get_client_size() { return sizeof(rpc::Client); }
-using ServerPort = std::variant<rpc::Server<1>::Port *, rpc::Server<32>::Port *,
- rpc::Server<64>::Port *>;
+using ServerPort = std::variant<rpc::Server<0>::Port *>;
ServerPort get_port(rpc_port_t ref) {
- if (ref.lane_size == 1)
- return reinterpret_cast<rpc::Server<1>::Port *>(ref.handle);
- else if (ref.lane_size == 32)
- return reinterpret_cast<rpc::Server<32>::Port *>(ref.handle);
- else if (ref.lane_size == 64)
- return reinterpret_cast<rpc::Server<64>::Port *>(ref.handle);
- else
- __builtin_unreachable();
+ return reinterpret_cast<rpc::Server<0>::Port *>(ref.handle);
}
void rpc_send(rpc_port_t ref, rpc_port_callback_ty callback, void *data) {
- auto port = get_port(ref);
- std::visit(
- [=](auto &port) {
- port->send([=](rpc::Buffer *buffer) {
- callback(reinterpret_cast<rpc_buffer_t *>(buffer), data);
- });
- },
- port);
+ auto port = reinterpret_cast<rpc::Server<0>::Port *>(ref.handle);
+ port->send([=](rpc::Buffer *buffer) {
+ callback(reinterpret_cast<rpc_buffer_t *>(buffer), data);
+ });
}
void rpc_send_n(rpc_port_t ref, const void *const *src, uint64_t *size) {
- auto port = get_port(ref);
- std::visit([=](auto &port) { port->send_n(src, size); }, port);
+ auto port = reinterpret_cast<rpc::Server<0>::Port *>(ref.handle);
+ port->send_n(src, size);
}
void rpc_recv(rpc_port_t ref, rpc_port_callback_ty callback, void *data) {
- auto port = get_port(ref);
- std::visit(
- [=](auto &port) {
- port->recv([=](rpc::Buffer *buffer) {
- callback(reinterpret_cast<rpc_buffer_t *>(buffer), data);
- });
- },
- port);
+ auto port = reinterpret_cast<rpc::Server<0>::Port *>(ref.handle);
+ port->recv([=](rpc::Buffer *buffer) {
+ callback(reinterpret_cast<rpc_buffer_t *>(buffer), data);
+ });
}
void rpc_recv_n(rpc_port_t ref, void **dst, uint64_t *size, rpc_alloc_ty alloc,
void *data) {
- auto port = get_port(ref);
+ auto port = reinterpret_cast<rpc::Server<0>::Port *>(ref.handle);
auto alloc_fn = [=](uint64_t size) { return alloc(size, data); };
- std::visit([=](auto &port) { port->recv_n(dst, size, alloc_fn); }, port);
+ port->recv_n(dst, size, alloc_fn);
}
void rpc_recv_and_send(rpc_port_t ref, rpc_port_callback_ty callback,
void *data) {
- auto port = get_port(ref);
- std::visit(
- [=](auto &port) {
- port->recv_and_send([=](rpc::Buffer *buffer) {
- callback(reinterpret_cast<rpc_buffer_t *>(buffer), data);
- });
- },
- port);
+ auto port = reinterpret_cast<rpc::Server<0>::Port *>(ref.handle);
+ port->recv_and_send([=](rpc::Buffer *buffer) {
+ callback(reinterpret_cast<rpc_buffer_t *>(buffer), data);
+ });
}
diff --git a/libcxx/docs/Modules.rst b/libcxx/docs/Modules.rst
index 533c3fb..ee2b81d 100644
--- a/libcxx/docs/Modules.rst
+++ b/libcxx/docs/Modules.rst
@@ -218,9 +218,13 @@ Building this project is done with the following steps, assuming the files
$ mkdir build
$ cmake -G Ninja -S . -B build -DCMAKE_CXX_COMPILER=<path-to-compiler> -DLIBCXX_BUILD=<build>
+ $ ninja -j1 std -C build
$ ninja -C build
$ build/main
+.. note:: The ``std`` dependencies of ``std.compat`` is not always resolved when
+ building the ``std`` target using multiple jobs.
+
.. warning:: ``<path-to-compiler>`` should point point to the real binary and
not to a symlink.
diff --git a/libcxx/docs/Status/RangesAlgorithms.csv b/libcxx/docs/Status/RangesAlgorithms.csv
index 2fe530b..f7a51f7 100644
--- a/libcxx/docs/Status/RangesAlgorithms.csv
+++ b/libcxx/docs/Status/RangesAlgorithms.csv
@@ -3,13 +3,13 @@ C++20,all C++20 algorithms,N/A,N/A,✅
C++23,`find_last <https://wg21.link/P1223R5>`_,Unassigned,No patch yet,Not started
C++23,`find_last_if <https://wg21.link/P1223R5>`_,Unassigned,No patch yet,Not started
C++23,`find_last_if_not <https://wg21.link/P1223R5>`_,Unassigned,No patch yet,Not started
-C++23,`starts_with <https://wg21.link/P1659R3>`_,Zijun Zhao,`D150735 <https://llvm.org/D150735>`_,✅
-C++23,`ends_with <https://wg21.link/P1659R3>`_,Zijun Zhao,No patch yet,In Progress
+C++23,`starts_with <https://wg21.link/P1659R3>`_,Zijun Zhao,`D150735 <https://llvm.org/D150735>`_,Complete
+C++23,`ends_with <https://wg21.link/P1659R3>`_,Zijun Zhao, `D150831 <https://llvm.org/D150831>`_,Complete
C++23,`shift_left <https://wg21.link/p2440r1>`_,Unassigned,No patch yet,Not started
C++23,`shift_right <https://wg21.link/p2440r1>`_,Unassigned,No patch yet,Not started
C++23,`iota (algorithm) <https://wg21.link/p2440r1>`_,Unassigned,No patch yet,Not started
C++23,`fold <https://wg21.link/p2322r5>`_,Unassigned,No patch yet,Not started
-C++23,`contains <https://wg21.link/p2302r2>`_,Zijun Zhao,No patch yet,In Progress
+C++23,`contains <https://wg21.link/p2302r2>`_,Zijun Zhao, `#65148 <https://github.com/llvm/llvm-project/pull/65148>`_,Complete
C++23,`fold_left_with_iter <https://wg21.link/p2322r6>`_,Christopher Di Bella,N/A,Complete
C++23,`fold_left <https://wg21.link/p2322r6>`_,Christopher Di Bella,N/A,Complete
C++23,`fold_left_first_with_iter <https://wg21.link/p2322r6>`_,Christopher Di Bella,N/A,In progress
diff --git a/libcxx/include/CMakeLists.txt b/libcxx/include/CMakeLists.txt
index d55dc66..b440683 100644
--- a/libcxx/include/CMakeLists.txt
+++ b/libcxx/include/CMakeLists.txt
@@ -110,6 +110,7 @@ set(files
__algorithm/ranges_binary_search.h
__algorithm/ranges_clamp.h
__algorithm/ranges_contains.h
+ __algorithm/ranges_contains_subrange.h
__algorithm/ranges_copy.h
__algorithm/ranges_copy_backward.h
__algorithm/ranges_copy_if.h
diff --git a/libcxx/include/__algorithm/ranges_contains_subrange.h b/libcxx/include/__algorithm/ranges_contains_subrange.h
new file mode 100644
index 0000000..4cd03cb
--- /dev/null
+++ b/libcxx/include/__algorithm/ranges_contains_subrange.h
@@ -0,0 +1,99 @@
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef _LIBCPP___ALGORITHM_RANGES_CONTAINS_SUBRANGE_H
+#define _LIBCPP___ALGORITHM_RANGES_CONTAINS_SUBRANGE_H
+
+#include <__algorithm/ranges_search.h>
+#include <__config>
+#include <__functional/identity.h>
+#include <__functional/ranges_operations.h>
+#include <__functional/reference_wrapper.h>
+#include <__iterator/concepts.h>
+#include <__iterator/distance.h>
+#include <__iterator/indirectly_comparable.h>
+#include <__iterator/projected.h>
+#include <__ranges/access.h>
+#include <__ranges/concepts.h>
+#include <__ranges/subrange.h>
+#include <__utility/move.h>
+
+#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
+# pragma GCC system_header
+#endif
+
+_LIBCPP_PUSH_MACROS
+#include <__undef_macros>
+
+#if _LIBCPP_STD_VER >= 23
+
+_LIBCPP_BEGIN_NAMESPACE_STD
+
+namespace ranges {
+namespace __contains_subrange {
+struct __fn {
+ template <forward_iterator _Iter1,
+ sentinel_for<_Iter1> _Sent1,
+ forward_iterator _Iter2,
+ sentinel_for<_Iter2> _Sent2,
+ class _Pred = ranges::equal_to,
+ class _Proj1 = identity,
+ class _Proj2 = identity>
+ requires indirectly_comparable<_Iter1, _Iter2, _Pred, _Proj1, _Proj2>
+ _LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI constexpr bool static operator()(
+ _Iter1 __first1,
+ _Sent1 __last1,
+ _Iter2 __first2,
+ _Sent2 __last2,
+ _Pred __pred = {},
+ _Proj1 __proj1 = {},
+ _Proj2 __proj2 = {}) {
+ auto __n2 = ranges::distance(__first2, __last2);
+ if (__n2 == 0)
+ return true;
+
+ auto __ret = ranges::search(
+ std::move(__first1), __last1, std::move(__first2), __last2, __pred, std::ref(__proj1), std::ref(__proj2));
+ return __ret.empty() == false;
+ }
+
+ template <forward_range _Range1,
+ forward_range _Range2,
+ class _Pred = ranges::equal_to,
+ class _Proj1 = identity,
+ class _Proj2 = identity>
+ requires indirectly_comparable<iterator_t<_Range1>, iterator_t<_Range2>, _Pred, _Proj1, _Proj2>
+ _LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI constexpr bool static
+ operator()(_Range1&& __range1, _Range2&& __range2, _Pred __pred = {}, _Proj1 __proj1 = {}, _Proj2 __proj2 = {}) {
+ auto __n2 = 0;
+ if constexpr (sized_range<_Range2>) {
+ __n2 = ranges::size(__range2);
+ } else {
+ __n2 = std::distance(cbegin(__range2), cend(__range2));
+ }
+ if (__n2 == 0)
+ return true;
+
+ auto __ret = ranges::search(__range1, __range2, __pred, std::ref(__proj1), std::ref(__proj2));
+ return __ret.empty() == false;
+ }
+};
+} // namespace __contains_subrange
+
+inline namespace __cpo {
+inline constexpr auto contains_subrange = __contains_subrange::__fn{};
+} // namespace __cpo
+} // namespace ranges
+
+_LIBCPP_END_NAMESPACE_STD
+
+#endif // _LIBCPP_STD_VER >= 23
+
+_LIBCPP_POP_MACROS
+
+#endif // _LIBCPP___ALGORITHM_RANGES_CONTAINS_SUBRANGE_H
diff --git a/libcxx/include/algorithm b/libcxx/include/algorithm
index 1176602..70e30bc 100644
--- a/libcxx/include/algorithm
+++ b/libcxx/include/algorithm
@@ -217,6 +217,19 @@ namespace ranges {
constexpr ranges::minmax_element_result<borrowed_iterator_t<R>>
minmax_element(R&& r, Comp comp = {}, Proj proj = {}); // since C++20
+ template<forward_iterator I1, sentinel_for<I1> S1,
+ forward_iterator I2, sentinel_for<I2> S2,
+ class Pred = ranges::equal_to, class Proj1 = identity, class Proj2 = identity>
+ requires indirectly_comparable<I1, I2, Pred, Proj1, Proj2>
+ constexpr bool contains_subrange(I1 first1, S1 last1, I2 first2, S2 last2,
+ Pred pred = {}, Proj1 proj1 = {}, Proj2 proj2 = {}); // since C++23
+
+ template<forward_range R1, forward_range R2,
+ class Pred = ranges::equal_to, class Proj1 = identity, class Proj2 = identity>
+ requires indirectly_comparable<iterator_t<R1>, iterator_t<R2>, Pred, Proj1, Proj2>
+ constexpr bool contains_subrange(R1&& r1, R2&& r2, Pred pred = {},
+ Proj1 proj1 = {}, Proj2 proj2 = {}); // since C++23
+
template<class I, class O>
using copy_result = in_out_result<I, O>; // since C++20
@@ -1875,6 +1888,7 @@ template <class BidirectionalIterator, class Compare>
#include <__algorithm/ranges_binary_search.h>
#include <__algorithm/ranges_clamp.h>
#include <__algorithm/ranges_contains.h>
+#include <__algorithm/ranges_contains_subrange.h>
#include <__algorithm/ranges_copy.h>
#include <__algorithm/ranges_copy_backward.h>
#include <__algorithm/ranges_copy_if.h>
diff --git a/libcxx/include/libcxx.imp b/libcxx/include/libcxx.imp
index 69de470..3f056d4 100644
--- a/libcxx/include/libcxx.imp
+++ b/libcxx/include/libcxx.imp
@@ -110,6 +110,7 @@
{ include: [ "<__algorithm/ranges_binary_search.h>", "private", "<algorithm>", "public" ] },
{ include: [ "<__algorithm/ranges_clamp.h>", "private", "<algorithm>", "public" ] },
{ include: [ "<__algorithm/ranges_contains.h>", "private", "<algorithm>", "public" ] },
+ { include: [ "<__algorithm/ranges_contains_subrange.h>", "private", "<algorithm>", "public" ] },
{ include: [ "<__algorithm/ranges_copy.h>", "private", "<algorithm>", "public" ] },
{ include: [ "<__algorithm/ranges_copy_backward.h>", "private", "<algorithm>", "public" ] },
{ include: [ "<__algorithm/ranges_copy_if.h>", "private", "<algorithm>", "public" ] },
diff --git a/libcxx/include/module.modulemap.in b/libcxx/include/module.modulemap.in
index f3246f8..63af3a9 100644
--- a/libcxx/include/module.modulemap.in
+++ b/libcxx/include/module.modulemap.in
@@ -778,6 +778,7 @@ module std_private_algorithm_ranges_clamp [system
export std_private_functional_ranges_operations
}
module std_private_algorithm_ranges_contains [system] { header "__algorithm/ranges_contains.h" }
+module std_private_algorithm_ranges_contains_subrange [system] { header "__algorithm/ranges_contains_subrange.h" }
module std_private_algorithm_ranges_copy [system] {
header "__algorithm/ranges_copy.h"
export std_private_algorithm_in_out_result
diff --git a/libcxx/include/sstream b/libcxx/include/sstream
index 6c354cf..8862e2ef 100644
--- a/libcxx/include/sstream
+++ b/libcxx/include/sstream
@@ -48,10 +48,12 @@ public:
template <class SAlloc>
explicit basic_stringbuf(const basic_string<char_type, traits_type, SAlloc>& s,
ios_base::openmode which = ios_base::in | ios_base::out); // C++20
+ basic_stringbuf(const basic_stringbuf&) = delete;
basic_stringbuf(basic_stringbuf&& rhs);
basic_stringbuf(basic_stringbuf&& rhs, const allocator_type& a); // C++20
// [stringbuf.assign] Assign and swap:
+ basic_stringbuf& operator=(const basic_stringbuf&) = delete;
basic_stringbuf& operator=(basic_stringbuf&& rhs);
void swap(basic_stringbuf& rhs) noexcept(see below); // conditionally noexcept since C++20
@@ -119,9 +121,11 @@ public:
template <class SAlloc>
explicit basic_istringstream(const basic_string<char_type, traits_type, SAlloc>& s,
ios_base::openmode which = ios_base::in); // C++20
+ basic_istringstream(const basic_istringstream&) = delete;
basic_istringstream(basic_istringstream&& rhs);
// [istringstream.assign] Assign and swap:
+ basic_istringstream& operator=(const basic_istringstream&) = delete;
basic_istringstream& operator=(basic_istringstream&& rhs);
void swap(basic_istringstream& rhs);
@@ -178,9 +182,11 @@ public:
template <class SAlloc>
explicit basic_ostringstream(const basic_string<char_type, traits_type, SAlloc>& s,
ios_base::openmode which = ios_base::out); // C++20
+ basic_ostringstream(const basic_ostringstream&) = delete;
basic_ostringstream(basic_ostringstream&& rhs);
// [ostringstream.assign] Assign and swap:
+ basic_ostringstream& operator=(const basic_ostringstream&) = delete;
basic_ostringstream& operator=(basic_ostringstream&& rhs);
void swap(basic_ostringstream& rhs);
@@ -237,9 +243,11 @@ public:
template <class SAlloc>
explicit basic_stringstream(const basic_string<char_type, traits_type, SAlloc>& s,
ios_base::openmode which = ios_base::out | ios_base::in); // C++20
+ basic_stringstream(const basic_stringstream&) = delete;
basic_stringstream(basic_stringstream&& rhs);
// [stringstream.assign] Assign and swap:
+ basic_stringstream& operator=(const basic_stringstream&) = delete;
basic_stringstream& operator=(basic_stringstream&& rhs);
void swap(basic_stringstream& rhs);
@@ -364,6 +372,7 @@ public:
}
#endif // _LIBCPP_STD_VER >= 20
+ basic_stringbuf(const basic_stringbuf&) = delete;
basic_stringbuf(basic_stringbuf&& __rhs) : __mode_(__rhs.__mode_) { __move_init(std::move(__rhs)); }
#if _LIBCPP_STD_VER >= 20
@@ -374,6 +383,7 @@ public:
#endif
// [stringbuf.assign] Assign and swap:
+ basic_stringbuf& operator=(const basic_stringbuf&) = delete;
basic_stringbuf& operator=(basic_stringbuf&& __rhs);
void swap(basic_stringbuf& __rhs)
#if _LIBCPP_STD_VER >= 20
@@ -822,12 +832,14 @@ public:
: basic_istream<_CharT, _Traits>(std::addressof(__sb_)), __sb_(__s, __wch | ios_base::in) {}
#endif // _LIBCPP_STD_VER >= 20
+ basic_istringstream(const basic_istringstream&) = delete;
_LIBCPP_HIDE_FROM_ABI basic_istringstream(basic_istringstream&& __rhs)
: basic_istream<_CharT, _Traits>(std::move(__rhs)), __sb_(std::move(__rhs.__sb_)) {
basic_istream<_CharT, _Traits>::set_rdbuf(&__sb_);
}
// [istringstream.assign] Assign and swap:
+ basic_istringstream& operator=(const basic_istringstream&) = delete;
basic_istringstream& operator=(basic_istringstream&& __rhs) {
basic_istream<char_type, traits_type>::operator=(std::move(__rhs));
__sb_ = std::move(__rhs.__sb_);
@@ -929,12 +941,14 @@ public:
: basic_ostream<_CharT, _Traits>(std::addressof(__sb_)), __sb_(__s, __wch | ios_base::out) {}
#endif // _LIBCPP_STD_VER >= 20
+ basic_ostringstream(const basic_ostringstream&) = delete;
_LIBCPP_HIDE_FROM_ABI basic_ostringstream(basic_ostringstream&& __rhs)
: basic_ostream<_CharT, _Traits>(std::move(__rhs)), __sb_(std::move(__rhs.__sb_)) {
basic_ostream<_CharT, _Traits>::set_rdbuf(&__sb_);
}
// [ostringstream.assign] Assign and swap:
+ basic_ostringstream& operator=(const basic_ostringstream&) = delete;
basic_ostringstream& operator=(basic_ostringstream&& __rhs) {
basic_ostream<char_type, traits_type>::operator=(std::move(__rhs));
__sb_ = std::move(__rhs.__sb_);
@@ -1040,12 +1054,14 @@ public:
: basic_iostream<_CharT, _Traits>(std::addressof(__sb_)), __sb_(__s, __wch) {}
#endif // _LIBCPP_STD_VER >= 20
+ basic_stringstream(const basic_stringstream&) = delete;
_LIBCPP_HIDE_FROM_ABI basic_stringstream(basic_stringstream&& __rhs)
: basic_iostream<_CharT, _Traits>(std::move(__rhs)), __sb_(std::move(__rhs.__sb_)) {
basic_istream<_CharT, _Traits>::set_rdbuf(&__sb_);
}
// [stringstream.assign] Assign and swap:
+ basic_stringstream& operator=(const basic_stringstream&) = delete;
basic_stringstream& operator=(basic_stringstream&& __rhs) {
basic_iostream<char_type, traits_type>::operator=(std::move(__rhs));
__sb_ = std::move(__rhs.__sb_);
diff --git a/libcxx/modules/CMakeLists.txt b/libcxx/modules/CMakeLists.txt
index 0388c04..0dea8cf 100644
--- a/libcxx/modules/CMakeLists.txt
+++ b/libcxx/modules/CMakeLists.txt
@@ -137,6 +137,25 @@ set(LIBCXX_MODULE_STD_COMPAT_SOURCES
std.compat/cwctype.inc
)
+# TODO MODULES the CMakeLists.txt in the build directory is only temporary.
+# This allows using as available in the build directory. Once build systems
+# have proper support for the installed files this will be removed.
+if ("${LIBCXX_GENERATED_INCLUDE_DIR}" STREQUAL "${LIBCXX_GENERATED_INCLUDE_TARGET_DIR}")
+ # This typically happens when the target is not installed.
+ set(LIBCXX_CONFIGURED_INCLUDE_DIRS "${LIBCXX_GENERATED_INCLUDE_DIR}")
+else()
+ # It's important that the arch directory be included first so that its header files
+ # which interpose on the default include dir be included instead of the default ones.
+ set(LIBCXX_CONFIGURED_INCLUDE_DIRS
+ "${LIBCXX_GENERATED_INCLUDE_TARGET_DIR};${LIBCXX_GENERATED_INCLUDE_DIR}"
+ )
+endif()
+configure_file(
+ "CMakeLists.txt.in"
+ "${LIBCXX_GENERATED_MODULE_DIR}/CMakeLists.txt"
+ @ONLY
+)
+
set(LIBCXX_MODULE_STD_INCLUDE_SOURCES)
foreach(file ${LIBCXX_MODULE_STD_SOURCES})
set(
@@ -166,6 +185,7 @@ configure_file(
)
set(_all_modules)
+list(APPEND _all_modules "${LIBCXX_GENERATED_MODULE_DIR}/CMakeLists.txt")
list(APPEND _all_modules "${LIBCXX_GENERATED_MODULE_DIR}/std.cppm")
list(APPEND _all_modules "${LIBCXX_GENERATED_MODULE_DIR}/std.compat.cppm")
foreach(file ${LIBCXX_MODULE_STD_SOURCES} ${LIBCXX_MODULE_STD_COMPAT_SOURCES})
diff --git a/libcxx/modules/CMakeLists.txt.in b/libcxx/modules/CMakeLists.txt.in
new file mode 100644
index 0000000..e332d70
--- /dev/null
+++ b/libcxx/modules/CMakeLists.txt.in
@@ -0,0 +1,88 @@
+cmake_minimum_required(VERSION 3.26)
+
+project(libc++-modules LANGUAGES CXX)
+
+# Enable CMake's module support
+if(CMAKE_VERSION VERSION_LESS "3.28.0")
+ if(CMAKE_VERSION VERSION_LESS "3.27.0")
+ set(CMAKE_EXPERIMENTAL_CXX_MODULE_CMAKE_API "2182bf5c-ef0d-489a-91da-49dbc3090d2a")
+ else()
+ set(CMAKE_EXPERIMENTAL_CXX_MODULE_CMAKE_API "aa1f7df0-828a-4fcd-9afc-2dc80491aca7")
+ endif()
+ set(CMAKE_EXPERIMENTAL_CXX_MODULE_DYNDEP 1)
+else()
+ cmake_policy(VERSION 3.28)
+endif()
+
+# Default to C++ extensions being off. Libc++'s modules support have trouble
+# with extensions right now.
+set(CMAKE_CXX_EXTENSIONS OFF)
+
+# Propagates the CMake options to the modules.
+#
+# This uses the std module hard-coded since the std.compat module does not
+# depend on these flags.
+macro(compile_define_if_not condition def)
+ if (NOT ${condition})
+ target_compile_definitions(std PRIVATE ${def})
+ endif()
+endmacro()
+macro(compile_define_if condition def)
+ if (${condition})
+ target_compile_definitions(std PRIVATE ${def})
+ endif()
+endmacro()
+
+### STD
+
+add_library(std)
+target_sources(std
+ PUBLIC FILE_SET cxx_modules TYPE CXX_MODULES FILES
+ std.cppm
+)
+
+target_include_directories(std SYSTEM PRIVATE @LIBCXX_CONFIGURED_INCLUDE_DIRS@)
+
+if (NOT @LIBCXX_ENABLE_EXCEPTIONS@)
+ target_compile_options(std PUBLIC -fno-exceptions)
+endif()
+
+target_compile_options(std
+ PUBLIC
+ -nostdinc++
+ -Wno-reserved-module-identifier
+ -Wno-reserved-user-defined-literal
+ @LIBCXX_COMPILE_FLAGS@
+)
+set_target_properties(std
+ PROPERTIES
+ OUTPUT_NAME "c++std"
+)
+
+### STD.COMPAT
+
+add_library(std.compat)
+target_sources(std.compat
+ PUBLIC FILE_SET cxx_modules TYPE CXX_MODULES FILES
+ std.compat.cppm
+)
+
+target_include_directories(std.compat SYSTEM PRIVATE @LIBCXX_CONFIGURED_INCLUDE_DIRS@)
+
+if (NOT @LIBCXX_ENABLE_EXCEPTIONS@)
+ target_compile_options(std.compat PUBLIC -fno-exceptions)
+endif()
+
+target_compile_options(std.compat
+ PUBLIC
+ -nostdinc++
+ -Wno-reserved-module-identifier
+ -Wno-reserved-user-defined-literal
+ -fmodule-file=std=${CMAKE_CURRENT_BINARY_DIR}/CMakeFiles/std.dir/std.pcm
+ @LIBCXX_COMPILE_FLAGS@
+)
+set_target_properties(std.compat
+ PROPERTIES
+ OUTPUT_NAME "c++std.compat"
+)
+add_dependencies(std.compat std)
diff --git a/libcxx/modules/std/algorithm.inc b/libcxx/modules/std/algorithm.inc
index 75e8a3a..e7796bf 100644
--- a/libcxx/modules/std/algorithm.inc
+++ b/libcxx/modules/std/algorithm.inc
@@ -46,9 +46,7 @@ export namespace std {
// [alg.contains], contains
namespace ranges {
using std::ranges::contains;
-#if 0
using std::ranges::contains_subrange;
-#endif
} // namespace ranges
#endif // _LIBCPP_STD_VER >= 23
diff --git a/libcxx/test/libcxx/algorithms/ranges_robust_against_copying_projections.pass.cpp b/libcxx/test/libcxx/algorithms/ranges_robust_against_copying_projections.pass.cpp
index e96a57f..71823d9 100644
--- a/libcxx/test/libcxx/algorithms/ranges_robust_against_copying_projections.pass.cpp
+++ b/libcxx/test/libcxx/algorithms/ranges_robust_against_copying_projections.pass.cpp
@@ -86,6 +86,10 @@ constexpr bool all_the_algorithms()
assert(copies == 0);
(void)std::ranges::contains(a, value, Proj(&copies));
assert(copies == 0);
+ (void)std::ranges::contains_subrange(first, last, first2, last2, Equal(), Proj(&copies), Proj(&copies));
+ assert(copies == 0);
+ (void)std::ranges::contains_subrange(a, b, Equal(), Proj(&copies), Proj(&copies));
+ assert(copies == 0);
#endif
(void)std::ranges::count(first, last, value, Proj(&copies)); assert(copies == 0);
(void)std::ranges::count(a, value, Proj(&copies)); assert(copies == 0);
diff --git a/libcxx/test/libcxx/diagnostics/ranges.nodiscard_extensions.compile.pass.cpp b/libcxx/test/libcxx/diagnostics/ranges.nodiscard_extensions.compile.pass.cpp
index 19e07b8..12f6b27 100644
--- a/libcxx/test/libcxx/diagnostics/ranges.nodiscard_extensions.compile.pass.cpp
+++ b/libcxx/test/libcxx/diagnostics/ranges.nodiscard_extensions.compile.pass.cpp
@@ -33,6 +33,8 @@ void test() {
#if TEST_STD_VER >= 23
std::ranges::contains(range, 1);
std::ranges::contains(iter, iter, 1);
+ std::ranges::contains_subrange(range, range);
+ std::ranges::contains_subrange(iter, iter, iter, iter);
#endif
std::ranges::count_if(range, pred);
std::ranges::count_if(iter, iter, pred);
diff --git a/libcxx/test/libcxx/diagnostics/ranges.nodiscard_extensions.verify.cpp b/libcxx/test/libcxx/diagnostics/ranges.nodiscard_extensions.verify.cpp
index 5e45ad0..57ce6ab 100644
--- a/libcxx/test/libcxx/diagnostics/ranges.nodiscard_extensions.verify.cpp
+++ b/libcxx/test/libcxx/diagnostics/ranges.nodiscard_extensions.verify.cpp
@@ -95,6 +95,10 @@ void test() {
// expected-warning@-1{{ignoring return value of function declared with 'nodiscard' attribute}}
std::ranges::contains(iter, iter, 1);
// expected-warning@-1{{ignoring return value of function declared with 'nodiscard' attribute}}
+ std::ranges::contains_subrange(range, range);
+ // expected-warning@-1 {{ignoring return value of function declared with 'nodiscard' attribute}}
+ std::ranges::contains_subrange(iter, iter, iter, iter);
+ // expected-warning@-1 {{ignoring return value of function declared with 'nodiscard' attribute}}
std::ranges::fold_left(range, 0, std::plus());
// expected-warning@-1{{ignoring return value of function declared with 'nodiscard' attribute}}
std::ranges::fold_left(iter, iter, 0, std::plus());
diff --git a/libcxx/test/std/algorithms/alg.nonmodifying/alg.contains/ranges.contains_subrange.pass.cpp b/libcxx/test/std/algorithms/alg.nonmodifying/alg.contains/ranges.contains_subrange.pass.cpp
new file mode 100644
index 0000000..d48ee9e
--- /dev/null
+++ b/libcxx/test/std/algorithms/alg.nonmodifying/alg.contains/ranges.contains_subrange.pass.cpp
@@ -0,0 +1,320 @@
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+// <algorithm>
+
+// UNSUPPORTED: c++03, c++11, c++14, c++17, c++20
+// ADDITIONAL_COMPILE_FLAGS(has-fconstexpr-steps): -fconstexpr-steps=2000000
+
+// template<forward_iterator I1, sentinel_for<I1> S1,
+// forward_iterator I2, sentinel_for<I2> S2, class Proj = identity>
+// requires indirectly_comparable<I1, I2, Pred, Proj1, Proj2>
+// constexpr bool ranges::contains_subrange(I1 first1, S1 last1, I2 first2, S2 last2,
+// Pred pred = {}, Proj1 proj1 = {}, Proj2 proj2 = {}); // since C++23
+
+// template<forward_range R1, forward_range R2,
+// class Pred = ranges::equal_to, class Proj1 = identity, class Proj2 = identity>
+// requires indirectly_comparable<iterator_t<R1>, iterator_t<R2>, Pred, Proj1, Proj2>
+// constexpr bool ranges::contains_subrange(R1&& r1, R2&& r2, Pred pred = {},
+// Proj1 proj1 = {}, Proj2 proj2 = {}); // since C++23
+
+#include <algorithm>
+#include <cassert>
+#include <concepts>
+#include <ranges>
+#include <utility>
+
+#include "almost_satisfies_types.h"
+#include "test_iterators.h"
+
+struct NotEqualityComparable {};
+
+template <class Iter1, class Sent1 = Iter1, class Iter2 = int*, class Sent2 = Iter2>
+concept HasContainsSubrangeIt = requires(Iter1 first1, Sent1 last1, Iter2 first2, Sent2 last2) {
+ std::ranges::contains_subrange(first1, last1, first2, last2);
+};
+
+static_assert(HasContainsSubrangeIt<int*>);
+static_assert(!HasContainsSubrangeIt<ForwardIteratorNotDerivedFrom>);
+static_assert(!HasContainsSubrangeIt<ForwardIteratorNotIncrementable>);
+static_assert(!HasContainsSubrangeIt<int*, SentinelForNotSemiregular>);
+static_assert(!HasContainsSubrangeIt<int*, int*, int**>); // not indirectly comparable
+static_assert(!HasContainsSubrangeIt<int*, SentinelForNotWeaklyEqualityComparableWith>);
+static_assert(!HasContainsSubrangeIt<int*, int*, ForwardIteratorNotDerivedFrom>);
+static_assert(!HasContainsSubrangeIt<int*, int*, ForwardIteratorNotIncrementable>);
+static_assert(!HasContainsSubrangeIt<int*, int*, int*, SentinelForNotSemiregular>);
+static_assert(!HasContainsSubrangeIt<int*, int*, int*, SentinelForNotWeaklyEqualityComparableWith>);
+
+template <class Range1, class Range2 = UncheckedRange<int*>>
+concept HasContainsSubrangeR = requires(Range1&& range1, Range2&& range2) {
+ std::ranges::contains_subrange(std::forward<Range1>(range1), std::forward<Range2>(range2));
+};
+
+static_assert(HasContainsSubrangeR<UncheckedRange<int*>>);
+static_assert(!HasContainsSubrangeR<ForwardRangeNotDerivedFrom>);
+static_assert(!HasContainsSubrangeR<ForwardIteratorNotIncrementable>);
+static_assert(!HasContainsSubrangeR<ForwardRangeNotSentinelSemiregular>);
+static_assert(!HasContainsSubrangeR<ForwardRangeNotSentinelEqualityComparableWith>);
+static_assert(!HasContainsSubrangeR<UncheckedRange<int*>, UncheckedRange<int**>>); // not indirectly comparable
+static_assert(!HasContainsSubrangeR<UncheckedRange<int*>, ForwardRangeNotDerivedFrom>);
+static_assert(!HasContainsSubrangeR<UncheckedRange<int*>, ForwardRangeNotIncrementable>);
+static_assert(!HasContainsSubrangeR<UncheckedRange<int*>, ForwardRangeNotSentinelSemiregular>);
+static_assert(!HasContainsSubrangeR<UncheckedRange<int*>, ForwardRangeNotSentinelEqualityComparableWith>);
+
+template <class Iter1, class Sent1 = Iter1, class Iter2, class Sent2 = Iter2>
+constexpr void test_iterators() {
+ { // simple tests
+ int a[] = {1, 2, 3, 4, 5, 6};
+ int p[] = {3, 4, 5};
+ auto whole = std::ranges::subrange(Iter1(a), Sent1(Iter1(std::end(a))));
+ auto subrange = std::ranges::subrange(Iter2(p), Sent2(Iter2(std::end(p))));
+ {
+ std::same_as<bool> decltype(auto) ret =
+ std::ranges::contains_subrange(whole.begin(), whole.end(), subrange.begin(), subrange.end());
+ assert(ret);
+ }
+ {
+ std::same_as<bool> decltype(auto) ret = std::ranges::contains_subrange(whole, subrange);
+ assert(ret);
+ }
+ }
+
+ { // no match
+ int a[] = {1, 2, 3, 4, 5, 6};
+ int p[] = {3, 4, 2};
+ auto whole = std::ranges::subrange(Iter1(a), Sent1(Iter1(std::end(a))));
+ auto subrange = std::ranges::subrange(Iter2(p), Sent2(Iter2(std::end(p))));
+ {
+ bool ret = std::ranges::contains_subrange(whole.begin(), whole.end(), subrange.begin(), subrange.end());
+ assert(!ret);
+ }
+ {
+ bool ret = std::ranges::contains_subrange(whole, subrange);
+ assert(!ret);
+ }
+ }
+
+ { // range consists of just one element
+ int a[] = {3};
+ int p[] = {3, 4, 2};
+ auto whole = std::ranges::subrange(Iter1(a), Sent1(Iter1(std::end(a))));
+ auto subrange = std::ranges::subrange(Iter2(p), Sent2(Iter2(std::end(p))));
+ {
+ bool ret = std::ranges::contains_subrange(whole.begin(), whole.end(), subrange.begin(), subrange.end());
+ assert(!ret);
+ }
+ {
+ bool ret = std::ranges::contains_subrange(whole, subrange);
+ assert(!ret);
+ }
+ }
+
+ { // subrange consists of just one element
+ int a[] = {23, 1, 20, 3, 54, 2};
+ int p[] = {3};
+ auto whole = std::ranges::subrange(Iter1(a), Sent1(Iter1(std::end(a))));
+ auto subrange = std::ranges::subrange(Iter2(p), Sent2(Iter2(std::end(p))));
+ {
+ bool ret = std::ranges::contains_subrange(whole.begin(), whole.end(), subrange.begin(), subrange.end());
+ assert(ret);
+ }
+ {
+ bool ret = std::ranges::contains_subrange(whole, subrange);
+ assert(ret);
+ }
+ }
+
+ { // range has zero length
+ int a[] = {};
+ int p[] = {3, 4, 2};
+ auto whole = std::ranges::subrange(Iter1(a), Sent1(Iter1(a)));
+ auto subrange = std::ranges::subrange(Iter2(p), Sent2(Iter2(std::end(p))));
+ {
+ bool ret = std::ranges::contains_subrange(whole.begin(), whole.end(), subrange.begin(), subrange.end());
+ assert(!ret);
+ }
+ {
+ bool ret = std::ranges::contains_subrange(whole, subrange);
+ assert(!ret);
+ }
+ }
+
+ { // subrange has zero length
+ int a[] = {3, 4, 2};
+ int p[] = {};
+ auto whole = std::ranges::subrange(Iter1(a), Sent1(Iter1(std::end(a))));
+ auto subrange = std::ranges::subrange(Iter2(p), Sent2(Iter2(p)));
+ {
+ bool ret = std::ranges::contains_subrange(whole.begin(), whole.end(), subrange.begin(), subrange.end());
+ assert(ret);
+ }
+ {
+ bool ret = std::ranges::contains_subrange(whole, subrange);
+ assert(ret);
+ }
+ }
+
+ { // range and subrange both have zero length
+ int a[] = {};
+ int p[] = {};
+ auto whole = std::ranges::subrange(Iter1(a), Sent1(Iter1(a)));
+ auto subrange = std::ranges::subrange(Iter2(p), Sent2(Iter2(p)));
+ {
+ bool ret = std::ranges::contains_subrange(whole.begin(), whole.end(), subrange.begin(), subrange.end());
+ assert(ret);
+ }
+ {
+ bool ret = std::ranges::contains_subrange(whole, subrange);
+ assert(ret);
+ }
+ }
+
+ { // range and subrange are identical
+ int a[] = {3, 4, 11, 32, 54, 2};
+ int p[] = {3, 4, 11, 32, 54, 2};
+ auto whole = std::ranges::subrange(Iter1(a), Sent1(Iter1(std::end(a))));
+ auto subrange = std::ranges::subrange(Iter2(p), Sent2(Iter2(std::end(p))));
+ {
+ bool ret = std::ranges::contains_subrange(whole.begin(), whole.end(), subrange.begin(), subrange.end());
+ assert(ret);
+ }
+ {
+ bool ret = std::ranges::contains_subrange(whole, subrange);
+ assert(ret);
+ }
+ }
+
+ { // subrange is longer than range
+ int a[] = {3, 4, 2};
+ int p[] = {23, 3, 4, 2, 11, 32, 54, 2};
+ auto whole = std::ranges::subrange(Iter1(a), Sent1(Iter1(std::end(a))));
+ auto subrange = std::ranges::subrange(Iter2(p), Sent2(Iter2(std::end(p))));
+ {
+ bool ret = std::ranges::contains_subrange(whole.begin(), whole.end(), subrange.begin(), subrange.end());
+ assert(!ret);
+ }
+ {
+ bool ret = std::ranges::contains_subrange(whole, subrange);
+ assert(!ret);
+ }
+ }
+
+ { // subrange is the prefix
+ int a[] = {3, 43, 5, 100, 433, 278, 6457, 900};
+ int p[] = {3, 43, 5};
+ auto whole = std::ranges::subrange(Iter1(a), Sent1(Iter1(std::end(a))));
+ auto subrange = std::ranges::subrange(Iter2(p), Sent2(Iter2(std::end(p))));
+ {
+ bool ret = std::ranges::contains_subrange(whole.begin(), whole.end(), subrange.begin(), subrange.end());
+ assert(ret);
+ }
+ {
+ bool ret = std::ranges::contains_subrange(whole, subrange);
+ assert(ret);
+ }
+ }
+
+ { // subrange is the suffix
+ int a[] = {3, 43, 5, 7, 68, 100, 433, 900};
+ int p[] = {100, 433, 900};
+ auto whole = std::ranges::subrange(Iter1(a), Sent1(Iter1(std::end(a))));
+ auto subrange = std::ranges::subrange(Iter2(p), Sent2(Iter2(std::end(p))));
+ {
+ bool ret = std::ranges::contains_subrange(whole.begin(), whole.end(), subrange.begin(), subrange.end());
+ assert(ret);
+ }
+ {
+ bool ret = std::ranges::contains_subrange(whole, subrange);
+ assert(ret);
+ }
+ }
+
+ { // subrange is a subsequence
+ int a[] = {23, 1, 0, 54, 2};
+ int p[] = {1, 0, 2};
+ auto whole = std::ranges::subrange(Iter1(a), Sent1(Iter1(std::end(a))));
+ auto subrange = std::ranges::subrange(Iter2(p), Sent2(Iter2(std::end(p))));
+ {
+ bool ret = std::ranges::contains_subrange(whole.begin(), whole.end(), subrange.begin(), subrange.end());
+ assert(!ret);
+ }
+ {
+ bool ret = std::ranges::contains_subrange(whole, subrange);
+ assert(!ret);
+ }
+ }
+
+ { // repeated subrange
+ int a[] = {23, 1, 0, 2, 54, 1, 0, 2, 23, 33};
+ int p[] = {1, 0, 2};
+ auto whole = std::ranges::subrange(Iter1(a), Sent1(Iter1(std::end(a))));
+ auto subrange = std::ranges::subrange(Iter2(p), Sent2(Iter2(std::end(p))));
+ {
+ bool ret = std::ranges::contains_subrange(whole.begin(), whole.end(), subrange.begin(), subrange.end());
+ assert(ret);
+ }
+ {
+ bool ret = std::ranges::contains_subrange(whole, subrange);
+ assert(ret);
+ }
+ }
+
+ { // check that the predicate is used
+ int a[] = {23, 81, 61, 0, 42, 25, 1, 2, 1, 29, 2};
+ int p[] = {-1, -2, -1};
+ auto pred = [](int l, int r) { return l * -1 == r; };
+ auto whole = std::ranges::subrange(Iter1(a), Sent1(Iter1(std::end(a))));
+ auto subrange = std::ranges::subrange(Iter2(p), Sent2(Iter2(std::end(p))));
+ {
+ bool ret = std::ranges::contains_subrange(whole.begin(), whole.end(), subrange.begin(), subrange.end(), pred);
+ assert(ret);
+ }
+ {
+ bool ret = std::ranges::contains_subrange(whole, subrange, pred);
+ assert(ret);
+ }
+ }
+
+ { // check that the projections are used
+ int a[] = {1, 3, 15, 1, 2, 1, 8};
+ int p[] = {2, 1, 2};
+ auto whole = std::ranges::subrange(Iter1(a), Sent1(Iter1(std::end(a))));
+ auto subrange = std::ranges::subrange(Iter2(p), Sent2(Iter2(std::end(p))));
+ auto proj1 = [](int i) { return i - 3; };
+ auto proj2 = [](int i) { return i * -1; };
+ {
+ bool ret = std::ranges::contains_subrange(
+ whole.begin(), whole.end(), subrange.begin(), subrange.end(), {}, proj1, proj2);
+ assert(ret);
+ }
+ {
+ bool ret = std::ranges::contains_subrange(whole, subrange, {}, proj1, proj2);
+ assert(ret);
+ }
+ }
+}
+
+constexpr bool test() {
+ types::for_each(types::forward_iterator_list<int*>{}, []<class Iter1> {
+ types::for_each(types::forward_iterator_list<int*>{}, []<class Iter2> {
+ test_iterators<Iter1, Iter1, Iter2, Iter2>();
+ test_iterators<Iter1, Iter1, Iter2, sized_sentinel<Iter2>>();
+ test_iterators<Iter1, sized_sentinel<Iter1>, Iter2, Iter2>();
+ test_iterators<Iter1, sized_sentinel<Iter1>, Iter2, sized_sentinel<Iter2>>();
+ });
+ });
+
+ return true;
+}
+
+int main(int, char**) {
+ test();
+ static_assert(test());
+
+ return 0;
+}
diff --git a/libcxx/test/std/input.output/string.streams/istringstream/types.compile.pass.cpp b/libcxx/test/std/input.output/string.streams/istringstream/types.compile.pass.cpp
new file mode 100644
index 0000000..7be260ff7
--- /dev/null
+++ b/libcxx/test/std/input.output/string.streams/istringstream/types.compile.pass.cpp
@@ -0,0 +1,75 @@
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+// <sstream>
+
+// template <class charT, class traits = char_traits<charT>, class Allocator = allocator<charT> >
+// class basic_istringstream
+// : public basic_istream<charT, traits>
+// {
+// public:
+// typedef charT char_type;
+// typedef traits traits_type;
+// typedef typename traits_type::int_type int_type;
+// typedef typename traits_type::pos_type pos_type;
+// typedef typename traits_type::off_type off_type;
+// typedef Allocator allocator_type;
+//
+// basic_istringstream(const basic_istringstream&) = delete;
+// basic_istringstream& operator=(const basic_istringstream&) = delete;
+//
+// basic_istringstream(basic_istringstream&& rhs);
+// basic_istringstream& operator=(basic_istringstream&& rhs);
+
+#include <sstream>
+#include <type_traits>
+
+#include "test_macros.h"
+
+// Types
+
+static_assert(std::is_base_of<std::basic_istream<char>, std::basic_istringstream<char> >::value, "");
+static_assert(std::is_same<std::basic_istringstream<char>::char_type, char>::value, "");
+static_assert(std::is_same<std::basic_istringstream<char>::traits_type, std::char_traits<char> >::value, "");
+static_assert(std::is_same<std::basic_istringstream<char>::int_type, std::char_traits<char>::int_type>::value, "");
+static_assert(std::is_same<std::basic_istringstream<char>::pos_type, std::char_traits<char>::pos_type>::value, "");
+static_assert(std::is_same<std::basic_istringstream<char>::off_type, std::char_traits<char>::off_type>::value, "");
+static_assert(std::is_same<std::basic_istringstream<char>::allocator_type, std::allocator<char> >::value, "");
+
+#ifndef TEST_HAS_NO_WIDE_CHARACTERS
+static_assert(std::is_base_of<std::basic_istream<wchar_t>, std::basic_istringstream<wchar_t> >::value, "");
+static_assert(std::is_same<std::basic_istringstream<wchar_t>::char_type, wchar_t>::value, "");
+static_assert(std::is_same<std::basic_istringstream<wchar_t>::traits_type, std::char_traits<wchar_t> >::value, "");
+static_assert(std::is_same<std::basic_istringstream<wchar_t>::int_type, std::char_traits<wchar_t>::int_type>::value,
+ "");
+static_assert(std::is_same<std::basic_istringstream<wchar_t>::pos_type, std::char_traits<wchar_t>::pos_type>::value,
+ "");
+static_assert(std::is_same<std::basic_istringstream<wchar_t>::off_type, std::char_traits<wchar_t>::off_type>::value,
+ "");
+static_assert(std::is_same<std::basic_istringstream<wchar_t>::allocator_type, std::allocator<wchar_t> >::value, "");
+#endif
+
+// Copy properties
+
+static_assert(!std::is_copy_constructible<std::istringstream>::value, "");
+static_assert(!std::is_copy_assignable<std::istringstream>::value, "");
+
+#ifndef TEST_HAS_NO_WIDE_CHARACTERS
+static_assert(!std::is_copy_constructible<std::wistringstream>::value, "");
+static_assert(!std::is_copy_assignable<std::wistringstream>::value, "");
+#endif
+
+// Move properties
+
+static_assert(std::is_move_constructible<std::istringstream>::value, "");
+static_assert(std::is_move_assignable<std::istringstream>::value, "");
+
+#ifndef TEST_HAS_NO_WIDE_CHARACTERS
+static_assert(std::is_move_constructible<std::wistringstream>::value, "");
+static_assert(std::is_move_assignable<std::wistringstream>::value, "");
+#endif
diff --git a/libcxx/test/std/input.output/string.streams/istringstream/types.pass.cpp b/libcxx/test/std/input.output/string.streams/istringstream/types.pass.cpp
deleted file mode 100644
index da1b1de..0000000
--- a/libcxx/test/std/input.output/string.streams/istringstream/types.pass.cpp
+++ /dev/null
@@ -1,39 +0,0 @@
-//===----------------------------------------------------------------------===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-// <sstream>
-
-// template <class charT, class traits = char_traits<charT>, class Allocator = allocator<charT> >
-// class basic_istringstream
-// : public basic_istream<charT, traits>
-// {
-// public:
-// typedef charT char_type;
-// typedef traits traits_type;
-// typedef typename traits_type::int_type int_type;
-// typedef typename traits_type::pos_type pos_type;
-// typedef typename traits_type::off_type off_type;
-// typedef Allocator allocator_type;
-
-#include <sstream>
-#include <type_traits>
-
-#include "test_macros.h"
-
-int main(int, char**)
-{
- static_assert((std::is_base_of<std::basic_istream<char>, std::basic_istringstream<char> >::value), "");
- static_assert((std::is_same<std::basic_istringstream<char>::char_type, char>::value), "");
- static_assert((std::is_same<std::basic_istringstream<char>::traits_type, std::char_traits<char> >::value), "");
- static_assert((std::is_same<std::basic_istringstream<char>::int_type, std::char_traits<char>::int_type>::value), "");
- static_assert((std::is_same<std::basic_istringstream<char>::pos_type, std::char_traits<char>::pos_type>::value), "");
- static_assert((std::is_same<std::basic_istringstream<char>::off_type, std::char_traits<char>::off_type>::value), "");
- static_assert((std::is_same<std::basic_istringstream<char>::allocator_type, std::allocator<char> >::value), "");
-
- return 0;
-}
diff --git a/libcxx/test/std/input.output/string.streams/ostringstream/types.compile.pass.cpp b/libcxx/test/std/input.output/string.streams/ostringstream/types.compile.pass.cpp
new file mode 100644
index 0000000..df712bb
--- /dev/null
+++ b/libcxx/test/std/input.output/string.streams/ostringstream/types.compile.pass.cpp
@@ -0,0 +1,75 @@
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+// <sstream>
+
+// template <class charT, class traits = char_traits<charT>, class Allocator = allocator<charT> >
+// class basic_ostringstream
+// : public basic_ostream<charT, traits>
+// {
+// public:
+// typedef charT char_type;
+// typedef traits traits_type;
+// typedef typename traits_type::int_type int_type;
+// typedef typename traits_type::pos_type pos_type;
+// typedef typename traits_type::off_type off_type;
+// typedef Allocator allocator_type;
+//
+// basic_ostringstream(const basic_ostringstream&) = delete;
+// basic_ostringstream& operator=(const basic_ostringstream&) = delete;
+//
+// basic_ostringstream(basic_ostringstream&& rhs);
+// basic_ostringstream& operator=(basic_ostringstream&& rhs);
+
+#include <sstream>
+#include <type_traits>
+
+#include "test_macros.h"
+
+// Types
+
+static_assert(std::is_base_of<std::basic_ostream<char>, std::basic_ostringstream<char> >::value, "");
+static_assert(std::is_same<std::basic_ostringstream<char>::char_type, char>::value, "");
+static_assert(std::is_same<std::basic_ostringstream<char>::traits_type, std::char_traits<char> >::value, "");
+static_assert(std::is_same<std::basic_ostringstream<char>::int_type, std::char_traits<char>::int_type>::value, "");
+static_assert(std::is_same<std::basic_ostringstream<char>::pos_type, std::char_traits<char>::pos_type>::value, "");
+static_assert(std::is_same<std::basic_ostringstream<char>::off_type, std::char_traits<char>::off_type>::value, "");
+static_assert(std::is_same<std::basic_ostringstream<char>::allocator_type, std::allocator<char> >::value, "");
+
+#ifndef TEST_HAS_NO_WIDE_CHARACTERS
+static_assert(std::is_base_of<std::basic_ostream<wchar_t>, std::basic_ostringstream<wchar_t> >::value, "");
+static_assert(std::is_same<std::basic_ostringstream<wchar_t>::char_type, wchar_t>::value, "");
+static_assert(std::is_same<std::basic_ostringstream<wchar_t>::traits_type, std::char_traits<wchar_t> >::value, "");
+static_assert(std::is_same<std::basic_ostringstream<wchar_t>::int_type, std::char_traits<wchar_t>::int_type>::value,
+ "");
+static_assert(std::is_same<std::basic_ostringstream<wchar_t>::pos_type, std::char_traits<wchar_t>::pos_type>::value,
+ "");
+static_assert(std::is_same<std::basic_ostringstream<wchar_t>::off_type, std::char_traits<wchar_t>::off_type>::value,
+ "");
+static_assert(std::is_same<std::basic_ostringstream<wchar_t>::allocator_type, std::allocator<wchar_t> >::value, "");
+#endif
+
+// Copy properties
+
+static_assert(!std::is_copy_constructible<std::ostringstream>::value, "");
+static_assert(!std::is_copy_assignable<std::ostringstream>::value, "");
+
+#ifndef TEST_HAS_NO_WIDE_CHARACTERS
+static_assert(!std::is_copy_constructible<std::wostringstream>::value, "");
+static_assert(!std::is_copy_assignable<std::wostringstream>::value, "");
+#endif
+
+// Move properties
+
+static_assert(std::is_move_constructible<std::ostringstream>::value, "");
+static_assert(std::is_move_assignable<std::ostringstream>::value, "");
+
+#ifndef TEST_HAS_NO_WIDE_CHARACTERS
+static_assert(std::is_move_constructible<std::wostringstream>::value, "");
+static_assert(std::is_move_assignable<std::wostringstream>::value, "");
+#endif
diff --git a/libcxx/test/std/input.output/string.streams/ostringstream/types.pass.cpp b/libcxx/test/std/input.output/string.streams/ostringstream/types.pass.cpp
deleted file mode 100644
index b294192..0000000
--- a/libcxx/test/std/input.output/string.streams/ostringstream/types.pass.cpp
+++ /dev/null
@@ -1,39 +0,0 @@
-//===----------------------------------------------------------------------===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-// <sstream>
-
-// template <class charT, class traits = char_traits<charT>, class Allocator = allocator<charT> >
-// class basic_ostringstream
-// : public basic_ostream<charT, traits>
-// {
-// public:
-// typedef charT char_type;
-// typedef traits traits_type;
-// typedef typename traits_type::int_type int_type;
-// typedef typename traits_type::pos_type pos_type;
-// typedef typename traits_type::off_type off_type;
-// typedef Allocator allocator_type;
-
-#include <sstream>
-#include <type_traits>
-
-#include "test_macros.h"
-
-int main(int, char**)
-{
- static_assert((std::is_base_of<std::basic_ostream<char>, std::basic_ostringstream<char> >::value), "");
- static_assert((std::is_same<std::basic_ostringstream<char>::char_type, char>::value), "");
- static_assert((std::is_same<std::basic_ostringstream<char>::traits_type, std::char_traits<char> >::value), "");
- static_assert((std::is_same<std::basic_ostringstream<char>::int_type, std::char_traits<char>::int_type>::value), "");
- static_assert((std::is_same<std::basic_ostringstream<char>::pos_type, std::char_traits<char>::pos_type>::value), "");
- static_assert((std::is_same<std::basic_ostringstream<char>::off_type, std::char_traits<char>::off_type>::value), "");
- static_assert((std::is_same<std::basic_ostringstream<char>::allocator_type, std::allocator<char> >::value), "");
-
- return 0;
-}
diff --git a/libcxx/test/std/input.output/string.streams/stringbuf/types.compile.pass.cpp b/libcxx/test/std/input.output/string.streams/stringbuf/types.compile.pass.cpp
new file mode 100644
index 0000000..7743d3a
--- /dev/null
+++ b/libcxx/test/std/input.output/string.streams/stringbuf/types.compile.pass.cpp
@@ -0,0 +1,70 @@
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+// <sstream>
+
+// template <class charT, class traits = char_traits<charT>, class Allocator = allocator<charT> >
+// class basic_stringbuf
+// : public basic_streambuf<charT, traits>
+// {
+// public:
+// typedef charT char_type;
+// typedef traits traits_type;
+// typedef typename traits_type::int_type int_type;
+// typedef typename traits_type::pos_type pos_type;
+// typedef typename traits_type::off_type off_type;
+// typedef Allocator allocator_type;
+//
+// basic_stringbuf(const basic_stringbuf&) = delete;
+// basic_stringbuf& operator=(const basic_stringbuf&) = delete;
+//
+// basic_stringbuf(basic_stringbuf&& rhs);
+// basic_stringbuf& operator=(basic_stringbuf&& rhs);
+
+#include <sstream>
+#include <type_traits>
+
+#include "test_macros.h"
+
+static_assert(std::is_base_of<std::basic_streambuf<char>, std::basic_stringbuf<char> >::value, "");
+static_assert(std::is_same<std::basic_stringbuf<char>::char_type, char>::value, "");
+static_assert(std::is_same<std::basic_stringbuf<char>::traits_type, std::char_traits<char> >::value, "");
+static_assert(std::is_same<std::basic_stringbuf<char>::int_type, std::char_traits<char>::int_type>::value, "");
+static_assert(std::is_same<std::basic_stringbuf<char>::pos_type, std::char_traits<char>::pos_type>::value, "");
+static_assert(std::is_same<std::basic_stringbuf<char>::off_type, std::char_traits<char>::off_type>::value, "");
+static_assert(std::is_same<std::basic_stringbuf<char>::allocator_type, std::allocator<char> >::value, "");
+
+#ifndef TEST_HAS_NO_WIDE_CHARACTERS
+static_assert(std::is_base_of<std::basic_streambuf<wchar_t>, std::basic_stringbuf<wchar_t> >::value, "");
+static_assert(std::is_same<std::basic_stringbuf<wchar_t>::char_type, wchar_t>::value, "");
+static_assert(std::is_same<std::basic_stringbuf<wchar_t>::traits_type, std::char_traits<wchar_t> >::value, "");
+static_assert(std::is_same<std::basic_stringbuf<wchar_t>::int_type, std::char_traits<wchar_t>::int_type>::value, "");
+static_assert(std::is_same<std::basic_stringbuf<wchar_t>::pos_type, std::char_traits<wchar_t>::pos_type>::value, "");
+static_assert(std::is_same<std::basic_stringbuf<wchar_t>::off_type, std::char_traits<wchar_t>::off_type>::value, "");
+static_assert(std::is_same<std::basic_stringbuf<wchar_t>::allocator_type, std::allocator<wchar_t> >::value, "");
+#endif
+
+// Copy properties
+
+static_assert(!std::is_copy_constructible<std::basic_stringbuf<char> >::value, "");
+static_assert(!std::is_copy_assignable<std::basic_stringbuf<char> >::value, "");
+
+#ifndef TEST_HAS_NO_WIDE_CHARACTERS
+static_assert(!std::is_copy_constructible<std::basic_stringbuf<wchar_t> >::value, "");
+static_assert(!std::is_copy_assignable<std::basic_stringbuf<wchar_t> >::value, "");
+#endif
+
+// Move properties
+
+static_assert(std::is_move_constructible<std::basic_stringbuf<char> >::value, "");
+static_assert(std::is_move_assignable<std::basic_stringbuf<char> >::value, "");
+
+#ifndef TEST_HAS_NO_WIDE_CHARACTERS
+static_assert(std::is_move_constructible<std::basic_stringbuf<wchar_t> >::value, "");
+static_assert(std::is_move_assignable<std::basic_stringbuf<wchar_t> >::value, "");
+#endif
diff --git a/libcxx/test/std/input.output/string.streams/stringbuf/types.pass.cpp b/libcxx/test/std/input.output/string.streams/stringbuf/types.pass.cpp
deleted file mode 100644
index cec61694..0000000
--- a/libcxx/test/std/input.output/string.streams/stringbuf/types.pass.cpp
+++ /dev/null
@@ -1,39 +0,0 @@
-//===----------------------------------------------------------------------===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-// <sstream>
-
-// template <class charT, class traits = char_traits<charT>, class Allocator = allocator<charT> >
-// class basic_stringbuf
-// : public basic_streambuf<charT, traits>
-// {
-// public:
-// typedef charT char_type;
-// typedef traits traits_type;
-// typedef typename traits_type::int_type int_type;
-// typedef typename traits_type::pos_type pos_type;
-// typedef typename traits_type::off_type off_type;
-// typedef Allocator allocator_type;
-
-#include <sstream>
-#include <type_traits>
-
-#include "test_macros.h"
-
-int main(int, char**)
-{
- static_assert((std::is_base_of<std::basic_streambuf<char>, std::basic_stringbuf<char> >::value), "");
- static_assert((std::is_same<std::basic_stringbuf<char>::char_type, char>::value), "");
- static_assert((std::is_same<std::basic_stringbuf<char>::traits_type, std::char_traits<char> >::value), "");
- static_assert((std::is_same<std::basic_stringbuf<char>::int_type, std::char_traits<char>::int_type>::value), "");
- static_assert((std::is_same<std::basic_stringbuf<char>::pos_type, std::char_traits<char>::pos_type>::value), "");
- static_assert((std::is_same<std::basic_stringbuf<char>::off_type, std::char_traits<char>::off_type>::value), "");
- static_assert((std::is_same<std::basic_stringbuf<char>::allocator_type, std::allocator<char> >::value), "");
-
- return 0;
-}
diff --git a/libcxx/test/std/input.output/string.streams/stringstream/types.compile.pass.cpp b/libcxx/test/std/input.output/string.streams/stringstream/types.compile.pass.cpp
new file mode 100644
index 0000000..20486c8
--- /dev/null
+++ b/libcxx/test/std/input.output/string.streams/stringstream/types.compile.pass.cpp
@@ -0,0 +1,72 @@
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+// <sstream>
+
+// template <class charT, class traits = char_traits<charT>, class Allocator = allocator<charT> >
+// class basic_stringstream
+// : public basic_iostream<charT, traits>
+// {
+// public:
+// typedef charT char_type;
+// typedef traits traits_type;
+// typedef typename traits_type::int_type int_type;
+// typedef typename traits_type::pos_type pos_type;
+// typedef typename traits_type::off_type off_type;
+// typedef Allocator allocator_type;
+//
+// basic_stringstream(const basic_stringstream&) = delete;
+// basic_stringstream& operator=(const basic_stringstream&) = delete;
+//
+// basic_stringstream(basic_stringstream&& rhs);
+// basic_stringstream& operator=(basic_stringstream&& rhs);
+
+#include <sstream>
+#include <type_traits>
+
+#include "test_macros.h"
+
+// Types
+
+static_assert(std::is_base_of<std::basic_iostream<char>, std::basic_stringstream<char> >::value, "");
+static_assert(std::is_same<std::basic_stringstream<char>::char_type, char>::value, "");
+static_assert(std::is_same<std::basic_stringstream<char>::traits_type, std::char_traits<char> >::value, "");
+static_assert(std::is_same<std::basic_stringstream<char>::int_type, std::char_traits<char>::int_type>::value, "");
+static_assert(std::is_same<std::basic_stringstream<char>::pos_type, std::char_traits<char>::pos_type>::value, "");
+static_assert(std::is_same<std::basic_stringstream<char>::off_type, std::char_traits<char>::off_type>::value, "");
+static_assert(std::is_same<std::basic_stringstream<char>::allocator_type, std::allocator<char> >::value, "");
+
+#ifndef TEST_HAS_NO_WIDE_CHARACTERS
+static_assert(std::is_base_of<std::basic_iostream<wchar_t>, std::basic_stringstream<wchar_t> >::value, "");
+static_assert(std::is_same<std::basic_stringstream<wchar_t>::char_type, wchar_t>::value, "");
+static_assert(std::is_same<std::basic_stringstream<wchar_t>::traits_type, std::char_traits<wchar_t> >::value, "");
+static_assert(std::is_same<std::basic_stringstream<wchar_t>::int_type, std::char_traits<wchar_t>::int_type>::value, "");
+static_assert(std::is_same<std::basic_stringstream<wchar_t>::pos_type, std::char_traits<wchar_t>::pos_type>::value, "");
+static_assert(std::is_same<std::basic_stringstream<wchar_t>::off_type, std::char_traits<wchar_t>::off_type>::value, "");
+static_assert(std::is_same<std::basic_stringstream<wchar_t>::allocator_type, std::allocator<wchar_t> >::value, "");
+#endif
+
+// Copy properties
+
+static_assert(!std::is_copy_constructible<std::stringstream>::value, "");
+static_assert(!std::is_copy_assignable<std::stringstream>::value, "");
+
+#ifndef TEST_HAS_NO_WIDE_CHARACTERS
+static_assert(!std::is_copy_constructible<std::wstringstream>::value, "");
+static_assert(!std::is_copy_assignable<std::wstringstream>::value, "");
+#endif
+
+// Move properties
+
+static_assert(std::is_move_constructible<std::stringstream>::value, "");
+static_assert(std::is_move_assignable<std::stringstream>::value, "");
+
+#ifndef TEST_HAS_NO_WIDE_CHARACTERS
+static_assert(std::is_move_constructible<std::wstringstream>::value, "");
+static_assert(std::is_move_assignable<std::wstringstream>::value, "");
+#endif
diff --git a/libcxx/test/std/input.output/string.streams/stringstream/types.pass.cpp b/libcxx/test/std/input.output/string.streams/stringstream/types.pass.cpp
deleted file mode 100644
index 11990e6a..0000000
--- a/libcxx/test/std/input.output/string.streams/stringstream/types.pass.cpp
+++ /dev/null
@@ -1,39 +0,0 @@
-//===----------------------------------------------------------------------===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-// <sstream>
-
-// template <class charT, class traits = char_traits<charT>, class Allocator = allocator<charT> >
-// class basic_stringstream
-// : public basic_iostream<charT, traits>
-// {
-// public:
-// typedef charT char_type;
-// typedef traits traits_type;
-// typedef typename traits_type::int_type int_type;
-// typedef typename traits_type::pos_type pos_type;
-// typedef typename traits_type::off_type off_type;
-// typedef Allocator allocator_type;
-
-#include <sstream>
-#include <type_traits>
-
-#include "test_macros.h"
-
-int main(int, char**)
-{
- static_assert((std::is_base_of<std::basic_iostream<char>, std::basic_stringstream<char> >::value), "");
- static_assert((std::is_same<std::basic_stringstream<char>::char_type, char>::value), "");
- static_assert((std::is_same<std::basic_stringstream<char>::traits_type, std::char_traits<char> >::value), "");
- static_assert((std::is_same<std::basic_stringstream<char>::int_type, std::char_traits<char>::int_type>::value), "");
- static_assert((std::is_same<std::basic_stringstream<char>::pos_type, std::char_traits<char>::pos_type>::value), "");
- static_assert((std::is_same<std::basic_stringstream<char>::off_type, std::char_traits<char>::off_type>::value), "");
- static_assert((std::is_same<std::basic_stringstream<char>::allocator_type, std::allocator<char> >::value), "");
-
- return 0;
-}
diff --git a/libcxx/test/std/language.support/support.dynamic/new.delete/new.delete.array/new.size_align.except.pass.cpp b/libcxx/test/std/language.support/support.dynamic/new.delete/new.delete.array/new.size_align.except.pass.cpp
index cde7dc5..4e34ebc 100644
--- a/libcxx/test/std/language.support/support.dynamic/new.delete/new.delete.array/new.size_align.except.pass.cpp
+++ b/libcxx/test/std/language.support/support.dynamic/new.delete/new.delete.array/new.size_align.except.pass.cpp
@@ -9,6 +9,10 @@
// UNSUPPORTED: no-exceptions
// UNSUPPORTED: sanitizer-new-delete
+// Libc++ when built for z/OS doesn't contain the aligned allocation functions,
+// nor does the dynamic library shipped with z/OS.
+// XFAIL: target={{.+}}-zos{{.*}}
+
#include <new>
#include <cassert>
#include <limits>
diff --git a/libcxx/test/std/language.support/support.dynamic/new.delete/new.delete.array/new.size_align_nothrow.except.pass.cpp b/libcxx/test/std/language.support/support.dynamic/new.delete/new.delete.array/new.size_align_nothrow.except.pass.cpp
index 251ba0f..07ec84b 100644
--- a/libcxx/test/std/language.support/support.dynamic/new.delete/new.delete.array/new.size_align_nothrow.except.pass.cpp
+++ b/libcxx/test/std/language.support/support.dynamic/new.delete/new.delete.array/new.size_align_nothrow.except.pass.cpp
@@ -9,6 +9,10 @@
// UNSUPPORTED: no-exceptions
// UNSUPPORTED: sanitizer-new-delete
+// Libc++ when built for z/OS doesn't contain the aligned allocation functions,
+// nor does the dynamic library shipped with z/OS.
+// XFAIL: target={{.+}}-zos{{.*}}
+
#include <new>
#include <cassert>
#include <limits>
diff --git a/libcxx/test/std/language.support/support.dynamic/new.delete/new.delete.single/new.size_align.except.pass.cpp b/libcxx/test/std/language.support/support.dynamic/new.delete/new.delete.single/new.size_align.except.pass.cpp
index cb83fb2..7694314 100644
--- a/libcxx/test/std/language.support/support.dynamic/new.delete/new.delete.single/new.size_align.except.pass.cpp
+++ b/libcxx/test/std/language.support/support.dynamic/new.delete/new.delete.single/new.size_align.except.pass.cpp
@@ -9,6 +9,10 @@
// UNSUPPORTED: no-exceptions
// UNSUPPORTED: sanitizer-new-delete
+// Libc++ when built for z/OS doesn't contain the aligned allocation functions,
+// nor does the dynamic library shipped with z/OS.
+// XFAIL: target={{.+}}-zos{{.*}}
+
#include <new>
#include <cassert>
#include <limits>
diff --git a/libcxx/test/std/language.support/support.dynamic/new.delete/new.delete.single/new.size_align_nothrow.except.pass.cpp b/libcxx/test/std/language.support/support.dynamic/new.delete/new.delete.single/new.size_align_nothrow.except.pass.cpp
index d95e78e..bf61d0b 100644
--- a/libcxx/test/std/language.support/support.dynamic/new.delete/new.delete.single/new.size_align_nothrow.except.pass.cpp
+++ b/libcxx/test/std/language.support/support.dynamic/new.delete/new.delete.single/new.size_align_nothrow.except.pass.cpp
@@ -9,6 +9,10 @@
// UNSUPPORTED: no-exceptions
// UNSUPPORTED: sanitizer-new-delete
+// Libc++ when built for z/OS doesn't contain the aligned allocation functions,
+// nor does the dynamic library shipped with z/OS.
+// XFAIL: target={{.+}}-zos{{.*}}
+
#include <new>
#include <cassert>
#include <limits>
diff --git a/libcxx/test/std/library/description/conventions/customization.point.object/niebloid.compile.pass.cpp b/libcxx/test/std/library/description/conventions/customization.point.object/niebloid.compile.pass.cpp
index 494e9fd..9506ca1 100644
--- a/libcxx/test/std/library/description/conventions/customization.point.object/niebloid.compile.pass.cpp
+++ b/libcxx/test/std/library/description/conventions/customization.point.object/niebloid.compile.pass.cpp
@@ -67,6 +67,7 @@ static_assert(test(std::ranges::binary_search, a, 42));
static_assert(test(std::ranges::clamp, 42, 42, 42));
#if TEST_STD_VER >= 23
static_assert(test(std::ranges::contains, a, 42));
+static_assert(test(std::ranges::contains_subrange, a, a));
#endif
static_assert(test(std::ranges::copy, a, a));
static_assert(test(std::ranges::copy_backward, a, a));
diff --git a/lld/COFF/Driver.cpp b/lld/COFF/Driver.cpp
index 091aa0d..22ee2f1 100644
--- a/lld/COFF/Driver.cpp
+++ b/lld/COFF/Driver.cpp
@@ -939,7 +939,7 @@ std::string LinkerDriver::getImportName(bool asLib) {
void LinkerDriver::createImportLibrary(bool asLib) {
llvm::TimeTraceScope timeScope("Create import library");
- std::vector<COFFShortExport> exports, nativeExports;
+ std::vector<COFFShortExport> exports;
for (Export &e1 : ctx.config.exports) {
COFFShortExport e2;
e2.Name = std::string(e1.name);
@@ -958,8 +958,8 @@ void LinkerDriver::createImportLibrary(bool asLib) {
std::string path = getImplibPath();
if (!ctx.config.incremental) {
- checkError(writeImportLibrary(libName, path, exports, nativeExports,
- ctx.config.machine, ctx.config.mingw));
+ checkError(writeImportLibrary(libName, path, exports, ctx.config.machine,
+ ctx.config.mingw));
return;
}
@@ -968,8 +968,8 @@ void LinkerDriver::createImportLibrary(bool asLib) {
ErrorOr<std::unique_ptr<MemoryBuffer>> oldBuf = MemoryBuffer::getFile(
path, /*IsText=*/false, /*RequiresNullTerminator=*/false);
if (!oldBuf) {
- checkError(writeImportLibrary(libName, path, exports, nativeExports,
- ctx.config.machine, ctx.config.mingw));
+ checkError(writeImportLibrary(libName, path, exports, ctx.config.machine,
+ ctx.config.mingw));
return;
}
@@ -979,7 +979,7 @@ void LinkerDriver::createImportLibrary(bool asLib) {
fatal("cannot create temporary file for import library " + path + ": " +
ec.message());
- if (Error e = writeImportLibrary(libName, tmpName, exports, nativeExports,
+ if (Error e = writeImportLibrary(libName, tmpName, exports,
ctx.config.machine, ctx.config.mingw)) {
checkError(std::move(e));
return;
diff --git a/lld/ELF/Arch/LoongArch.cpp b/lld/ELF/Arch/LoongArch.cpp
index 3e9d6e0..49fd979 100644
--- a/lld/ELF/Arch/LoongArch.cpp
+++ b/lld/ELF/Arch/LoongArch.cpp
@@ -75,7 +75,7 @@ enum Reg {
//
// Here a "page" is in fact just another way to refer to the 12-bit range
// allowed by the immediate field of the addi/ld/st instructions, and not
-// related to the system or the kernel's actual page size. The sematics happens
+// related to the system or the kernel's actual page size. The semantics happen
// to match the AArch64 `adrp`, so the concept of "page" is borrowed here.
static uint64_t getLoongArchPage(uint64_t p) {
return p & ~static_cast<uint64_t>(0xfff);
@@ -86,7 +86,7 @@ static uint32_t lo12(uint32_t val) { return val & 0xfff; }
// Calculate the adjusted page delta between dest and PC.
uint64_t elf::getLoongArchPageDelta(uint64_t dest, uint64_t pc, RelType type) {
// Note that if the sequence being relocated is `pcalau12i + addi.d + lu32i.d
- // + lu52i.d`, they must be adjancent so that we can infer the PC of
+ // + lu52i.d`, they must be adjacent so that we can infer the PC of
// `pcalau12i` when calculating the page delta for the other two instructions
// (lu32i.d and lu52i.d). Compensate all the sign-extensions is a bit
// complicated. Just use psABI recommended algorithm.
@@ -539,7 +539,7 @@ void LoongArch::relocate(uint8_t *loc, const Relocation &rel,
return;
case R_LARCH_CALL36: {
- // This relocation is designed for adjancent pcaddu18i+jirl pairs that
+ // This relocation is designed for adjacent pcaddu18i+jirl pairs that
// are patched in one time. Because of sign extension of these insns'
// immediate fields, the relocation range is [-128G - 0x20000, +128G -
// 0x20000) (of course must be 4-byte aligned).
diff --git a/lld/ELF/Arch/SystemZ.cpp b/lld/ELF/Arch/SystemZ.cpp
new file mode 100644
index 0000000..d37db68
--- /dev/null
+++ b/lld/ELF/Arch/SystemZ.cpp
@@ -0,0 +1,607 @@
+//===- SystemZ.cpp --------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "OutputSections.h"
+#include "Symbols.h"
+#include "SyntheticSections.h"
+#include "Target.h"
+#include "lld/Common/ErrorHandler.h"
+#include "llvm/BinaryFormat/ELF.h"
+#include "llvm/Support/Endian.h"
+
+using namespace llvm;
+using namespace llvm::support::endian;
+using namespace llvm::ELF;
+using namespace lld;
+using namespace lld::elf;
+
+namespace {
+class SystemZ : public TargetInfo {
+public:
+ SystemZ();
+ int getTlsGdRelaxSkip(RelType type) const override;
+ RelExpr getRelExpr(RelType type, const Symbol &s,
+ const uint8_t *loc) const override;
+ RelType getDynRel(RelType type) const override;
+ void writeGotHeader(uint8_t *buf) const override;
+ void writeGotPlt(uint8_t *buf, const Symbol &s) const override;
+ void writeIgotPlt(uint8_t *buf, const Symbol &s) const override;
+ void writePltHeader(uint8_t *buf) const override;
+ void addPltHeaderSymbols(InputSection &isd) const override;
+ void writePlt(uint8_t *buf, const Symbol &sym,
+ uint64_t pltEntryAddr) const override;
+ RelExpr adjustTlsExpr(RelType type, RelExpr expr) const override;
+ RelExpr adjustGotPcExpr(RelType type, int64_t addend,
+ const uint8_t *loc) const override;
+ bool relaxOnce(int pass) const override;
+ void relocate(uint8_t *loc, const Relocation &rel,
+ uint64_t val) const override;
+ int64_t getImplicitAddend(const uint8_t *buf, RelType type) const override;
+
+private:
+ void relaxGot(uint8_t *loc, const Relocation &rel, uint64_t val) const;
+ void relaxTlsGdToIe(uint8_t *loc, const Relocation &rel, uint64_t val) const;
+ void relaxTlsGdToLe(uint8_t *loc, const Relocation &rel, uint64_t val) const;
+ void relaxTlsLdToLe(uint8_t *loc, const Relocation &rel, uint64_t val) const;
+};
+} // namespace
+
+SystemZ::SystemZ() {
+ copyRel = R_390_COPY;
+ gotRel = R_390_GLOB_DAT;
+ pltRel = R_390_JMP_SLOT;
+ relativeRel = R_390_RELATIVE;
+ iRelativeRel = R_390_IRELATIVE;
+ symbolicRel = R_390_64;
+ tlsGotRel = R_390_TLS_TPOFF;
+ tlsModuleIndexRel = R_390_TLS_DTPMOD;
+ tlsOffsetRel = R_390_TLS_DTPOFF;
+ gotHeaderEntriesNum = 3;
+ gotPltHeaderEntriesNum = 0;
+ gotEntrySize = 8;
+ pltHeaderSize = 32;
+ pltEntrySize = 32;
+ ipltEntrySize = 32;
+
+ // This "trap instruction" is used to fill gaps between sections.
+ // On SystemZ, the behavior of the GNU ld is to fill those gaps
+ // with nop instructions instead - and unfortunately the default
+ // glibc crt object files (used to) rely on that behavior since
+ // they use an alignment on the .init section fragments that causes
+ // gaps which must be filled with nops as they are being executed.
+ // Therefore, we provide a nop instruction as "trapInstr" here.
+ trapInstr = {0x07, 0x07, 0x07, 0x07};
+
+ defaultImageBase = 0x1000000;
+}
+
+RelExpr SystemZ::getRelExpr(RelType type, const Symbol &s,
+ const uint8_t *loc) const {
+ switch (type) {
+ case R_390_NONE:
+ return R_NONE;
+ // Relocations targeting the symbol value.
+ case R_390_8:
+ case R_390_12:
+ case R_390_16:
+ case R_390_20:
+ case R_390_32:
+ case R_390_64:
+ return R_ABS;
+ case R_390_PC16:
+ case R_390_PC32:
+ case R_390_PC64:
+ case R_390_PC12DBL:
+ case R_390_PC16DBL:
+ case R_390_PC24DBL:
+ case R_390_PC32DBL:
+ return R_PC;
+ case R_390_GOTOFF16:
+ case R_390_GOTOFF: // a.k.a. R_390_GOTOFF32
+ case R_390_GOTOFF64:
+ return R_GOTREL;
+ // Relocations targeting the PLT associated with the symbol.
+ case R_390_PLT32:
+ case R_390_PLT64:
+ case R_390_PLT12DBL:
+ case R_390_PLT16DBL:
+ case R_390_PLT24DBL:
+ case R_390_PLT32DBL:
+ return R_PLT_PC;
+ case R_390_PLTOFF16:
+ case R_390_PLTOFF32:
+ case R_390_PLTOFF64:
+ return R_PLT_GOTREL;
+ // Relocations targeting the GOT entry associated with the symbol.
+ case R_390_GOTENT:
+ return R_GOT_PC;
+ case R_390_GOT12:
+ case R_390_GOT16:
+ case R_390_GOT20:
+ case R_390_GOT32:
+ case R_390_GOT64:
+ return R_GOT_OFF;
+ // Relocations targeting the GOTPLT entry associated with the symbol.
+ case R_390_GOTPLTENT:
+ return R_GOTPLT_PC;
+ case R_390_GOTPLT12:
+ case R_390_GOTPLT16:
+ case R_390_GOTPLT20:
+ case R_390_GOTPLT32:
+ case R_390_GOTPLT64:
+ return R_GOTPLT_GOTREL;
+ // Relocations targeting _GLOBAL_OFFSET_TABLE_.
+ case R_390_GOTPC:
+ case R_390_GOTPCDBL:
+ return R_GOTONLY_PC;
+ // TLS-related relocations.
+ case R_390_TLS_LOAD:
+ return R_NONE;
+ case R_390_TLS_GDCALL:
+ return R_TLSGD_PC;
+ case R_390_TLS_LDCALL:
+ return R_TLSLD_PC;
+ case R_390_TLS_GD32:
+ case R_390_TLS_GD64:
+ return R_TLSGD_GOT;
+ case R_390_TLS_LDM32:
+ case R_390_TLS_LDM64:
+ return R_TLSLD_GOT;
+ case R_390_TLS_LDO32:
+ case R_390_TLS_LDO64:
+ return R_DTPREL;
+ case R_390_TLS_LE32:
+ case R_390_TLS_LE64:
+ return R_TPREL;
+ case R_390_TLS_IE32:
+ case R_390_TLS_IE64:
+ return R_GOT;
+ case R_390_TLS_GOTIE12:
+ case R_390_TLS_GOTIE20:
+ case R_390_TLS_GOTIE32:
+ case R_390_TLS_GOTIE64:
+ return R_GOT_OFF;
+ case R_390_TLS_IEENT:
+ return R_GOT_PC;
+
+ default:
+ error(getErrorLocation(loc) + "unknown relocation (" + Twine(type) +
+ ") against symbol " + toString(s));
+ return R_NONE;
+ }
+}
+
+void SystemZ::writeGotHeader(uint8_t *buf) const {
+ // _GLOBAL_OFFSET_TABLE_[0] holds the value of _DYNAMIC.
+ // _GLOBAL_OFFSET_TABLE_[1] and [2] are reserved.
+ write64be(buf, mainPart->dynamic->getVA());
+}
+
+void SystemZ::writeGotPlt(uint8_t *buf, const Symbol &s) const {
+ write64be(buf, s.getPltVA() + 14);
+}
+
+void SystemZ::writeIgotPlt(uint8_t *buf, const Symbol &s) const {
+ if (config->writeAddends)
+ write64be(buf, s.getVA());
+}
+
+void SystemZ::writePltHeader(uint8_t *buf) const {
+ const uint8_t pltData[] = {
+ 0xe3, 0x10, 0xf0, 0x38, 0x00, 0x24, // stg %r1,56(%r15)
+ 0xc0, 0x10, 0x00, 0x00, 0x00, 0x00, // larl %r1,_GLOBAL_OFFSET_TABLE_
+ 0xd2, 0x07, 0xf0, 0x30, 0x10, 0x08, // mvc 48(8,%r15),8(%r1)
+ 0xe3, 0x10, 0x10, 0x10, 0x00, 0x04, // lg %r1,16(%r1)
+ 0x07, 0xf1, // br %r1
+ 0x07, 0x00, // nopr
+ 0x07, 0x00, // nopr
+ 0x07, 0x00, // nopr
+ };
+ memcpy(buf, pltData, sizeof(pltData));
+ uint64_t got = in.got->getVA();
+ uint64_t plt = in.plt->getVA();
+ write32be(buf + 8, (got - plt - 6) >> 1);
+}
+
+void SystemZ::addPltHeaderSymbols(InputSection &isec) const {
+ // The PLT header needs a reference to _GLOBAL_OFFSET_TABLE_, so we
+ // must ensure the .got section is created even if otherwise unused.
+ in.got->hasGotOffRel.store(true, std::memory_order_relaxed);
+}
+
+void SystemZ::writePlt(uint8_t *buf, const Symbol &sym,
+ uint64_t pltEntryAddr) const {
+ const uint8_t inst[] = {
+ 0xc0, 0x10, 0x00, 0x00, 0x00, 0x00, // larl %r1,<.got.plt slot>
+ 0xe3, 0x10, 0x10, 0x00, 0x00, 0x04, // lg %r1,0(%r1)
+ 0x07, 0xf1, // br %r1
+ 0x0d, 0x10, // basr %r1,%r0
+ 0xe3, 0x10, 0x10, 0x0c, 0x00, 0x14, // lgf %r1,12(%r1)
+ 0xc0, 0xf4, 0x00, 0x00, 0x00, 0x00, // jg <plt header>
+ 0x00, 0x00, 0x00, 0x00, // <relocation offset>
+ };
+ memcpy(buf, inst, sizeof(inst));
+
+ write32be(buf + 2, (sym.getGotPltVA() - pltEntryAddr) >> 1);
+ write32be(buf + 24, (in.plt->getVA() - pltEntryAddr - 22) >> 1);
+ write32be(buf + 28, in.relaPlt->entsize * sym.getPltIdx());
+}
+
+int64_t SystemZ::getImplicitAddend(const uint8_t *buf, RelType type) const {
+ switch (type) {
+ case R_390_8:
+ return SignExtend64<8>(*buf);
+ case R_390_16:
+ case R_390_PC16:
+ return SignExtend64<16>(read16be(buf));
+ case R_390_PC16DBL:
+ return SignExtend64<16>(read16be(buf)) << 1;
+ case R_390_32:
+ case R_390_PC32:
+ return SignExtend64<32>(read32be(buf));
+ case R_390_PC32DBL:
+ return SignExtend64<32>(read32be(buf)) << 1;
+ case R_390_64:
+ case R_390_PC64:
+ case R_390_TLS_DTPMOD:
+ case R_390_TLS_DTPOFF:
+ case R_390_TLS_TPOFF:
+ case R_390_GLOB_DAT:
+ case R_390_RELATIVE:
+ case R_390_IRELATIVE:
+ return read64be(buf);
+ case R_390_COPY:
+ case R_390_JMP_SLOT:
+ case R_390_NONE:
+ // These relocations are defined as not having an implicit addend.
+ return 0;
+ default:
+ internalLinkerError(getErrorLocation(buf),
+ "cannot read addend for relocation " + toString(type));
+ return 0;
+ }
+}
+
+RelType SystemZ::getDynRel(RelType type) const {
+ if (type == R_390_64 || type == R_390_PC64)
+ return type;
+ return R_390_NONE;
+}
+
+RelExpr SystemZ::adjustTlsExpr(RelType type, RelExpr expr) const {
+ if (expr == R_RELAX_TLS_GD_TO_IE)
+ return R_RELAX_TLS_GD_TO_IE_GOT_OFF;
+ return expr;
+}
+
+int SystemZ::getTlsGdRelaxSkip(RelType type) const {
+ // A __tls_get_offset call instruction is marked with 2 relocations:
+ //
+ // R_390_TLS_GDCALL / R_390_TLS_LDCALL: marker relocation
+ // R_390_PLT32DBL: __tls_get_offset
+ //
+ // After the relaxation we no longer call __tls_get_offset and should skip
+ // both relocations to not create a false dependence on __tls_get_offset
+ // being defined.
+ //
+ // Note that this mechanism only works correctly if the R_390_TLS_[GL]DCALL
+ // is seen immediately *before* the R_390_PLT32DBL. Unfortunately, current
+ // compilers on the platform will typically generate the inverse sequence.
+ // To fix this, we sort relocations by offset in RelocationScanner::scan;
+ // this ensures the correct sequence as the R_390_TLS_[GL]DCALL applies to
+ // the first byte of the brasl instruction, while the R_390_PLT32DBL applies
+ // to its third byte (the relative displacement).
+
+ if (type == R_390_TLS_GDCALL || type == R_390_TLS_LDCALL)
+ return 2;
+ return 1;
+}
+
+void SystemZ::relaxTlsGdToIe(uint8_t *loc, const Relocation &rel,
+ uint64_t val) const {
+ // The general-dynamic code sequence for a global `x`:
+ //
+ // Instruction Relocation Symbol
+ // ear %rX,%a0
+ // sllg %rX,%rX,32
+ // ear %rX,%a1
+ // larl %r12,_GLOBAL_OFFSET_TABLE_ R_390_GOTPCDBL _GLOBAL_OFFSET_TABLE_
+ // lgrl %r2,.LC0 R_390_PC32DBL .LC0
+ // brasl %r14,__tls_get_offset@plt R_390_TLS_GDCALL x
+ // :tls_gdcall:x R_390_PLT32DBL __tls_get_offset
+ // la %r2,0(%r2,%rX)
+ //
+ // .LC0:
+ // .quad x@TLSGD R_390_TLS_GD64 x
+ //
+ // Relaxing to initial-exec entails:
+ // 1) Replacing the call by a load from the GOT.
+ // 2) Replacing the relocation on the constant LC0 by R_390_TLS_GOTIE64.
+
+ switch (rel.type) {
+ case R_390_TLS_GDCALL:
+ // brasl %r14,__tls_get_offset@plt -> lg %r2,0(%r2,%r12)
+ write16be(loc, 0xe322);
+ write32be(loc + 2, 0xc0000004);
+ break;
+ case R_390_TLS_GD64:
+ relocateNoSym(loc, R_390_TLS_GOTIE64, val);
+ break;
+ default:
+ llvm_unreachable("unsupported relocation for TLS GD to IE relaxation");
+ }
+}
+
+void SystemZ::relaxTlsGdToLe(uint8_t *loc, const Relocation &rel,
+ uint64_t val) const {
+ // The general-dynamic code sequence for a global `x`:
+ //
+ // Instruction Relocation Symbol
+ // ear %rX,%a0
+ // sllg %rX,%rX,32
+ // ear %rX,%a1
+ // larl %r12,_GLOBAL_OFFSET_TABLE_ R_390_GOTPCDBL _GLOBAL_OFFSET_TABLE_
+ // lgrl %r2,.LC0 R_390_PC32DBL .LC0
+ // brasl %r14,__tls_get_offset@plt R_390_TLS_GDCALL x
+ // :tls_gdcall:x R_390_PLT32DBL __tls_get_offset
+ // la %r2,0(%r2,%rX)
+ //
+ // .LC0:
+ // .quad x@tlsgd R_390_TLS_GD64 x
+ //
+ // Relaxing to local-exec entails:
+ // 1) Replacing the call by a nop.
+ // 2) Replacing the relocation on the constant LC0 by R_390_TLS_LE64.
+
+ switch (rel.type) {
+ case R_390_TLS_GDCALL:
+ // brasl %r14,__tls_get_offset@plt -> brcl 0,.
+ write16be(loc, 0xc004);
+ write32be(loc + 2, 0x00000000);
+ break;
+ case R_390_TLS_GD64:
+ relocateNoSym(loc, R_390_TLS_LE64, val);
+ break;
+ default:
+ llvm_unreachable("unsupported relocation for TLS GD to LE relaxation");
+ }
+}
+
+void SystemZ::relaxTlsLdToLe(uint8_t *loc, const Relocation &rel,
+ uint64_t val) const {
+ // The local-dynamic code sequence for a global `x`:
+ //
+ // Instruction Relocation Symbol
+ // ear %rX,%a0
+ // sllg %rX,%rX,32
+ // ear %rX,%a1
+ // larl %r12,_GLOBAL_OFFSET_TABLE_ R_390_GOTPCDBL _GLOBAL_OFFSET_TABLE_
+ // lgrl %r2,.LC0 R_390_PC32DBL .LC0
+ // brasl %r14,__tls_get_offset@plt R_390_TLS_LDCALL <sym>
+ // :tls_ldcall:<sym> R_390_PLT32DBL __tls_get_offset
+ // la %r2,0(%r2,%rX)
+ // lgrl %rY,.LC1 R_390_PC32DBL .LC1
+ // la %r2,0(%r2,%rY)
+ //
+ // .LC0:
+ // .quad <sym>@tlsldm R_390_TLS_LDM64 <sym>
+ // .LC1:
+ // .quad x@dtpoff R_390_TLS_LDO64 x
+ //
+ // Relaxing to local-exec entails:
+ // 1) Replacing the call by a nop.
+ // 2) Replacing the constant LC0 by 0 (i.e. ignoring the relocation).
+ // 3) Replacing the relocation on the constant LC1 by R_390_TLS_LE64.
+
+ switch (rel.type) {
+ case R_390_TLS_LDCALL:
+ // brasl %r14,__tls_get_offset@plt -> brcl 0,.
+ write16be(loc, 0xc004);
+ write32be(loc + 2, 0x00000000);
+ break;
+ case R_390_TLS_LDM64:
+ break;
+ case R_390_TLS_LDO64:
+ relocateNoSym(loc, R_390_TLS_LE64, val);
+ break;
+ default:
+ llvm_unreachable("unsupported relocation for TLS LD to LE relaxation");
+ }
+}
+
+RelExpr SystemZ::adjustGotPcExpr(RelType type, int64_t addend,
+ const uint8_t *loc) const {
+ // Only R_390_GOTENT with addend 2 can be relaxed.
+ if (!config->relax || addend != 2 || type != R_390_GOTENT)
+ return R_GOT_PC;
+ const uint16_t op = read16be(loc - 2);
+
+ // lgrl rx,sym@GOTENT -> larl rx, sym
+ // This relaxation is legal if "sym" binds locally (which was already
+ // verified by our caller) and is in-range and properly aligned for a
+ // LARL instruction. We cannot verify the latter constraint here, so
+ // we assume it is true and revert the decision later on in relaxOnce
+ // if necessary.
+ if ((op & 0xff0f) == 0xc408)
+ return R_RELAX_GOT_PC;
+
+ return R_GOT_PC;
+}
+
+bool SystemZ::relaxOnce(int pass) const {
+ // If we decided in adjustGotPcExpr to relax a R_390_GOTENT,
+ // we need to validate the target symbol is in-range and aligned.
+ SmallVector<InputSection *, 0> storage;
+ bool changed = false;
+ for (OutputSection *osec : outputSections) {
+ if (!(osec->flags & SHF_EXECINSTR))
+ continue;
+ for (InputSection *sec : getInputSections(*osec, storage)) {
+ for (Relocation &rel : sec->relocs()) {
+ if (rel.expr != R_RELAX_GOT_PC)
+ continue;
+
+ uint64_t v = sec->getRelocTargetVA(
+ sec->file, rel.type, rel.addend,
+ sec->getOutputSection()->addr + rel.offset, *rel.sym, rel.expr);
+ if (isInt<33>(v) && !(v & 1))
+ continue;
+ if (rel.sym->auxIdx == 0) {
+ rel.sym->allocateAux();
+ addGotEntry(*rel.sym);
+ changed = true;
+ }
+ rel.expr = R_GOT_PC;
+ }
+ }
+ }
+ return changed;
+}
+
+void SystemZ::relaxGot(uint8_t *loc, const Relocation &rel,
+ uint64_t val) const {
+ assert(isInt<33>(val) &&
+ "R_390_GOTENT should not have been relaxed if it overflows");
+ assert(!(val & 1) &&
+ "R_390_GOTENT should not have been relaxed if it is misaligned");
+ const uint16_t op = read16be(loc - 2);
+
+ // lgrl rx,sym@GOTENT -> larl rx, sym
+ if ((op & 0xff0f) == 0xc408) {
+ write16be(loc - 2, 0xc000 | (op & 0x00f0));
+ write32be(loc, val >> 1);
+ }
+}
+
+void SystemZ::relocate(uint8_t *loc, const Relocation &rel,
+ uint64_t val) const {
+ switch (rel.expr) {
+ case R_RELAX_GOT_PC:
+ return relaxGot(loc, rel, val);
+ case R_RELAX_TLS_GD_TO_IE_GOT_OFF:
+ return relaxTlsGdToIe(loc, rel, val);
+ case R_RELAX_TLS_GD_TO_LE:
+ return relaxTlsGdToLe(loc, rel, val);
+ case R_RELAX_TLS_LD_TO_LE:
+ return relaxTlsLdToLe(loc, rel, val);
+ default:
+ break;
+ }
+ switch (rel.type) {
+ case R_390_8:
+ checkIntUInt(loc, val, 8, rel);
+ *loc = val;
+ break;
+ case R_390_12:
+ case R_390_GOT12:
+ case R_390_GOTPLT12:
+ case R_390_TLS_GOTIE12:
+ checkUInt(loc, val, 12, rel);
+ write16be(loc, (read16be(loc) & 0xF000) | val);
+ break;
+ case R_390_PC12DBL:
+ case R_390_PLT12DBL:
+ checkInt(loc, val, 13, rel);
+ checkAlignment(loc, val, 2, rel);
+ write16be(loc, (read16be(loc) & 0xF000) | ((val >> 1) & 0x0FFF));
+ break;
+ case R_390_16:
+ case R_390_GOT16:
+ case R_390_GOTPLT16:
+ case R_390_GOTOFF16:
+ case R_390_PLTOFF16:
+ checkIntUInt(loc, val, 16, rel);
+ write16be(loc, val);
+ break;
+ case R_390_PC16:
+ checkInt(loc, val, 16, rel);
+ write16be(loc, val);
+ break;
+ case R_390_PC16DBL:
+ case R_390_PLT16DBL:
+ checkInt(loc, val, 17, rel);
+ checkAlignment(loc, val, 2, rel);
+ write16be(loc, val >> 1);
+ break;
+ case R_390_20:
+ case R_390_GOT20:
+ case R_390_GOTPLT20:
+ case R_390_TLS_GOTIE20:
+ checkInt(loc, val, 20, rel);
+ write32be(loc, (read32be(loc) & 0xF00000FF) | ((val & 0xFFF) << 16) |
+ ((val & 0xFF000) >> 4));
+ break;
+ case R_390_PC24DBL:
+ case R_390_PLT24DBL:
+ checkInt(loc, val, 25, rel);
+ checkAlignment(loc, val, 2, rel);
+ loc[0] = val >> 17;
+ loc[1] = val >> 9;
+ loc[2] = val >> 1;
+ break;
+ case R_390_32:
+ case R_390_GOT32:
+ case R_390_GOTPLT32:
+ case R_390_GOTOFF:
+ case R_390_PLTOFF32:
+ case R_390_TLS_IE32:
+ case R_390_TLS_GOTIE32:
+ case R_390_TLS_GD32:
+ case R_390_TLS_LDM32:
+ case R_390_TLS_LDO32:
+ case R_390_TLS_LE32:
+ checkIntUInt(loc, val, 32, rel);
+ write32be(loc, val);
+ break;
+ case R_390_PC32:
+ case R_390_PLT32:
+ checkInt(loc, val, 32, rel);
+ write32be(loc, val);
+ break;
+ case R_390_PC32DBL:
+ case R_390_PLT32DBL:
+ case R_390_GOTPCDBL:
+ case R_390_GOTENT:
+ case R_390_GOTPLTENT:
+ case R_390_TLS_IEENT:
+ checkInt(loc, val, 33, rel);
+ checkAlignment(loc, val, 2, rel);
+ write32be(loc, val >> 1);
+ break;
+ case R_390_64:
+ case R_390_PC64:
+ case R_390_PLT64:
+ case R_390_GOT64:
+ case R_390_GOTPLT64:
+ case R_390_GOTOFF64:
+ case R_390_PLTOFF64:
+ case R_390_GOTPC:
+ case R_390_TLS_IE64:
+ case R_390_TLS_GOTIE64:
+ case R_390_TLS_GD64:
+ case R_390_TLS_LDM64:
+ case R_390_TLS_LDO64:
+ case R_390_TLS_LE64:
+ case R_390_TLS_DTPMOD:
+ case R_390_TLS_DTPOFF:
+ case R_390_TLS_TPOFF:
+ write64be(loc, val);
+ break;
+ case R_390_TLS_LOAD:
+ case R_390_TLS_GDCALL:
+ case R_390_TLS_LDCALL:
+ break;
+ default:
+ llvm_unreachable("unknown relocation");
+ }
+}
+
+TargetInfo *elf::getSystemZTargetInfo() {
+ static SystemZ t;
+ return &t;
+}
diff --git a/lld/ELF/CMakeLists.txt b/lld/ELF/CMakeLists.txt
index 475f7de..83d816dd 100644
--- a/lld/ELF/CMakeLists.txt
+++ b/lld/ELF/CMakeLists.txt
@@ -33,6 +33,7 @@ add_lld_library(lldELF
Arch/PPC64.cpp
Arch/RISCV.cpp
Arch/SPARCV9.cpp
+ Arch/SystemZ.cpp
Arch/X86.cpp
Arch/X86_64.cpp
ARMErrataFix.cpp
diff --git a/lld/ELF/Driver.cpp b/lld/ELF/Driver.cpp
index c19fba6..4bb9b7a 100644
--- a/lld/ELF/Driver.cpp
+++ b/lld/ELF/Driver.cpp
@@ -200,6 +200,7 @@ static std::tuple<ELFKind, uint16_t, uint8_t> parseEmulation(StringRef emul) {
.Case("msp430elf", {ELF32LEKind, EM_MSP430})
.Case("elf64_amdgpu", {ELF64LEKind, EM_AMDGPU})
.Case("elf64loongarch", {ELF64LEKind, EM_LOONGARCH})
+ .Case("elf64_s390", {ELF64BEKind, EM_S390})
.Default({ELFNoneKind, EM_NONE});
if (ret.first == ELFNoneKind)
@@ -1137,7 +1138,7 @@ static SmallVector<StringRef, 0> getSymbolOrderingFile(MemoryBufferRef mb) {
static bool getIsRela(opt::InputArgList &args) {
// The psABI specifies the default relocation entry format.
bool rela = is_contained({EM_AARCH64, EM_AMDGPU, EM_HEXAGON, EM_LOONGARCH,
- EM_PPC, EM_PPC64, EM_RISCV, EM_X86_64},
+ EM_PPC, EM_PPC64, EM_RISCV, EM_S390, EM_X86_64},
config->emachine);
// If -z rel or -z rela is specified, use the last option.
for (auto *arg : args.filtered(OPT_z)) {
diff --git a/lld/ELF/InputFiles.cpp b/lld/ELF/InputFiles.cpp
index a292e87..00aebb4 100644
--- a/lld/ELF/InputFiles.cpp
+++ b/lld/ELF/InputFiles.cpp
@@ -41,8 +41,8 @@ using namespace llvm::support::endian;
using namespace lld;
using namespace lld::elf;
-// This function is explicity instantiated in ARM.cpp, don't do it here to avoid
-// warnings with MSVC.
+// This function is explicitly instantiated in ARM.cpp, don't do it here to
+// avoid warnings with MSVC.
extern template void ObjFile<ELF32LE>::importCmseSymbols();
extern template void ObjFile<ELF32BE>::importCmseSymbols();
extern template void ObjFile<ELF64LE>::importCmseSymbols();
@@ -323,7 +323,7 @@ template <class ELFT> static void doParseFile(InputFile *file) {
// Add symbols in File to the symbol table.
void elf::parseFile(InputFile *file) { invokeELFT(doParseFile, file); }
-// This function is explicity instantiated in ARM.cpp. Mark it extern here,
+// This function is explicitly instantiated in ARM.cpp. Mark it extern here,
// to avoid warnings when building with MSVC.
extern template void ObjFile<ELF32LE>::importCmseSymbols();
extern template void ObjFile<ELF32BE>::importCmseSymbols();
@@ -1614,6 +1614,8 @@ static uint16_t getBitcodeMachineKind(StringRef path, const Triple &t) {
return EM_RISCV;
case Triple::sparcv9:
return EM_SPARCV9;
+ case Triple::systemz:
+ return EM_S390;
case Triple::x86:
return t.isOSIAMCU() ? EM_IAMCU : EM_386;
case Triple::x86_64:
diff --git a/lld/ELF/InputSection.cpp b/lld/ELF/InputSection.cpp
index 3d726b4..e033a71 100644
--- a/lld/ELF/InputSection.cpp
+++ b/lld/ELF/InputSection.cpp
@@ -655,6 +655,7 @@ static int64_t getTlsTpOffset(const Symbol &s) {
// Variant 2.
case EM_HEXAGON:
+ case EM_S390:
case EM_SPARCV9:
case EM_386:
case EM_X86_64:
@@ -717,6 +718,10 @@ uint64_t InputSectionBase::getRelocTargetVA(const InputFile *file, RelType type,
case R_GOT_PC:
case R_RELAX_TLS_GD_TO_IE:
return sym.getGotVA() + a - p;
+ case R_GOTPLT_GOTREL:
+ return sym.getGotPltVA() + a - in.got->getVA();
+ case R_GOTPLT_PC:
+ return sym.getGotPltVA() + a - p;
case R_LOONGARCH_GOT_PAGE_PC:
if (sym.hasFlag(NEEDS_TLSGD))
return getLoongArchPageDelta(in.got->getGlobalDynAddr(sym) + a, p, type);
@@ -808,6 +813,8 @@ uint64_t InputSectionBase::getRelocTargetVA(const InputFile *file, RelType type,
return getLoongArchPageDelta(sym.getPltVA() + a, p, type);
case R_PLT_GOTPLT:
return sym.getPltVA() + a - in.gotPlt->getVA();
+ case R_PLT_GOTREL:
+ return sym.getPltVA() + a - in.got->getVA();
case R_PPC32_PLTREL:
// R_PPC_PLTREL24 uses the addend (usually 0 or 0x8000) to indicate r30
// stores _GLOBAL_OFFSET_TABLE_ or .got2+0x8000. The addend is ignored for
diff --git a/lld/ELF/Relocations.cpp b/lld/ELF/Relocations.cpp
index 79c8230..619fbaf 100644
--- a/lld/ELF/Relocations.cpp
+++ b/lld/ELF/Relocations.cpp
@@ -203,8 +203,9 @@ static bool isAbsoluteValue(const Symbol &sym) {
// Returns true if Expr refers a PLT entry.
static bool needsPlt(RelExpr expr) {
- return oneof<R_PLT, R_PLT_PC, R_PLT_GOTPLT, R_LOONGARCH_PLT_PAGE_PC,
- R_PPC32_PLTREL, R_PPC64_CALL_PLT>(expr);
+ return oneof<R_PLT, R_PLT_PC, R_PLT_GOTREL, R_PLT_GOTPLT, R_GOTPLT_GOTREL,
+ R_GOTPLT_PC, R_LOONGARCH_PLT_PAGE_PC, R_PPC32_PLTREL,
+ R_PPC64_CALL_PLT>(expr);
}
bool lld::elf::needsGot(RelExpr expr) {
@@ -233,6 +234,8 @@ static RelExpr toPlt(RelExpr expr) {
return R_PLT_PC;
case R_ABS:
return R_PLT;
+ case R_GOTREL:
+ return R_PLT_GOTREL;
default:
return expr;
}
@@ -253,6 +256,8 @@ static RelExpr fromPlt(RelExpr expr) {
return R_ABS;
case R_PLT_GOTPLT:
return R_GOTPLTREL;
+ case R_PLT_GOTREL:
+ return R_GOTREL;
default:
return expr;
}
@@ -935,7 +940,7 @@ void elf::addGotEntry(Symbol &sym) {
static void addTpOffsetGotEntry(Symbol &sym) {
in.got->addEntry(sym);
uint64_t off = sym.getGotOffset();
- if (!sym.isPreemptible && !config->isPic) {
+ if (!sym.isPreemptible && !config->shared) {
in.got->addConstant({R_TPREL, target->symbolicRel, off, 0, &sym});
return;
}
@@ -979,10 +984,10 @@ bool RelocationScanner::isStaticLinkTimeConstant(RelExpr e, RelType type,
if (oneof<R_GOTPLT, R_GOT_OFF, R_RELAX_HINT, R_MIPS_GOT_LOCAL_PAGE,
R_MIPS_GOTREL, R_MIPS_GOT_OFF, R_MIPS_GOT_OFF32, R_MIPS_GOT_GP_PC,
R_AARCH64_GOT_PAGE_PC, R_GOT_PC, R_GOTONLY_PC, R_GOTPLTONLY_PC,
- R_PLT_PC, R_PLT_GOTPLT, R_PPC32_PLTREL, R_PPC64_CALL_PLT,
- R_PPC64_RELAX_TOC, R_RISCV_ADD, R_AARCH64_GOT_PAGE,
- R_LOONGARCH_PLT_PAGE_PC, R_LOONGARCH_GOT, R_LOONGARCH_GOT_PAGE_PC>(
- e))
+ R_PLT_PC, R_PLT_GOTREL, R_PLT_GOTPLT, R_GOTPLT_GOTREL, R_GOTPLT_PC,
+ R_PPC32_PLTREL, R_PPC64_CALL_PLT, R_PPC64_RELAX_TOC, R_RISCV_ADD,
+ R_AARCH64_GOT_PAGE, R_LOONGARCH_PLT_PAGE_PC, R_LOONGARCH_GOT,
+ R_LOONGARCH_GOT_PAGE_PC>(e))
return true;
// These never do, except if the entire file is position dependent or if
@@ -1374,8 +1379,8 @@ static unsigned handleTlsRelocation(RelType type, Symbol &sym,
R_LOONGARCH_GOT_PAGE_PC, R_GOT_OFF, R_TLSIE_HINT>(expr)) {
ctx.hasTlsIe.store(true, std::memory_order_relaxed);
// Initial-Exec relocs can be optimized to Local-Exec if the symbol is
- // locally defined.
- if (execOptimize && isLocalInExecutable) {
+ // locally defined. This is not supported on SystemZ.
+ if (execOptimize && isLocalInExecutable && config->emachine != EM_S390) {
c.addReloc({R_RELAX_TLS_IE_TO_LE, type, offset, addend, &sym});
} else if (expr != R_TLSIE_HINT) {
sym.setFlags(NEEDS_TLSIE);
@@ -1534,8 +1539,10 @@ void RelocationScanner::scan(ArrayRef<RelTy> rels) {
// For EhInputSection, OffsetGetter expects the relocations to be sorted by
// r_offset. In rare cases (.eh_frame pieces are reordered by a linker
// script), the relocations may be unordered.
+ // On SystemZ, all sections need to be sorted by r_offset, to allow TLS
+ // relaxation to be handled correctly - see SystemZ::getTlsGdRelaxSkip.
SmallVector<RelTy, 0> storage;
- if (isa<EhInputSection>(sec))
+ if (isa<EhInputSection>(sec) || config->emachine == EM_S390)
rels = sortRels(rels, storage);
end = static_cast<const void *>(rels.end());
diff --git a/lld/ELF/Relocations.h b/lld/ELF/Relocations.h
index cfb9092..7eb8a811 100644
--- a/lld/ELF/Relocations.h
+++ b/lld/ELF/Relocations.h
@@ -40,11 +40,14 @@ enum RelExpr {
R_GOTPLT,
R_GOTPLTREL,
R_GOTREL,
+ R_GOTPLT_GOTREL,
+ R_GOTPLT_PC,
R_NONE,
R_PC,
R_PLT,
R_PLT_PC,
R_PLT_GOTPLT,
+ R_PLT_GOTREL,
R_RELAX_HINT,
R_RELAX_GOT_PC,
R_RELAX_GOT_PC_NOPIC,
diff --git a/lld/ELF/ScriptParser.cpp b/lld/ELF/ScriptParser.cpp
index dd69916..f0ede1f 100644
--- a/lld/ELF/ScriptParser.cpp
+++ b/lld/ELF/ScriptParser.cpp
@@ -445,6 +445,7 @@ static std::pair<ELFKind, uint16_t> parseBfdName(StringRef s) {
.Case("elf32-msp430", {ELF32LEKind, EM_MSP430})
.Case("elf32-loongarch", {ELF32LEKind, EM_LOONGARCH})
.Case("elf64-loongarch", {ELF64LEKind, EM_LOONGARCH})
+ .Case("elf64-s390", {ELF64BEKind, EM_S390})
.Default({ELFNoneKind, EM_NONE});
}
diff --git a/lld/ELF/SyntheticSections.cpp b/lld/ELF/SyntheticSections.cpp
index 4b413163..bada394 100644
--- a/lld/ELF/SyntheticSections.cpp
+++ b/lld/ELF/SyntheticSections.cpp
@@ -1419,6 +1419,9 @@ DynamicSection<ELFT>::computeContents() {
case EM_MIPS:
addInSec(DT_MIPS_PLTGOT, *in.gotPlt);
break;
+ case EM_S390:
+ addInSec(DT_PLTGOT, *in.got);
+ break;
case EM_SPARCV9:
addInSec(DT_PLTGOT, *in.plt);
break;
diff --git a/lld/ELF/Target.cpp b/lld/ELF/Target.cpp
index 286db1e..d879a42 100644
--- a/lld/ELF/Target.cpp
+++ b/lld/ELF/Target.cpp
@@ -87,6 +87,8 @@ TargetInfo *elf::getTarget() {
return getRISCVTargetInfo();
case EM_SPARCV9:
return getSPARCV9TargetInfo();
+ case EM_S390:
+ return getSystemZTargetInfo();
case EM_X86_64:
return getX86_64TargetInfo();
default:
diff --git a/lld/ELF/Target.h b/lld/ELF/Target.h
index ed00e81..0cefa31 100644
--- a/lld/ELF/Target.h
+++ b/lld/ELF/Target.h
@@ -188,6 +188,7 @@ TargetInfo *getPPC64TargetInfo();
TargetInfo *getPPCTargetInfo();
TargetInfo *getRISCVTargetInfo();
TargetInfo *getSPARCV9TargetInfo();
+TargetInfo *getSystemZTargetInfo();
TargetInfo *getX86TargetInfo();
TargetInfo *getX86_64TargetInfo();
template <class ELFT> TargetInfo *getMipsTargetInfo();
diff --git a/lld/ELF/Thunks.cpp b/lld/ELF/Thunks.cpp
index 5f543ff..f912f61 100644
--- a/lld/ELF/Thunks.cpp
+++ b/lld/ELF/Thunks.cpp
@@ -1338,7 +1338,7 @@ static Thunk *addThunkV6M(const InputSection &isec, RelType reloc, Symbol &s,
return make<ThumbV6MPILongThunk>(s, a);
fatal("relocation " + toString(reloc) + " to " + toString(s) +
- " not supported for Armv6-M targets for position independant"
+ " not supported for Armv6-M targets for position independent"
" and execute only code");
}
if (isPureCode)
diff --git a/lld/MinGW/Driver.cpp b/lld/MinGW/Driver.cpp
index 290eeca..efd643f 100644
--- a/lld/MinGW/Driver.cpp
+++ b/lld/MinGW/Driver.cpp
@@ -451,6 +451,10 @@ bool link(ArrayRef<const char *> argsArr, llvm::raw_ostream &stdoutOS,
add("-lto-cs-profile-generate");
if (auto *arg = args.getLastArg(OPT_lto_cs_profile_file))
add("-lto-cs-profile-file:" + StringRef(arg->getValue()));
+ if (args.hasArg(OPT_plugin_opt_emit_llvm))
+ add("-lldemit:llvm");
+ if (args.hasArg(OPT_lto_emit_asm))
+ add("-lldemit:asm");
if (auto *a = args.getLastArg(OPT_thinlto_cache_dir))
add("-lldltocache:" + StringRef(a->getValue()));
diff --git a/lld/MinGW/Options.td b/lld/MinGW/Options.td
index 02f00f2..9a0a96a 100644
--- a/lld/MinGW/Options.td
+++ b/lld/MinGW/Options.td
@@ -158,6 +158,8 @@ def lto_cs_profile_generate: FF<"lto-cs-profile-generate">,
HelpText<"Perform context sensitive PGO instrumentation">;
def lto_cs_profile_file: JJ<"lto-cs-profile-file=">,
HelpText<"Context sensitive profile file path">;
+def lto_emit_asm: FF<"lto-emit-asm">,
+ HelpText<"Emit assembly code">;
def thinlto_cache_dir: JJ<"thinlto-cache-dir=">,
HelpText<"Path to ThinLTO cached object file directory">;
@@ -181,6 +183,9 @@ def: J<"plugin-opt=cs-profile-path=">,
Alias<lto_cs_profile_file>, HelpText<"Alias for --lto-cs-profile-file">;
def plugin_opt_dwo_dir_eq: J<"plugin-opt=dwo_dir=">,
HelpText<"Directory to store .dwo files when LTO and debug fission are used">;
+def plugin_opt_emit_asm: F<"plugin-opt=emit-asm">,
+ Alias<lto_emit_asm>, HelpText<"Alias for --lto-emit-asm">;
+def plugin_opt_emit_llvm: F<"plugin-opt=emit-llvm">;
def: J<"plugin-opt=jobs=">, Alias<thinlto_jobs_eq>, HelpText<"Alias for --thinlto-jobs=">;
def plugin_opt_mcpu_eq: J<"plugin-opt=mcpu=">;
diff --git a/lld/test/ELF/Inputs/systemz-init.s b/lld/test/ELF/Inputs/systemz-init.s
new file mode 100644
index 0000000..1611b69
--- /dev/null
+++ b/lld/test/ELF/Inputs/systemz-init.s
@@ -0,0 +1,5 @@
+// glibc < 2.39 used to align .init and .fini code at a 4-byte boundary.
+// This file aims to recreate that behavior.
+ .section .init,"ax",@progbits
+ .align 4
+ lg %r4, 272(%r15)
diff --git a/lld/test/ELF/arm-thumb-thunk-v6m-xo.s b/lld/test/ELF/arm-thumb-thunk-v6m-xo.s
index 10f1e73..f1b6c0c 100644
--- a/lld/test/ELF/arm-thumb-thunk-v6m-xo.s
+++ b/lld/test/ELF/arm-thumb-thunk-v6m-xo.s
@@ -54,4 +54,4 @@ far:
// CHECK-NEXT: <far>:
// CHECK-NEXT: 12345678: bx lr
-// CHECK-PI: error: relocation R_ARM_THM_CALL to far not supported for Armv6-M targets for position independant and execute only code
+// CHECK-PI: error: relocation R_ARM_THM_CALL to far not supported for Armv6-M targets for position independent and execute only code
diff --git a/lld/test/ELF/basic-systemz.s b/lld/test/ELF/basic-systemz.s
new file mode 100644
index 0000000..f7bb0e8
--- /dev/null
+++ b/lld/test/ELF/basic-systemz.s
@@ -0,0 +1,63 @@
+# REQUIRES: systemz
+# RUN: llvm-mc -filetype=obj -triple=s390x-unknown-linux %s -o %t.o
+# RUN: ld.lld --hash-style=sysv -discard-all -shared %t.o -o %t.so
+# RUN: llvm-readelf --file-header --program-headers --section-headers --dynamic-table %t.so | FileCheck %s
+
+# Exits with return code 55 on linux.
+.text
+ lghi 2,55
+ svc 1
+
+# CHECK: ELF Header:
+# CHECK-NEXT: Magic: 7f 45 4c 46 02 02 01 00 00 00 00 00 00 00 00 00
+# CHECK-NEXT: Class: ELF64
+# CHECK-NEXT: Data: 2's complement, big endian
+# CHECK-NEXT: Version: 1 (current)
+# CHECK-NEXT: OS/ABI: UNIX - System V
+# CHECK-NEXT: ABI Version: 0
+# CHECK-NEXT: Type: DYN (Shared object file)
+# CHECK-NEXT: Machine: IBM S/390
+# CHECK-NEXT: Version: 0x1
+# CHECK-NEXT: Entry point address: 0x0
+# CHECK-NEXT: Start of program headers: 64 (bytes into file)
+# CHECK-NEXT: Start of section headers: 768 (bytes into file)
+# CHECK-NEXT: Flags: 0x0
+# CHECK-NEXT: Size of this header: 64 (bytes)
+# CHECK-NEXT: Size of program headers: 56 (bytes)
+# CHECK-NEXT: Number of program headers: 7
+# CHECK-NEXT: Size of section headers: 64 (bytes)
+# CHECK-NEXT: Number of section headers: 11
+# CHECK-NEXT: Section header string table index: 9
+
+# CHECK: Section Headers:
+# CHECK-NEXT: [Nr] Name Type Address Off Size ES Flg Lk Inf Al
+# CHECK-NEXT: [ 0] NULL 0000000000000000 000000 000000 00 0 0 0
+# CHECK-NEXT: [ 1] .dynsym DYNSYM 00000000000001c8 0001c8 000018 18 A 3 1 8
+# CHECK-NEXT: [ 2] .hash HASH 00000000000001e0 0001e0 000010 04 A 1 0 4
+# CHECK-NEXT: [ 3] .dynstr STRTAB 00000000000001f0 0001f0 000001 00 A 0 0 1
+# CHECK-NEXT: [ 4] .text PROGBITS 00000000000011f4 0001f4 000006 00 AX 0 0 4
+# CHECK-NEXT: [ 5] .dynamic DYNAMIC 0000000000002200 000200 000060 10 WA 3 0 8
+# CHECK-NEXT: [ 6] .relro_padding NOBITS 0000000000002260 000260 000da0 00 WA 0 0 1
+# CHECK-NEXT: [ 7] .comment PROGBITS 0000000000000000 000260 000008 01 MS 0 0 1
+# CHECK-NEXT: [ 8] .symtab SYMTAB 0000000000000000 000268 000030 18 10 2 8
+# CHECK-NEXT: [ 9] .shstrtab STRTAB 0000000000000000 000298 000058 00 0 0 1
+# CHECK-NEXT: [10] .strtab STRTAB 0000000000000000 0002f0 00000a 00 0 0 1
+
+# CHECK: Program Headers:
+# CHECK-NEXT: Type Offset VirtAddr PhysAddr FileSiz MemSiz Flg Align
+# CHECK-NEXT: PHDR 0x000040 0x0000000000000040 0x0000000000000040 0x000188 0x000188 R 0x8
+# CHECK-NEXT: LOAD 0x000000 0x0000000000000000 0x0000000000000000 0x0001f1 0x0001f1 R 0x1000
+# CHECK-NEXT: LOAD 0x0001f4 0x00000000000011f4 0x00000000000011f4 0x000006 0x000006 R E 0x1000
+# CHECK-NEXT: LOAD 0x000200 0x0000000000002200 0x0000000000002200 0x000060 0x000e00 RW 0x1000
+# CHECK-NEXT: DYNAMIC 0x000200 0x0000000000002200 0x0000000000002200 0x000060 0x000060 RW 0x8
+# CHECK-NEXT: GNU_RELRO 0x000200 0x0000000000002200 0x0000000000002200 0x000060 0x000e00 R 0x1
+# CHECK-NEXT: GNU_STACK 0x000000 0x0000000000000000 0x0000000000000000 0x000000 0x000000 RW 0x0
+
+# CHECK: Dynamic section at offset 0x200 contains 6 entries:
+# CHECK-NEXT: Tag Type Name/Value
+# CHECK-NEXT: 0x0000000000000006 (SYMTAB) 0x1c8
+# CHECK-NEXT: 0x000000000000000b (SYMENT) 24 (bytes)
+# CHECK-NEXT: 0x0000000000000005 (STRTAB) 0x1f0
+# CHECK-NEXT: 0x000000000000000a (STRSZ) 1 (bytes)
+# CHECK-NEXT: 0x0000000000000004 (HASH) 0x1e0
+# CHECK-NEXT: 0x0000000000000000 (NULL) 0x0
diff --git a/lld/test/ELF/emulation-systemz.s b/lld/test/ELF/emulation-systemz.s
new file mode 100644
index 0000000..dfdb462
--- /dev/null
+++ b/lld/test/ELF/emulation-systemz.s
@@ -0,0 +1,29 @@
+# REQUIRES: systemz
+# RUN: llvm-mc -filetype=obj -triple=s390x-unknown-linux %s -o %t.o
+# RUN: ld.lld -m elf64_s390 %t.o -o %t1
+# RUN: llvm-readelf --file-header %t1 | FileCheck %s
+# RUN: ld.lld %t.o -o %t2
+# RUN: llvm-readelf --file-header %t2 | FileCheck %s
+# RUN: echo 'OUTPUT_FORMAT(elf64-s390)' > %t.script
+# RUN: ld.lld %t.script %t.o -o %t3
+# RUN: llvm-readelf --file-header %t3 | FileCheck %s
+
+# CHECK: ELF Header:
+# CHECK-NEXT: Magic: 7f 45 4c 46 02 02 01 00 00 00 00 00 00 00 00 00
+# CHECK-NEXT: Class: ELF64
+# CHECK-NEXT: Data: 2's complement, big endian
+# CHECK-NEXT: Version: 1 (current)
+# CHECK-NEXT: OS/ABI: UNIX - System V
+# CHECK-NEXT: ABI Version: 0
+# CHECK-NEXT: Type: EXEC (Executable file)
+# CHECK-NEXT: Machine: IBM S/390
+# CHECK-NEXT: Version: 0x1
+# CHECK-NEXT: Entry point address:
+# CHECK-NEXT: Start of program headers: 64 (bytes into file)
+# CHECK-NEXT: Start of section headers:
+# CHECK-NEXT: Flags: 0x0
+# CHECK-NEXT: Size of this header: 64 (bytes)
+# CHECK-NEXT: Size of program headers: 56 (bytes)
+
+.globl _start
+_start:
diff --git a/lld/test/ELF/lto/systemz.ll b/lld/test/ELF/lto/systemz.ll
new file mode 100644
index 0000000..42bf4e3
--- /dev/null
+++ b/lld/test/ELF/lto/systemz.ll
@@ -0,0 +1,18 @@
+; REQUIRES: systemz
+;; Test we can infer the e_machine value EM_S390 from a bitcode file.
+
+; RUN: llvm-as %s -o %t.o
+; RUN: ld.lld %t.o -o %t
+; RUN: llvm-readobj -h %t | FileCheck %s
+
+; CHECK: Class: 64-bit
+; CHECK: DataEncoding: BigEndian
+; CHECK: Machine: EM_S390
+
+target datalayout = "E-m:e-i1:8:16-i8:8:16-i64:64-f128:64-v128:64-a:8:16-n32:64"
+target triple = "s390x-unknown-linux-gnu"
+
+define void @_start() {
+entry:
+ ret void
+}
diff --git a/lld/test/ELF/systemz-got.s b/lld/test/ELF/systemz-got.s
new file mode 100644
index 0000000..1d558aa
--- /dev/null
+++ b/lld/test/ELF/systemz-got.s
@@ -0,0 +1,16 @@
+# REQUIRES: systemz
+# RUN: llvm-mc -filetype=obj -triple=s390x-unknown-linux %s -o %t.o
+# RUN: llvm-mc -filetype=obj -triple=s390x-unknown-linux %p/Inputs/shared.s -o %t2.o
+# RUN: ld.lld -shared %t2.o -soname=%t2.so -o %t2.so
+
+# RUN: ld.lld -dynamic-linker /lib/ld64.so.1 %t.o %t2.so -o %t
+# RUN: llvm-readelf -S -r %t | FileCheck %s
+
+# CHECK: .got PROGBITS {{.*}} {{.*}} 000020 00 WA 0 0 8
+
+# CHECK: Relocation section '.rela.dyn' at offset {{.*}} contains 1 entries:
+# CHECK: {{.*}} 000000010000000a R_390_GLOB_DAT 0000000000000000 bar + 0
+
+.global _start
+_start:
+ lgrl %r1,bar@GOT
diff --git a/lld/test/ELF/systemz-gotent-relax-align.s b/lld/test/ELF/systemz-gotent-relax-align.s
new file mode 100644
index 0000000..c632608
--- /dev/null
+++ b/lld/test/ELF/systemz-gotent-relax-align.s
@@ -0,0 +1,48 @@
+# REQUIRES: systemz
+## Verify that R_390_GOTENT optimization is not performed on misaligned symbols.
+
+# RUN: llvm-mc -filetype=obj -relax-relocations -triple=s390x-unknown-linux %s -o %t.o
+# RUN: ld.lld %t.o -o %t1
+# RUN: llvm-readelf -S -r -x .got -x .got.plt %t1 | FileCheck --check-prefixes=CHECK %s
+# RUN: llvm-objdump --no-print-imm-hex -d %t1 | FileCheck --check-prefix=DISASM %s
+
+## We retain one .got entry for the unaligned symbol.
+# CHECK: Name Type Address Off Size ES Flg Lk Inf Al
+# CHECK: .got PROGBITS 00000000010021e0 0001e0 000020 00 WA 0 0 8
+# CHECK-NEXT: .relro_padding NOBITS 0000000001002200 000200 000e00 00 WA 0 0 1
+# CHECK-NEXT: .data PROGBITS 0000000001003200 000200 000006 00 WA 0 0 2
+
+# CHECK-LABEL: Hex dump of section '.got':
+# CHECK-NEXT: 0x010021e0 00000000 00000000 00000000 00000000
+# CHECK-NEXT: 0x010021f0 00000000 00000000 00000000 01003205
+
+# DISASM: Disassembly of section .text:
+# DISASM: <_start>:
+# DISASM-NEXT: larl %r1, 0x1003200
+# DISASM-NEXT: larl %r1, 0x1003200
+# DISASM-NEXT: lgrl %r1, 0x10021f8
+# DISASM-NEXT: lgrl %r1, 0x10021f8
+
+.data
+.globl var_align
+.hidden var_align
+ .align 2
+var_align:
+ .long 0
+
+.data
+.globl var_unalign
+.hidden var_unalign
+ .align 2
+ .byte 0
+var_unalign:
+ .byte 0
+
+.text
+.globl _start
+.type _start, @function
+_start:
+ lgrl %r1, var_align@GOT
+ lgrl %r1, var_align@GOT
+ lgrl %r1, var_unalign@GOT
+ lgrl %r1, var_unalign@GOT
diff --git a/lld/test/ELF/systemz-gotent-relax-und-dso.s b/lld/test/ELF/systemz-gotent-relax-und-dso.s
new file mode 100644
index 0000000..57369a4
--- /dev/null
+++ b/lld/test/ELF/systemz-gotent-relax-und-dso.s
@@ -0,0 +1,68 @@
+# REQUIRES: systemz
+# RUN: llvm-mc -filetype=obj -relax-relocations -triple=s390x-unknown-linux %s -o %t.o
+# RUN: llvm-mc -filetype=obj -relax-relocations -triple=s390x-unknown-linux %S/Inputs/gotpc-relax-und-dso.s -o %tdso.o
+# RUN: ld.lld -shared %tdso.o -soname=t.so -o %t.so
+# RUN: ld.lld --hash-style=sysv -shared %t.o %t.so -o %t
+# RUN: llvm-readelf -r %t | FileCheck --check-prefix=RELOC %s
+# RUN: llvm-objdump --no-print-imm-hex -d %t | FileCheck --check-prefix=DISASM %s
+
+# RELOC-LABEL: Relocation section '.rela.dyn' at offset {{.*}} contains 3 entries:
+# RELOC: 00000000000023f8 000000010000000a R_390_GLOB_DAT 00000000000012d8 foo + 0
+# RELOC: 0000000000002400 000000030000000a R_390_GLOB_DAT 0000000000000000 und + 0
+# RELOC: 0000000000002408 000000040000000a R_390_GLOB_DAT 0000000000000000 dsofoo + 0
+
+# DISASM: Disassembly of section .text:
+# DISASM-EMPTY:
+# DISASM-NEXT: <foo>:
+# DISASM-NEXT: bc 0, 0
+# DISASM: <hid>:
+# DISASM-NEXT: bc 0, 0
+# DISASM: <_start>:
+# DISASM-NEXT: lgrl %r1, 0x2400
+# DISASM-NEXT: lgrl %r1, 0x2400
+# DISASM-NEXT: lgrl %r1, 0x2408
+# DISASM-NEXT: lgrl %r1, 0x2408
+# DISASM-NEXT: larl %r1, 0x12dc
+# DISASM-NEXT: larl %r1, 0x12dc
+# DISASM-NEXT: lgrl %r1, 0x23f8
+# DISASM-NEXT: lgrl %r1, 0x23f8
+# DISASM-NEXT: lgrl %r1, 0x2400
+# DISASM-NEXT: lgrl %r1, 0x2400
+# DISASM-NEXT: lgrl %r1, 0x2408
+# DISASM-NEXT: lgrl %r1, 0x2408
+# DISASM-NEXT: larl %r1, 0x12dc
+# DISASM-NEXT: larl %r1, 0x12dc
+# DISASM-NEXT: lgrl %r1, 0x23f8
+# DISASM-NEXT: lgrl %r1, 0x23f8
+
+.text
+.globl foo
+.type foo, @function
+foo:
+ nop
+
+.globl hid
+.hidden hid
+.type hid, @function
+hid:
+ nop
+
+.globl _start
+.type _start, @function
+_start:
+ lgrl %r1, und@GOT
+ lgrl %r1, und@GOT
+ lgrl %r1, dsofoo@GOT
+ lgrl %r1, dsofoo@GOT
+ lgrl %r1, hid@GOT
+ lgrl %r1, hid@GOT
+ lgrl %r1, foo@GOT
+ lgrl %r1, foo@GOT
+ lgrl %r1, und@GOT
+ lgrl %r1, und@GOT
+ lgrl %r1, dsofoo@GOT
+ lgrl %r1, dsofoo@GOT
+ lgrl %r1, hid@GOT
+ lgrl %r1, hid@GOT
+ lgrl %r1, foo@GOT
+ lgrl %r1, foo@GOT
diff --git a/lld/test/ELF/systemz-gotent-relax.s b/lld/test/ELF/systemz-gotent-relax.s
new file mode 100644
index 0000000..f665e1a
--- /dev/null
+++ b/lld/test/ELF/systemz-gotent-relax.s
@@ -0,0 +1,91 @@
+# REQUIRES: systemz
+## Test R_390_GOTENT optimization.
+
+# RUN: llvm-mc -filetype=obj -relax-relocations -triple=s390x-unknown-linux %s -o %t.o
+# RUN: ld.lld %t.o -o %t1 --no-apply-dynamic-relocs
+# RUN: llvm-readelf -S -r -x .got.plt %t1 | FileCheck --check-prefixes=CHECK,NOAPPLY %s
+# RUN: ld.lld %t.o -o %t1 --apply-dynamic-relocs
+# RUN: llvm-readelf -S -r -x .got.plt %t1 | FileCheck --check-prefixes=CHECK,APPLY %s
+# RUN: ld.lld %t.o -o %t1
+# RUN: llvm-objdump --no-print-imm-hex -d %t1 | FileCheck --check-prefix=DISASM %s
+
+## --no-relax disables GOT optimization.
+# RUN: ld.lld --no-relax %t.o -o %t2
+# RUN: llvm-objdump --no-print-imm-hex -d %t2 | FileCheck --check-prefix=NORELAX %s
+
+## In our implementation, .got is retained even if all GOT-generating relocations are optimized.
+# CHECK: Name Type Address Off Size ES Flg Lk Inf Al
+# CHECK: .iplt PROGBITS 0000000001001240 000240 000020 00 AX 0 0 16
+# CHECK-NEXT: .got PROGBITS 0000000001002260 000260 000018 00 WA 0 0 8
+# CHECK-NEXT: .relro_padding NOBITS 0000000001002278 000278 000d88 00 WA 0 0 1
+# CHECK-NEXT: .got.plt PROGBITS 0000000001003278 000278 000008 00 WA 0 0 8
+
+## There is one R_S390_IRELATIVE relocation.
+# CHECK-LABEL: Relocation section '.rela.dyn' at offset {{.*}} contains 1 entries:
+# CHECK: 0000000001003278 000000000000003d R_390_IRELATIVE 10011e8
+
+# CHECK-LABEL: Hex dump of section '.got.plt':
+# NOAPPLY-NEXT: 0x01003278 00000000 00000000
+# APPLY-NEXT: 0x01003278 00000000 010011e8
+
+# DISASM: Disassembly of section .text:
+# DISASM: 00000000010011e0 <foo>:
+# DISASM-NEXT: bc 0, 0
+# DISASM: 00000000010011e4 <hid>:
+# DISASM-NEXT: bc 0, 0
+# DISASM: 00000000010011e8 <ifunc>:
+# DISASM-NEXT: br %r14
+# DISASM: 00000000010011ea <_start>:
+# DISASM-NEXT: larl %r1, 0x10011e0
+# DISASM-NEXT: larl %r1, 0x10011e0
+# DISASM-NEXT: larl %r1, 0x10011e4
+# DISASM-NEXT: larl %r1, 0x10011e4
+# DISASM-NEXT: lgrl %r1, 0x1003278
+# DISASM-NEXT: lgrl %r1, 0x1003278
+# DISASM-NEXT: larl %r1, 0x10011e0
+# DISASM-NEXT: larl %r1, 0x10011e0
+# DISASM-NEXT: larl %r1, 0x10011e4
+# DISASM-NEXT: larl %r1, 0x10011e4
+# DISASM-NEXT: lgrl %r1, 0x1003278
+# DISASM-NEXT: lgrl %r1, 0x1003278
+
+# NORELAX-LABEL: <_start>:
+# NORELAX-COUNT-12: lgrl
+
+.text
+.globl foo
+
+.text
+.globl foo
+.type foo, @function
+foo:
+ nop
+
+.globl hid
+.hidden hid
+.type hid, @function
+hid:
+ nop
+
+.text
+.type ifunc STT_GNU_IFUNC
+.globl ifunc
+.type ifunc, @function
+ifunc:
+ br %r14
+
+.globl _start
+.type _start, @function
+_start:
+ lgrl %r1, foo@GOT
+ lgrl %r1, foo@GOT
+ lgrl %r1, hid@GOT
+ lgrl %r1, hid@GOT
+ lgrl %r1, ifunc@GOT
+ lgrl %r1, ifunc@GOT
+ lgrl %r1, foo@GOT
+ lgrl %r1, foo@GOT
+ lgrl %r1, hid@GOT
+ lgrl %r1, hid@GOT
+ lgrl %r1, ifunc@GOT
+ lgrl %r1, ifunc@GOT
diff --git a/lld/test/ELF/systemz-ifunc-nonpreemptible.s b/lld/test/ELF/systemz-ifunc-nonpreemptible.s
new file mode 100644
index 0000000..5056db3
--- /dev/null
+++ b/lld/test/ELF/systemz-ifunc-nonpreemptible.s
@@ -0,0 +1,75 @@
+# REQUIRES: systemz
+# RUN: llvm-mc -filetype=obj -triple=s390x-none-linux-gnu %s -o %t.o
+# RUN: ld.lld -static %t.o -o %t
+# RUN: ld.lld -static %t.o -o %t.apply --apply-dynamic-relocs
+# RUN: llvm-readelf --section-headers --relocations --symbols %t | FileCheck %s
+# RUN: llvm-readelf -x .got.plt %t | FileCheck %s --check-prefix=NO-APPLY-RELOC
+# RUN: llvm-readelf -x .got.plt %t.apply | FileCheck %s --check-prefix=APPLY-RELOC
+# RUN: llvm-objdump --no-print-imm-hex -d --no-show-raw-insn %t | FileCheck %s --check-prefix=DISASM
+
+# CHECK: Section Headers:
+# CHECK-NEXT: [Nr] Name Type Address Off Size ES Flg Lk Inf Al
+# CHECK-NEXT: [ 0] NULL 0000000000000000 000000 000000 00 0 0 0
+# CHECK-NEXT: [ 1] .rela.dyn RELA 0000000001000158 000158 000030 18 AI 0 4 8
+# CHECK-NEXT: [ 2] .text PROGBITS 0000000001001188 000188 00001c 00 AX 0 0 4
+# CHECK-NEXT: [ 3] .iplt PROGBITS 00000000010011b0 0001b0 000040 00 AX 0 0 16
+# CHECK-NEXT: [ 4] .got.plt PROGBITS 00000000010021f0 0001f0 000010 00 WA 0 0 8
+
+# CHECK: Relocation section '.rela.dyn' at offset 0x158 contains 2 entries:
+# CHECK-NEXT: Offset Info Type Symbol's Value Symbol's Name + Addend
+# CHECK-NEXT: 00000000010021f0 000000000000003d R_390_IRELATIVE 1001188
+# CHECK-NEXT: 00000000010021f8 000000000000003d R_390_IRELATIVE 100118a
+
+# CHECK: Symbol table '.symtab' contains 6 entries:
+# CHECK-NEXT: Num: Value Size Type Bind Vis Ndx Name
+# CHECK-NEXT: 0: 0000000000000000 0 NOTYPE LOCAL DEFAULT UND
+# CHECK-NEXT: 1: 0000000001000158 0 NOTYPE LOCAL HIDDEN 1 __rela_iplt_start
+# CHECK-NEXT: 2: 0000000001000188 0 NOTYPE LOCAL HIDDEN 1 __rela_iplt_end
+# CHECK-NEXT: 3: 0000000001001188 0 IFUNC GLOBAL DEFAULT 2 foo
+# CHECK-NEXT: 4: 000000000100118a 0 IFUNC GLOBAL DEFAULT 2 bar
+# CHECK-NEXT: 5: 000000000100118c 0 NOTYPE GLOBAL DEFAULT 2 _start
+
+# NO-APPLY-RELOC-LABEL: Hex dump of section '.got.plt':
+# NO-APPLY-RELOC-NEXT: 0x010021f0 00000000 00000000 00000000 00000000
+# NO-APPLY-RELOC-EMPTY:
+
+# APPLY-RELOC-LABEL: Hex dump of section '.got.plt':
+# APPLY-RELOC-NEXT: 0x010021f0 00000000 01001188 00000000 0100118a
+# APPLY-RELOC-EMPTY:
+
+# DISASM: Disassembly of section .text:
+# DISASM: 0000000001001188 <foo>:
+# DISASM-NEXT: br %r14
+# DISASM: 000000000100118a <bar>:
+# DISASM-NEXT: br %r14
+# DISASM: 000000000100118c <_start>:
+# DISASM-NEXT: brasl %r14, 0x10011b0
+# DISASM-NEXT: brasl %r14, 0x10011d0
+# DISASM-NEXT: larl %r2, 0x1000158
+# DISASM-NEXT: larl %r2, 0x1000188
+# DISASM: Disassembly of section .iplt:
+# DISASM: <.iplt>:
+# DISASM: 10011b0: larl %r1, 0x10021f0
+# DISASM-NEXT: 10011b6: lg %r1, 0(%r1)
+# DISASM-NEXT: 10011bc: br %r1
+# DISASM: 10011d0: larl %r1, 0x10021f8
+# DISASM-NEXT: 10011d6: lg %r1, 0(%r1)
+# DISASM-NEXT: 10011dc: br %r1
+
+.text
+.type foo STT_GNU_IFUNC
+.globl foo
+foo:
+ br %r14
+
+.type bar STT_GNU_IFUNC
+.globl bar
+bar:
+ br %r14
+
+.globl _start
+_start:
+ brasl %r14, foo@plt
+ brasl %r14, bar@plt
+ larl %r2, __rela_iplt_start
+ larl %r2, __rela_iplt_end
diff --git a/lld/test/ELF/systemz-init-padding.s b/lld/test/ELF/systemz-init-padding.s
new file mode 100644
index 0000000..c56b98d
--- /dev/null
+++ b/lld/test/ELF/systemz-init-padding.s
@@ -0,0 +1,27 @@
+# REQUIRES: systemz
+# RUN: llvm-mc -filetype=obj -triple=s390x-unknown-linux %p/Inputs/systemz-init.s -o systemz-init.o
+# RUN: llvm-mc -filetype=obj -triple=s390x-unknown-linux %s -o %t.o
+# RUN: ld.lld -dynamic-linker /lib/ld64.so.1 %t.o systemz-init.o -o %t
+# RUN: llvm-objdump -d --no-show-raw-insn -j .init %t | FileCheck %s
+
+# glibc < 2.39 used to align .init and .fini code at a 4-byte boundary.
+# When that happens, the linker must not pad the code with invalid
+# instructions, e.g. null bytes.
+ .section .init,"ax",@progbits
+ brasl %r14, startup
+
+# CHECK: <.init>:
+# CHECK-NEXT: brasl %r14,
+# CHECK-NEXT: bcr 0, %r7
+# CHECK-NEXT: lg %r4, 272(%r15)
+
+ .text
+ .globl startup
+ .p2align 4
+startup:
+ br %r14
+
+ .globl main
+ .p2align 4
+main:
+ br %r14
diff --git a/lld/test/ELF/systemz-pie.s b/lld/test/ELF/systemz-pie.s
new file mode 100644
index 0000000..bb971a8
--- /dev/null
+++ b/lld/test/ELF/systemz-pie.s
@@ -0,0 +1,38 @@
+# REQUIRES: systemz
+# RUN: llvm-mc -filetype=obj -triple=s390x-unknown-linux %s -o %t1.o
+
+## Check -pie.
+# RUN: ld.lld -pie %t1.o -o %t
+# RUN: llvm-readelf --file-headers --program-headers --dynamic %t | FileCheck %s
+
+# CHECK: ELF Header:
+# CHECK-NEXT: Magic: 7f 45 4c 46 02 02 01 00 00 00 00 00 00 00 00 00
+# CHECK-NEXT: Class: ELF64
+# CHECK-NEXT: Data: 2's complement, big endian
+# CHECK-NEXT: Version: 1 (current)
+# CHECK-NEXT: OS/ABI: UNIX - System V
+# CHECK-NEXT: ABI Version: 0
+# CHECK-NEXT: Type: DYN (Shared object file)
+# CHECK-NEXT: Machine: IBM S/390
+# CHECK-NEXT: Version: 0x1
+
+# CHECK: Program Headers:
+# CHECK-NEXT: Type Offset VirtAddr PhysAddr FileSiz MemSiz Flg Align
+# CHECK-NEXT: PHDR 0x000040 0x0000000000000040 0x0000000000000040 0x000188 0x000188 R 0x8
+# CHECK-NEXT: LOAD 0x000000 0x0000000000000000 0x0000000000000000 0x00020d 0x00020d R 0x1000
+# CHECK-NEXT: LOAD 0x000210 0x0000000000002210 0x0000000000002210 0x000090 0x000df0 RW 0x1000
+# CHECK-NEXT: DYNAMIC 0x000210 0x0000000000002210 0x0000000000002210 0x000090 0x000090 RW 0x8
+# CHECK-NEXT: GNU_RELRO 0x000210 0x0000000000002210 0x0000000000002210 0x000090 0x000df0 R 0x1
+# CHECK-NEXT: GNU_STACK 0x000000 0x0000000000000000 0x0000000000000000 0x000000 0x000000 RW 0x0
+
+# CHECK: Dynamic section at offset 0x210 contains 9 entries:
+# CHECK-NEXT: Tag Type Name/Value
+# CHECK-NEXT: 0x000000006ffffffb (FLAGS_1) PIE
+
+## Check -nopie
+# RUN: ld.lld -no-pie %t1.o -o %t2
+# RUN: llvm-readelf --file-headers %t2 | FileCheck %s --check-prefix=NOPIE
+# NOPIE-NOT: Type: DYN
+
+.globl _start
+_start:
diff --git a/lld/test/ELF/systemz-plt.s b/lld/test/ELF/systemz-plt.s
new file mode 100644
index 0000000..4669f01
--- /dev/null
+++ b/lld/test/ELF/systemz-plt.s
@@ -0,0 +1,83 @@
+# REQUIRES: systemz
+# RUN: echo '.globl bar, weak; .type bar,@function; .type weak,@function; bar: weak:' > %t1.s
+
+# RUN: llvm-mc -filetype=obj -triple=s390x-unknown-linux %t1.s -o %t1.o
+# RUN: ld.lld -shared %t1.o -soname=t1.so -o %t1.so
+# RUN: llvm-mc -filetype=obj -triple=s390x-unknown-linux %s -o %t.o
+# RUN: ld.lld %t.o %t1.so -z separate-code -o %t
+# RUN: llvm-readelf -S -s -r -x .got.plt %t | FileCheck %s
+# RUN: llvm-objdump -d %t | FileCheck --check-prefixes=DIS %s
+
+# CHECK: Section Headers:
+# CHECK: .plt PROGBITS 0000000001001020 001020 000060 00 AX 0 0 16
+# CHECK: .got PROGBITS 00000000010020d0 0020d0 000018 00 WA 0 0 8
+# CHECK: .got.plt PROGBITS 00000000010030e8 0020e8 000010 00 WA 0 0 8
+
+# CHECK: Relocation section '.rela.plt' at offset {{.*}} contains 2 entries:
+# CHECK: 00000000010030e8 000000010000000b R_390_JMP_SLOT 0000000000000000 bar + 0
+# CHECK: 00000000010030f0 000000020000000b R_390_JMP_SLOT 0000000000000000 weak + 0
+
+## A canonical PLT has a non-zero st_value. bar and weak are called but their
+## addresses are not taken, so a canonical PLT is not necessary.
+# CHECK: Symbol table '.dynsym' contains 3 entries:
+# CHECK-NEXT: Num: Value Size Type Bind Vis Ndx Name
+# CHECK-NEXT: 0: 0000000000000000 0 NOTYPE LOCAL DEFAULT UND
+# CHECK-NEXT: 1: 0000000000000000 0 FUNC GLOBAL DEFAULT UND bar
+# CHECK-NEXT: 2: 0000000000000000 0 FUNC WEAK DEFAULT UND weak
+
+## The .got.plt slots relocated by .rela.plt point to .plt
+## This is required by glibc.
+# CHECK: Hex dump of section '.got.plt':
+# CHECK-NEXT: 0x010030e8 00000000 0100104e 00000000 0100106e
+
+# DIS: Disassembly of section .text:
+
+# DIS: 0000000001001000 <_start>:
+# DIS-NEXT: brasl %r14, 0x1001012
+# DIS-NEXT: brasl %r14, 0x1001040
+# DIS-NEXT: brasl %r14, 0x1001060
+
+# DIS: 0000000001001012 <foo>:
+# DIS-NEXT: br %r14
+
+# DIS: Disassembly of section .plt:
+
+# DIS: 0000000001001020 <.plt>:
+# DIS-NEXT: 1001020: e3 10 f0 38 00 24 stg %r1, 56(%r15)
+# DIS-NEXT: 1001026: c0 10 00 00 08 55 larl %r1, 0x10020d0
+# DIS-NEXT: 100102c: d2 07 f0 30 10 08 mvc 48(8,%r15), 8(%r1)
+# DIS-NEXT: 1001032: e3 10 10 10 00 04 lg %r1, 16(%r1)
+# DIS-NEXT: 1001038: 07 f1 br %r1
+# DIS-NEXT: 100103a: 07 00 bcr 0, %r0
+# DIS-NEXT: 100103c: 07 00 bcr 0, %r0
+# DIS-NEXT: 100103e: 07 00 bcr 0, %r0
+# DIS-NEXT: 1001040: c0 10 00 00 10 54 larl %r1, 0x10030e8
+# DIS-NEXT: 1001046: e3 10 10 00 00 04 lg %r1, 0(%r1)
+# DIS-NEXT: 100104c: 07 f1 br %r1
+# DIS-NEXT: 100104e: 0d 10 basr %r1, 0
+# DIS-NEXT: 1001050: e3 10 10 0c 00 14 lgf %r1, 12(%r1)
+# DIS-NEXT: 1001056: c0 f4 ff ff ff e5 jg 0x1001020
+# DIS-NEXT: 100105c: 00 00 <unknown>
+# DIS-NEXT: 100105e: 00 00 <unknown>
+# DIS-NEXT: 1001060: c0 10 00 00 10 48 larl %r1, 0x10030f0
+# DIS-NEXT: 1001066: e3 10 10 00 00 04 lg %r1, 0(%r1)
+# DIS-NEXT: 100106c: 07 f1 br %r1
+# DIS-NEXT: 100106e: 0d 10 basr %r1, 0
+# DIS-NEXT: 1001070: e3 10 10 0c 00 14 lgf %r1, 12(%r1)
+# DIS-NEXT: 1001076: c0 f4 ff ff ff d5 jg 0x1001020
+# DIS-NEXT: 100107c: 00 00 <unknown>
+# DIS-NEXT: 100107e: 00 18 <unknown>
+
+.global _start, foo, bar
+.weak weak
+
+_start:
+ ## Use @plt to avoid generating direct references that would force
+ ## allocation of a canonical PLT entry.
+ brasl %r14, foo@plt
+ brasl %r14, bar@plt
+ brasl %r14, weak@plt
+
+## foo is local and non-preemptable, no PLT is generated.
+foo:
+ br %r14
diff --git a/lld/test/ELF/systemz-reloc-abs.s b/lld/test/ELF/systemz-reloc-abs.s
new file mode 100644
index 0000000..b5ad94d
--- /dev/null
+++ b/lld/test/ELF/systemz-reloc-abs.s
@@ -0,0 +1,32 @@
+# REQUIRES: systemz
+# RUN: llvm-mc -filetype=obj -triple=s390x %s -o %t.o
+# RUN: llvm-mc -filetype=obj -triple=s390x %S/Inputs/abs255.s -o %t255.o
+# RUN: llvm-mc -filetype=obj -triple=s390x %S/Inputs/abs256.s -o %t256.o
+# RUN: llvm-mc -filetype=obj -triple=s390x %S/Inputs/abs257.s -o %t257.o
+
+# RUN: ld.lld %t.o %t256.o -o %t
+# RUN: llvm-readelf -x .data %t | FileCheck %s
+# CHECK: 0x{{[0-9a-f]+}} ff80ffff 8000ffff ffff8000 0000ffff
+# CHECK-NEXT: ffffffff ffff8000 00000000 0000
+
+# RUN: not ld.lld %t.o %t255.o -o /dev/null 2>&1 | FileCheck --check-prefix=OVERFLOW1 %s
+# OVERFLOW1: relocation R_390_8 out of range: -129 is not in [-128, 255]
+# OVERFLOW1: relocation R_390_16 out of range: -32769 is not in [-32768, 65535]
+# OVERFLOW1: relocation R_390_32 out of range: -2147483649 is not in [-2147483648, 4294967295]
+
+# RUN: not ld.lld %t.o %t257.o -o /dev/null 2>&1 | FileCheck --check-prefix=OVERFLOW2 %s
+# OVERFLOW2: relocation R_390_8 out of range: 256 is not in [-128, 255]
+# OVERFLOW2: relocation R_390_16 out of range: 65536 is not in [-32768, 65535]
+# OVERFLOW2: relocation R_390_32 out of range: 4294967296 is not in [-2147483648, 4294967295]
+
+.globl _start
+_start:
+.data
+.byte foo - 1
+.byte foo - 384
+.word foo + 0xfeff
+.word foo - 0x8100
+.long foo + 0xfffffeff
+.long foo - 0x80000100
+.quad foo + 0xfffffffffffffeff
+.quad foo - 0x8000000000000100
diff --git a/lld/test/ELF/systemz-reloc-disp12.s b/lld/test/ELF/systemz-reloc-disp12.s
new file mode 100644
index 0000000..3d32707
--- /dev/null
+++ b/lld/test/ELF/systemz-reloc-disp12.s
@@ -0,0 +1,21 @@
+# REQUIRES: systemz
+# RUN: llvm-mc -filetype=obj -triple=s390x -defsym DISP=291 %s -o %t1.o
+# RUN: llvm-mc -filetype=obj -triple=s390x -defsym DISP=4095 %s -o %t2.o
+# RUN: llvm-mc -filetype=obj -triple=s390x -defsym DISP=4096 %s -o %t3.o
+
+# RUN: ld.lld --section-start=.text=0x0 %t1.o -o %t1out
+# RUN: ld.lld --section-start=.text=0x0 %t2.o -o %t2out
+# RUN: not ld.lld --section-start=.text=0x0 %t3.o -o /dev/null 2>&1 | FileCheck %s --check-prefix RANGE
+
+# RANGE: relocation R_390_12 out of range: 4096 is not in [0, 4095]
+
+# RUN: llvm-readelf --hex-dump=.text %t1out | FileCheck %s -DINSN=58678123 --check-prefix DUMP
+# RUN: llvm-readelf --hex-dump=.text %t2out | FileCheck %s -DINSN=58678fff --check-prefix DUMP
+
+# DUMP: 0x00000000 [[INSN]]
+
+.text
+.globl _start
+_start:
+ .reloc .+2, R_390_12, DISP
+ l %r6, 0(%r7,%r8)
diff --git a/lld/test/ELF/systemz-reloc-disp20.s b/lld/test/ELF/systemz-reloc-disp20.s
new file mode 100644
index 0000000..88cd657
--- /dev/null
+++ b/lld/test/ELF/systemz-reloc-disp20.s
@@ -0,0 +1,21 @@
+# REQUIRES: systemz
+# RUN: llvm-mc -filetype=obj -triple=s390x -defsym DISP=74565 %s -o %t1.o
+# RUN: llvm-mc -filetype=obj -triple=s390x -defsym DISP=524287 %s -o %t2.o
+# RUN: llvm-mc -filetype=obj -triple=s390x -defsym DISP=524288 %s -o %t3.o
+
+# RUN: ld.lld --section-start=.text=0x0 %t1.o -o %t1out
+# RUN: ld.lld --section-start=.text=0x0 %t2.o -o %t2out
+# RUN: not ld.lld --section-start=.text=0x0 %t3.o -o /dev/null 2>&1 | FileCheck %s --check-prefix RANGE
+
+# RANGE: relocation R_390_20 out of range: 524288 is not in [-524288, 524287]
+
+# RUN: llvm-readelf --hex-dump=.text %t1out | FileCheck %s -DINSN="e3678345 1204" --check-prefix DUMP
+# RUN: llvm-readelf --hex-dump=.text %t2out | FileCheck %s -DINSN="e3678fff 7f04" --check-prefix DUMP
+
+# DUMP: 0x00000000 [[INSN]]
+
+.text
+.globl _start
+_start:
+ .reloc .+2, R_390_20, DISP
+ lg %r6, 0(%r7,%r8)
diff --git a/lld/test/ELF/systemz-reloc-got.s b/lld/test/ELF/systemz-reloc-got.s
new file mode 100644
index 0000000..4b9ac16
--- /dev/null
+++ b/lld/test/ELF/systemz-reloc-got.s
@@ -0,0 +1,92 @@
+# REQUIRES: systemz
+# RUN: llvm-mc -filetype=obj -triple=s390x-unknown-linux %s -o %t.o
+# RUN: ld.lld -z norelro -shared %t.o -soname=t.so -o %t.so
+## Note: Without norelro the distance between .got and .got.plt causes
+## R_390_GOTPLT12 relocations to always overflow.
+
+# RUN: llvm-readelf -S -x .data %t.so | FileCheck %s
+# RUN: llvm-objdump -d --no-show-raw-insn %t.so | FileCheck %s --check-prefix=DISASM
+
+# CHECK: Section Headers:
+# CHECK: .got PROGBITS 0000000000002458
+# CHECK: .got.plt PROGBITS 0000000000002480
+
+## Note: _GLOBAL_OFFSET_TABLE is at .got
+## GOT (foo) is at .got + 24 == 0x2470
+## GOT (bar) is at .got + 32 == 0x2478
+## GOTPLT (foo) is at .got.plt + 0 == .got + 40 == 0x2480
+## GOTPLT (bar) is at .got.plt + 8 == .got + 48 == 0x2488
+
+# DISASM: larl %r12, 0x2458
+# DISASM-NEXT: larl %r1, 0x2470
+# DISASM-NEXT: larl %r1, 0x2478
+# DISASM-NEXT: larl %r1, 0x2480
+# DISASM-NEXT: larl %r1, 0x2488
+
+# DISASM-NEXT: l %r1, 24(%r12)
+# DISASM-NEXT: l %r1, 32(%r12)
+# DISASM-NEXT: l %r1, 40(%r12)
+# DISASM-NEXT: l %r1, 48(%r12)
+# DISASM-NEXT: lg %r1, 24(%r12)
+# DISASM-NEXT: lg %r1, 32(%r12)
+# DISASM-NEXT: lg %r1, 40(%r12)
+# DISASM-NEXT: lg %r1, 48(%r12)
+
+# CHECK: Hex dump of section '.data':
+# CHECK-NEXT: 00180020 00280030 00000018 00000020
+# CHECK-NEXT: 00000028 00000030 00000000 00000018
+# CHECK-NEXT: 00000000 00000020 00000000 00000028
+# CHECK-NEXT: 00000000 00000030
+
+.text
+larl %r12, _GLOBAL_OFFSET_TABLE_
+.reloc .+2, R_390_GOTENT, foo+2
+larl %r1, 0
+.reloc .+2, R_390_GOTENT, bar+2
+larl %r1, 0
+.reloc .+2, R_390_GOTPLTENT, foo+2
+larl %r1, 0
+.reloc .+2, R_390_GOTPLTENT, bar+2
+larl %r1, 0
+.reloc .+2, R_390_GOT12, foo
+l %r1, 0(%r12)
+.reloc .+2, R_390_GOT12, bar
+l %r1, 0(%r12)
+.reloc .+2, R_390_GOTPLT12, foo
+l %r1, 0(%r12)
+.reloc .+2, R_390_GOTPLT12, bar
+l %r1, 0(%r12)
+.reloc .+2, R_390_GOT20, foo
+lg %r1, 0(%r12)
+.reloc .+2, R_390_GOT20, bar
+lg %r1, 0(%r12)
+.reloc .+2, R_390_GOTPLT20, foo
+lg %r1, 0(%r12)
+.reloc .+2, R_390_GOTPLT20, bar
+lg %r1, 0(%r12)
+
+.data
+.reloc ., R_390_GOT16, foo
+.space 2
+.reloc ., R_390_GOT16, bar
+.space 2
+.reloc ., R_390_GOTPLT16, foo
+.space 2
+.reloc ., R_390_GOTPLT16, bar
+.space 2
+.reloc ., R_390_GOT32, foo
+.space 4
+.reloc ., R_390_GOT32, bar
+.space 4
+.reloc ., R_390_GOTPLT32, foo
+.space 4
+.reloc ., R_390_GOTPLT32, bar
+.space 4
+.reloc ., R_390_GOT64, foo
+.space 8
+.reloc ., R_390_GOT64, bar
+.space 8
+.reloc ., R_390_GOTPLT64, foo
+.space 8
+.reloc ., R_390_GOTPLT64, bar
+.space 8
diff --git a/lld/test/ELF/systemz-reloc-gotrel.s b/lld/test/ELF/systemz-reloc-gotrel.s
new file mode 100644
index 0000000..46669ec
--- /dev/null
+++ b/lld/test/ELF/systemz-reloc-gotrel.s
@@ -0,0 +1,36 @@
+# REQUIRES: systemz
+# RUN: llvm-mc -filetype=obj -triple=s390x-unknown-linux %s -o %t.o
+# RUN: ld.lld -shared %t.o -soname=t.so -o %t.so
+
+# RUN: llvm-readelf -S -s -x .data %t.so | FileCheck %s
+
+# CHECK: Section Headers:
+# CHECK: .plt PROGBITS 0000000000001290
+# CHECK: .got PROGBITS 0000000000002390
+
+# CHECK: Symbol table '.symtab'
+# CHECK: 0000000000001288 {{.*}} bar
+
+## Note: foo is the first (and only) PLT entry, which resides at .plt + 32
+## PLTOFF (foo) is (.plt + 32) - .got == 0x12b0 - 0x2390 == 0xffffef20
+## GOTOFF (bar) is bar - .got == 0x1288 - 0x2390 == 0xffffeef8
+# CHECK: Hex dump of section '.data':
+# CHECK-NEXT: eef8ef20 ffffeef8 ffffef20 ffffffff
+# CHECK-NEXT: ffffeef8 ffffffff ffffef20
+
+bar:
+ br %r14
+
+.data
+.reloc ., R_390_GOTOFF16, bar
+.space 2
+.reloc ., R_390_PLTOFF16, foo
+.space 2
+.reloc ., R_390_GOTOFF, bar
+.space 4
+.reloc ., R_390_PLTOFF32, foo
+.space 4
+.reloc ., R_390_GOTOFF64, bar
+.space 8
+.reloc ., R_390_PLTOFF64, foo
+.space 8
diff --git a/lld/test/ELF/systemz-reloc-pc16.s b/lld/test/ELF/systemz-reloc-pc16.s
new file mode 100644
index 0000000..e1dad5a
--- /dev/null
+++ b/lld/test/ELF/systemz-reloc-pc16.s
@@ -0,0 +1,39 @@
+# REQUIRES: systemz
+# RUN: rm -rf %t && split-file %s %t
+
+## Check recompile with -fPIC error message
+# RUN: llvm-mc -filetype=obj -triple=s390x-unknown-linux %t/shared.s -o %t/shared.o
+# RUN: not ld.lld -shared %t/shared.o -o /dev/null 2>&1 | FileCheck %s
+
+# CHECK: error: relocation R_390_PC16 cannot be used against symbol '_shared'; recompile with -fPIC
+# CHECK: >>> defined in {{.*}}
+# CHECK: >>> referenced by {{.*}}:(.data+0x1)
+
+## Check patching of negative addends
+
+# RUN: llvm-mc -filetype=obj -triple=s390x -defsym ADDEND=1 %t/addend.s -o %t/1.o
+# RUN: llvm-mc -filetype=obj -triple=s390x -defsym ADDEND=32768 %t/addend.s -o %t/2.o
+# RUN: llvm-mc -filetype=obj -triple=s390x -defsym ADDEND=32769 %t/addend.s -o %t/3.o
+
+# RUN: ld.lld --section-start=.text=0x0 %t/1.o -o %t/1out
+# RUN: ld.lld --section-start=.text=0x0 %t/2.o -o %t/2out
+# RUN: not ld.lld --section-start=.text=0x0 %t/3.o -o /dev/null 2>&1 | FileCheck %s -DFILE=%t/3.o --check-prefix RANGE
+
+# RANGE: error: [[FILE]]:(.text+0x0): relocation R_390_PC16 out of range
+
+# RUN: llvm-readelf --hex-dump=.text %t/1out | FileCheck %s -DADDEND=ffff --check-prefix DUMP
+# RUN: llvm-readelf --hex-dump=.text %t/2out | FileCheck %s -DADDEND=8000 --check-prefix DUMP
+
+# DUMP: 0x00000000 [[ADDEND]]
+
+#--- shared.s
+.data
+ .byte 0xe8
+ .word _shared - .
+
+#--- addend.s
+.text
+.globl _start
+_start:
+ .reloc ., R_390_PC16, .text-ADDEND
+ .space 2
diff --git a/lld/test/ELF/systemz-reloc-pc32.s b/lld/test/ELF/systemz-reloc-pc32.s
new file mode 100644
index 0000000..0cb9322
--- /dev/null
+++ b/lld/test/ELF/systemz-reloc-pc32.s
@@ -0,0 +1,39 @@
+# REQUIRES: systemz
+# RUN: rm -rf %t && split-file %s %t
+
+## Check recompile with -fPIC error message
+# RUN: llvm-mc -filetype=obj -triple=s390x-unknown-linux %t/shared.s -o %t/shared.o
+# RUN: not ld.lld -shared %t/shared.o -o /dev/null 2>&1 | FileCheck %s
+
+# CHECK: error: relocation R_390_PC32 cannot be used against symbol '_shared'; recompile with -fPIC
+# CHECK: >>> defined in {{.*}}
+# CHECK: >>> referenced by {{.*}}:(.data+0x1)
+
+## Check patching of negative addends
+
+# RUN: llvm-mc -filetype=obj -triple=s390x -defsym ADDEND=1 %t/addend.s -o %t/1.o
+# RUN: llvm-mc -filetype=obj -triple=s390x -defsym ADDEND=2147483648 %t/addend.s -o %t/2.o
+# RUN: llvm-mc -filetype=obj -triple=s390x -defsym ADDEND=2147483649 %t/addend.s -o %t/3.o
+
+# RUN: ld.lld --section-start=.text=0x0 %t/1.o -o %t/1out
+# RUN: ld.lld --section-start=.text=0x0 %t/2.o -o %t/2out
+# RUN: not ld.lld --section-start=.text=0x0 %t/3.o -o /dev/null 2>&1 | FileCheck %s -DFILE=%t/3.o --check-prefix RANGE
+
+# RANGE: error: [[FILE]]:(.text+0x0): relocation R_390_PC32 out of range
+
+# RUN: llvm-readelf --hex-dump=.text %t/1out | FileCheck %s -DADDEND=ffffffff --check-prefix DUMP
+# RUN: llvm-readelf --hex-dump=.text %t/2out | FileCheck %s -DADDEND=80000000 --check-prefix DUMP
+
+# DUMP: 0x00000000 [[ADDEND]]
+
+#--- shared.s
+.data
+ .byte 0xe8
+ .long _shared - .
+
+#--- addend.s
+.text
+.globl _start
+_start:
+ .reloc ., R_390_PC32, .text-ADDEND
+ .space 4
diff --git a/lld/test/ELF/systemz-reloc-pcdbl.s b/lld/test/ELF/systemz-reloc-pcdbl.s
new file mode 100644
index 0000000..faee756
--- /dev/null
+++ b/lld/test/ELF/systemz-reloc-pcdbl.s
@@ -0,0 +1,68 @@
+# REQUIRES: systemz
+
+# RUN: llvm-mc --filetype=obj --triple=s390x-unknown-linux -mcpu=z13 %s -o %t.o
+
+# RUN: ld.lld %t.o --defsym foo16=pc16dbl+4 --defsym bar16=pc16dbl --defsym foo32=pc32dbl+6 --defsym bar32=pc32dbl --defsym foo12=pc12dbl+6 --defsym bar12=pc12dbl --defsym foo24=pc24dbl+6 --defsym bar24=pc24dbl -o %t
+# RUN: llvm-objdump --no-show-raw-insn --mcpu=z13 -d %t | FileCheck %s --check-prefix=CHECK
+# CHECK: 0000000001001120 <pc16dbl>:
+# CHECK: je 0x1001124
+# CHECK: jne 0x1001120
+# CHECK: 0000000001001128 <pc32dbl>:
+# CHECK: jge 0x100112e
+# CHECK: jgne 0x1001128
+# CHECK: 0000000001001134 <pc12dbl>:
+# CHECK: bprp 5, 0x100113a, 0x1001134
+# CHECK: bprp 6, 0x1001134, 0x100113a
+# CHECK: 0000000001001140 <pc24dbl>:
+# CHECK: bprp 5, 0x1001140, 0x1001146
+# CHECK: bprp 6, 0x1001146, 0x1001140
+
+# RUN: ld.lld %t.o --defsym foo16=pc16dbl+0xfffe --defsym bar16=pc16dbl+4-0x10000 --defsym foo32=pc32dbl+0xfffffffe --defsym bar32=pc32dbl+6-0x100000000 --defsym foo12=pc12dbl+0xffe --defsym bar12=pc12dbl+6-0x1000 --defsym foo24=pc24dbl+0xfffffe --defsym bar24=pc24dbl+6-0x1000000 -o %t.limits
+# RUN: llvm-objdump --no-show-raw-insn --mcpu=z13 -d %t.limits | FileCheck %s --check-prefix=LIMITS
+# LIMITS: je 0x101111e
+# LIMITS-NEXT: jne 0xff1124
+# LIMITS: jge 0x101001126
+# LIMITS-NEXT: jgne 0xffffffff0100112e
+# LIMITS: bprp 5, 0x1002132, 0x1001134
+# LIMITS-NEXT: bprp 6, 0x100013a, 0x100113a
+# LIMITS: bprp 5, 0x1001140, 0x200113e
+# LIMITS-NEXT: bprp 6, 0x1001146, 0x1146
+
+# RUN: not ld.lld %t.o --defsym foo16=pc16dbl+0x10000 --defsym bar16=pc16dbl+4-0x10002 --defsym foo32=pc32dbl+0x100000000 --defsym bar32=pc32dbl+6-0x100000002 --defsym foo12=pc12dbl+0x1000 --defsym bar12=pc12dbl+6-0x1002 --defsym foo24=pc24dbl+0x1000000 --defsym bar24=pc24dbl+6-0x1000002 -o /dev/null 2>&1 | FileCheck -DFILE=%t.o --check-prefix=ERROR-RANGE %s
+# ERROR-RANGE: error: [[FILE]]:(.text+0x2): relocation R_390_PC16DBL out of range: 65536 is not in [-65536, 65535]; references 'foo16'
+# ERROR-RANGE: error: [[FILE]]:(.text+0x6): relocation R_390_PC16DBL out of range: -65538 is not in [-65536, 65535]; references 'bar16'
+# ERROR-RANGE: error: [[FILE]]:(.text+0xa): relocation R_390_PC32DBL out of range: 4294967296 is not in [-4294967296, 4294967295]; references 'foo32'
+# ERROR-RANGE: error: [[FILE]]:(.text+0x10): relocation R_390_PC32DBL out of range: -4294967298 is not in [-4294967296, 4294967295]; references 'bar32'
+# ERROR-RANGE: error: [[FILE]]:(.text+0x15): relocation R_390_PC12DBL out of range: 4096 is not in [-4096, 4095]; references 'foo12'
+# ERROR-RANGE: error: [[FILE]]:(.text+0x1b): relocation R_390_PC12DBL out of range: -4098 is not in [-4096, 4095]; references 'bar12'
+# ERROR-RANGE: error: [[FILE]]:(.text+0x23): relocation R_390_PC24DBL out of range: 16777216 is not in [-16777216, 16777215]; references 'foo24'
+# ERROR-RANGE: error: [[FILE]]:(.text+0x29): relocation R_390_PC24DBL out of range: -16777218 is not in [-16777216, 16777215]; references 'bar24'
+
+# RUN: not ld.lld %t.o --defsym foo16=pc16dbl+1 --defsym bar16=pc16dbl-1 --defsym foo32=pc32dbl+1 --defsym bar32=pc32dbl-1 --defsym foo12=pc12dbl+1 --defsym bar12=pc12dbl-1 --defsym foo24=pc24dbl+1 --defsym bar24=pc24dbl-1 -o /dev/null 2>&1 | FileCheck -DFILE=%t.o --check-prefix=ERROR-ALIGN %s
+# ERROR-ALIGN: error: [[FILE]]:(.text+0x2): improper alignment for relocation R_390_PC16DBL: 0x1 is not aligned to 2 bytes
+# ERROR-ALIGN-NEXT: error: [[FILE]]:(.text+0x6): improper alignment for relocation R_390_PC16DBL: 0xFFFFFFFFFFFFFFFB is not aligned to 2 bytes
+# ERROR-ALIGN-NEXT: error: [[FILE]]:(.text+0xa): improper alignment for relocation R_390_PC32DBL: 0x1 is not aligned to 2 bytes
+# ERROR-ALIGN-NEXT: error: [[FILE]]:(.text+0x10): improper alignment for relocation R_390_PC32DBL: 0xFFFFFFFFFFFFFFF9 is not aligned to 2 bytes
+# ERROR-ALIGN-NEXT: error: [[FILE]]:(.text+0x15): improper alignment for relocation R_390_PC12DBL: 0x1 is not aligned to 2 bytes
+# ERROR-ALIGN-NEXT: error: [[FILE]]:(.text+0x1b): improper alignment for relocation R_390_PC12DBL: 0xFFFFFFFFFFFFFFF9 is not aligned to 2 bytes
+# ERROR-ALIGN-NEXT: error: [[FILE]]:(.text+0x23): improper alignment for relocation R_390_PC24DBL: 0x1 is not aligned to 2 bytes
+# ERROR-ALIGN-NEXT: error: [[FILE]]:(.text+0x29): improper alignment for relocation R_390_PC24DBL: 0xFFFFFFFFFFFFFFF9 is not aligned to 2 bytes
+
+.global _start
+.global pc16dbl
+.global pc32dbl
+.global pc12dbl
+.global pc24dbl
+_start:
+pc16dbl:
+ je foo16
+ jne bar16
+pc32dbl:
+ jge foo32
+ jgne bar32
+pc12dbl:
+ bprp 5,foo12,0
+ bprp 6,bar12,0
+pc24dbl:
+ bprp 5,0,foo24
+ bprp 6,0,bar24
diff --git a/lld/test/ELF/systemz-tls-gd.s b/lld/test/ELF/systemz-tls-gd.s
new file mode 100644
index 0000000..3976f55
--- /dev/null
+++ b/lld/test/ELF/systemz-tls-gd.s
@@ -0,0 +1,142 @@
+# REQUIRES: systemz
+# RUN: llvm-mc -filetype=obj -triple=s390x-unknown-linux %s -o %t.o
+# RUN: echo '.tbss; .globl b, c; b: .zero 4; c:' | llvm-mc -filetype=obj -triple=s390x-unknown-linux - -o %t1.o
+# RUN: ld.lld -shared -soname=t1.so %t1.o -o %t1.so
+
+# RUN: ld.lld -shared %t.o %t1.o -o %t.so
+# RUN: llvm-readelf -r %t.so | FileCheck --check-prefix=GD-REL %s
+# RUN: llvm-objdump -d --no-show-raw-insn %t.so | FileCheck --check-prefix=GD %s
+# RUN: llvm-objdump --section .data.rel.ro --full-contents %t.so | FileCheck --check-prefix=GD-DATA %s
+
+# RUN: ld.lld %t.o %t1.o -o %t.le
+# RUN: llvm-readelf -r %t.le | FileCheck --check-prefix=NOREL %s
+# RUN: llvm-objdump -d --no-show-raw-insn %t.le | FileCheck --check-prefix=LE %s
+# RUN: llvm-objdump --section .data.rel.ro --full-contents %t.le | FileCheck --check-prefix=LE-DATA %s
+
+# RUN: ld.lld %t.o %t1.so -o %t.ie
+# RUN: llvm-readelf -r %t.ie | FileCheck --check-prefix=IE-REL %s
+# RUN: llvm-objdump -d --no-show-raw-insn %t.ie | FileCheck --check-prefix=IE %s
+# RUN: llvm-objdump --section .data.rel.ro --full-contents %t.ie | FileCheck --check-prefix=IE-DATA %s
+
+# GD-REL: Relocation section '.rela.dyn' at offset {{.*}} contains 6 entries:
+# GD-REL: 0000000000002570 0000000200000036 R_390_TLS_DTPMOD 0000000000000008 a + 0
+# GD-REL-NEXT: 0000000000002578 0000000200000037 R_390_TLS_DTPOFF 0000000000000008 a + 0
+# GD-REL-NEXT: 0000000000002580 0000000300000036 R_390_TLS_DTPMOD 000000000000000c b + 0
+# GD-REL-NEXT: 0000000000002588 0000000300000037 R_390_TLS_DTPOFF 000000000000000c b + 0
+# GD-REL-NEXT: 0000000000002590 0000000400000036 R_390_TLS_DTPMOD 0000000000000010 c + 0
+# GD-REL-NEXT: 0000000000002598 0000000400000037 R_390_TLS_DTPOFF 0000000000000010 c + 0
+
+## _GLOBAL_OFFSET_TABLE is at 0x2558
+# GD: larl %r12, 0x2558
+
+## GOT offset of the TLS module ID / offset pair for a is at 0x2460
+# GD-NEXT: lgrl %r2, 0x2460
+# GD-NEXT: brasl %r14, 0x1440
+# GD-NEXT: lgf %r2, 0(%r2,%r7)
+
+## GOT offset of the TLS module ID / offset pair for b is at 0x2468
+# GD-NEXT: lgrl %r2, 0x2468
+# GD-NEXT: brasl %r14, 0x1440
+# GD-NEXT: lgf %r2, 0(%r2,%r7)
+
+## GOT offset of the TLS module ID / offset pair for c is at 0x2470
+# GD-NEXT: lgrl %r2, 0x2470
+# GD-NEXT: brasl %r14, 0x1440
+# GD-NEXT: lgf %r2, 0(%r2,%r7)
+
+## Constant pool holding GOT offsets of TLS module ID / offset pairs:
+# a: 0x2570 / 0x18
+# b: 0x2580 / 0x28
+# c: 0x2590 / 0x38
+# GD-DATA: 2460 00000000 00000018 00000000 00000028
+# GD-DATA-NEXT: 2470 00000000 00000038
+
+# NOREL: no relocations
+
+## _GLOBAL_OFFSET_TABLE is at 0x1002230
+# LE: larl %r12, 0x1002230
+
+## TP offset of a is at 0x1002218
+# LE-NEXT: lgrl %r2, 0x1002218
+# LE-NEXT: brcl 0,
+# LE-NEXT: lgf %r2, 0(%r2,%r7)
+
+## TP offset of b is at 0x1002220
+# LE-NEXT: lgrl %r2, 0x1002220
+# LE-NEXT: brcl 0,
+# LE-NEXT: lgf %r2, 0(%r2,%r7)
+
+## TP offset of c is at 0x1002228
+# LE-NEXT: lgrl %r2, 0x1002228
+# LE-NEXT: brcl 0,
+# LE-NEXT: lgf %r2, 0(%r2,%r7)
+
+## TP offsets
+# a: -8
+# b: -4
+# c: 0
+# LE-DATA: 1002218 ffffffff fffffff8 ffffffff fffffffc
+# LE-DATA-NEXT: 1002228 00000000 00000000
+
+
+# IE-REL: Relocation section '.rela.dyn' at offset {{.*}} contains 2 entries:
+# IE-REL: 0000000001002430 0000000200000038 R_390_TLS_TPOFF 0000000000000000 b + 0
+# IE-REL-NEXT: 0000000001002438 0000000300000038 R_390_TLS_TPOFF 0000000000000000 c + 0
+
+## _GLOBAL_OFFSET_TABLE is at 0x1002418
+# IE: larl %r12, 0x1002418
+
+## TP offset of a is at 0x1002340
+# IE-NEXT: lgrl %r2, 0x1002340
+# IE-NEXT: brcl 0,
+# IE-NEXT: lgf %r2, 0(%r2,%r7)
+
+## GOT offset of the TP offset for b is at 0x1002348
+# IE-NEXT: lgrl %r2, 0x1002348
+# IE-NEXT: lg %r2, 0(%r2,%r12)
+# IE-NEXT: lgf %r2, 0(%r2,%r7)
+
+## GOT offset of the TP offset for c is at 0x1002350
+# IE-NEXT: lgrl %r2, 0x1002350
+# IE-NEXT: lg %r2, 0(%r2,%r12)
+# IE-NEXT: lgf %r2, 0(%r2,%r7)
+
+## TP offsets (a) / GOT offset of TP offsets (b, c)
+# a: -4
+# b: 0x1002430 / 0x18
+# c: 0x1002438 / 0x20
+# IE-DATA: 1002340 ffffffff fffffffc 00000000 00000018
+# IE-DATA-NEXT: 1002350 00000000 00000020
+
+
+ear %r7,%a0
+sllg %r7,%r1,32
+ear %r7,%a1
+larl %r12,_GLOBAL_OFFSET_TABLE_
+
+lgrl %r2,.LC0
+brasl %r14,__tls_get_offset@PLT:tls_gdcall:a
+lgf %r2,0(%r2,%r7)
+
+lgrl %r2,.LC1
+brasl %r14,__tls_get_offset@PLT:tls_gdcall:b
+lgf %r2,0(%r2,%r7)
+
+lgrl %r2,.LC2
+brasl %r14,__tls_get_offset@PLT:tls_gdcall:c
+lgf %r2,0(%r2,%r7)
+
+ .section .data.rel.ro,"aw"
+ .align 8
+.LC0:
+ .quad a@TLSGD
+.LC1:
+ .quad b@TLSGD
+.LC2:
+ .quad c@TLSGD
+
+ .section .tbss
+ .globl a
+ .zero 8
+a:
+ .zero 4
diff --git a/lld/test/ELF/systemz-tls-ie.s b/lld/test/ELF/systemz-tls-ie.s
new file mode 100644
index 0000000..85e2f24
--- /dev/null
+++ b/lld/test/ELF/systemz-tls-ie.s
@@ -0,0 +1,121 @@
+# REQUIRES: systemz
+# RUN: llvm-mc -filetype=obj -triple=s390x-unknown-linux %s -o %t.o
+
+# RUN: ld.lld -shared %t.o -o %t.so
+# RUN: llvm-readelf -r %t.so | FileCheck --check-prefix=IE-REL %s
+# RUN: llvm-objdump -d --no-show-raw-insn %t.so | FileCheck --check-prefix=IE %s
+# RUN: llvm-objdump --section .data --full-contents %t.so | FileCheck --check-prefix=IE-DATA %s
+
+# RUN: ld.lld %t.o -o %t
+# RUN: llvm-readelf -r %t | FileCheck --check-prefix=NOREL %s
+# RUN: llvm-objdump -d --no-show-raw-insn %t | FileCheck --check-prefix=LE %s
+# RUN: llvm-objdump --section .data --full-contents %t | FileCheck --check-prefix=LE-DATA %s
+# RUN: llvm-objdump --section .got --full-contents %t | FileCheck --check-prefix=LE-GOT %s
+
+## With -pie we still have the R_390_RELATIVE for the data element, but all GOT
+## entries should be fully resolved without any remaining R_390_TLS_TPOFF.
+# RUN: ld.lld -pie %t.o -o %t.pie
+# RUN: llvm-readelf -r %t.pie | FileCheck --check-prefix=PIE-REL %s
+# RUN: llvm-objdump -d --no-show-raw-insn %t.pie | FileCheck --check-prefix=PIE %s
+# RUN: llvm-objdump --section .data --full-contents %t.pie | FileCheck --check-prefix=PIE-DATA %s
+# RUN: llvm-objdump --section .got --full-contents %t.pie | FileCheck --check-prefix=PIE-GOT %s
+
+# IE-REL: Relocation section '.rela.dyn' at offset {{.*}} contains 4 entries:
+# IE-REL: 0000000000003478 000000000000000c R_390_RELATIVE 2460
+# IE-REL: 0000000000002460 0000000100000038 R_390_TLS_TPOFF 0000000000000008 a + 0
+# IE-REL: 0000000000002468 0000000200000038 R_390_TLS_TPOFF 000000000000000c b + 0
+# IE-REL: 0000000000002470 0000000300000038 R_390_TLS_TPOFF 0000000000000010 c + 0
+
+## TP offset for a is at 0x2460
+# IE: lgrl %r1, 0x2460
+# IE-NEXT: lgf %r1, 0(%r1,%r7)
+
+## TP offset for b is at 0x2468
+# IE-NEXT: lgrl %r1, 0x2468
+# IE-NEXT: lgf %r1, 0(%r1,%r7)
+
+## TP offset for c is at 0x2470
+# IE-NEXT: lgrl %r1, 0x2470
+# IE-NEXT: lgf %r1, 0(%r1,%r7)
+
+## Data element: TP offset for a is at 0x2460 (relocated via R_390_RELATIVE above)
+# IE-DATA: 3478 00000000 00000000
+
+# NOREL: no relocations
+
+## TP offset for a is at 0x1002250
+# LE: lgrl %r1, 0x1002250
+# LE-NEXT: lgf %r1, 0(%r1,%r7)
+
+## TP offset for b is at 0x1002258
+# LE-NEXT: lgrl %r1, 0x1002258
+# LE-NEXT: lgf %r1, 0(%r1,%r7)
+
+## TP offset for c is at 0x1002260
+# LE-NEXT: lgrl %r1, 0x1002260
+# LE-NEXT: lgf %r1, 0(%r1,%r7)
+
+## Data element: TP offset for a is at 0x1002250
+# LE-DATA: 00000000 01002250
+
+## TP offsets in GOT:
+# a: -8
+# b: -4
+# c: 0
+# LE-GOT: 1002238 00000000 00000000 00000000 00000000
+# LE-GOT: 1002248 00000000 00000000 ffffffff fffffff8
+# LE-GOT: 1002258 ffffffff fffffffc 00000000 00000000
+
+# PIE-REL: Relocation section '.rela.dyn' at offset {{.*}} contains 1 entries:
+# PIE-REL: 00000000000033d0 000000000000000c R_390_RELATIVE 23b8
+
+## TP offset for a is at 0x23b8
+# PIE: lgrl %r1, 0x23b8
+# PIE-NEXT: lgf %r1, 0(%r1,%r7)
+
+## TP offset for b is at 0x23c0
+# PIE-NEXT: lgrl %r1, 0x23c0
+# PIE-NEXT: lgf %r1, 0(%r1,%r7)
+
+## TP offset for c is at 0x23c8
+# PIE-NEXT: lgrl %r1, 0x23c8
+# PIE-NEXT: lgf %r1, 0(%r1,%r7)
+
+## Data element: TP offset for a is at 0x23b8 (relocated via R_390_RELATIVE above)
+# PIE-DATA: 33d0 00000000 00000000
+
+## TP offsets in GOT:
+# a: -8
+# b: -4
+# c: 0
+# PIE-GOT: 23a0 00000000 000022d0 00000000 00000000
+# PIE-GOT: 23b0 00000000 00000000 ffffffff fffffff8
+# PIE-GOT: 23c0 ffffffff fffffffc 00000000 00000000
+
+ear %r7,%a0
+sllg %r7,%r1,32
+ear %r7,%a1
+
+lgrl %r1, a@indntpoff
+lgf %r1,0(%r1,%r7)
+
+lgrl %r1, b@indntpoff
+lgf %r1,0(%r1,%r7)
+
+lgrl %r1, c@indntpoff
+lgf %r1,0(%r1,%r7)
+
+ .data
+ .reloc .,R_390_TLS_IE64,a
+ .space 8
+
+ .section .tbss
+ .globl a
+ .globl b
+ .globl c
+ .zero 8
+a:
+ .zero 4
+b:
+ .zero 4
+c:
diff --git a/lld/test/ELF/systemz-tls-ld.s b/lld/test/ELF/systemz-tls-ld.s
new file mode 100644
index 0000000..2cb36d7
--- /dev/null
+++ b/lld/test/ELF/systemz-tls-ld.s
@@ -0,0 +1,114 @@
+# REQUIRES: systemz
+# RUN: llvm-mc -filetype=obj -triple=s390x-unknown-linux %s -o %t.o
+
+# RUN: ld.lld -shared %t.o -o %t.so
+# RUN: llvm-readelf -r %t.so | FileCheck --check-prefix=LD-REL %s
+# RUN: llvm-objdump -d --no-show-raw-insn %t.so | FileCheck --check-prefix=LD %s
+# RUN: llvm-objdump --section .data.rel.ro --full-contents %t.so | FileCheck --check-prefix=LD-DATA %s
+
+# RUN: ld.lld %t.o -o %t
+# RUN: llvm-readelf -r %t | FileCheck --check-prefix=NOREL %s
+# RUN: llvm-objdump -d --no-show-raw-insn %t | FileCheck --check-prefix=LE %s
+# RUN: llvm-objdump --section .data.rel.ro --full-contents %t | FileCheck --check-prefix=LE-DATA %s
+
+# LD-REL: Relocation section '.rela.dyn' at offset {{.*}} contains 1 entries:
+# LD-REL: 00000000000024f8 0000000000000036 R_390_TLS_DTPMOD 0
+
+## _GLOBAL_OFFSET_TABLE is at 0x24e0
+# LD: larl %r12, 0x24e0
+
+## GOT offset of the LDM TLS module ID is at 0x23e0
+# LD-NEXT: lgrl %r2, 0x23e0
+# LD-NEXT: brasl %r14, 0x13c0
+# LD-NEXT: la %r2, 0(%r2,%r7)
+
+## DTP offset for a is at 0x23e8
+# LD-NEXT: lgrl %r1, 0x23e8
+# LD-NEXT: lgf %r1, 0(%r1,%r2)
+
+## DTP offset for b is at 0x23f0
+# LD-NEXT: lgrl %r1, 0x23f0
+# LD-NEXT: lgf %r1, 0(%r1,%r2)
+
+## DTP offset for c is at 0x23f8
+# LD-NEXT: lgrl %r1, 0x23f8
+# LD-NEXT: lgf %r1, 0(%r1,%r2)
+
+## Constant pool holding GOT offsets of TLS module ID and DTP offsets:
+# TLS module ID: 0x24f8 / 0x18
+# a: 8
+# b: 12
+# c: 16
+# LD-DATA: 23e0 00000000 00000018 00000000 00000008
+# LD-DATA: 23f0 00000000 0000000c 00000000 00000010
+
+# NOREL: no relocations
+
+## _GLOBAL_OFFSET_TABLE is at 0x1002230
+# LE: larl %r12, 0x1002230
+
+## GOT offset of the LDM TLS module ID is at 0x1002210
+# LE-NEXT: lgrl %r2, 0x1002210
+# LE-NEXT: brcl 0,
+# LE-NEXT: la %r2, 0(%r2,%r7)
+
+## TP offset for a is at 0x1002218
+# LE-NEXT: lgrl %r1, 0x1002218
+# LE-NEXT: lgf %r1, 0(%r1,%r2)
+
+## TP offset for b is at 0x1002220
+# LE-NEXT: lgrl %r1, 0x1002220
+# LE-NEXT: lgf %r1, 0(%r1,%r2)
+
+## TP offset for c is at 0x1002228
+# LE-NEXT: lgrl %r1, 0x1002228
+# LE-NEXT: lgf %r1, 0(%r1,%r2)
+
+## zeroed LDM / TP offsets:
+# LDM TLS: 0
+# a: -8
+# b: -4
+# c: 0
+# LE-DATA: 1002210 00000000 00000000 ffffffff fffffff8
+# LE-DATA: 1002220 ffffffff fffffffc 00000000 00000000
+
+
+ear %r7,%a0
+sllg %r7,%r1,32
+ear %r7,%a1
+larl %r12,_GLOBAL_OFFSET_TABLE_
+
+lgrl %r2,.LC0
+brasl %r14,__tls_get_offset@PLT:tls_ldcall:a
+la %r2,0(%r2,%r7)
+
+lgrl %r1, .LC1
+lgf %r1,0(%r1,%r2)
+
+lgrl %r1, .LC2
+lgf %r1,0(%r1,%r2)
+
+lgrl %r1, .LC3
+lgf %r1,0(%r1,%r2)
+
+ .section .data.rel.ro,"aw"
+ .align 8
+.LC0:
+ .quad a@TLSLDM
+.LC1:
+ .quad a@DTPOFF
+.LC2:
+ .quad b@DTPOFF
+.LC3:
+ .quad c@DTPOFF
+
+ .section .tbss
+ .globl a
+ .globl b
+ .globl c
+ .zero 8
+a:
+ .zero 4
+b:
+ .zero 4
+c:
diff --git a/lld/test/ELF/systemz-tls-le.s b/lld/test/ELF/systemz-tls-le.s
new file mode 100644
index 0000000..9e41fc7
--- /dev/null
+++ b/lld/test/ELF/systemz-tls-le.s
@@ -0,0 +1,61 @@
+# REQUIRES: systemz
+# RUN: llvm-mc -filetype=obj -triple=s390x-unknown-linux %s -o %t.o
+
+# RUN: ld.lld %t.o -o %t
+# RUN: llvm-readelf -r %t | FileCheck --check-prefix=NOREL %s
+# RUN: llvm-objdump -d --no-show-raw-insn %t | FileCheck --check-prefix=LE %s
+# RUN: llvm-objdump --section .data.rel.ro --full-contents %t | FileCheck --check-prefix=LE-DATA %s
+
+# NOREL: no relocations
+
+## TP offset for a is at 0x1002200
+# LE: lgrl %r1, 0x1002200
+# LE-NEXT: lgf %r1, 0(%r1,%r7)
+
+## TP offset for b is at 0x1002208
+# LE-NEXT: lgrl %r1, 0x1002208
+# LE-NEXT: lgf %r1, 0(%r1,%r7)
+
+## TP offset for c is at 0x1002210
+# LE-NEXT: lgrl %r1, 0x1002210
+# LE-NEXT: lgf %r1, 0(%r1,%r7)
+
+## TP offsets:
+# a: -8
+# b: -4
+# c: 0
+# LE-DATA: 1002200 ffffffff fffffff8 ffffffff fffffffc
+# LE-DATA: 1002210 00000000 00000000
+
+ear %r7,%a0
+sllg %r7,%r1,32
+ear %r7,%a1
+
+lgrl %r1, .LC0
+lgf %r1,0(%r1,%r7)
+
+lgrl %r1, .LC1
+lgf %r1,0(%r1,%r7)
+
+lgrl %r1, .LC2
+lgf %r1,0(%r1,%r7)
+
+ .section .data.rel.ro,"aw"
+ .align 8
+.LC0:
+ .quad a@ntpoff
+.LC1:
+ .quad b@ntpoff
+.LC2:
+ .quad c@ntpoff
+
+ .section .tbss
+ .globl a
+ .globl b
+ .globl c
+ .zero 8
+a:
+ .zero 4
+b:
+ .zero 4
+c:
diff --git a/lld/test/MinGW/driver.test b/lld/test/MinGW/driver.test
index 46b3b6d..a4e9e5e1b 100644
--- a/lld/test/MinGW/driver.test
+++ b/lld/test/MinGW/driver.test
@@ -415,6 +415,13 @@ LTO_OPTS: -mllvm:-mcpu=x86-64 -opt:lldlto=2 -dwodir:foo -lto-cs-profile-generate
RUN: ld.lld -### foo.o -m i386pep --lto-O2 --lto-CGO1 --lto-cs-profile-generate --lto-cs-profile-file=foo 2>&1 | FileCheck -check-prefix=LTO_OPTS2 %s
LTO_OPTS2:-opt:lldlto=2 -opt:lldltocgo=1 -lto-cs-profile-generate -lto-cs-profile-file:foo
+RUN: ld.lld -### foo.o -m i386pe -plugin-opt=emit-asm 2>&1 | FileCheck -check-prefix=LTO_EMIT_ASM %s
+RUN: ld.lld -### foo.o -m i386pe --lto-emit-asm 2>&1 | FileCheck -check-prefix=LTO_EMIT_ASM %s
+LTO_EMIT_ASM: -lldemit:asm
+
+RUN: ld.lld -### foo.o -m i386pe -plugin-opt=emit-llvm 2>&1 | FileCheck -check-prefix=LTO_EMIT_LLVM %s
+LTO_EMIT_LLVM: -lldemit:llvm
+
Test GCC specific LTO options that GCC passes unconditionally, that we ignore.
RUN: ld.lld -### foo.o -m i386pep -plugin /usr/lib/gcc/x86_64-w64-mingw32/10-posix/liblto_plugin.so -plugin-opt=/usr/lib/gcc/x86_64-w64-mingw32/10-posix/lto-wrapper -plugin-opt=-fresolution=/tmp/ccM9d4fP.res -plugin-opt=-pass-through=-lmingw32 2> /dev/null
diff --git a/lld/test/lit.cfg.py b/lld/test/lit.cfg.py
index b3e07f1..d309c2a 100644
--- a/lld/test/lit.cfg.py
+++ b/lld/test/lit.cfg.py
@@ -83,6 +83,7 @@ llvm_config.feature_config(
"PowerPC": "ppc",
"RISCV": "riscv",
"Sparc": "sparc",
+ "SystemZ": "systemz",
"WebAssembly": "wasm",
"X86": "x86",
},
diff --git a/lldb/bindings/python/CMakeLists.txt b/lldb/bindings/python/CMakeLists.txt
index c941f76..73b1239 100644
--- a/lldb/bindings/python/CMakeLists.txt
+++ b/lldb/bindings/python/CMakeLists.txt
@@ -96,13 +96,15 @@ function(finish_swig_python swig_target lldb_python_bindings_dir lldb_python_tar
${lldb_python_target_dir}
"utils"
FILES "${LLDB_SOURCE_DIR}/examples/python/in_call_stack.py"
- "${LLDB_SOURCE_DIR}/examples/python/symbolication.py")
+ "${LLDB_SOURCE_DIR}/examples/python/symbolication.py"
+ )
create_python_package(
${swig_target}
${lldb_python_target_dir}
"plugins"
FILES
+ "${LLDB_SOURCE_DIR}/examples/python/templates/parsed_cmd.py"
"${LLDB_SOURCE_DIR}/examples/python/templates/scripted_process.py"
"${LLDB_SOURCE_DIR}/examples/python/templates/scripted_platform.py"
"${LLDB_SOURCE_DIR}/examples/python/templates/operating_system.py")
diff --git a/lldb/bindings/python/python-wrapper.swig b/lldb/bindings/python/python-wrapper.swig
index 17bc7b1f..1370afc 100644
--- a/lldb/bindings/python/python-wrapper.swig
+++ b/lldb/bindings/python/python-wrapper.swig
@@ -287,12 +287,12 @@ PythonObject lldb_private::python::SWIGBridge::LLDBSwigPythonCreateScriptedThrea
}
bool lldb_private::python::SWIGBridge::LLDBSWIGPythonCallThreadPlan(
- void *implementor, const char *method_name, lldb_private::Event *event,
+ void *implementer, const char *method_name, lldb_private::Event *event,
bool &got_error) {
got_error = false;
PyErr_Cleaner py_err_cleaner(false);
- PythonObject self(PyRefType::Borrowed, static_cast<PyObject *>(implementor));
+ PythonObject self(PyRefType::Borrowed, static_cast<PyObject *>(implementer));
auto pfunc = self.ResolveName<PythonCallable>(method_name);
if (!pfunc.IsAllocated())
@@ -325,12 +325,12 @@ bool lldb_private::python::SWIGBridge::LLDBSWIGPythonCallThreadPlan(
}
bool lldb_private::python::SWIGBridge::LLDBSWIGPythonCallThreadPlan(
- void *implementor, const char *method_name, lldb_private::Stream *stream,
+ void *implementer, const char *method_name, lldb_private::Stream *stream,
bool &got_error) {
got_error = false;
PyErr_Cleaner py_err_cleaner(false);
- PythonObject self(PyRefType::Borrowed, static_cast<PyObject *>(implementor));
+ PythonObject self(PyRefType::Borrowed, static_cast<PyObject *>(implementer));
auto pfunc = self.ResolveName<PythonCallable>(method_name);
if (!pfunc.IsAllocated())
@@ -831,6 +831,29 @@ bool lldb_private::python::SWIGBridge::LLDBSwigPythonCallCommandObject(
return true;
}
+#include "lldb/Interpreter/CommandReturnObject.h"
+
+bool lldb_private::python::SWIGBridge::LLDBSwigPythonCallParsedCommandObject(
+ PyObject *implementor, lldb::DebuggerSP debugger, lldb_private::StructuredDataImpl &args_impl,
+ lldb_private::CommandReturnObject &cmd_retobj,
+ lldb::ExecutionContextRefSP exe_ctx_ref_sp) {
+
+ PyErr_Cleaner py_err_cleaner(true);
+
+ PythonObject self(PyRefType::Borrowed, implementor);
+ auto pfunc = self.ResolveName<PythonCallable>("__call__");
+
+ if (!pfunc.IsAllocated()) {
+ cmd_retobj.AppendError("Could not find '__call__' method in implementation class");
+ return false;
+ }
+
+ pfunc(SWIGBridge::ToSWIGWrapper(std::move(debugger)), SWIGBridge::ToSWIGWrapper(args_impl),
+ SWIGBridge::ToSWIGWrapper(exe_ctx_ref_sp), SWIGBridge::ToSWIGWrapper(cmd_retobj).obj());
+
+ return true;
+}
+
PythonObject lldb_private::python::SWIGBridge::LLDBSWIGPythonCreateOSPlugin(
const char *python_class_name, const char *session_dictionary_name,
const lldb::ProcessSP &process_sp) {
diff --git a/lldb/examples/python/cmdtemplate.py b/lldb/examples/python/cmdtemplate.py
index a3c30f3..49a0836 100644
--- a/lldb/examples/python/cmdtemplate.py
+++ b/lldb/examples/python/cmdtemplate.py
@@ -11,115 +11,84 @@
import inspect
import lldb
-import optparse
-import shlex
import sys
+from lldb.plugins.parsed_cmd import ParsedCommand
-
-class FrameStatCommand:
+class FrameStatCommand(ParsedCommand):
program = "framestats"
@classmethod
def register_lldb_command(cls, debugger, module_name):
- parser = cls.create_options()
- cls.__doc__ = parser.format_help()
- # Add any commands contained in this module to LLDB
- command = "command script add -o -c %s.%s %s" % (
- module_name,
- cls.__name__,
- cls.program,
- )
- debugger.HandleCommand(command)
+ ParsedCommandBase.do_register_cmd(cls, debugger, module_name)
print(
'The "{0}" command has been installed, type "help {0}" or "{0} '
'--help" for detailed help.'.format(cls.program)
)
- @classmethod
- def create_options(cls):
- usage = "usage: %prog [options]"
- description = (
- "This command is meant to be an example of how to make "
- "an LLDB command that does something useful, follows "
- "best practices, and exploits the SB API. "
- "Specifically, this command computes the aggregate "
- "and average size of the variables in the current "
- "frame and allows you to tweak exactly which variables "
- "are to be accounted in the computation."
- )
+ def setup_command_definition(self):
- # Pass add_help_option = False, since this keeps the command in line
- # with lldb commands, and we wire up "help command" to work by
- # providing the long & short help methods below.
- parser = optparse.OptionParser(
- description=description,
- prog=cls.program,
- usage=usage,
- add_help_option=False,
+ self.ov_parser.add_option(
+ "i",
+ "in-scope",
+ help = "in_scope_only = True",
+ value_type = lldb.eArgTypeBoolean,
+ dest = "bool_arg",
+ default = True,
)
- parser.add_option(
- "-i",
- "--in-scope",
- action="store_true",
- dest="inscope",
- help="in_scope_only = True",
+ self.ov_parser.add_option(
+ "i",
+ "in-scope",
+ help = "in_scope_only = True",
+ value_type = lldb.eArgTypeBoolean,
+ dest = "inscope",
default=True,
)
-
- parser.add_option(
- "-a",
- "--arguments",
- action="store_true",
- dest="arguments",
- help="arguments = True",
- default=True,
+
+ self.ov_parser.add_option(
+ "a",
+ "arguments",
+ help = "arguments = True",
+ value_type = lldb.eArgTypeBoolean,
+ dest = "arguments",
+ default = True,
)
- parser.add_option(
- "-l",
- "--locals",
- action="store_true",
- dest="locals",
- help="locals = True",
- default=True,
+ self.ov_parser.add_option(
+ "l",
+ "locals",
+ help = "locals = True",
+ value_type = lldb.eArgTypeBoolean,
+ dest = "locals",
+ default = True,
)
- parser.add_option(
- "-s",
- "--statics",
- action="store_true",
- dest="statics",
- help="statics = True",
- default=True,
+ self.ov_parser.add_option(
+ "s",
+ "statics",
+ help = "statics = True",
+ value_type = lldb.eArgTypeBoolean,
+ dest = "statics",
+ default = True,
)
- return parser
-
def get_short_help(self):
return "Example command for use in debugging"
def get_long_help(self):
- return self.help_string
+ return ("This command is meant to be an example of how to make "
+ "an LLDB command that does something useful, follows "
+ "best practices, and exploits the SB API. "
+ "Specifically, this command computes the aggregate "
+ "and average size of the variables in the current "
+ "frame and allows you to tweak exactly which variables "
+ "are to be accounted in the computation.")
+
def __init__(self, debugger, unused):
- self.parser = self.create_options()
- self.help_string = self.parser.format_help()
+ super().__init__(debugger, unused)
def __call__(self, debugger, command, exe_ctx, result):
- # Use the Shell Lexer to properly parse up command options just like a
- # shell would
- command_args = shlex.split(command)
-
- try:
- (options, args) = self.parser.parse_args(command_args)
- except:
- # if you don't handle exceptions, passing an incorrect argument to
- # the OptionParser will cause LLDB to exit (courtesy of OptParse
- # dealing with argument errors by throwing SystemExit)
- result.SetError("option parsing failed")
- return
-
# Always get program state from the lldb.SBExecutionContext passed
# in as exe_ctx
frame = exe_ctx.GetFrame()
@@ -128,7 +97,7 @@ class FrameStatCommand:
return
variables_list = frame.GetVariables(
- options.arguments, options.locals, options.statics, options.inscope
+ self.ov_parser.arguments, self.ov_parser.locals, self.ov_parser.statics, self.ov_parser.inscope
)
variables_count = variables_list.GetSize()
if variables_count == 0:
diff --git a/lldb/examples/python/templates/parsed_cmd.py b/lldb/examples/python/templates/parsed_cmd.py
new file mode 100644
index 0000000..06124ad
--- /dev/null
+++ b/lldb/examples/python/templates/parsed_cmd.py
@@ -0,0 +1,357 @@
+"""
+This module implements a couple of utility classes to make writing
+lldb parsed commands more Pythonic.
+The way to use it is to make a class for your command that inherits from ParsedCommandBase.
+That will make an LLDBOptionValueParser which you will use for your
+option definition, and to fetch option values for the current invocation
+of your command. Access to the OV parser is through:
+
+ParsedCommandBase.get_parser()
+
+Next, implement setup_command_definition() in your new command class, and call:
+
+ self.get_parser().add_option()
+
+to add all your options. The order doesn't matter for options, lldb will sort them
+alphabetically for you when it prints help.
+
+Similarly you can define the arguments with:
+
+ self.get_parser().add_argument()
+
+At present, lldb doesn't do as much work as it should verifying arguments, it
+only checks that commands that take no arguments don't get passed arguments.
+
+Then implement the execute function for your command as:
+
+ def __call__(self, debugger, args_list, exe_ctx, result):
+
+The arguments will be a list of strings.
+
+You can access the option values using the 'dest' string you passed in when defining the option.
+And if you need to know whether a given option was set by the user or not, you can
+use the was_set API.
+
+So for instance, if you have an option whose "dest" is "my_option", then:
+
+ self.get_parser().my_option
+
+will fetch the value, and:
+
+ self.get_parser().was_set("my_option")
+
+will return True if the user set this option, and False if it was left at its default
+value.
+
+There are example commands in the lldb testsuite at:
+
+llvm-project/lldb/test/API/commands/command/script/add/test_commands.py
+"""
+import inspect
+import lldb
+import sys
+from abc import abstractmethod
+
+# Some methods to translate common value types. Should return a
+# tuple of the value and an error value (True => error) if the
+# type can't be converted. These are called internally when the
+# command line is parsed into the 'dest' properties, you should
+# not need to call them directly.
+# FIXME: Need a way to push the conversion error string back to lldb.
+def to_bool(in_value):
+ error = True
+ value = False
+ if type(in_value) != str or len(in_value) == 0:
+ return (value, error)
+
+ low_in = in_value.lower()
+ if low_in in ["y", "yes", "t", "true", "1"]:
+ value = True
+ error = False
+
+ if not value and low_in in ["n", "no", "f", "false", "0"]:
+ value = False
+ error = False
+
+ return (value, error)
+
+def to_int(in_value):
+ #FIXME: Not doing errors yet...
+ return (int(in_value), False)
+
+def to_unsigned(in_value):
+ # FIXME: find an unsigned converter...
+ # And handle errors.
+ return (int(in_value), False)
+
+translators = {
+ lldb.eArgTypeBoolean : to_bool,
+ lldb.eArgTypeBreakpointID : to_unsigned,
+ lldb.eArgTypeByteSize : to_unsigned,
+ lldb.eArgTypeCount : to_unsigned,
+ lldb.eArgTypeFrameIndex : to_unsigned,
+ lldb.eArgTypeIndex : to_unsigned,
+ lldb.eArgTypeLineNum : to_unsigned,
+ lldb.eArgTypeNumLines : to_unsigned,
+ lldb.eArgTypeNumberPerLine : to_unsigned,
+ lldb.eArgTypeOffset : to_int,
+ lldb.eArgTypeThreadIndex : to_unsigned,
+ lldb.eArgTypeUnsignedInteger : to_unsigned,
+ lldb.eArgTypeWatchpointID : to_unsigned,
+ lldb.eArgTypeColumnNum : to_unsigned,
+ lldb.eArgTypeRecognizerID : to_unsigned,
+ lldb.eArgTypeTargetID : to_unsigned,
+ lldb.eArgTypeStopHookID : to_unsigned
+}
+
+def translate_value(value_type, value):
+ try:
+ return translators[value_type](value)
+ except KeyError:
+ # If we don't have a translator, return the string value.
+ return (value, False)
+
+class LLDBOptionValueParser:
+ """
+ This class holds the option definitions for the command, and when
+ the command is run, you can ask the parser for the current values. """
+
+ def __init__(self):
+ # This is a dictionary of dictionaries. The key is the long option
+ # name, and the value is the rest of the definition.
+ self.options_dict = {}
+ self.args_array = []
+
+
+ # FIXME: would this be better done on the C++ side?
+ # The common completers are missing some useful ones.
+ # For instance there really should be a common Type completer
+ # And an "lldb command name" completer.
+ completion_table = {
+ lldb.eArgTypeAddressOrExpression : lldb.eVariablePathCompletion,
+ lldb.eArgTypeArchitecture : lldb.eArchitectureCompletion,
+ lldb.eArgTypeBreakpointID : lldb.eBreakpointCompletion,
+ lldb.eArgTypeBreakpointIDRange : lldb.eBreakpointCompletion,
+ lldb.eArgTypeBreakpointName : lldb.eBreakpointNameCompletion,
+ lldb.eArgTypeClassName : lldb.eSymbolCompletion,
+ lldb.eArgTypeDirectoryName : lldb.eDiskDirectoryCompletion,
+ lldb.eArgTypeExpression : lldb.eVariablePathCompletion,
+ lldb.eArgTypeExpressionPath : lldb.eVariablePathCompletion,
+ lldb.eArgTypeFilename : lldb.eDiskFileCompletion,
+ lldb.eArgTypeFrameIndex : lldb.eFrameIndexCompletion,
+ lldb.eArgTypeFunctionName : lldb.eSymbolCompletion,
+ lldb.eArgTypeFunctionOrSymbol : lldb.eSymbolCompletion,
+ lldb.eArgTypeLanguage : lldb.eTypeLanguageCompletion,
+ lldb.eArgTypePath : lldb.eDiskFileCompletion,
+ lldb.eArgTypePid : lldb.eProcessIDCompletion,
+ lldb.eArgTypeProcessName : lldb.eProcessNameCompletion,
+ lldb.eArgTypeRegisterName : lldb.eRegisterCompletion,
+ lldb.eArgTypeRunArgs : lldb.eDiskFileCompletion,
+ lldb.eArgTypeShlibName : lldb.eModuleCompletion,
+ lldb.eArgTypeSourceFile : lldb.eSourceFileCompletion,
+ lldb.eArgTypeSymbol : lldb.eSymbolCompletion,
+ lldb.eArgTypeThreadIndex : lldb.eThreadIndexCompletion,
+ lldb.eArgTypeVarName : lldb.eVariablePathCompletion,
+ lldb.eArgTypePlatform : lldb.ePlatformPluginCompletion,
+ lldb.eArgTypeWatchpointID : lldb.eWatchpointIDCompletion,
+ lldb.eArgTypeWatchpointIDRange : lldb.eWatchpointIDCompletion,
+ lldb.eArgTypeModuleUUID : lldb.eModuleUUIDCompletion,
+ lldb.eArgTypeStopHookID : lldb.eStopHookIDCompletion
+ }
+
+ @classmethod
+ def determine_completion(cls, arg_type):
+ return cls.completion_table.get(arg_type, lldb.eNoCompletion)
+
+ def add_argument_set(self, arguments):
+ self.args_array.append(arguments)
+
+ def get_option_element(self, long_name):
+ return self.options_dict.get(long_name, None)
+
+ def is_enum_opt(self, opt_name):
+ elem = self.get_option_element(opt_name)
+ if not elem:
+ return False
+ return "enum_values" in elem
+
+ def option_parsing_started(self):
+ """ This makes the ivars for all the "dest" values in the array and gives them
+ their default values. You should not have to call this by hand, though if
+ you have some option that needs to do some work when a new command invocation
+ starts, you can override this to handle your special option. """
+ for key, elem in self.options_dict.items():
+ elem['_value_set'] = False
+ try:
+ object.__setattr__(self, elem["dest"], elem["default"])
+ except AttributeError:
+ # It isn't an error not to have a "dest" variable name, you'll
+ # just have to manage this option's value on your own.
+ continue
+
+ def set_enum_value(self, enum_values, input):
+ """ This sets the value for an enum option, you should not have to call this
+ by hand. """
+ candidates = []
+ for candidate in enum_values:
+ # The enum_values are a two element list of value & help string.
+ value = candidate[0]
+ if value.startswith(input):
+ candidates.append(value)
+
+ if len(candidates) == 1:
+ return (candidates[0], False)
+ else:
+ return (input, True)
+
+ def set_option_value(self, exe_ctx, opt_name, opt_value):
+ """ This sets a single option value. This will handle most option
+ value types, but if you have an option that has some complex behavior,
+ you can override this to implement that behavior, and then pass the
+ rest of the options to the base class implementation. """
+ elem = self.get_option_element(opt_name)
+ if not elem:
+ return False
+
+ if "enum_values" in elem:
+ (value, error) = self.set_enum_value(elem["enum_values"], opt_value)
+ else:
+ (value, error) = translate_value(elem["value_type"], opt_value)
+
+ if error:
+ return False
+
+ object.__setattr__(self, elem["dest"], value)
+ elem["_value_set"] = True
+ return True
+
+ def was_set(self, opt_name):
+ """ Call this in the __call__ method of your command to determine
+ whether this option was set on the command line. It is sometimes
+ useful to know whether an option has the default value because the
+ user set it explicitly (was_set -> True) or not. """
+
+ elem = self.get_option_element(opt_name)
+ if not elem:
+ return False
+ try:
+ return elem["_value_set"]
+ except AttributeError:
+ return False
+
+ def add_option(self, short_option, long_option, help, default,
+ dest = None, required=False, groups = None,
+ value_type=lldb.eArgTypeNone, completion_type=None,
+ enum_values=None):
+ """
+ short_option: one character, must be unique, not required
+ long_option: no spaces, must be unique, required
+ help: a usage string for this option, will print in the command help
+ default: the initial value for this option (if it has a value)
+ dest: the name of the property that gives you access to the value for
+ this value. Defaults to the long option if not provided.
+ required: if true, this option must be provided or the command will error out
+ groups: Which "option groups" does this option belong to
+ value_type: one of the lldb.eArgType enum values. Some of the common arg
+ types also have default completers, which will be applied automatically.
+ completion_type: currently these are values form the lldb.CompletionType enum, I
+ haven't done custom completions yet.
+ enum_values: An array of duples: ["element_name", "element_help"]. If provided,
+ only one of the enum elements is allowed. The value will be the
+ element_name for the chosen enum element as a string.
+ """
+ if not dest:
+ dest = long_option
+
+ if not completion_type:
+ completion_type = self.determine_completion(value_type)
+
+ dict = {"short_option" : short_option,
+ "required" : required,
+ "help" : help,
+ "value_type" : value_type,
+ "completion_type" : completion_type,
+ "dest" : dest,
+ "default" : default}
+
+ if enum_values:
+ dict["enum_values"] = enum_values
+ if groups:
+ dict["groups"] = groups
+
+ self.options_dict[long_option] = dict
+
+ def make_argument_element(self, arg_type, repeat = "optional", groups = None):
+ element = {"arg_type" : arg_type, "repeat" : repeat}
+ if groups:
+ element["groups"] = groups
+ return element
+
+class ParsedCommand:
+ def __init__(self, debugger, unused):
+ self.debugger = debugger
+ self.ov_parser = LLDBOptionValueParser()
+ self.setup_command_definition()
+
+ def get_options_definition(self):
+ return self.get_parser().options_dict
+
+ def get_flags(self):
+ return 0
+
+ def get_args_definition(self):
+ return self.get_parser().args_array
+
+ # The base class will handle calling these methods
+ # when appropriate.
+
+ def option_parsing_started(self):
+ self.get_parser().option_parsing_started()
+
+ def set_option_value(self, exe_ctx, opt_name, opt_value):
+ return self.get_parser().set_option_value(exe_ctx, opt_name, opt_value)
+
+ def get_parser(self):
+ """Returns the option value parser for this command.
+ When defining the command, use the parser to add
+ argument and option definitions to the command.
+ When you are in the command callback, the parser
+ gives you access to the options passes to this
+ invocation"""
+
+ return self.ov_parser
+
+ # These are the two "pure virtual" methods:
+ @abstractmethod
+ def __call__(self, debugger, args_array, exe_ctx, result):
+ """This is the command callback. The option values are
+ provided by the 'dest' properties on the parser.
+
+ args_array: This is the list of arguments provided.
+ exe_ctx: Gives the SBExecutionContext on which the
+ command should operate.
+ result: Any results of the command should be
+ written into this SBCommandReturnObject.
+ """
+ raise NotImplementedError()
+
+ @abstractmethod
+ def setup_command_definition(self):
+ """This will be called when your command is added to
+ the command interpreter. Here is where you add your
+ options and argument definitions for the command."""
+ raise NotImplementedError()
+
+ @staticmethod
+ def do_register_cmd(cls, debugger, module_name):
+ """ Add any commands contained in this module to LLDB """
+ command = "command script add -o -p -c %s.%s %s" % (
+ module_name,
+ cls.__name__,
+ cls.program,
+ )
+ debugger.HandleCommand(command)
+ print(
+ 'The "{0}" command has been installed, type "help {0}"'
+ 'for detailed help.'.format(cls.program)
+ )
diff --git a/lldb/include/lldb/Interpreter/CommandObject.h b/lldb/include/lldb/Interpreter/CommandObject.h
index 7b427de..b99de56 100644
--- a/lldb/include/lldb/Interpreter/CommandObject.h
+++ b/lldb/include/lldb/Interpreter/CommandObject.h
@@ -224,7 +224,10 @@ public:
void GetFormattedCommandArguments(Stream &str,
uint32_t opt_set_mask = LLDB_OPT_SET_ALL);
- bool IsPairType(ArgumentRepetitionType arg_repeat_type);
+ static bool IsPairType(ArgumentRepetitionType arg_repeat_type);
+
+ static std::optional<ArgumentRepetitionType>
+ ArgRepetitionFromString(llvm::StringRef string);
bool ParseOptions(Args &args, CommandReturnObject &result);
diff --git a/lldb/include/lldb/Interpreter/ScriptInterpreter.h b/lldb/include/lldb/Interpreter/ScriptInterpreter.h
index b941f60..932eaa8 100644
--- a/lldb/include/lldb/Interpreter/ScriptInterpreter.h
+++ b/lldb/include/lldb/Interpreter/ScriptInterpreter.h
@@ -473,6 +473,14 @@ public:
return false;
}
+ virtual bool RunScriptBasedParsedCommand(
+ StructuredData::GenericSP impl_obj_sp, Args& args,
+ ScriptedCommandSynchronicity synchronicity,
+ lldb_private::CommandReturnObject &cmd_retobj, Status &error,
+ const lldb_private::ExecutionContext &exe_ctx) {
+ return false;
+ }
+
virtual bool RunScriptFormatKeyword(const char *impl_function,
Process *process, std::string &output,
Status &error) {
@@ -517,6 +525,27 @@ public:
dest.clear();
return false;
}
+
+ virtual StructuredData::ObjectSP
+ GetOptionsForCommandObject(StructuredData::GenericSP cmd_obj_sp) {
+ return {};
+ }
+
+ virtual StructuredData::ObjectSP
+ GetArgumentsForCommandObject(StructuredData::GenericSP cmd_obj_sp) {
+ return {};
+ }
+
+ virtual bool SetOptionValueForCommandObject(
+ StructuredData::GenericSP cmd_obj_sp, ExecutionContext *exe_ctx,
+ llvm::StringRef long_option, llvm::StringRef value) {
+ return false;
+ }
+
+ virtual void OptionParsingStartedForCommandObject(
+ StructuredData::GenericSP cmd_obj_sp) {
+ return;
+ }
virtual uint32_t
GetFlagsForCommandObject(StructuredData::GenericSP cmd_obj_sp) {
diff --git a/lldb/include/lldb/Target/StopInfo.h b/lldb/include/lldb/Target/StopInfo.h
index 305fc5d..d1848fc 100644
--- a/lldb/include/lldb/Target/StopInfo.h
+++ b/lldb/include/lldb/Target/StopInfo.h
@@ -79,6 +79,11 @@ public:
virtual bool IsValidForOperatingSystemThread(Thread &thread) { return true; }
+ /// A Continue operation can result in a false stop event
+ /// before any execution has happened. We need to detect this
+ /// and silently continue again one more time.
+ virtual bool WasContinueInterrupted(Thread &thread) { return false; }
+
// Sometimes the thread plan logic will know that it wants a given stop to
// stop or not, regardless of what the ordinary logic for that StopInfo would
// dictate. The main example of this is the ThreadPlanCallFunction, which
diff --git a/lldb/include/lldb/Target/Thread.h b/lldb/include/lldb/Target/Thread.h
index e423dd4..96ca95ad 100644
--- a/lldb/include/lldb/Target/Thread.h
+++ b/lldb/include/lldb/Target/Thread.h
@@ -11,6 +11,7 @@
#include <memory>
#include <mutex>
+#include <optional>
#include <string>
#include <vector>
@@ -390,6 +391,13 @@ public:
/// and having the thread call the SystemRuntime again.
virtual bool ThreadHasQueueInformation() const { return false; }
+ /// GetStackFrameCount can be expensive. Stacks can get very deep, and they
+ /// require memory reads for each frame. So only use GetStackFrameCount when
+ /// you need to know the depth of the stack. When iterating over frames, its
+ /// better to generate the frames one by one with GetFrameAtIndex, and when
+ /// that returns NULL, you are at the end of the stack. That way your loop
+ /// will only do the work it needs to, without forcing lldb to realize
+ /// StackFrames you weren't going to look at.
virtual uint32_t GetStackFrameCount() {
return GetStackFrameList()->GetNumFrames();
}
@@ -1219,6 +1227,16 @@ public:
lldb::ValueObjectSP GetSiginfoValue();
+ /// Request the pc value the thread had when previously stopped.
+ ///
+ /// When the thread performs execution, it copies the current RegisterContext
+ /// GetPC() value. This method returns that value, if it is available.
+ ///
+ /// \return
+ /// The PC value before execution was resumed. May not be available;
+ /// an empty std::optional is returned in that case.
+ std::optional<lldb::addr_t> GetPreviousFrameZeroPC();
+
protected:
friend class ThreadPlan;
friend class ThreadList;
@@ -1299,6 +1317,9 @@ protected:
///populated after a thread stops.
lldb::StackFrameListSP m_prev_frames_sp; ///< The previous stack frames from
///the last time this thread stopped.
+ std::optional<lldb::addr_t>
+ m_prev_framezero_pc; ///< Frame 0's PC the last
+ /// time this thread was stopped.
int m_resume_signal; ///< The signal that should be used when continuing this
///thread.
lldb::StateType m_resume_state; ///< This state is used to force a thread to
diff --git a/lldb/packages/Python/lldbsuite/test/concurrent_base.py b/lldb/packages/Python/lldbsuite/test/concurrent_base.py
index 39eb27f..46d7166 100644
--- a/lldb/packages/Python/lldbsuite/test/concurrent_base.py
+++ b/lldb/packages/Python/lldbsuite/test/concurrent_base.py
@@ -264,12 +264,40 @@ class ConcurrentEventsBase(TestBase):
"Expected main thread (finish) breakpoint to be hit once",
)
- num_threads = self.inferior_process.GetNumThreads()
+ # There should be a single active thread (the main one) which hit
+ # the breakpoint after joining. Depending on the pthread
+ # implementation we may have a worker thread finishing the pthread_join()
+ # after it has returned. Filter the threads to only count those
+ # with user functions on them from our test case file,
+ # lldb/test/API/functionalities/thread/concurrent_events/main.cpp
+ user_code_funcnames = [
+ "breakpoint_func",
+ "crash_func",
+ "do_action_args",
+ "dotest",
+ "main",
+ "register_signal_handler",
+ "signal_func",
+ "sigusr1_handler",
+ "start_threads",
+ "watchpoint_func",
+ ]
+ num_threads_with_usercode = 0
+ for t in self.inferior_process.threads:
+ thread_has_user_code = False
+ for f in t.frames:
+ for funcname in user_code_funcnames:
+ if funcname in f.GetDisplayFunctionName():
+ thread_has_user_code = True
+ break
+ if thread_has_user_code:
+ num_threads_with_usercode += 1
+
self.assertEqual(
1,
- num_threads,
+ num_threads_with_usercode,
"Expecting 1 thread but seeing %d. Details:%s"
- % (num_threads, "\n\t".join(self.describe_threads())),
+ % (num_threads_with_usercode, "\n\t".join(self.describe_threads())),
)
self.runCmd("continue")
diff --git a/lldb/packages/Python/lldbsuite/test/configuration.py b/lldb/packages/Python/lldbsuite/test/configuration.py
index 2a4b9b3..685f491 100644
--- a/lldb/packages/Python/lldbsuite/test/configuration.py
+++ b/lldb/packages/Python/lldbsuite/test/configuration.py
@@ -12,14 +12,14 @@ import os
# Third-party modules
-import unittest2
+import unittest
# LLDB Modules
import lldbsuite
# The test suite.
-suite = unittest2.TestSuite()
+suite = unittest.TestSuite()
# The list of categories we said we care about
categories_list = None
diff --git a/lldb/packages/Python/lldbsuite/test/decorators.py b/lldb/packages/Python/lldbsuite/test/decorators.py
index 0fb1469..86594c2 100644
--- a/lldb/packages/Python/lldbsuite/test/decorators.py
+++ b/lldb/packages/Python/lldbsuite/test/decorators.py
@@ -11,7 +11,7 @@ import tempfile
import subprocess
# Third-party modules
-import unittest2
+import unittest
# LLDB modules
import lldb
@@ -115,11 +115,11 @@ def _compiler_supports(
def expectedFailureIf(condition, bugnumber=None):
def expectedFailure_impl(func):
- if isinstance(func, type) and issubclass(func, unittest2.TestCase):
+ if isinstance(func, type) and issubclass(func, unittest.TestCase):
raise Exception("Decorator can only be used to decorate a test method")
if condition:
- return unittest2.expectedFailure(func)
+ return unittest.expectedFailure(func)
return func
if callable(bugnumber):
@@ -128,40 +128,13 @@ def expectedFailureIf(condition, bugnumber=None):
return expectedFailure_impl
-def expectedFailureIfFn(expected_fn, bugnumber=None):
- def expectedFailure_impl(func):
- if isinstance(func, type) and issubclass(func, unittest2.TestCase):
- raise Exception("Decorator can only be used to decorate a test method")
-
- @wraps(func)
- def wrapper(*args, **kwargs):
- xfail_reason = expected_fn(*args, **kwargs)
- if xfail_reason is not None:
- xfail_func = unittest2.expectedFailure(func)
- xfail_func(*args, **kwargs)
- else:
- func(*args, **kwargs)
-
- return wrapper
-
- # Some decorators can be called both with no arguments (e.g. @expectedFailureWindows)
- # or with arguments (e.g. @expectedFailureWindows(compilers=['gcc'])). When called
- # the first way, the first argument will be the actual function because decorators are
- # weird like that. So this is basically a check that says "which syntax was the original
- # function decorated with?"
- if callable(bugnumber):
- return expectedFailure_impl(bugnumber)
- else:
- return expectedFailure_impl
-
-
def skipTestIfFn(expected_fn, bugnumber=None):
def skipTestIfFn_impl(func):
- if isinstance(func, type) and issubclass(func, unittest2.TestCase):
+ if isinstance(func, type) and issubclass(func, unittest.TestCase):
reason = expected_fn()
# The return value is the reason (or None if we don't skip), so
# reason is used for both args.
- return unittest2.skipIf(condition=reason, reason=reason)(func)
+ return unittest.skipIf(condition=reason, reason=reason)(func)
@wraps(func)
def wrapper(*args, **kwargs):
@@ -191,7 +164,7 @@ def skipTestIfFn(expected_fn, bugnumber=None):
def _xfailForDebugInfo(expected_fn, bugnumber=None):
def expectedFailure_impl(func):
- if isinstance(func, type) and issubclass(func, unittest2.TestCase):
+ if isinstance(func, type) and issubclass(func, unittest.TestCase):
raise Exception("Decorator can only be used to decorate a test method")
func.__xfail_for_debug_info_cat_fn__ = expected_fn
@@ -205,7 +178,7 @@ def _xfailForDebugInfo(expected_fn, bugnumber=None):
def _skipForDebugInfo(expected_fn, bugnumber=None):
def skipImpl(func):
- if isinstance(func, type) and issubclass(func, unittest2.TestCase):
+ if isinstance(func, type) and issubclass(func, unittest.TestCase):
raise Exception("Decorator can only be used to decorate a test method")
func.__skip_for_debug_info_cat_fn__ = expected_fn
@@ -417,8 +390,8 @@ def skipIf(
)
-def _skip_for_android(reason, api_levels, archs):
- def impl(obj):
+def _skip_fn_for_android(reason, api_levels, archs):
+ def impl():
result = lldbplatformutil.match_android_device(
lldbplatformutil.getArchitecture(),
valid_archs=archs,
@@ -434,7 +407,7 @@ def add_test_categories(cat):
cat = test_categories.validate(cat, True)
def impl(func):
- if isinstance(func, type) and issubclass(func, unittest2.TestCase):
+ if isinstance(func, type) and issubclass(func, unittest.TestCase):
raise Exception(
"@add_test_categories can only be used to decorate a test method"
)
@@ -465,7 +438,7 @@ def benchmarks_test(func):
def no_debug_info_test(func):
"""Decorate the item as a test what don't use any debug info. If this annotation is specified
then the test runner won't generate a separate test for each debug info format."""
- if isinstance(func, type) and issubclass(func, unittest2.TestCase):
+ if isinstance(func, type) and issubclass(func, unittest.TestCase):
raise Exception(
"@no_debug_info_test can only be used to decorate a test method"
)
@@ -549,8 +522,8 @@ def expectedFailureAndroid(bugnumber=None, api_levels=None, archs=None):
arch - A sequence of architecture names specifying the architectures
for which a test is expected to fail. None means all architectures.
"""
- return expectedFailureIfFn(
- _skip_for_android("xfailing on android", api_levels, archs), bugnumber
+ return expectedFailureIf(
+ _skip_fn_for_android("xfailing on android", api_levels, archs)(), bugnumber
)
@@ -612,7 +585,7 @@ def expectedFlakeyNetBSD(bugnumber=None, compilers=None):
def expectedFlakeyAndroid(bugnumber=None, api_levels=None, archs=None):
return expectedFlakey(
- _skip_for_android("flakey on android", api_levels, archs), bugnumber
+ _skip_fn_for_android("flakey on android", api_levels, archs), bugnumber
)
@@ -631,7 +604,7 @@ def skipIfOutOfTreeDebugserver(func):
def skipIfRemote(func):
"""Decorate the item to skip tests if testing remotely."""
- return unittest2.skipIf(lldb.remote_platform, "skip on remote platform")(func)
+ return unittest.skipIf(lldb.remote_platform, "skip on remote platform")(func)
def skipIfNoSBHeaders(func):
@@ -768,7 +741,7 @@ def skipUnlessDarwin(func):
def skipUnlessTargetAndroid(func):
- return unittest2.skipUnless(
+ return unittest.skipUnless(
lldbplatformutil.target_is_android(), "requires target to be Android"
)(func)
@@ -809,7 +782,7 @@ def skipIfPlatform(oslist):
"""Decorate the item to skip tests if running on one of the listed platforms."""
# This decorator cannot be ported to `skipIf` yet because it is used on entire
# classes, which `skipIf` explicitly forbids.
- return unittest2.skipIf(
+ return unittest.skipIf(
lldbplatformutil.getPlatform() in oslist, "skip on %s" % (", ".join(oslist))
)
@@ -818,7 +791,7 @@ def skipUnlessPlatform(oslist):
"""Decorate the item to skip tests unless running on one of the listed platforms."""
# This decorator cannot be ported to `skipIf` yet because it is used on entire
# classes, which `skipIf` explicitly forbids.
- return unittest2.skipUnless(
+ return unittest.skipUnless(
lldbplatformutil.getPlatform() in oslist,
"requires one of %s" % (", ".join(oslist)),
)
@@ -846,7 +819,7 @@ def skipIfTargetAndroid(bugnumber=None, api_levels=None, archs=None):
for which a test is skipped. None means all architectures.
"""
return skipTestIfFn(
- _skip_for_android("skipping for android", api_levels, archs), bugnumber
+ _skip_fn_for_android("skipping for android", api_levels, archs), bugnumber
)
@@ -1078,7 +1051,7 @@ def _get_bool_config(key, fail_value=True):
def _get_bool_config_skip_if_decorator(key):
have = _get_bool_config(key)
- return unittest2.skipIf(not have, "requires " + key)
+ return unittest.skipIf(not have, "requires " + key)
def skipIfCursesSupportMissing(func):
@@ -1110,7 +1083,7 @@ def skipIfLLVMTargetMissing(target):
found = True
break
- return unittest2.skipIf(not found, "requires " + target)
+ return unittest.skipIf(not found, "requires " + target)
# Call sysctl on darwin to see if a specified hardware feature is available on this machine.
diff --git a/lldb/packages/Python/lldbsuite/test/dotest.py b/lldb/packages/Python/lldbsuite/test/dotest.py
index 4393e0c..291d7ba 100644
--- a/lldb/packages/Python/lldbsuite/test/dotest.py
+++ b/lldb/packages/Python/lldbsuite/test/dotest.py
@@ -33,7 +33,7 @@ import sys
import tempfile
# Third-party modules
-import unittest2
+import unittest
# LLDB Modules
import lldbsuite
@@ -658,7 +658,7 @@ def visit_file(dir, name):
for filterspec in iter_filters():
filtered = True
print("adding filter spec %s to module %s" % (filterspec, repr(module)))
- tests = unittest2.defaultTestLoader.loadTestsFromName(filterspec, module)
+ tests = unittest.defaultTestLoader.loadTestsFromName(filterspec, module)
configuration.suite.addTests(tests)
# Forgo this module if the (base, filterspec) combo is invalid
@@ -669,9 +669,7 @@ def visit_file(dir, name):
# Add the entire file's worth of tests since we're not filtered.
# Also the fail-over case when the filterspec branch
# (base, filterspec) combo doesn't make sense.
- configuration.suite.addTests(
- unittest2.defaultTestLoader.loadTestsFromName(base)
- )
+ configuration.suite.addTests(unittest.defaultTestLoader.loadTestsFromName(base))
def visit(prefix, dir, names):
@@ -1032,7 +1030,7 @@ def run_suite():
#
# Install the control-c handler.
- unittest2.signals.installHandler()
+ unittest.signals.installHandler()
#
# Invoke the default TextTestRunner to run the test suite
@@ -1066,7 +1064,7 @@ def run_suite():
# Invoke the test runner.
if configuration.count == 1:
- result = unittest2.TextTestRunner(
+ result = unittest.TextTestRunner(
stream=sys.stderr,
verbosity=configuration.verbose,
resultclass=test_result.LLDBTestResult,
@@ -1077,7 +1075,7 @@ def run_suite():
# not enforced.
test_result.LLDBTestResult.__ignore_singleton__ = True
for i in range(configuration.count):
- result = unittest2.TextTestRunner(
+ result = unittest.TextTestRunner(
stream=sys.stderr,
verbosity=configuration.verbose,
resultclass=test_result.LLDBTestResult,
diff --git a/lldb/packages/Python/lldbsuite/test/lldbtest.py b/lldb/packages/Python/lldbsuite/test/lldbtest.py
index d944b09..018f2a0 100644
--- a/lldb/packages/Python/lldbsuite/test/lldbtest.py
+++ b/lldb/packages/Python/lldbsuite/test/lldbtest.py
@@ -44,7 +44,7 @@ import time
import traceback
# Third-party modules
-import unittest2
+import unittest
# LLDB modules
import lldb
@@ -517,7 +517,7 @@ def builder_module():
return lldbplatformutil.builder_module()
-class Base(unittest2.TestCase):
+class Base(unittest.TestCase):
"""
Abstract base for performing lldb (see TestBase) or other generic tests (see
BenchBase for one example). lldbtest.Base works with the test driver to
@@ -1090,17 +1090,14 @@ class Base(unittest2.TestCase):
# Once by the Python unittest framework, and a second time by us.
print("FAIL", file=sbuf)
- def markExpectedFailure(self, err, bugnumber):
+ def markExpectedFailure(self, err):
"""Callback invoked when an expected failure/error occurred."""
self.__expected__ = True
with recording(self, False) as sbuf:
# False because there's no need to write "expected failure" to the
# stderr twice.
# Once by the Python unittest framework, and a second time by us.
- if bugnumber is None:
- print("expected failure", file=sbuf)
- else:
- print("expected failure (problem id:" + str(bugnumber) + ")", file=sbuf)
+ print("expected failure", file=sbuf)
def markSkippedTest(self):
"""Callback invoked when a test is skipped."""
@@ -1111,19 +1108,14 @@ class Base(unittest2.TestCase):
# Once by the Python unittest framework, and a second time by us.
print("skipped test", file=sbuf)
- def markUnexpectedSuccess(self, bugnumber):
+ def markUnexpectedSuccess(self):
"""Callback invoked when an unexpected success occurred."""
self.__unexpected__ = True
with recording(self, False) as sbuf:
# False because there's no need to write "unexpected success" to the
# stderr twice.
# Once by the Python unittest framework, and a second time by us.
- if bugnumber is None:
- print("unexpected success", file=sbuf)
- else:
- print(
- "unexpected success (problem id:" + str(bugnumber) + ")", file=sbuf
- )
+ print("unexpected success", file=sbuf)
def getRerunArgs(self):
return " -f %s.%s" % (self.__class__.__name__, self._testMethodName)
@@ -1704,13 +1696,11 @@ class LLDBTestCaseFactory(type):
xfail_reason = xfail_for_debug_info_cat_fn(cat)
if xfail_reason:
- test_method = unittest2.expectedFailure(xfail_reason)(
- test_method
- )
+ test_method = unittest.expectedFailure(test_method)
skip_reason = skip_for_debug_info_cat_fn(cat)
if skip_reason:
- test_method = unittest2.skip(skip_reason)(test_method)
+ test_method = unittest.skip(skip_reason)(test_method)
newattrs[method_name] = test_method
@@ -2226,7 +2216,7 @@ class TestBase(Base, metaclass=LLDBTestCaseFactory):
match_strings = lldb.SBStringList()
interp.HandleCompletion(command, len(command), 0, -1, match_strings)
# match_strings is a 1-indexed list, so we have to slice...
- self.assertItemsEqual(
+ self.assertCountEqual(
completions, list(match_strings)[1:], "List of returned completion is wrong"
)
diff --git a/lldb/packages/Python/lldbsuite/test/test_result.py b/lldb/packages/Python/lldbsuite/test/test_result.py
index cb84c90..20365f5 100644
--- a/lldb/packages/Python/lldbsuite/test/test_result.py
+++ b/lldb/packages/Python/lldbsuite/test/test_result.py
@@ -12,14 +12,14 @@ import os
import traceback
# Third-party modules
-import unittest2
+import unittest
# LLDB Modules
from . import configuration
from lldbsuite.test_event import build_exception
-class LLDBTestResult(unittest2.TextTestResult):
+class LLDBTestResult(unittest.TextTestResult):
"""
Enforce a singleton pattern to allow introspection of test progress.
@@ -243,7 +243,7 @@ class LLDBTestResult(unittest2.TextTestResult):
if self.checkExclusion(
configuration.xfail_tests, test.id()
) or self.checkCategoryExclusion(configuration.xfail_categories, test):
- self.addExpectedFailure(test, err, None)
+ self.addExpectedFailure(test, err)
return
configuration.sdir_has_content = True
@@ -264,12 +264,12 @@ class LLDBTestResult(unittest2.TextTestResult):
else:
configuration.failures_per_category[category] = 1
- def addExpectedFailure(self, test, err, bugnumber):
+ def addExpectedFailure(self, test, err):
configuration.sdir_has_content = True
- super(LLDBTestResult, self).addExpectedFailure(test, err, bugnumber)
+ super(LLDBTestResult, self).addExpectedFailure(test, err)
method = getattr(test, "markExpectedFailure", None)
if method:
- method(err, bugnumber)
+ method(err)
self.stream.write(
"XFAIL: LLDB (%s) :: %s\n" % (self._config_string(test), str(test))
)
@@ -285,12 +285,12 @@ class LLDBTestResult(unittest2.TextTestResult):
% (self._config_string(test), str(test), reason)
)
- def addUnexpectedSuccess(self, test, bugnumber):
+ def addUnexpectedSuccess(self, test):
configuration.sdir_has_content = True
- super(LLDBTestResult, self).addUnexpectedSuccess(test, bugnumber)
+ super(LLDBTestResult, self).addUnexpectedSuccess(test)
method = getattr(test, "markUnexpectedSuccess", None)
if method:
- method(bugnumber)
+ method()
self.stream.write(
"XPASS: LLDB (%s) :: %s\n" % (self._config_string(test), str(test))
)
diff --git a/lldb/source/Commands/CommandObjectCommands.cpp b/lldb/source/Commands/CommandObjectCommands.cpp
index a51e5ab..b7cd650 100644
--- a/lldb/source/Commands/CommandObjectCommands.cpp
+++ b/lldb/source/Commands/CommandObjectCommands.cpp
@@ -1151,13 +1151,16 @@ private:
CompletionType m_completion_type = eNoCompletion;
};
-class CommandObjectScriptingObject : public CommandObjectRaw {
+/// This class implements a "raw" scripted command. lldb does no parsing of the
+/// command line, instead passing the line unaltered (except for backtick
+/// substitution).
+class CommandObjectScriptingObjectRaw : public CommandObjectRaw {
public:
- CommandObjectScriptingObject(CommandInterpreter &interpreter,
- std::string name,
- StructuredData::GenericSP cmd_obj_sp,
- ScriptedCommandSynchronicity synch,
- CompletionType completion_type)
+ CommandObjectScriptingObjectRaw(CommandInterpreter &interpreter,
+ std::string name,
+ StructuredData::GenericSP cmd_obj_sp,
+ ScriptedCommandSynchronicity synch,
+ CompletionType completion_type)
: CommandObjectRaw(interpreter, name), m_cmd_obj_sp(cmd_obj_sp),
m_synchro(synch), m_fetched_help_short(false),
m_fetched_help_long(false), m_completion_type(completion_type) {
@@ -1168,7 +1171,7 @@ public:
GetFlags().Set(scripter->GetFlagsForCommandObject(cmd_obj_sp));
}
- ~CommandObjectScriptingObject() override = default;
+ ~CommandObjectScriptingObjectRaw() override = default;
void
HandleArgumentCompletion(CompletionRequest &request,
@@ -1246,6 +1249,699 @@ private:
CompletionType m_completion_type = eNoCompletion;
};
+
+/// This command implements a lldb parsed scripted command. The command
+/// provides a definition of the options and arguments, and a option value
+/// setting callback, and then the command's execution function gets passed
+/// just the parsed arguments.
+/// Note, implementing a command in Python using these base interfaces is a bit
+/// of a pain, but it is much easier to export this low level interface, and
+/// then make it nicer on the Python side, than to try to do that in a
+/// script language neutral way.
+/// So I've also added a base class in Python that provides a table-driven
+/// way of defining the options and arguments, which automatically fills the
+/// option values, making them available as properties in Python.
+///
+class CommandObjectScriptingObjectParsed : public CommandObjectParsed {
+private:
+ class CommandOptions : public Options {
+ public:
+ CommandOptions(CommandInterpreter &interpreter,
+ StructuredData::GenericSP cmd_obj_sp) : m_interpreter(interpreter),
+ m_cmd_obj_sp(cmd_obj_sp) {}
+
+ ~CommandOptions() override = default;
+
+ Status SetOptionValue(uint32_t option_idx, llvm::StringRef option_arg,
+ ExecutionContext *execution_context) override {
+ Status error;
+ ScriptInterpreter *scripter =
+ m_interpreter.GetDebugger().GetScriptInterpreter();
+ if (!scripter) {
+ error.SetErrorString("No script interpreter for SetOptionValue.");
+ return error;
+ }
+ if (!m_cmd_obj_sp) {
+ error.SetErrorString("SetOptionValue called with empty cmd_obj.");
+ return error;
+ }
+ if (!m_options_definition_up) {
+ error.SetErrorString("SetOptionValue called before options definitions "
+ "were created.");
+ return error;
+ }
+ // Pass the long option, since you aren't actually required to have a
+ // short_option, and for those options the index or short option character
+ // aren't meaningful on the python side.
+ const char * long_option =
+ m_options_definition_up.get()[option_idx].long_option;
+ bool success = scripter->SetOptionValueForCommandObject(m_cmd_obj_sp,
+ execution_context, long_option, option_arg);
+ if (!success)
+ error.SetErrorStringWithFormatv("Error setting option: {0} to {1}",
+ long_option, option_arg);
+ return error;
+ }
+
+ void OptionParsingStarting(ExecutionContext *execution_context) override {
+ ScriptInterpreter *scripter =
+ m_interpreter.GetDebugger().GetScriptInterpreter();
+ if (!scripter || !m_cmd_obj_sp)
+ return;
+
+ scripter->OptionParsingStartedForCommandObject(m_cmd_obj_sp);
+ }
+
+ llvm::ArrayRef<OptionDefinition> GetDefinitions() override {
+ if (!m_options_definition_up)
+ return {};
+ return llvm::ArrayRef(m_options_definition_up.get(), m_num_options);
+ }
+
+ static Status ParseUsageMaskFromArray(StructuredData::ObjectSP obj_sp,
+ size_t counter, uint32_t &usage_mask) {
+ // If the usage entry is not provided, we use LLDB_OPT_SET_ALL.
+ // If the usage mask is a UINT, the option belongs to that group.
+ // If the usage mask is a vector of UINT's, the option belongs to all the
+ // groups listed.
+ // If a subelement of the vector is a vector of two ints, then the option
+ // belongs to the inclusive range from the first to the second element.
+ Status error;
+ if (!obj_sp) {
+ usage_mask = LLDB_OPT_SET_ALL;
+ return error;
+ }
+
+ usage_mask = 0;
+
+ StructuredData::UnsignedInteger *uint_val =
+ obj_sp->GetAsUnsignedInteger();
+ if (uint_val) {
+ // If this is an integer, then this specifies a single group:
+ uint32_t value = uint_val->GetValue();
+ if (value == 0) {
+ error.SetErrorStringWithFormatv(
+ "0 is not a valid group for option {0}", counter);
+ return error;
+ }
+ usage_mask = (1 << (value - 1));
+ return error;
+ }
+ // Otherwise it has to be an array:
+ StructuredData::Array *array_val = obj_sp->GetAsArray();
+ if (!array_val) {
+ error.SetErrorStringWithFormatv(
+ "required field is not a array for option {0}", counter);
+ return error;
+ }
+ // This is the array ForEach for accumulating a group usage mask from
+ // an array of string descriptions of groups.
+ auto groups_accumulator
+ = [counter, &usage_mask, &error]
+ (StructuredData::Object *obj) -> bool {
+ StructuredData::UnsignedInteger *int_val = obj->GetAsUnsignedInteger();
+ if (int_val) {
+ uint32_t value = int_val->GetValue();
+ if (value == 0) {
+ error.SetErrorStringWithFormatv(
+ "0 is not a valid group for element {0}", counter);
+ return false;
+ }
+ usage_mask |= (1 << (value - 1));
+ return true;
+ }
+ StructuredData::Array *arr_val = obj->GetAsArray();
+ if (!arr_val) {
+ error.SetErrorStringWithFormatv(
+ "Group element not an int or array of integers for element {0}",
+ counter);
+ return false;
+ }
+ size_t num_range_elem = arr_val->GetSize();
+ if (num_range_elem != 2) {
+ error.SetErrorStringWithFormatv(
+ "Subranges of a group not a start and a stop for element {0}",
+ counter);
+ return false;
+ }
+ int_val = arr_val->GetItemAtIndex(0)->GetAsUnsignedInteger();
+ if (!int_val) {
+ error.SetErrorStringWithFormatv("Start element of a subrange of a "
+ "group not unsigned int for element {0}", counter);
+ return false;
+ }
+ uint32_t start = int_val->GetValue();
+ int_val = arr_val->GetItemAtIndex(1)->GetAsUnsignedInteger();
+ if (!int_val) {
+ error.SetErrorStringWithFormatv("End element of a subrange of a group"
+ " not unsigned int for element {0}", counter);
+ return false;
+ }
+ uint32_t end = int_val->GetValue();
+ if (start == 0 || end == 0 || start > end) {
+ error.SetErrorStringWithFormatv("Invalid subrange of a group: {0} - "
+ "{1} for element {2}", start, end, counter);
+ return false;
+ }
+ for (uint32_t i = start; i <= end; i++) {
+ usage_mask |= (1 << (i - 1));
+ }
+ return true;
+ };
+ array_val->ForEach(groups_accumulator);
+ return error;
+ }
+
+
+ Status SetOptionsFromArray(StructuredData::Dictionary &options) {
+ Status error;
+ m_num_options = options.GetSize();
+ m_options_definition_up.reset(new OptionDefinition[m_num_options]);
+ // We need to hand out pointers to contents of these vectors; we reserve
+ // as much as we'll need up front so they don't get freed on resize...
+ m_usage_container.resize(m_num_options);
+ m_enum_storage.resize(m_num_options);
+ m_enum_vector.resize(m_num_options);
+
+ size_t counter = 0;
+ size_t short_opt_counter = 0;
+ // This is the Array::ForEach function for adding option elements:
+ auto add_element = [this, &error, &counter, &short_opt_counter]
+ (llvm::StringRef long_option, StructuredData::Object *object) -> bool {
+ StructuredData::Dictionary *opt_dict = object->GetAsDictionary();
+ if (!opt_dict) {
+ error.SetErrorString("Value in options dictionary is not a dictionary");
+ return false;
+ }
+ OptionDefinition &option_def = m_options_definition_up.get()[counter];
+
+ // We aren't exposing the validator yet, set it to null
+ option_def.validator = nullptr;
+ // We don't require usage masks, so set it to one group by default:
+ option_def.usage_mask = 1;
+
+ // Now set the fields of the OptionDefinition Array from the dictionary:
+ //
+ // Note that I don't check for unknown fields in the option dictionaries
+ // so a scriptor can add extra elements that are helpful when they go to
+ // do "set_option_value"
+
+ // Usage Mask:
+ StructuredData::ObjectSP obj_sp = opt_dict->GetValueForKey("groups");
+ if (obj_sp) {
+ error = ParseUsageMaskFromArray(obj_sp, counter,
+ option_def.usage_mask);
+ if (error.Fail())
+ return false;
+ }
+
+ // Required:
+ option_def.required = false;
+ obj_sp = opt_dict->GetValueForKey("required");
+ if (obj_sp) {
+ StructuredData::Boolean *boolean_val = obj_sp->GetAsBoolean();
+ if (!boolean_val) {
+ error.SetErrorStringWithFormatv("'required' field is not a boolean "
+ "for option {0}", counter);
+ return false;
+ }
+ option_def.required = boolean_val->GetValue();
+ }
+
+ // Short Option:
+ int short_option;
+ obj_sp = opt_dict->GetValueForKey("short_option");
+ if (obj_sp) {
+ // The value is a string, so pull the
+ llvm::StringRef short_str = obj_sp->GetStringValue();
+ if (short_str.empty()) {
+ error.SetErrorStringWithFormatv("short_option field empty for "
+ "option {0}", counter);
+ return false;
+ } else if (short_str.size() != 1) {
+ error.SetErrorStringWithFormatv("short_option field has extra "
+ "characters for option {0}", counter);
+ return false;
+ }
+ short_option = (int) short_str[0];
+ } else {
+ // If the short option is not provided, then we need a unique value
+ // less than the lowest printable ASCII character.
+ short_option = short_opt_counter++;
+ }
+ option_def.short_option = short_option;
+
+ // Long Option is the key from the outer dict:
+ if (long_option.empty()) {
+ error.SetErrorStringWithFormatv("empty long_option for option {0}",
+ counter);
+ return false;
+ }
+ auto inserted = g_string_storer.insert(long_option.str());
+ option_def.long_option = ((*(inserted.first)).data());
+
+ // Value Type:
+ obj_sp = opt_dict->GetValueForKey("value_type");
+ if (obj_sp) {
+ StructuredData::UnsignedInteger *uint_val
+ = obj_sp->GetAsUnsignedInteger();
+ if (!uint_val) {
+ error.SetErrorStringWithFormatv("Value type must be an unsigned "
+ "integer");
+ return false;
+ }
+ uint64_t val_type = uint_val->GetValue();
+ if (val_type >= eArgTypeLastArg) {
+ error.SetErrorStringWithFormatv("Value type {0} beyond the "
+ "CommandArgumentType bounds", val_type);
+ return false;
+ }
+ option_def.argument_type = (CommandArgumentType) val_type;
+ option_def.option_has_arg = true;
+ } else {
+ option_def.argument_type = eArgTypeNone;
+ option_def.option_has_arg = false;
+ }
+
+ // Completion Type:
+ obj_sp = opt_dict->GetValueForKey("completion_type");
+ if (obj_sp) {
+ StructuredData::UnsignedInteger *uint_val = obj_sp->GetAsUnsignedInteger();
+ if (!uint_val) {
+ error.SetErrorStringWithFormatv("Completion type must be an "
+ "unsigned integer for option {0}", counter);
+ return false;
+ }
+ uint64_t completion_type = uint_val->GetValue();
+ if (completion_type > eCustomCompletion) {
+ error.SetErrorStringWithFormatv("Completion type for option {0} "
+ "beyond the CompletionType bounds", completion_type);
+ return false;
+ }
+ option_def.completion_type = (CommandArgumentType) completion_type;
+ } else
+ option_def.completion_type = eNoCompletion;
+
+ // Usage Text:
+ std::string usage_text;
+ obj_sp = opt_dict->GetValueForKey("help");
+ if (!obj_sp) {
+ error.SetErrorStringWithFormatv("required usage missing from option "
+ "{0}", counter);
+ return false;
+ }
+ llvm::StringRef usage_stref;
+ usage_stref = obj_sp->GetStringValue();
+ if (usage_stref.empty()) {
+ error.SetErrorStringWithFormatv("empty usage text for option {0}",
+ counter);
+ return false;
+ }
+ m_usage_container[counter] = usage_stref.str().c_str();
+ option_def.usage_text = m_usage_container[counter].data();
+
+ // Enum Values:
+
+ obj_sp = opt_dict->GetValueForKey("enum_values");
+ if (obj_sp) {
+ StructuredData::Array *array = obj_sp->GetAsArray();
+ if (!array) {
+ error.SetErrorStringWithFormatv("enum values must be an array for "
+ "option {0}", counter);
+ return false;
+ }
+ size_t num_elem = array->GetSize();
+ size_t enum_ctr = 0;
+ m_enum_storage[counter] = std::vector<EnumValueStorage>(num_elem);
+ std::vector<EnumValueStorage> &curr_elem = m_enum_storage[counter];
+
+ // This is the Array::ForEach function for adding enum elements:
+ // Since there are only two fields to specify the enum, use a simple
+ // two element array with value first, usage second.
+ // counter is only used for reporting so I pass it by value here.
+ auto add_enum = [&enum_ctr, &curr_elem, counter, &error]
+ (StructuredData::Object *object) -> bool {
+ StructuredData::Array *enum_arr = object->GetAsArray();
+ if (!enum_arr) {
+ error.SetErrorStringWithFormatv("Enum values for option {0} not "
+ "an array", counter);
+ return false;
+ }
+ size_t num_enum_elements = enum_arr->GetSize();
+ if (num_enum_elements != 2) {
+ error.SetErrorStringWithFormatv("Wrong number of elements: {0} "
+ "for enum {1} in option {2}",
+ num_enum_elements, enum_ctr, counter);
+ return false;
+ }
+ // Enum Value:
+ StructuredData::ObjectSP obj_sp = enum_arr->GetItemAtIndex(0);
+ llvm::StringRef val_stref = obj_sp->GetStringValue();
+ std::string value_cstr_str = val_stref.str().c_str();
+
+ // Enum Usage:
+ obj_sp = enum_arr->GetItemAtIndex(1);
+ if (!obj_sp) {
+ error.SetErrorStringWithFormatv("No usage for enum {0} in option "
+ "{1}", enum_ctr, counter);
+ return false;
+ }
+ llvm::StringRef usage_stref = obj_sp->GetStringValue();
+ std::string usage_cstr_str = usage_stref.str().c_str();
+ curr_elem[enum_ctr] = EnumValueStorage(value_cstr_str,
+ usage_cstr_str, enum_ctr);
+
+ enum_ctr++;
+ return true;
+ }; // end of add_enum
+
+ array->ForEach(add_enum);
+ if (!error.Success())
+ return false;
+ // We have to have a vector of elements to set in the options, make
+ // that here:
+ for (auto &elem : curr_elem)
+ m_enum_vector[counter].emplace_back(elem.element);
+
+ option_def.enum_values = llvm::ArrayRef(m_enum_vector[counter]);
+ }
+ counter++;
+ return true;
+ }; // end of add_element
+
+ options.ForEach(add_element);
+ return error;
+ }
+
+ private:
+ struct EnumValueStorage {
+ EnumValueStorage() {
+ element.string_value = "value not set";
+ element.usage = "usage not set";
+ element.value = 0;
+ }
+
+ EnumValueStorage(std::string in_str_val, std::string in_usage,
+ size_t in_value) : value(std::move(in_str_val)), usage(std::move(in_usage)) {
+ SetElement(in_value);
+ }
+
+ EnumValueStorage(const EnumValueStorage &in) : value(in.value),
+ usage(in.usage) {
+ SetElement(in.element.value);
+ }
+
+ EnumValueStorage &operator=(const EnumValueStorage &in) {
+ value = in.value;
+ usage = in.usage;
+ SetElement(in.element.value);
+ return *this;
+ }
+
+ void SetElement(size_t in_value) {
+ element.value = in_value;
+ element.string_value = value.data();
+ element.usage = usage.data();
+ }
+
+ std::string value;
+ std::string usage;
+ OptionEnumValueElement element;
+ };
+ // We have to provide char * values for the long option, usage and enum
+ // values, that's what the option definitions hold.
+ // The long option strings are quite likely to be reused in other added
+ // commands, so those are stored in a global set: g_string_storer.
+ // But the usages are much less likely to be reused, so those are stored in
+ // a vector in the command instance. It gets resized to the correct size
+ // and then filled with null-terminated strings in the std::string, so the
+ // are valid C-strings that won't move around.
+ // The enum values and descriptions are treated similarly - these aren't
+ // all that common so it's not worth the effort to dedup them.
+ size_t m_num_options = 0;
+ std::unique_ptr<OptionDefinition> m_options_definition_up;
+ std::vector<std::vector<EnumValueStorage>> m_enum_storage;
+ std::vector<std::vector<OptionEnumValueElement>> m_enum_vector;
+ std::vector<std::string> m_usage_container;
+ CommandInterpreter &m_interpreter;
+ StructuredData::GenericSP m_cmd_obj_sp;
+ static std::unordered_set<std::string> g_string_storer;
+ };
+
+public:
+ static CommandObjectSP Create(CommandInterpreter &interpreter,
+ std::string name,
+ StructuredData::GenericSP cmd_obj_sp,
+ ScriptedCommandSynchronicity synch,
+ CommandReturnObject &result) {
+ CommandObjectSP new_cmd_sp(new CommandObjectScriptingObjectParsed(
+ interpreter, name, cmd_obj_sp, synch));
+
+ CommandObjectScriptingObjectParsed *parsed_cmd
+ = static_cast<CommandObjectScriptingObjectParsed *>(new_cmd_sp.get());
+ // Now check all the failure modes, and report if found.
+ Status opt_error = parsed_cmd->GetOptionsError();
+ Status arg_error = parsed_cmd->GetArgsError();
+
+ if (opt_error.Fail())
+ result.AppendErrorWithFormat("failed to parse option definitions: %s",
+ opt_error.AsCString());
+ if (arg_error.Fail())
+ result.AppendErrorWithFormat("%sfailed to parse argument definitions: %s",
+ opt_error.Fail() ? ", also " : "",
+ arg_error.AsCString());
+
+ if (!result.Succeeded())
+ return {};
+
+ return new_cmd_sp;
+ }
+
+ CommandObjectScriptingObjectParsed(CommandInterpreter &interpreter,
+ std::string name,
+ StructuredData::GenericSP cmd_obj_sp,
+ ScriptedCommandSynchronicity synch)
+ : CommandObjectParsed(interpreter, name.c_str()),
+ m_cmd_obj_sp(cmd_obj_sp), m_synchro(synch),
+ m_options(interpreter, cmd_obj_sp), m_fetched_help_short(false),
+ m_fetched_help_long(false) {
+ StreamString stream;
+ ScriptInterpreter *scripter = GetDebugger().GetScriptInterpreter();
+ if (!scripter) {
+ m_options_error.SetErrorString("No script interpreter");
+ return;
+ }
+
+ // Set the flags:
+ GetFlags().Set(scripter->GetFlagsForCommandObject(cmd_obj_sp));
+
+ // Now set up the options definitions from the options:
+ StructuredData::ObjectSP options_object_sp
+ = scripter->GetOptionsForCommandObject(cmd_obj_sp);
+ // It's okay not to have an options dict.
+ if (options_object_sp) {
+ // The options come as a dictionary of dictionaries. The key of the
+ // outer dict is the long option name (since that's required). The
+ // value holds all the other option specification bits.
+ StructuredData::Dictionary *options_dict
+ = options_object_sp->GetAsDictionary();
+ // but if it exists, it has to be an array.
+ if (options_dict) {
+ m_options_error = m_options.SetOptionsFromArray(*(options_dict));
+ // If we got an error don't bother with the arguments...
+ if (m_options_error.Fail())
+ return;
+ } else {
+ m_options_error.SetErrorString("Options array not an array");
+ return;
+ }
+ }
+ // Then fetch the args. Since the arguments can have usage masks you need
+ // an array of arrays.
+ StructuredData::ObjectSP args_object_sp
+ = scripter->GetArgumentsForCommandObject(cmd_obj_sp);
+ if (args_object_sp) {
+ StructuredData::Array *args_array = args_object_sp->GetAsArray();
+ if (!args_array) {
+ m_args_error.SetErrorString("Argument specification is not an array");
+ return;
+ }
+ size_t counter = 0;
+
+ // This is the Array::ForEach function that handles the
+ // CommandArgumentEntry arrays one by one:
+ auto arg_array_adder = [this, &counter] (StructuredData::Object *object)
+ -> bool {
+ // This is the Array::ForEach function to add argument entries:
+ CommandArgumentEntry this_entry;
+ size_t elem_counter = 0;
+ auto args_adder = [this, counter, &elem_counter, &this_entry]
+ (StructuredData::Object *object) -> bool {
+ // The arguments definition has three fields, the argument type, the
+ // repeat and the usage mask.
+ CommandArgumentType arg_type = eArgTypeNone;
+ ArgumentRepetitionType arg_repetition = eArgRepeatOptional;
+ uint32_t arg_opt_set_association;
+
+ auto report_error = [this, elem_counter, counter]
+ (const char *err_txt) -> bool {
+ m_args_error.SetErrorStringWithFormatv("Element {0} of arguments "
+ "list element {1}: %s.", elem_counter, counter, err_txt);
+ return false;
+ };
+
+ StructuredData::Dictionary *arg_dict = object->GetAsDictionary();
+ if (!arg_dict) {
+ report_error("is not a dictionary.");
+ return false;
+ }
+ // Argument Type:
+ StructuredData::ObjectSP obj_sp
+ = arg_dict->GetValueForKey("arg_type");
+ if (obj_sp) {
+ StructuredData::UnsignedInteger *uint_val
+ = obj_sp->GetAsUnsignedInteger();
+ if (!uint_val) {
+ report_error("value type must be an unsigned integer");
+ return false;
+ }
+ uint64_t arg_type_int = uint_val->GetValue();
+ if (arg_type_int >= eArgTypeLastArg) {
+ report_error("value type beyond ArgumentRepetitionType bounds");
+ return false;
+ }
+ arg_type = (CommandArgumentType) arg_type_int;
+ }
+ // Repeat Value:
+ obj_sp = arg_dict->GetValueForKey("repeat");
+ std::optional<ArgumentRepetitionType> repeat;
+ if (obj_sp) {
+ llvm::StringRef repeat_str = obj_sp->GetStringValue();
+ if (repeat_str.empty()) {
+ report_error("repeat value is empty");
+ return false;
+ }
+ repeat = ArgRepetitionFromString(repeat_str);
+ if (!repeat) {
+ report_error("invalid repeat value");
+ return false;
+ }
+ arg_repetition = *repeat;
+ }
+
+ // Usage Mask:
+ obj_sp = arg_dict->GetValueForKey("groups");
+ m_args_error = CommandOptions::ParseUsageMaskFromArray(obj_sp,
+ counter, arg_opt_set_association);
+ this_entry.emplace_back(arg_type, arg_repetition,
+ arg_opt_set_association);
+ elem_counter++;
+ return true;
+ };
+ StructuredData::Array *args_array = object->GetAsArray();
+ if (!args_array) {
+ m_args_error.SetErrorStringWithFormatv("Argument definition element "
+ "{0} is not an array", counter);
+ }
+
+ args_array->ForEach(args_adder);
+ if (m_args_error.Fail())
+ return false;
+ if (this_entry.empty()) {
+ m_args_error.SetErrorStringWithFormatv("Argument definition element "
+ "{0} is empty", counter);
+ return false;
+ }
+ m_arguments.push_back(this_entry);
+ counter++;
+ return true;
+ }; // end of arg_array_adder
+ // Here we actually parse the args definition:
+ args_array->ForEach(arg_array_adder);
+ }
+ }
+
+ ~CommandObjectScriptingObjectParsed() override = default;
+
+ Status GetOptionsError() { return m_options_error; }
+ Status GetArgsError() { return m_args_error; }
+ bool WantsCompletion() override { return true; }
+
+ bool IsRemovable() const override { return true; }
+
+ ScriptedCommandSynchronicity GetSynchronicity() { return m_synchro; }
+
+ llvm::StringRef GetHelp() override {
+ if (m_fetched_help_short)
+ return CommandObjectParsed::GetHelp();
+ ScriptInterpreter *scripter = GetDebugger().GetScriptInterpreter();
+ if (!scripter)
+ return CommandObjectParsed::GetHelp();
+ std::string docstring;
+ m_fetched_help_short =
+ scripter->GetShortHelpForCommandObject(m_cmd_obj_sp, docstring);
+ if (!docstring.empty())
+ SetHelp(docstring);
+
+ return CommandObjectParsed::GetHelp();
+ }
+
+ llvm::StringRef GetHelpLong() override {
+ if (m_fetched_help_long)
+ return CommandObjectParsed::GetHelpLong();
+
+ ScriptInterpreter *scripter = GetDebugger().GetScriptInterpreter();
+ if (!scripter)
+ return CommandObjectParsed::GetHelpLong();
+
+ std::string docstring;
+ m_fetched_help_long =
+ scripter->GetLongHelpForCommandObject(m_cmd_obj_sp, docstring);
+ if (!docstring.empty())
+ SetHelpLong(docstring);
+ return CommandObjectParsed::GetHelpLong();
+ }
+
+ Options *GetOptions() override { return &m_options; }
+
+
+protected:
+ void DoExecute(Args &args,
+ CommandReturnObject &result) override {
+ ScriptInterpreter *scripter = GetDebugger().GetScriptInterpreter();
+
+ Status error;
+
+ result.SetStatus(eReturnStatusInvalid);
+
+ if (!scripter ||
+ !scripter->RunScriptBasedParsedCommand(m_cmd_obj_sp, args,
+ m_synchro, result, error, m_exe_ctx)) {
+ result.AppendError(error.AsCString());
+ } else {
+ // Don't change the status if the command already set it...
+ if (result.GetStatus() == eReturnStatusInvalid) {
+ if (result.GetOutputData().empty())
+ result.SetStatus(eReturnStatusSuccessFinishNoResult);
+ else
+ result.SetStatus(eReturnStatusSuccessFinishResult);
+ }
+ }
+ }
+
+private:
+ StructuredData::GenericSP m_cmd_obj_sp;
+ ScriptedCommandSynchronicity m_synchro;
+ CommandOptions m_options;
+ Status m_options_error;
+ Status m_args_error;
+ bool m_fetched_help_short : 1;
+ bool m_fetched_help_long : 1;
+};
+
+std::unordered_set<std::string>
+ CommandObjectScriptingObjectParsed::CommandOptions::g_string_storer;
+
// CommandObjectCommandsScriptImport
#define LLDB_OPTIONS_script_import
#include "CommandOptions.inc"
@@ -1439,6 +2135,9 @@ protected:
case 'o':
m_overwrite_lazy = eLazyBoolYes;
break;
+ case 'p':
+ m_parsed_command = true;
+ break;
case 's':
m_synchronicity =
(ScriptedCommandSynchronicity)OptionArgParser::ToOptionEnum(
@@ -1474,6 +2173,7 @@ protected:
m_completion_type = eNoCompletion;
m_overwrite_lazy = eLazyBoolCalculate;
m_synchronicity = eScriptedCommandSynchronicitySynchronous;
+ m_parsed_command = false;
}
llvm::ArrayRef<OptionDefinition> GetDefinitions() override {
@@ -1489,6 +2189,7 @@ protected:
ScriptedCommandSynchronicity m_synchronicity =
eScriptedCommandSynchronicitySynchronous;
CompletionType m_completion_type = eNoCompletion;
+ bool m_parsed_command = false;
};
void IOHandlerActivated(IOHandler &io_handler, bool interactive) override {
@@ -1628,10 +2329,16 @@ protected:
"'{0}'", m_options.m_class_name);
return;
}
-
- new_cmd_sp.reset(new CommandObjectScriptingObject(
- m_interpreter, m_cmd_name, cmd_obj_sp, m_synchronicity,
- m_completion_type));
+
+ if (m_options.m_parsed_command) {
+ new_cmd_sp = CommandObjectScriptingObjectParsed::Create(m_interpreter,
+ m_cmd_name, cmd_obj_sp, m_synchronicity, result);
+ if (!result.Succeeded())
+ return;
+ } else
+ new_cmd_sp.reset(new CommandObjectScriptingObjectRaw(
+ m_interpreter, m_cmd_name, cmd_obj_sp, m_synchronicity,
+ m_completion_type));
}
// Assume we're going to succeed...
diff --git a/lldb/source/Commands/CommandObjectProcess.cpp b/lldb/source/Commands/CommandObjectProcess.cpp
index c7b874d..722b0e0 100644
--- a/lldb/source/Commands/CommandObjectProcess.cpp
+++ b/lldb/source/Commands/CommandObjectProcess.cpp
@@ -1591,26 +1591,6 @@ public:
Options *GetOptions() override { return &m_options; }
- bool VerifyCommandOptionValue(const std::string &option, int &real_value) {
- bool okay = true;
- bool success = false;
- bool tmp_value = OptionArgParser::ToBoolean(option, false, &success);
-
- if (success && tmp_value)
- real_value = 1;
- else if (success && !tmp_value)
- real_value = 0;
- else {
- // If the value isn't 'true' or 'false', it had better be 0 or 1.
- if (!llvm::to_integer(option, real_value))
- real_value = 3;
- if (real_value != 0 && real_value != 1)
- okay = false;
- }
-
- return okay;
- }
-
void PrintSignalHeader(Stream &str) {
str.Printf("NAME PASS STOP NOTIFY\n");
str.Printf("=========== ===== ===== ======\n");
@@ -1666,33 +1646,52 @@ protected:
// the user's options.
ProcessSP process_sp = target.GetProcessSP();
- int stop_action = -1; // -1 means leave the current setting alone
- int pass_action = -1; // -1 means leave the current setting alone
- int notify_action = -1; // -1 means leave the current setting alone
+ std::optional<bool> stop_action = {};
+ std::optional<bool> pass_action = {};
+ std::optional<bool> notify_action = {};
- if (!m_options.stop.empty() &&
- !VerifyCommandOptionValue(m_options.stop, stop_action)) {
- result.AppendError("Invalid argument for command option --stop; must be "
- "true or false.\n");
- return;
+ if (!m_options.stop.empty()) {
+ bool success = false;
+ bool value = OptionArgParser::ToBoolean(m_options.stop, false, &success);
+ if (!success) {
+ result.AppendError(
+ "Invalid argument for command option --stop; must be "
+ "true or false.\n");
+ return;
+ }
+
+ stop_action = value;
}
- if (!m_options.notify.empty() &&
- !VerifyCommandOptionValue(m_options.notify, notify_action)) {
- result.AppendError("Invalid argument for command option --notify; must "
- "be true or false.\n");
- return;
+ if (!m_options.pass.empty()) {
+ bool success = false;
+ bool value = OptionArgParser::ToBoolean(m_options.pass, false, &success);
+ if (!success) {
+ result.AppendError(
+ "Invalid argument for command option --pass; must be "
+ "true or false.\n");
+ return;
+ }
+ pass_action = value;
}
- if (!m_options.pass.empty() &&
- !VerifyCommandOptionValue(m_options.pass, pass_action)) {
- result.AppendError("Invalid argument for command option --pass; must be "
- "true or false.\n");
- return;
+ if (!m_options.notify.empty()) {
+ bool success = false;
+ bool value =
+ OptionArgParser::ToBoolean(m_options.notify, false, &success);
+ if (!success) {
+ result.AppendError("Invalid argument for command option --notify; must "
+ "be true or false.\n");
+ return;
+ }
+ notify_action = value;
+ }
+
+ if (!m_options.notify.empty() && !notify_action.has_value()) {
}
- bool no_actions = (stop_action == -1 && pass_action == -1
- && notify_action == -1);
+ bool no_actions = (!stop_action.has_value() && !pass_action.has_value() &&
+ !notify_action.has_value());
if (m_options.only_target_values && !no_actions) {
result.AppendError("-t is for reporting, not setting, target values.");
return;
@@ -1730,16 +1729,14 @@ protected:
if (signals_sp) {
int32_t signo = signals_sp->GetSignalNumberFromName(arg.c_str());
if (signo != LLDB_INVALID_SIGNAL_NUMBER) {
- // Casting the actions as bools here should be okay, because
- // VerifyCommandOptionValue guarantees the value is either 0 or 1.
- if (stop_action != -1)
- signals_sp->SetShouldStop(signo, stop_action);
- if (pass_action != -1) {
- bool suppress = !pass_action;
+ if (stop_action.has_value())
+ signals_sp->SetShouldStop(signo, *stop_action);
+ if (pass_action.has_value()) {
+ bool suppress = !*pass_action;
signals_sp->SetShouldSuppress(signo, suppress);
}
- if (notify_action != -1)
- signals_sp->SetShouldNotify(signo, notify_action);
+ if (notify_action.has_value())
+ signals_sp->SetShouldNotify(signo, *notify_action);
++num_signals_set;
} else {
result.AppendErrorWithFormat("Invalid signal name '%s'\n",
@@ -1759,21 +1756,15 @@ protected:
}
num_signals_set = num_args;
}
- auto set_lazy_bool = [] (int action) -> LazyBool {
- LazyBool lazy;
- if (action == -1)
- lazy = eLazyBoolCalculate;
- else if (action)
- lazy = eLazyBoolYes;
- else
- lazy = eLazyBoolNo;
- return lazy;
+ auto set_lazy_bool = [](std::optional<bool> action) -> LazyBool {
+ if (!action.has_value())
+ return eLazyBoolCalculate;
+ return (*action) ? eLazyBoolYes : eLazyBoolNo;
};
// If there were no actions, we're just listing, don't add the dummy:
if (!no_actions)
- target.AddDummySignal(arg.ref(),
- set_lazy_bool(pass_action),
+ target.AddDummySignal(arg.ref(), set_lazy_bool(pass_action),
set_lazy_bool(notify_action),
set_lazy_bool(stop_action));
}
@@ -1781,18 +1772,19 @@ protected:
// No signal specified, if any command options were specified, update ALL
// signals. But we can't do this without a process since we don't know
// all the possible signals that might be valid for this target.
- if (((notify_action != -1) || (stop_action != -1) || (pass_action != -1))
- && process_sp) {
+ if ((notify_action.has_value() || stop_action.has_value() ||
+ pass_action.has_value()) &&
+ process_sp) {
if (m_interpreter.Confirm(
"Do you really want to update all the signals?", false)) {
int32_t signo = signals_sp->GetFirstSignalNumber();
while (signo != LLDB_INVALID_SIGNAL_NUMBER) {
- if (notify_action != -1)
- signals_sp->SetShouldNotify(signo, notify_action);
- if (stop_action != -1)
- signals_sp->SetShouldStop(signo, stop_action);
- if (pass_action != -1) {
- bool suppress = !pass_action;
+ if (notify_action.has_value())
+ signals_sp->SetShouldNotify(signo, *notify_action);
+ if (stop_action.has_value())
+ signals_sp->SetShouldStop(signo, *stop_action);
+ if (pass_action.has_value()) {
+ bool suppress = !*pass_action;
signals_sp->SetShouldSuppress(signo, suppress);
}
signo = signals_sp->GetNextSignalNumber(signo);
diff --git a/lldb/source/Commands/Options.td b/lldb/source/Commands/Options.td
index a87f457..dd732e3 100644
--- a/lldb/source/Commands/Options.td
+++ b/lldb/source/Commands/Options.td
@@ -805,19 +805,25 @@ let Command = "script add" in {
def script_add_function : Option<"function", "f">, Group<1>,
Arg<"PythonFunction">,
Desc<"Name of the Python function to bind to this command name.">;
- def script_add_class : Option<"class", "c">, Group<2>, Arg<"PythonClass">,
- Desc<"Name of the Python class to bind to this command name.">;
+ def script_add_class : Option<"class", "c">, Groups<[2,3]>,
+ Arg<"PythonClass">,
+ Desc<"Name of the Python class to bind to this command name.">;
def script_add_help : Option<"help", "h">, Group<1>, Arg<"HelpText">,
- Desc<"The help text to display for this command.">;
- def script_add_overwrite : Option<"overwrite", "o">, Groups<[1,2]>,
- Desc<"Overwrite an existing command at this node.">;
+ Desc<"The help text to display for this command.">;
+ def script_add_overwrite : Option<"overwrite", "o">,
+ Desc<"Overwrite an existing command at this node.">;
def script_add_synchronicity : Option<"synchronicity", "s">,
EnumArg<"ScriptedCommandSynchronicity">,
Desc<"Set the synchronicity of this command's executions with regard to "
"LLDB event system.">;
- def completion_type : Option<"completion-type", "C">,
- EnumArg<"CompletionType">,
- Desc<"Specify which completion type the command should use - if none is specified, the command won't use auto-completion.">;
+ def script_add_completion_type : Option<"completion-type", "C">,
+ Groups<[1,2]>, EnumArg<"CompletionType">,
+ Desc<"Specify which completion type the command should use - if none is "
+ "specified, the command won't use auto-completion.">;
+ def script_add_parsed_command : Option<"parsed", "p">, Group<3>,
+ Desc<"Make a parsed command. The command class will provide the command "
+ "definition by implementing get_options and get_arguments.">;
+
}
let Command = "container add" in {
diff --git a/lldb/source/Core/ValueObject.cpp b/lldb/source/Core/ValueObject.cpp
index e800428..840b100 100644
--- a/lldb/source/Core/ValueObject.cpp
+++ b/lldb/source/Core/ValueObject.cpp
@@ -1312,6 +1312,8 @@ bool ValueObject::DumpPrintableRepresentation(
break;
}
+ // If the requested display style produced no output, try falling back to
+ // alternative presentations.
if (str.empty()) {
if (val_obj_display == eValueObjectRepresentationStyleValue)
str = GetSummaryAsCString();
diff --git a/lldb/source/Expression/DWARFExpression.cpp b/lldb/source/Expression/DWARFExpression.cpp
index fe4928d..c061fd1 100644
--- a/lldb/source/Expression/DWARFExpression.cpp
+++ b/lldb/source/Expression/DWARFExpression.cpp
@@ -608,11 +608,10 @@ static bool Evaluate_DW_OP_entry_value(std::vector<Value> &stack,
StackFrameSP parent_frame = nullptr;
addr_t return_pc = LLDB_INVALID_ADDRESS;
uint32_t current_frame_idx = current_frame->GetFrameIndex();
- uint32_t num_frames = thread->GetStackFrameCount();
- for (uint32_t parent_frame_idx = current_frame_idx + 1;
- parent_frame_idx < num_frames; ++parent_frame_idx) {
+
+ for (uint32_t parent_frame_idx = current_frame_idx + 1;;parent_frame_idx++) {
parent_frame = thread->GetStackFrameAtIndex(parent_frame_idx);
- // Require a valid sequence of frames.
+ // If this is null, we're at the end of the stack.
if (!parent_frame)
break;
diff --git a/lldb/source/Interpreter/CommandObject.cpp b/lldb/source/Interpreter/CommandObject.cpp
index 6324c7e..6ed0fd1 100644
--- a/lldb/source/Interpreter/CommandObject.cpp
+++ b/lldb/source/Interpreter/CommandObject.cpp
@@ -447,6 +447,23 @@ bool CommandObject::IsPairType(ArgumentRepetitionType arg_repeat_type) {
(arg_repeat_type == eArgRepeatPairRangeOptional);
}
+std::optional<ArgumentRepetitionType>
+CommandObject::ArgRepetitionFromString(llvm::StringRef string) {
+ return llvm::StringSwitch<ArgumentRepetitionType>(string)
+ .Case("plain", eArgRepeatPlain)
+ .Case("optional", eArgRepeatOptional)
+ .Case("plus", eArgRepeatPlus)
+ .Case("star", eArgRepeatStar)
+ .Case("range", eArgRepeatRange)
+ .Case("pair-plain", eArgRepeatPairPlain)
+ .Case("pair-optional", eArgRepeatPairOptional)
+ .Case("pair-plus", eArgRepeatPairPlus)
+ .Case("pair-star", eArgRepeatPairStar)
+ .Case("pair-range", eArgRepeatPairRange)
+ .Case("pair-range-optional", eArgRepeatPairRangeOptional)
+ .Default({});
+}
+
static CommandObject::CommandArgumentEntry
OptSetFiltered(uint32_t opt_set_mask,
CommandObject::CommandArgumentEntry &cmd_arg_entry) {
diff --git a/lldb/source/Plugins/Platform/MacOSX/PlatformRemoteAppleXR.h b/lldb/source/Plugins/Platform/MacOSX/PlatformRemoteAppleXR.h
index 4fed6e1..2fbb6ca 100644
--- a/lldb/source/Plugins/Platform/MacOSX/PlatformRemoteAppleXR.h
+++ b/lldb/source/Plugins/Platform/MacOSX/PlatformRemoteAppleXR.h
@@ -6,6 +6,9 @@
//
//===----------------------------------------------------------------------===//
+#ifndef LLDB_SOURCE_PLUGINS_PLATFORM_MACOSX_PLATFORMREMOTEAPPLEXR_H
+#define LLDB_SOURCE_PLUGINS_PLATFORM_MACOSX_PLATFORMREMOTEAPPLEXR_H
+
#include "PlatformRemoteDarwinDevice.h"
namespace lldb_private {
@@ -36,3 +39,5 @@ protected:
llvm::StringRef GetPlatformName() override;
};
} // namespace lldb_private
+
+#endif // LLDB_SOURCE_PLUGINS_PLATFORM_MACOSX_PLATFORMREMOTEAPPLEXR_H
diff --git a/lldb/source/Plugins/Process/Utility/StopInfoMachException.cpp b/lldb/source/Plugins/Process/Utility/StopInfoMachException.cpp
index d756354..7550432 100644
--- a/lldb/source/Plugins/Process/Utility/StopInfoMachException.cpp
+++ b/lldb/source/Plugins/Process/Utility/StopInfoMachException.cpp
@@ -26,6 +26,8 @@
#include "lldb/Target/Thread.h"
#include "lldb/Target/ThreadPlan.h"
#include "lldb/Target/UnixSignals.h"
+#include "lldb/Utility/LLDBLog.h"
+#include "lldb/Utility/Log.h"
#include "lldb/Utility/StreamString.h"
#include <optional>
@@ -596,6 +598,7 @@ StopInfoSP StopInfoMachException::CreateStopReasonWithMachException(
if (exc_type == 0)
return StopInfoSP();
+ bool not_stepping_but_got_singlestep_exception = false;
uint32_t pc_decrement = 0;
ExecutionContext exe_ctx(thread.shared_from_this());
Target *target = exe_ctx.GetTargetPtr();
@@ -720,30 +723,8 @@ StopInfoSP StopInfoMachException::CreateStopReasonWithMachException(
// is set
is_actual_breakpoint = true;
is_trace_if_actual_breakpoint_missing = true;
-#ifndef NDEBUG
- if (thread.GetTemporaryResumeState() != eStateStepping) {
- StreamString s;
- s.Printf("CreateStopReasonWithMachException got EXC_BREAKPOINT [1,0] "
- "indicating trace event, but thread is not tracing, it has "
- "ResumeState %d",
- thread.GetTemporaryResumeState());
- if (RegisterContextSP regctx = thread.GetRegisterContext()) {
- if (const RegisterInfo *ri = regctx->GetRegisterInfoByName("esr")) {
- uint32_t esr =
- (uint32_t)regctx->ReadRegisterAsUnsigned(ri, UINT32_MAX);
- if (esr != UINT32_MAX) {
- s.Printf(" esr value: 0x%" PRIx32, esr);
- }
- }
- }
- thread.GetProcess()->DumpPluginHistory(s);
- llvm::report_fatal_error(s.GetData());
- lldbassert(
- false &&
- "CreateStopReasonWithMachException got EXC_BREAKPOINT [1,0] "
- "indicating trace event, but thread was not doing a step.");
- }
-#endif
+ if (thread.GetTemporaryResumeState() != eStateStepping)
+ not_stepping_but_got_singlestep_exception = true;
}
if (exc_code == 0x102) // EXC_ARM_DA_DEBUG
{
@@ -825,6 +806,56 @@ StopInfoSP StopInfoMachException::CreateStopReasonWithMachException(
break;
}
- return StopInfoSP(new StopInfoMachException(thread, exc_type, exc_data_count,
- exc_code, exc_sub_code));
+ return std::make_shared<StopInfoMachException>(
+ thread, exc_type, exc_data_count, exc_code, exc_sub_code,
+ not_stepping_but_got_singlestep_exception);
+}
+
+// Detect an unusual situation on Darwin where:
+//
+// 0. We did an instruction-step before this.
+// 1. We have a hardware breakpoint or watchpoint set.
+// 2. We resumed the process, but not with an instruction-step.
+// 3. The thread gets an "instruction-step completed" mach exception.
+// 4. The pc has not advanced - it is the same as before.
+//
+// This method returns true for that combination of events.
+bool StopInfoMachException::WasContinueInterrupted(Thread &thread) {
+ Log *log = GetLog(LLDBLog::Step);
+
+ // We got an instruction-step completed mach exception but we were not
+ // doing an instruction step on this thread.
+ if (!m_not_stepping_but_got_singlestep_exception)
+ return false;
+
+ RegisterContextSP reg_ctx_sp(thread.GetRegisterContext());
+ std::optional<addr_t> prev_pc = thread.GetPreviousFrameZeroPC();
+ if (!reg_ctx_sp || !prev_pc)
+ return false;
+
+ // The previous pc value and current pc value are the same.
+ if (*prev_pc != reg_ctx_sp->GetPC())
+ return false;
+
+ // We have a watchpoint -- this is the kernel bug.
+ ProcessSP process_sp = thread.GetProcess();
+ if (process_sp->GetWatchpointResourceList().GetSize()) {
+ LLDB_LOGF(log,
+ "Thread stopped with insn-step completed mach exception but "
+ "thread was not stepping; there is a hardware watchpoint set.");
+ return true;
+ }
+
+ // We have a hardware breakpoint -- this is the kernel bug.
+ auto &bp_site_list = process_sp->GetBreakpointSiteList();
+ for (auto &site : bp_site_list.Sites()) {
+ if (site->IsHardware() && site->IsEnabled()) {
+ LLDB_LOGF(log,
+ "Thread stopped with insn-step completed mach exception but "
+ "thread was not stepping; there is a hardware breakpoint set.");
+ return true;
+ }
+ }
+
+ return false;
}
diff --git a/lldb/source/Plugins/Process/Utility/StopInfoMachException.h b/lldb/source/Plugins/Process/Utility/StopInfoMachException.h
index 541ef5e..c612ac4 100644
--- a/lldb/source/Plugins/Process/Utility/StopInfoMachException.h
+++ b/lldb/source/Plugins/Process/Utility/StopInfoMachException.h
@@ -31,9 +31,12 @@ public:
// Constructors and Destructors
StopInfoMachException(Thread &thread, uint32_t exc_type,
uint32_t exc_data_count, uint64_t exc_code,
- uint64_t exc_subcode)
+ uint64_t exc_subcode,
+ bool not_stepping_but_got_singlestep_exception)
: StopInfo(thread, exc_type), m_exc_data_count(exc_data_count),
- m_exc_code(exc_code), m_exc_subcode(exc_subcode) {}
+ m_exc_code(exc_code), m_exc_subcode(exc_subcode),
+ m_not_stepping_but_got_singlestep_exception(
+ not_stepping_but_got_singlestep_exception) {}
~StopInfoMachException() override = default;
@@ -58,10 +61,14 @@ public:
uint64_t exc_code, uint64_t exc_sub_code, uint64_t exc_sub_sub_code,
bool pc_already_adjusted = true, bool adjust_pc_if_needed = false);
+ bool WasContinueInterrupted(Thread &thread) override;
+
protected:
uint32_t m_exc_data_count;
uint64_t m_exc_code;
uint64_t m_exc_subcode;
+
+ bool m_not_stepping_but_got_singlestep_exception;
};
} // namespace lldb_private
diff --git a/lldb/source/Plugins/ScriptInterpreter/Python/PythonDataObjects.h b/lldb/source/Plugins/ScriptInterpreter/Python/PythonDataObjects.h
index 82eee76..88c1bb7 100644
--- a/lldb/source/Plugins/ScriptInterpreter/Python/PythonDataObjects.h
+++ b/lldb/source/Plugins/ScriptInterpreter/Python/PythonDataObjects.h
@@ -194,6 +194,8 @@ template <typename T, char F> struct PassthroughFormat {
};
template <> struct PythonFormat<char *> : PassthroughFormat<char *, 's'> {};
+template <> struct PythonFormat<const char *> :
+ PassthroughFormat<const char *, 's'> {};
template <> struct PythonFormat<char> : PassthroughFormat<char, 'b'> {};
template <>
struct PythonFormat<unsigned char> : PassthroughFormat<unsigned char, 'B'> {};
diff --git a/lldb/source/Plugins/ScriptInterpreter/Python/SWIGPythonBridge.h b/lldb/source/Plugins/ScriptInterpreter/Python/SWIGPythonBridge.h
index 7cdd557..c1a11b9 100644
--- a/lldb/source/Plugins/ScriptInterpreter/Python/SWIGPythonBridge.h
+++ b/lldb/source/Plugins/ScriptInterpreter/Python/SWIGPythonBridge.h
@@ -32,6 +32,7 @@ class SBStream;
class SBStructuredData;
class SBFileSpec;
class SBModuleSpec;
+class SBStringList;
} // namespace lldb
namespace lldb_private {
@@ -212,6 +213,12 @@ public:
lldb::DebuggerSP debugger, const char *args,
lldb_private::CommandReturnObject &cmd_retobj,
lldb::ExecutionContextRefSP exe_ctx_ref_sp);
+ static bool
+ LLDBSwigPythonCallParsedCommandObject(PyObject *implementor,
+ lldb::DebuggerSP debugger,
+ StructuredDataImpl &args_impl,
+ lldb_private::CommandReturnObject &cmd_retobj,
+ lldb::ExecutionContextRefSP exe_ctx_ref_sp);
static bool LLDBSwigPythonCallModuleInit(const char *python_module_name,
const char *session_dictionary_name,
diff --git a/lldb/source/Plugins/ScriptInterpreter/Python/ScriptInterpreterPython.cpp b/lldb/source/Plugins/ScriptInterpreter/Python/ScriptInterpreterPython.cpp
index ef7a2c1..dadcde6 100644
--- a/lldb/source/Plugins/ScriptInterpreter/Python/ScriptInterpreterPython.cpp
+++ b/lldb/source/Plugins/ScriptInterpreter/Python/ScriptInterpreterPython.cpp
@@ -24,6 +24,7 @@
#include "ScriptInterpreterPythonImpl.h"
#include "lldb/API/SBError.h"
+#include "lldb/API/SBExecutionContext.h"
#include "lldb/API/SBFrame.h"
#include "lldb/API/SBValue.h"
#include "lldb/Breakpoint/StoppointCallbackContext.h"
@@ -531,7 +532,6 @@ void ScriptInterpreterPythonImpl::IOHandlerInputComplete(IOHandler &io_handler,
break;
data_up->user_source.SplitIntoLines(data);
- StructuredData::ObjectSP empty_args_sp;
if (GenerateBreakpointCommandCallbackData(data_up->user_source,
data_up->script_source,
/*has_extra_args=*/false,
@@ -2766,6 +2766,58 @@ bool ScriptInterpreterPythonImpl::RunScriptBasedCommand(
return ret_val;
}
+bool ScriptInterpreterPythonImpl::RunScriptBasedParsedCommand(
+ StructuredData::GenericSP impl_obj_sp, Args &args,
+ ScriptedCommandSynchronicity synchronicity,
+ lldb_private::CommandReturnObject &cmd_retobj, Status &error,
+ const lldb_private::ExecutionContext &exe_ctx) {
+ if (!impl_obj_sp || !impl_obj_sp->IsValid()) {
+ error.SetErrorString("no function to execute");
+ return false;
+ }
+
+ lldb::DebuggerSP debugger_sp = m_debugger.shared_from_this();
+ lldb::ExecutionContextRefSP exe_ctx_ref_sp(new ExecutionContextRef(exe_ctx));
+
+ if (!debugger_sp.get()) {
+ error.SetErrorString("invalid Debugger pointer");
+ return false;
+ }
+
+ bool ret_val = false;
+
+ std::string err_msg;
+
+ {
+ Locker py_lock(this,
+ Locker::AcquireLock | Locker::InitSession |
+ (cmd_retobj.GetInteractive() ? 0 : Locker::NoSTDIN),
+ Locker::FreeLock | Locker::TearDownSession);
+
+ SynchronicityHandler synch_handler(debugger_sp, synchronicity);
+
+ StructuredData::ArraySP args_arr_sp(new StructuredData::Array());
+
+ for (const Args::ArgEntry &entry : args) {
+ args_arr_sp->AddStringItem(entry.ref());
+ }
+ StructuredDataImpl args_impl(args_arr_sp);
+
+ ret_val = SWIGBridge::LLDBSwigPythonCallParsedCommandObject(
+ static_cast<PyObject *>(impl_obj_sp->GetValue()), debugger_sp,
+ args_impl, cmd_retobj, exe_ctx_ref_sp);
+ }
+
+ if (!ret_val)
+ error.SetErrorString("unable to execute script function");
+ else if (cmd_retobj.GetStatus() == eReturnStatusFailed)
+ return false;
+
+ error.Clear();
+ return ret_val;
+}
+
+
/// In Python, a special attribute __doc__ contains the docstring for an object
/// (function, method, class, ...) if any is defined Otherwise, the attribute's
/// value is None.
@@ -2884,6 +2936,205 @@ uint32_t ScriptInterpreterPythonImpl::GetFlagsForCommandObject(
return result;
}
+StructuredData::ObjectSP
+ScriptInterpreterPythonImpl::GetOptionsForCommandObject(
+ StructuredData::GenericSP cmd_obj_sp) {
+ StructuredData::ObjectSP result = {};
+
+ Locker py_lock(this, Locker::AcquireLock | Locker::NoSTDIN, Locker::FreeLock);
+
+ static char callee_name[] = "get_options_definition";
+
+ if (!cmd_obj_sp)
+ return result;
+
+ PythonObject implementor(PyRefType::Borrowed,
+ (PyObject *)cmd_obj_sp->GetValue());
+
+ if (!implementor.IsAllocated())
+ return result;
+
+ PythonObject pmeth(PyRefType::Owned,
+ PyObject_GetAttrString(implementor.get(), callee_name));
+
+ if (PyErr_Occurred())
+ PyErr_Clear();
+
+ if (!pmeth.IsAllocated())
+ return result;
+
+ if (PyCallable_Check(pmeth.get()) == 0) {
+ if (PyErr_Occurred())
+ PyErr_Clear();
+ return result;
+ }
+
+ if (PyErr_Occurred())
+ PyErr_Clear();
+
+ PythonDictionary py_return = unwrapOrSetPythonException(
+ As<PythonDictionary>(implementor.CallMethod(callee_name)));
+
+ // if it fails, print the error but otherwise go on
+ if (PyErr_Occurred()) {
+ PyErr_Print();
+ PyErr_Clear();
+ return {};
+ }
+ return py_return.CreateStructuredObject();
+}
+
+StructuredData::ObjectSP
+ScriptInterpreterPythonImpl::GetArgumentsForCommandObject(
+ StructuredData::GenericSP cmd_obj_sp) {
+ StructuredData::ObjectSP result = {};
+
+ Locker py_lock(this, Locker::AcquireLock | Locker::NoSTDIN, Locker::FreeLock);
+
+ static char callee_name[] = "get_args_definition";
+
+ if (!cmd_obj_sp)
+ return result;
+
+ PythonObject implementor(PyRefType::Borrowed,
+ (PyObject *)cmd_obj_sp->GetValue());
+
+ if (!implementor.IsAllocated())
+ return result;
+
+ PythonObject pmeth(PyRefType::Owned,
+ PyObject_GetAttrString(implementor.get(), callee_name));
+
+ if (PyErr_Occurred())
+ PyErr_Clear();
+
+ if (!pmeth.IsAllocated())
+ return result;
+
+ if (PyCallable_Check(pmeth.get()) == 0) {
+ if (PyErr_Occurred())
+ PyErr_Clear();
+ return result;
+ }
+
+ if (PyErr_Occurred())
+ PyErr_Clear();
+
+ PythonList py_return = unwrapOrSetPythonException(
+ As<PythonList>(implementor.CallMethod(callee_name)));
+
+ // if it fails, print the error but otherwise go on
+ if (PyErr_Occurred()) {
+ PyErr_Print();
+ PyErr_Clear();
+ return {};
+ }
+ return py_return.CreateStructuredObject();
+}
+
+void
+ScriptInterpreterPythonImpl::OptionParsingStartedForCommandObject(
+ StructuredData::GenericSP cmd_obj_sp) {
+
+ Locker py_lock(this, Locker::AcquireLock | Locker::NoSTDIN, Locker::FreeLock);
+
+ static char callee_name[] = "option_parsing_started";
+
+ if (!cmd_obj_sp)
+ return ;
+
+ PythonObject implementor(PyRefType::Borrowed,
+ (PyObject *)cmd_obj_sp->GetValue());
+
+ if (!implementor.IsAllocated())
+ return;
+
+ PythonObject pmeth(PyRefType::Owned,
+ PyObject_GetAttrString(implementor.get(), callee_name));
+
+ if (PyErr_Occurred())
+ PyErr_Clear();
+
+ if (!pmeth.IsAllocated())
+ return;
+
+ if (PyCallable_Check(pmeth.get()) == 0) {
+ if (PyErr_Occurred())
+ PyErr_Clear();
+ return;
+ }
+
+ if (PyErr_Occurred())
+ PyErr_Clear();
+
+ // option_parsing_starting doesn't return anything, ignore anything but
+ // python errors.
+ unwrapOrSetPythonException(
+ As<bool>(implementor.CallMethod(callee_name)));
+
+ // if it fails, print the error but otherwise go on
+ if (PyErr_Occurred()) {
+ PyErr_Print();
+ PyErr_Clear();
+ return;
+ }
+}
+
+bool
+ScriptInterpreterPythonImpl::SetOptionValueForCommandObject(
+ StructuredData::GenericSP cmd_obj_sp, ExecutionContext *exe_ctx,
+ llvm::StringRef long_option, llvm::StringRef value) {
+ StructuredData::ObjectSP result = {};
+
+ Locker py_lock(this, Locker::AcquireLock | Locker::NoSTDIN, Locker::FreeLock);
+
+ static char callee_name[] = "set_option_value";
+
+ if (!cmd_obj_sp)
+ return false;
+
+ PythonObject implementor(PyRefType::Borrowed,
+ (PyObject *)cmd_obj_sp->GetValue());
+
+ if (!implementor.IsAllocated())
+ return false;
+
+ PythonObject pmeth(PyRefType::Owned,
+ PyObject_GetAttrString(implementor.get(), callee_name));
+
+ if (PyErr_Occurred())
+ PyErr_Clear();
+
+ if (!pmeth.IsAllocated())
+ return false;
+
+ if (PyCallable_Check(pmeth.get()) == 0) {
+ if (PyErr_Occurred())
+ PyErr_Clear();
+ return false;
+ }
+
+ if (PyErr_Occurred())
+ PyErr_Clear();
+
+ lldb::ExecutionContextRefSP exe_ctx_ref_sp;
+ if (exe_ctx)
+ exe_ctx_ref_sp.reset(new ExecutionContextRef(exe_ctx));
+ PythonObject ctx_ref_obj = SWIGBridge::ToSWIGWrapper(exe_ctx_ref_sp);
+
+ bool py_return = unwrapOrSetPythonException(
+ As<bool>(implementor.CallMethod(callee_name, ctx_ref_obj, long_option.str().c_str(),
+ value.str().c_str())));
+
+ // if it fails, print the error but otherwise go on
+ if (PyErr_Occurred()) {
+ PyErr_Print();
+ PyErr_Clear();
+ return false;
+ }
+ return py_return;
+}
+
bool ScriptInterpreterPythonImpl::GetLongHelpForCommandObject(
StructuredData::GenericSP cmd_obj_sp, std::string &dest) {
dest.clear();
diff --git a/lldb/source/Plugins/ScriptInterpreter/Python/ScriptInterpreterPythonImpl.h b/lldb/source/Plugins/ScriptInterpreter/Python/ScriptInterpreterPythonImpl.h
index a334998..fcd21df 100644
--- a/lldb/source/Plugins/ScriptInterpreter/Python/ScriptInterpreterPythonImpl.h
+++ b/lldb/source/Plugins/ScriptInterpreter/Python/ScriptInterpreterPythonImpl.h
@@ -182,6 +182,13 @@ public:
lldb_private::CommandReturnObject &cmd_retobj, Status &error,
const lldb_private::ExecutionContext &exe_ctx) override;
+ virtual bool RunScriptBasedParsedCommand(
+ StructuredData::GenericSP impl_obj_sp, Args& args,
+ ScriptedCommandSynchronicity synchronicity,
+ lldb_private::CommandReturnObject &cmd_retobj, Status &error,
+ const lldb_private::ExecutionContext &exe_ctx) override;
+
+
Status GenerateFunction(const char *signature, const StringList &input,
bool is_callback) override;
@@ -212,6 +219,20 @@ public:
bool GetLongHelpForCommandObject(StructuredData::GenericSP cmd_obj_sp,
std::string &dest) override;
+
+ StructuredData::ObjectSP
+ GetOptionsForCommandObject(StructuredData::GenericSP cmd_obj_sp) override;
+
+ StructuredData::ObjectSP
+ GetArgumentsForCommandObject(StructuredData::GenericSP cmd_obj_sp) override;
+
+ bool SetOptionValueForCommandObject(StructuredData::GenericSP cmd_obj_sp,
+ ExecutionContext *exe_ctx,
+ llvm::StringRef long_option,
+ llvm::StringRef value) override;
+
+ void OptionParsingStartedForCommandObject(
+ StructuredData::GenericSP cmd_obj_sp) override;
bool CheckObjectExists(const char *name) override {
if (!name || !name[0])
diff --git a/lldb/source/Plugins/SymbolFile/DWARF/DWARFDeclContext.h b/lldb/source/Plugins/SymbolFile/DWARF/DWARFDeclContext.h
index a20a862..7e6c5f5 100644
--- a/lldb/source/Plugins/SymbolFile/DWARF/DWARFDeclContext.h
+++ b/lldb/source/Plugins/SymbolFile/DWARF/DWARFDeclContext.h
@@ -47,6 +47,10 @@ public:
DWARFDeclContext() : m_entries() {}
+ DWARFDeclContext(llvm::ArrayRef<Entry> entries) {
+ llvm::append_range(m_entries, entries);
+ }
+
void AppendDeclContext(dw_tag_t tag, const char *name) {
m_entries.push_back(Entry(tag, name));
}
diff --git a/lldb/source/Plugins/SymbolFile/DWARF/DebugNamesDWARFIndex.cpp b/lldb/source/Plugins/SymbolFile/DWARF/DebugNamesDWARFIndex.cpp
index b718f98..4da0d56 100644
--- a/lldb/source/Plugins/SymbolFile/DWARF/DebugNamesDWARFIndex.cpp
+++ b/lldb/source/Plugins/SymbolFile/DWARF/DebugNamesDWARFIndex.cpp
@@ -13,6 +13,7 @@
#include "lldb/Core/Module.h"
#include "lldb/Utility/RegularExpression.h"
#include "lldb/Utility/Stream.h"
+#include "llvm/ADT/Sequence.h"
#include <optional>
using namespace lldb_private;
@@ -218,6 +219,108 @@ void DebugNamesDWARFIndex::GetCompleteObjCClass(
m_fallback.GetCompleteObjCClass(class_name, must_be_implementation, callback);
}
+namespace {
+using Entry = llvm::DWARFDebugNames::Entry;
+
+/// If `entry` and all of its parents have an `IDX_parent`, use that information
+/// to build and return a list of at most `max_parents` parent Entries.
+/// `entry` itself is not included in the list.
+/// If any parent does not have an `IDX_parent`, or the Entry data is corrupted,
+/// nullopt is returned.
+std::optional<llvm::SmallVector<Entry, 4>>
+getParentChain(Entry entry, uint32_t max_parents) {
+ llvm::SmallVector<Entry, 4> parent_entries;
+
+ do {
+ if (!entry.hasParentInformation())
+ return std::nullopt;
+
+ llvm::Expected<std::optional<Entry>> parent = entry.getParentDIEEntry();
+ if (!parent) {
+ // Bad data.
+ LLDB_LOG_ERROR(
+ GetLog(DWARFLog::Lookups), parent.takeError(),
+ "Failed to extract parent entry from a non-empty IDX_parent");
+ return std::nullopt;
+ }
+
+ // Last parent in the chain.
+ if (!parent->has_value())
+ break;
+
+ parent_entries.push_back(**parent);
+ entry = **parent;
+ } while (parent_entries.size() < max_parents);
+
+ return parent_entries;
+}
+} // namespace
+
+void DebugNamesDWARFIndex::GetFullyQualifiedType(
+ const DWARFDeclContext &context,
+ llvm::function_ref<bool(DWARFDIE die)> callback) {
+ if (context.GetSize() == 0)
+ return;
+
+ llvm::StringRef leaf_name = context[0].name;
+ llvm::SmallVector<llvm::StringRef> parent_names;
+ for (auto idx : llvm::seq<int>(1, context.GetSize()))
+ parent_names.emplace_back(context[idx].name);
+
+ // For each entry, grab its parent chain and check if we have a match.
+ for (const DebugNames::Entry &entry :
+ m_debug_names_up->equal_range(leaf_name)) {
+ if (!isType(entry.tag()))
+ continue;
+
+ // Grab at most one extra parent, subsequent parents are not necessary to
+ // test equality.
+ std::optional<llvm::SmallVector<Entry, 4>> parent_chain =
+ getParentChain(entry, parent_names.size() + 1);
+
+ if (!parent_chain) {
+ // Fallback: use the base class implementation.
+ if (!ProcessEntry(entry, [&](DWARFDIE die) {
+ return GetFullyQualifiedTypeImpl(context, die, callback);
+ }))
+ return;
+ continue;
+ }
+
+ if (SameParentChain(parent_names, *parent_chain) &&
+ !ProcessEntry(entry, callback))
+ return;
+ }
+}
+
+bool DebugNamesDWARFIndex::SameParentChain(
+ llvm::ArrayRef<llvm::StringRef> parent_names,
+ llvm::ArrayRef<DebugNames::Entry> parent_entries) const {
+
+ if (parent_entries.size() != parent_names.size())
+ return false;
+
+ auto SameAsEntryATName = [this](llvm::StringRef name,
+ const DebugNames::Entry &entry) {
+ // Peek at the AT_name of `entry` and test equality to `name`.
+ auto maybe_dieoffset = entry.getDIEUnitOffset();
+ if (!maybe_dieoffset)
+ return false;
+ auto die_ref = ToDIERef(entry);
+ if (!die_ref)
+ return false;
+ return name == m_debug_info.PeekDIEName(*die_ref);
+ };
+
+ // If the AT_name of any parent fails to match the expected name, we don't
+ // have a match.
+ for (auto [parent_name, parent_entry] :
+ llvm::zip_equal(parent_names, parent_entries))
+ if (!SameAsEntryATName(parent_name, parent_entry))
+ return false;
+ return true;
+}
+
void DebugNamesDWARFIndex::GetTypes(
ConstString name, llvm::function_ref<bool(DWARFDIE die)> callback) {
for (const DebugNames::Entry &entry :
diff --git a/lldb/source/Plugins/SymbolFile/DWARF/DebugNamesDWARFIndex.h b/lldb/source/Plugins/SymbolFile/DWARF/DebugNamesDWARFIndex.h
index cca0913..b54dd11 100644
--- a/lldb/source/Plugins/SymbolFile/DWARF/DebugNamesDWARFIndex.h
+++ b/lldb/source/Plugins/SymbolFile/DWARF/DebugNamesDWARFIndex.h
@@ -42,6 +42,11 @@ public:
void GetCompleteObjCClass(
ConstString class_name, bool must_be_implementation,
llvm::function_ref<bool(DWARFDIE die)> callback) override;
+
+ /// Uses DWARF5's IDX_parent fields, when available, to speed up this query.
+ void GetFullyQualifiedType(
+ const DWARFDeclContext &context,
+ llvm::function_ref<bool(DWARFDIE die)> callback) override;
void GetTypes(ConstString name,
llvm::function_ref<bool(DWARFDIE die)> callback) override;
void GetTypes(const DWARFDeclContext &context,
@@ -83,6 +88,10 @@ private:
bool ProcessEntry(const DebugNames::Entry &entry,
llvm::function_ref<bool(DWARFDIE die)> callback);
+ /// Returns true if `parent_entries` have identical names to `parent_names`.
+ bool SameParentChain(llvm::ArrayRef<llvm::StringRef> parent_names,
+ llvm::ArrayRef<DebugNames::Entry> parent_entries) const;
+
static void MaybeLogLookupError(llvm::Error error,
const DebugNames::NameIndex &ni,
llvm::StringRef name);
diff --git a/lldb/source/Plugins/SymbolFile/DWARF/SymbolFileDWARF.h b/lldb/source/Plugins/SymbolFile/DWARF/SymbolFileDWARF.h
index 60baf69..01518b2 100644
--- a/lldb/source/Plugins/SymbolFile/DWARF/SymbolFileDWARF.h
+++ b/lldb/source/Plugins/SymbolFile/DWARF/SymbolFileDWARF.h
@@ -373,6 +373,9 @@ public:
Type *ResolveTypeUID(const DIERef &die_ref);
+ /// Returns the DWARFIndex for this symbol, if it exists.
+ DWARFIndex *getIndex() { return m_index.get(); }
+
protected:
SymbolFileDWARF(const SymbolFileDWARF &) = delete;
const SymbolFileDWARF &operator=(const SymbolFileDWARF &) = delete;
diff --git a/lldb/source/Target/Thread.cpp b/lldb/source/Target/Thread.cpp
index 8ae2179..4dfad23 100644
--- a/lldb/source/Target/Thread.cpp
+++ b/lldb/source/Target/Thread.cpp
@@ -221,7 +221,7 @@ Thread::Thread(Process &process, lldb::tid_t tid, bool use_invalid_index_id)
: process.GetNextThreadIndexID(tid)),
m_reg_context_sp(), m_state(eStateUnloaded), m_state_mutex(),
m_frame_mutex(), m_curr_frames_sp(), m_prev_frames_sp(),
- m_resume_signal(LLDB_INVALID_SIGNAL_NUMBER),
+ m_prev_framezero_pc(), m_resume_signal(LLDB_INVALID_SIGNAL_NUMBER),
m_resume_state(eStateRunning), m_temporary_resume_state(eStateRunning),
m_unwinder_up(), m_destroy_called(false),
m_override_should_notify(eLazyBoolCalculate),
@@ -250,6 +250,7 @@ void Thread::DestroyThread() {
std::lock_guard<std::recursive_mutex> guard(m_frame_mutex);
m_curr_frames_sp.reset();
m_prev_frames_sp.reset();
+ m_prev_framezero_pc.reset();
}
void Thread::BroadcastSelectedFrameChange(StackID &new_frame_id) {
@@ -422,6 +423,12 @@ lldb::StopInfoSP Thread::GetPrivateStopInfo(bool calculate) {
}
}
}
+
+ // If we were resuming the process and it was interrupted,
+ // return no stop reason. This thread would like to resume.
+ if (m_stop_info_sp && m_stop_info_sp->WasContinueInterrupted(*this))
+ return {};
+
return m_stop_info_sp;
}
@@ -1408,16 +1415,22 @@ StackFrameListSP Thread::GetStackFrameList() {
return m_curr_frames_sp;
}
+std::optional<addr_t> Thread::GetPreviousFrameZeroPC() {
+ return m_prev_framezero_pc;
+}
+
void Thread::ClearStackFrames() {
std::lock_guard<std::recursive_mutex> guard(m_frame_mutex);
GetUnwinder().Clear();
+ m_prev_framezero_pc.reset();
+ if (RegisterContextSP reg_ctx_sp = GetRegisterContext())
+ m_prev_framezero_pc = reg_ctx_sp->GetPC();
// Only store away the old "reference" StackFrameList if we got all its
// frames:
// FIXME: At some point we can try to splice in the frames we have fetched
- // into
- // the new frame as we make it, but let's not try that now.
+ // into the new frame as we make it, but let's not try that now.
if (m_curr_frames_sp && m_curr_frames_sp->GetAllFramesFetched())
m_prev_frames_sp.swap(m_curr_frames_sp);
m_curr_frames_sp.reset();
diff --git a/lldb/test/API/commands/command/script/add/TestAddParsedCommand.py b/lldb/test/API/commands/command/script/add/TestAddParsedCommand.py
new file mode 100644
index 0000000..7dba9c6
--- /dev/null
+++ b/lldb/test/API/commands/command/script/add/TestAddParsedCommand.py
@@ -0,0 +1,146 @@
+"""
+Test option and argument definitions in parsed script commands
+"""
+
+
+import sys
+import os
+import lldb
+from lldbsuite.test.decorators import *
+from lldbsuite.test.lldbtest import *
+
+
+class ParsedCommandTestCase(TestBase):
+ NO_DEBUG_INFO_TESTCASE = True
+
+ def test(self):
+ self.pycmd_tests()
+
+ def check_help_options(self, cmd_name, opt_list, substrs = []):
+ """
+ Pass the command name in cmd_name and a vector of the short option, type & long option.
+ This will append the checks for all the options and test "help command".
+ Any strings already in substrs will also be checked.
+ Any element in opt list that begin with "+" will be added to the checked strings as is.
+ """
+ for elem in opt_list:
+ if elem[0] == "+":
+ substrs.append(elem[1:])
+ else:
+ (short_opt, type, long_opt) = elem
+ substrs.append(f"-{short_opt} <{type}> ( --{long_opt} <{type}> )")
+ print(f"Opt Vec\n{substrs}")
+ self.expect("help " + cmd_name, substrs = substrs)
+
+ def pycmd_tests(self):
+ source_dir = self.getSourceDir()
+ test_file_path = os.path.join(source_dir, "test_commands.py")
+ self.runCmd("command script import " + test_file_path)
+ self.expect("help", substrs = ["no-args", "one-arg-no-opt", "two-args"])
+
+ # Test that we did indeed add these commands as user commands:
+
+ # This is the function to remove the custom commands in order to have a
+ # clean slate for the next test case.
+ def cleanup():
+ self.runCmd("command script delete no-args one-arg-no-opt two-args", check=False)
+
+ # Execute the cleanup function during test case tear down.
+ self.addTearDownHook(cleanup)
+
+ # First test the no arguments command. Make sure the help is right:
+ no_arg_opts = [["b", "boolean", "bool-arg"],
+ "+a boolean arg, defaults to True",
+ ["d", "filename", "disk-file-name"],
+ "+An on disk filename",
+ ["e", "none", "enum-option"],
+ "+An enum, doesn't actually do anything",
+ "+Values: foo | bar | baz",
+ ["l", "linenum", "line-num"],
+ "+A line number",
+ ["s", "shlib-name", "shlib-name"],
+ "+A shared library name"]
+ substrs = ["Example command for use in debugging",
+ "Syntax: no-args <cmd-options>"]
+
+ self.check_help_options("no-args", no_arg_opts, substrs)
+
+ # Make sure the command doesn't accept arguments:
+ self.expect("no-args an-arg", substrs=["'no-args' doesn't take any arguments."],
+ error=True)
+
+ # Try setting the bool with the wrong value:
+ self.expect("no-args -b Something",
+ substrs=["Error setting option: bool-arg to Something"],
+ error=True)
+ # Try setting the enum to an illegal value as well:
+ self.expect("no-args --enum-option Something",
+ substrs=["error: Error setting option: enum-option to Something"],
+ error=True)
+
+ # Check some of the command groups:
+ self.expect("no-args -b true -s Something -l 10",
+ substrs=["error: invalid combination of options for the given command"],
+ error=True)
+
+ # Now set the bool arg correctly, note only the first option was set:
+ self.expect("no-args -b true", substrs=["bool-arg (set: True): True",
+ "shlib-name (set: False):",
+ "disk-file-name (set: False):",
+ "line-num (set: False):",
+ "enum-option (set: False):"])
+
+ # Now set the enum arg correctly, note only the first option was set:
+ self.expect("no-args -e foo", substrs=["bool-arg (set: False):",
+ "shlib-name (set: False):",
+ "disk-file-name (set: False):",
+ "line-num (set: False):",
+ "enum-option (set: True): foo"])
+ # Try a pair together:
+ self.expect("no-args -b false -s Something", substrs=["bool-arg (set: True): False",
+ "shlib-name (set: True): Something",
+ "disk-file-name (set: False):",
+ "line-num (set: False):",
+ "enum-option (set: False):"])
+
+ # Next try some completion tests:
+
+ interp = self.dbg.GetCommandInterpreter()
+ matches = lldb.SBStringList()
+ descriptions = lldb.SBStringList()
+
+ # First try an enum completion:
+ num_completions = interp.HandleCompletionWithDescriptions("no-args -e f", 12, 0,
+ 1000, matches, descriptions)
+ self.assertEqual(num_completions, 1, "Only one completion for foo")
+ self.assertEqual(matches.GetSize(), 2, "The first element is the complete additional text")
+ self.assertEqual(matches.GetStringAtIndex(0), "oo ", "And we got the right extra characters")
+ self.assertEqual(matches.GetStringAtIndex(1), "foo", "And we got the right match")
+ self.assertEqual(descriptions.GetSize(), 2, "descriptions matche the return length")
+ # FIXME: we don't return descriptions for enum elements
+ #self.assertEqual(descriptions.GetStringAtIndex(1), "does foo things", "And we got the right description")
+
+ # Now try an internal completer, the on disk file one is handy:
+ partial_name = os.path.join(source_dir, "test_")
+ cmd_str = f"no-args -d '{partial_name}'"
+
+ matches.Clear()
+ descriptions.Clear()
+ num_completions = interp.HandleCompletionWithDescriptions(cmd_str, len(cmd_str) - 1, 0,
+ 1000, matches, descriptions)
+ print(f"First: {matches.GetStringAtIndex(0)}\nSecond: {matches.GetStringAtIndex(1)}\nThird: {matches.GetStringAtIndex(2)}")
+ self.assertEqual(num_completions, 1, "Only one completion for source file")
+ self.assertEqual(matches.GetSize(), 2, "The first element is the complete line")
+ self.assertEqual(matches.GetStringAtIndex(0), "commands.py' ", "And we got the right extra characters")
+ self.assertEqual(matches.GetStringAtIndex(1), test_file_path, "And we got the right match")
+ self.assertEqual(descriptions.GetSize(), 2, "descriptions match the return length")
+ # FIXME: we don't return descriptions for enum elements
+ #self.assertEqual(descriptions.GetStringAtIndex(1), "does foo things", "And we got the right description")
+
+ # Try a command with arguments.
+ # FIXME: It should be enough to define an argument and it's type to get the completer
+ # wired up for that argument type if it is a known type. But that isn't wired up in the
+ # command parser yet, so I don't have any tests for that. We also don't currently check
+ # that the arguments passed match the argument specifications, so here I just pass a couple
+ # sets of arguments and make sure we get back what we put in:
+ self.expect("two-args 'First Argument' 'Second Argument'", substrs=["0: First Argument", "1: Second Argument"])
diff --git a/lldb/test/API/commands/command/script/add/test_commands.py b/lldb/test/API/commands/command/script/add/test_commands.py
new file mode 100644
index 0000000..801d588
--- /dev/null
+++ b/lldb/test/API/commands/command/script/add/test_commands.py
@@ -0,0 +1,174 @@
+"""
+Test defining commands using the lldb command definitions
+"""
+import inspect
+import sys
+import lldb
+from lldb.plugins.parsed_cmd import ParsedCommand
+
+class ReportingCmd(ParsedCommand):
+ def __init__(self, debugger, unused):
+ super().__init__(debugger, unused)
+
+ def __call__(self, debugger, args_array, exe_ctx, result):
+ opt_def = self.get_options_definition()
+ if len(opt_def):
+ result.AppendMessage("Options:\n")
+ for long_option, elem in opt_def.items():
+ dest = elem["dest"]
+ result.AppendMessage(f"{long_option} (set: {elem['_value_set']}): {object.__getattribute__(self.ov_parser, dest)}\n")
+ else:
+ result.AppendMessage("No options\n")
+
+ num_args = args_array.GetSize()
+ if num_args > 0:
+ result.AppendMessage(f"{num_args} arguments:")
+ for idx in range(0,num_args):
+ result.AppendMessage(f"{idx}: {args_array.GetItemAtIndex(idx).GetStringValue(10000)}\n")
+
+class NoArgsCommand(ReportingCmd):
+ program = "no-args"
+
+ def __init__(self, debugger, unused):
+ super().__init__(debugger, unused)
+
+ @classmethod
+ def register_lldb_command(cls, debugger, module_name):
+ ParsedCommand.do_register_cmd(cls, debugger, module_name)
+
+ def setup_command_definition(self):
+ self.ov_parser.add_option(
+ "b",
+ "bool-arg",
+ "a boolean arg, defaults to True",
+ value_type = lldb.eArgTypeBoolean,
+ groups = [1,2],
+ dest = "bool_arg",
+ default = True
+ )
+
+ self.ov_parser.add_option(
+ "s",
+ "shlib-name",
+ "A shared library name.",
+ value_type=lldb.eArgTypeShlibName,
+ groups = [1, [3,4]],
+ dest = "shlib_name",
+ default = None
+ )
+
+ self.ov_parser.add_option(
+ "d",
+ "disk-file-name",
+ "An on disk filename",
+ value_type = lldb.eArgTypeFilename,
+ dest = "disk_file_name",
+ default = None
+ )
+
+ self.ov_parser.add_option(
+ "l",
+ "line-num",
+ "A line number",
+ value_type = lldb.eArgTypeLineNum,
+ groups = 3,
+ dest = "line_num",
+ default = 0
+ )
+
+ self.ov_parser.add_option(
+ "e",
+ "enum-option",
+ "An enum, doesn't actually do anything",
+ enum_values = [["foo", "does foo things"],
+ ["bar", "does bar things"],
+ ["baz", "does baz things"]],
+ groups = 4,
+ dest = "enum_option",
+ default = "foo"
+ )
+
+ def get_short_help(self):
+ return "Example command for use in debugging"
+
+ def get_long_help(self):
+ return self.help_string
+
+class OneArgCommandNoOptions(ReportingCmd):
+ program = "one-arg-no-opt"
+
+ def __init__(self, debugger, unused):
+ super().__init__(debugger, unused)
+
+ @classmethod
+ def register_lldb_command(cls, debugger, module_name):
+ ParsedCommand.do_register_cmd(cls, debugger, module_name)
+
+ def setup_command_definition(self):
+ self.ov_parser.add_argument_set([self.ov_parser.make_argument_element(lldb.eArgTypeSourceFile, "plain")])
+
+ def get_short_help(self):
+ return "Example command for use in debugging"
+
+ def get_long_help(self):
+ return self.help_string
+
+class TwoArgGroupsCommand(ReportingCmd):
+ program = "two-args"
+
+ def __init__(self, debugger, unused):
+ super().__init__(debugger, unused)
+
+ @classmethod
+ def register_lldb_command(cls, debugger, module_name):
+ ParsedCommand.do_register_cmd(cls, debugger, module_name)
+
+ def setup_command_definition(self):
+ self.ov_parser.add_option(
+ "l",
+ "language",
+ "language defaults to None",
+ value_type = lldb.eArgTypeLanguage,
+ groups = [1,2],
+ dest = "language",
+ default = None
+ )
+
+ self.ov_parser.add_option(
+ "c",
+ "log-channel",
+ "log channel - defaults to lldb",
+ value_type=lldb.eArgTypeLogChannel,
+ groups = [1, 3],
+ dest = "log_channel",
+ default = "lldb"
+ )
+
+ self.ov_parser.add_option(
+ "p",
+ "process-name",
+ "A process name, defaults to None",
+ value_type = lldb.eArgTypeProcessName,
+ dest = "proc_name",
+ default = None
+ )
+
+ self.ov_parser.add_argument_set([self.ov_parser.make_argument_element(lldb.eArgTypeClassName, "plain", [1,2]),
+ self.ov_parser.make_argument_element(lldb.eArgTypeOffset, "optional", [1,2])])
+
+ self.ov_parser.add_argument_set([self.ov_parser.make_argument_element(lldb.eArgTypePythonClass, "plain", [3,4]),
+ self.ov_parser.make_argument_element(lldb.eArgTypePid, "optional", [3,4])])
+
+ def get_short_help(self):
+ return "Example command for use in debugging"
+
+ def get_long_help(self):
+ return self.help_string
+
+def __lldb_init_module(debugger, dict):
+ # Register all classes that have a register_lldb_command method
+ for _name, cls in inspect.getmembers(sys.modules[__name__]):
+ if inspect.isclass(cls) and callable(
+ getattr(cls, "register_lldb_command", None)
+ ):
+ cls.register_lldb_command(debugger, __name__)
diff --git a/lldb/test/API/commands/expression/test/TestExprs.py b/lldb/test/API/commands/expression/test/TestExprs.py
index e95c76b..0e3d2e6 100644
--- a/lldb/test/API/commands/expression/test/TestExprs.py
+++ b/lldb/test/API/commands/expression/test/TestExprs.py
@@ -12,7 +12,7 @@ o test_expr_commands_can_handle_quotes:
"""
-import unittest2
+import unittest
import lldb
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
@@ -46,7 +46,7 @@ class BasicExprCommandsTestCase(TestBase):
# llvm.org/pr17135 <rdar://problem/14874559>
# APFloat::toString does not identify the correct (i.e. least) precision.
- @unittest2.expectedFailure
+ @unittest.expectedFailure
def test_floating_point_expr_commands(self):
self.build_and_run()
diff --git a/lldb/test/API/commands/platform/sdk/TestPlatformSDK.py b/lldb/test/API/commands/platform/sdk/TestPlatformSDK.py
index bf79a5b..6af5767 100644
--- a/lldb/test/API/commands/platform/sdk/TestPlatformSDK.py
+++ b/lldb/test/API/commands/platform/sdk/TestPlatformSDK.py
@@ -39,8 +39,8 @@ class PlatformSDKTestCase(TestBase):
@no_debug_info_test
@skipUnlessDarwin
- @expectedFailureIfFn(no_debugserver)
- @expectedFailureIfFn(port_not_available)
+ @skipTestIfFn(no_debugserver)
+ @skipTestIfFn(port_not_available)
@skipIfRemote
def test_macos_sdk(self):
self.build()
diff --git a/lldb/test/API/functionalities/breakpoint/hardware_breakpoints/require_hw_breakpoints/TestRequireHWBreakpoints.py b/lldb/test/API/functionalities/breakpoint/hardware_breakpoints/require_hw_breakpoints/TestRequireHWBreakpoints.py
index ae4f7ea..5325f0f 100644
--- a/lldb/test/API/functionalities/breakpoint/hardware_breakpoints/require_hw_breakpoints/TestRequireHWBreakpoints.py
+++ b/lldb/test/API/functionalities/breakpoint/hardware_breakpoints/require_hw_breakpoints/TestRequireHWBreakpoints.py
@@ -26,7 +26,7 @@ class BreakpointLocationsTestCase(HardwareBreakpointTestBase):
breakpoint = target.BreakpointCreateByLocation("main.c", 1)
self.assertTrue(breakpoint.IsHardware())
- @expectedFailureIfFn(HardwareBreakpointTestBase.supports_hw_breakpoints)
+ @skipTestIfFn(HardwareBreakpointTestBase.supports_hw_breakpoints)
def test_step_range(self):
"""Test stepping when hardware breakpoints are required."""
self.build()
@@ -49,7 +49,7 @@ class BreakpointLocationsTestCase(HardwareBreakpointTestBase):
"Could not create hardware breakpoint for thread plan" in error.GetCString()
)
- @expectedFailureIfFn(HardwareBreakpointTestBase.supports_hw_breakpoints)
+ @skipTestIfFn(HardwareBreakpointTestBase.supports_hw_breakpoints)
def test_step_out(self):
"""Test stepping out when hardware breakpoints are required."""
self.build()
@@ -71,7 +71,7 @@ class BreakpointLocationsTestCase(HardwareBreakpointTestBase):
"Could not create hardware breakpoint for thread plan" in error.GetCString()
)
- @expectedFailureIfFn(HardwareBreakpointTestBase.supports_hw_breakpoints)
+ @skipTestIfFn(HardwareBreakpointTestBase.supports_hw_breakpoints)
def test_step_over(self):
"""Test stepping over when hardware breakpoints are required."""
self.build()
@@ -91,7 +91,7 @@ class BreakpointLocationsTestCase(HardwareBreakpointTestBase):
# Was reported to sometimes pass on certain hardware.
@skipIf(oslist=["linux"], archs=["arm"])
- @expectedFailureIfFn(HardwareBreakpointTestBase.supports_hw_breakpoints)
+ @skipTestIfFn(HardwareBreakpointTestBase.supports_hw_breakpoints)
def test_step_until(self):
"""Test stepping until when hardware breakpoints are required."""
self.build()
diff --git a/lldb/test/API/functionalities/breakpoint/thread_plan_user_breakpoint/TestThreadPlanUserBreakpoint.py b/lldb/test/API/functionalities/breakpoint/thread_plan_user_breakpoint/TestThreadPlanUserBreakpoint.py
index d9b7426..ee597ad 100644
--- a/lldb/test/API/functionalities/breakpoint/thread_plan_user_breakpoint/TestThreadPlanUserBreakpoint.py
+++ b/lldb/test/API/functionalities/breakpoint/thread_plan_user_breakpoint/TestThreadPlanUserBreakpoint.py
@@ -8,7 +8,7 @@ and eStopReasonPlanComplete when breakpoint's condition fails or it is disabled.
"""
-import unittest2
+import unittest
import lldb
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
diff --git a/lldb/test/API/functionalities/jitloader_gdb/TestJITLoaderGDB.py b/lldb/test/API/functionalities/jitloader_gdb/TestJITLoaderGDB.py
index a074215..98c0b14 100644
--- a/lldb/test/API/functionalities/jitloader_gdb/TestJITLoaderGDB.py
+++ b/lldb/test/API/functionalities/jitloader_gdb/TestJITLoaderGDB.py
@@ -1,7 +1,7 @@
"""Test for the JITLoaderGDB interface"""
-import unittest2
+import unittest
import os
import lldb
from lldbsuite.test import lldbutil
@@ -14,7 +14,7 @@ class JITLoaderGDBTestCase(TestBase):
lambda: "Skipped because the test crashes the test runner",
bugnumber="llvm.org/pr24702",
)
- @unittest2.expectedFailure # llvm.org/pr24702
+ @unittest.expectedFailure # llvm.org/pr24702
def test_bogus_values(self):
"""Test that we handle inferior misusing the GDB JIT interface"""
self.build()
diff --git a/lldb/test/API/functionalities/launch_stop_at_entry/TestStopAtEntry.py b/lldb/test/API/functionalities/launch_stop_at_entry/TestStopAtEntry.py
index 496f9c20..b4e2b39 100644
--- a/lldb/test/API/functionalities/launch_stop_at_entry/TestStopAtEntry.py
+++ b/lldb/test/API/functionalities/launch_stop_at_entry/TestStopAtEntry.py
@@ -49,15 +49,15 @@ class TestStopAtEntry(TestBase):
@skipUnlessDarwin
@skipIfRemote
- @expectedFailureIfFn(no_debugserver)
- @expectedFailureIfFn(port_not_available)
+ @skipTestIfFn(no_debugserver)
+ @skipTestIfFn(port_not_available)
def test_stop_remote_platform_sync(self):
self.do_test_stop_at_entry(True, True)
@skipUnlessDarwin
@skipIfRemote
- @expectedFailureIfFn(no_debugserver)
- @expectedFailureIfFn(port_not_available)
+ @skipTestIfFn(no_debugserver)
+ @skipTestIfFn(port_not_available)
def test_stop_remote_platform_async(self):
self.do_test_stop_at_entry(False, True)
diff --git a/lldb/test/API/functionalities/thread/state/TestThreadStates.py b/lldb/test/API/functionalities/thread/state/TestThreadStates.py
index e128ca8..56954c9 100644
--- a/lldb/test/API/functionalities/thread/state/TestThreadStates.py
+++ b/lldb/test/API/functionalities/thread/state/TestThreadStates.py
@@ -3,7 +3,7 @@ Test thread states.
"""
-import unittest2
+import unittest
import lldb
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
@@ -41,14 +41,14 @@ class ThreadStateTestCase(TestBase):
@expectedFailureAll(oslist=["windows"], bugnumber="llvm.org/pr24660")
@expectedFailureNetBSD
# thread states not properly maintained
- @unittest2.expectedFailure # llvm.org/pr16712
+ @unittest.expectedFailure # llvm.org/pr16712
def test_state_after_expression(self):
"""Test thread state after expression."""
self.build()
self.thread_state_after_expression_test()
# thread states not properly maintained
- @unittest2.expectedFailure # llvm.org/pr15824 and <rdar://problem/28557237>
+ @unittest.expectedFailure # llvm.org/pr15824 and <rdar://problem/28557237>
@expectedFailureAll(
oslist=["windows"],
bugnumber="llvm.org/pr24668: Breakpoints not resolved correctly",
diff --git a/lldb/test/API/functionalities/tty/TestTerminal.py b/lldb/test/API/functionalities/tty/TestTerminal.py
index 457abd7..750cdb3 100644
--- a/lldb/test/API/functionalities/tty/TestTerminal.py
+++ b/lldb/test/API/functionalities/tty/TestTerminal.py
@@ -2,7 +2,7 @@
Test lldb command aliases.
"""
-import unittest2
+import unittest
import os
import lldb
from lldbsuite.test.decorators import *
@@ -17,13 +17,13 @@ class LaunchInTerminalTestCase(TestBase):
@skipUnlessDarwin
# If the test is being run under sudo, the spawned terminal won't retain that elevated
# privilege so it can't open the socket to talk back to the test case
- @unittest2.skipIf(
+ @unittest.skipIf(
hasattr(os, "geteuid") and os.geteuid() == 0, "test cannot be run as root"
)
# Do we need to disable this test if the testsuite is being run on a remote system?
# This env var is only defined when the shell is running in a local mac
# terminal window
- @unittest2.skipUnless(
+ @unittest.skipUnless(
"TERM_PROGRAM" in os.environ, "test must be run on local system"
)
@no_debug_info_test
diff --git a/lldb/test/API/lang/c/shared_lib/TestSharedLib.py b/lldb/test/API/lang/c/shared_lib/TestSharedLib.py
index 235b9b4..e0994aa 100644
--- a/lldb/test/API/lang/c/shared_lib/TestSharedLib.py
+++ b/lldb/test/API/lang/c/shared_lib/TestSharedLib.py
@@ -1,7 +1,7 @@
"""Test that types defined in shared libraries work correctly."""
-import unittest2
+import unittest
import lldb
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
@@ -35,7 +35,7 @@ class SharedLibTestCase(TestBase):
"""Test that types work when defined in a shared library and forward-declared in the main executable, but with preloading disabled"""
self.common_test_expr(False)
- @unittest2.expectedFailure # llvm.org/PR36712
+ @unittest.expectedFailure # llvm.org/PR36712
def test_frame_variable(self):
"""Test that types work when defined in a shared library and forward-declared in the main executable"""
self.build()
diff --git a/lldb/test/API/lang/c/shared_lib_stripped_symbols/TestSharedLibStrippedSymbols.py b/lldb/test/API/lang/c/shared_lib_stripped_symbols/TestSharedLibStrippedSymbols.py
index 070bd88..6971fc0 100644
--- a/lldb/test/API/lang/c/shared_lib_stripped_symbols/TestSharedLibStrippedSymbols.py
+++ b/lldb/test/API/lang/c/shared_lib_stripped_symbols/TestSharedLibStrippedSymbols.py
@@ -1,7 +1,7 @@
"""Test that types defined in shared libraries with stripped symbols work correctly."""
-import unittest2
+import unittest
import lldb
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
@@ -28,7 +28,7 @@ class SharedLibStrippedTestCase(TestBase):
)
@expectedFailureAll(oslist=["windows"])
- @unittest2.expectedFailure # llvm.org/PR36712
+ @unittest.expectedFailure # llvm.org/PR36712
def test_frame_variable(self):
"""Test that types work when defined in a shared library and forward-declared in the main executable"""
self.build()
diff --git a/lldb/test/API/lang/cpp/namespace/TestNamespaceLookup.py b/lldb/test/API/lang/cpp/namespace/TestNamespaceLookup.py
index 44cfbd2..b5e8115 100644
--- a/lldb/test/API/lang/cpp/namespace/TestNamespaceLookup.py
+++ b/lldb/test/API/lang/cpp/namespace/TestNamespaceLookup.py
@@ -3,7 +3,7 @@ Test the printing of anonymous and named namespace variables.
"""
-import unittest2
+import unittest
import lldb
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
@@ -41,7 +41,7 @@ class NamespaceLookupTestCase(TestBase):
)
@skipIfWindows # This is flakey on Windows: llvm.org/pr38373
- @unittest2.expectedFailure # CU-local objects incorrectly scoped
+ @unittest.expectedFailure # CU-local objects incorrectly scoped
def test_scope_lookup_with_run_command_globals(self):
"""Test scope lookup of functions in lldb."""
self.build()
@@ -211,7 +211,7 @@ class NamespaceLookupTestCase(TestBase):
# Evaluate B::func() - should call B::func()
self.expect_expr("B::func()", result_type="int", result_value="4")
- @unittest2.expectedFailure # lldb scope lookup of functions bugs
+ @unittest.expectedFailure # lldb scope lookup of functions bugs
def test_function_scope_lookup_with_run_command(self):
"""Test scope lookup of functions in lldb."""
self.build()
@@ -272,7 +272,7 @@ class NamespaceLookupTestCase(TestBase):
# Evaluate func2() - should call A::func2()
self.expect_expr("func2()", result_type="int", result_value="3")
- @unittest2.expectedFailure # lldb scope lookup after using declaration bugs
+ @unittest.expectedFailure # lldb scope lookup after using declaration bugs
# NOTE: this test may fail on older systems that don't emit import
# emtries in DWARF - may need to add checks for compiler versions here.
def test_scope_after_using_declaration_lookup_with_run_command(self):
@@ -294,7 +294,7 @@ class NamespaceLookupTestCase(TestBase):
# Evaluate func() - should call A::func()
self.expect_expr("func()", result_type="int", result_value="3")
- @unittest2.expectedFailure # lldb scope lookup ambiguity after using bugs
+ @unittest.expectedFailure # lldb scope lookup ambiguity after using bugs
def test_scope_ambiguity_after_using_lookup_with_run_command(self):
"""Test scope lookup ambiguity after using in lldb."""
self.build()
diff --git a/lldb/test/API/lang/cpp/reference-to-outer-type/TestCppReferenceToOuterClass.py b/lldb/test/API/lang/cpp/reference-to-outer-type/TestCppReferenceToOuterClass.py
index 3172b5f..a6e419b 100644
--- a/lldb/test/API/lang/cpp/reference-to-outer-type/TestCppReferenceToOuterClass.py
+++ b/lldb/test/API/lang/cpp/reference-to-outer-type/TestCppReferenceToOuterClass.py
@@ -1,4 +1,4 @@
-import unittest2
+import unittest
import lldb
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
@@ -6,7 +6,7 @@ from lldbsuite.test import lldbutil
class TestCase(TestBase):
- @unittest2.expectedFailure # The fix for this was reverted due to llvm.org/PR52257
+ @unittest.expectedFailure # The fix for this was reverted due to llvm.org/PR52257
def test(self):
self.build()
self.dbg.CreateTarget(self.getBuildArtifact("a.out"))
diff --git a/lldb/test/API/lang/cpp/thread_local/TestThreadLocal.py b/lldb/test/API/lang/cpp/thread_local/TestThreadLocal.py
index 9b128ba..0b63e15 100644
--- a/lldb/test/API/lang/cpp/thread_local/TestThreadLocal.py
+++ b/lldb/test/API/lang/cpp/thread_local/TestThreadLocal.py
@@ -39,6 +39,11 @@ class PlatformProcessCrashInfoTestCase(TestBase):
process.Kill()
lldbutil.run_to_breakpoint_do_run(self, target, main_bkpt)
+ # The test fails during tear down because the module isn't cleared.
+ # Even though this test case is marked as xfail, a failure during
+ # tear down still counts as an error.
+ main_module.Clear()
+
self.expect(
"expr tl_local_int",
error=True,
diff --git a/lldb/test/API/lang/objc/hidden-ivars/TestHiddenIvars.py b/lldb/test/API/lang/objc/hidden-ivars/TestHiddenIvars.py
index 479f437..f5e4eb1 100644
--- a/lldb/test/API/lang/objc/hidden-ivars/TestHiddenIvars.py
+++ b/lldb/test/API/lang/objc/hidden-ivars/TestHiddenIvars.py
@@ -3,7 +3,7 @@
import subprocess
-import unittest2
+import unittest
import lldb
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
@@ -58,7 +58,7 @@ class HiddenIvarsTestCase(TestBase):
self.build()
self.frame_var(False)
- @unittest2.expectedFailure # rdar://18683637
+ @unittest.expectedFailure # rdar://18683637
def test_frame_variable_across_modules(self):
if self.getArchitecture() == "i386":
self.skipTest("requires modern objc runtime")
diff --git a/lldb/test/API/lldbtest.py b/lldb/test/API/lldbtest.py
index 2b44bb7..bae73e7 100644
--- a/lldb/test/API/lldbtest.py
+++ b/lldb/test/API/lldbtest.py
@@ -1,3 +1,4 @@
+import collections
import os
import re
import operator
@@ -86,20 +87,46 @@ class LLDBTest(TestFormat):
if timeoutInfo:
return lit.Test.TIMEOUT, output
- # Parse the dotest output from stderr.
- result_regex = r"\((\d+) passes, (\d+) failures, (\d+) errors, (\d+) skipped, (\d+) expected failures, (\d+) unexpected successes\)"
- results = re.search(result_regex, err)
+ # Parse the dotest output from stderr. First get the # of total tests, in order to infer the # of passes.
+ # Example: "Ran 5 tests in 0.042s"
+ num_ran_regex = r"^Ran (\d+) tests? in "
+ num_ran_results = re.search(num_ran_regex, err, re.MULTILINE)
+
+ # If parsing fails mark this test as unresolved.
+ if not num_ran_results:
+ return lit.Test.UNRESOLVED, output
+ num_ran = int(num_ran_results.group(1))
+
+ # Then look for a detailed summary, which is OK or FAILED followed by optional details.
+ # Example: "OK (skipped=1, expected failures=1)"
+ # Example: "FAILED (failures=3)"
+ # Example: "OK"
+ result_regex = r"^(?:OK|FAILED)(?: \((.*)\))?$"
+ results = re.search(result_regex, err, re.MULTILINE)
# If parsing fails mark this test as unresolved.
if not results:
return lit.Test.UNRESOLVED, output
- passes = int(results.group(1))
- failures = int(results.group(2))
- errors = int(results.group(3))
- skipped = int(results.group(4))
- expected_failures = int(results.group(5))
- unexpected_successes = int(results.group(6))
+ details = results.group(1)
+ parsed_details = collections.defaultdict(int)
+ if details:
+ for detail in details.split(", "):
+ detail_parts = detail.split("=")
+ if len(detail_parts) != 2:
+ return lit.Test.UNRESOLVED, output
+ parsed_details[detail_parts[0]] = int(detail_parts[1])
+
+ failures = parsed_details["failures"]
+ errors = parsed_details["errors"]
+ skipped = parsed_details["skipped"]
+ expected_failures = parsed_details["expected failures"]
+ unexpected_successes = parsed_details["unexpected successes"]
+
+ non_pass = (
+ failures + errors + skipped + expected_failures + unexpected_successes
+ )
+ passes = num_ran - non_pass
if exitCode:
# Mark this test as FAIL if at least one test failed.
diff --git a/lldb/test/API/macosx/universal/TestUniversal.py b/lldb/test/API/macosx/universal/TestUniversal.py
index 6e8c112..d988cc0 100644
--- a/lldb/test/API/macosx/universal/TestUniversal.py
+++ b/lldb/test/API/macosx/universal/TestUniversal.py
@@ -1,4 +1,4 @@
-import unittest2
+import unittest
import os
import lldb
from lldbsuite.test.decorators import *
@@ -24,7 +24,7 @@ class UniversalTestCase(TestBase):
@add_test_categories(["pyapi"])
@skipUnlessDarwin
- @unittest2.skipUnless(
+ @unittest.skipUnless(
hasattr(os, "uname") and os.uname()[4] in ["x86_64"], "requires x86_64"
)
@skipIfDarwinEmbedded # this test file assumes we're targetting an x86 system
@@ -50,7 +50,7 @@ class UniversalTestCase(TestBase):
self.assertTrue(process, PROCESS_IS_VALID)
@skipUnlessDarwin
- @unittest2.skipUnless(
+ @unittest.skipUnless(
hasattr(os, "uname") and os.uname()[4] in ["x86_64"], "requires x86_64"
)
@skipIfDarwinEmbedded # this test file assumes we're targetting an x86 system
@@ -115,7 +115,7 @@ class UniversalTestCase(TestBase):
self.runCmd("continue")
@skipUnlessDarwin
- @unittest2.skipUnless(
+ @unittest.skipUnless(
hasattr(os, "uname") and os.uname()[4] in ["x86_64"], "requires x86_64"
)
@skipIfDarwinEmbedded # this test file assumes we're targetting an x86 system
diff --git a/lldb/test/API/tools/lldb-server/libraries-svr4/TestGdbRemoteLibrariesSvr4Support.py b/lldb/test/API/tools/lldb-server/libraries-svr4/TestGdbRemoteLibrariesSvr4Support.py
index ac65d2d..846adad 100644
--- a/lldb/test/API/tools/lldb-server/libraries-svr4/TestGdbRemoteLibrariesSvr4Support.py
+++ b/lldb/test/API/tools/lldb-server/libraries-svr4/TestGdbRemoteLibrariesSvr4Support.py
@@ -72,7 +72,7 @@ class TestGdbRemoteLibrariesSvr4Support(gdbremote_testcase.GdbRemoteTestCaseBase
self.assertEqual(xml_root.tag, "library-list-svr4")
for child in xml_root:
self.assertEqual(child.tag, "library")
- self.assertItemsEqual(child.attrib.keys(), ["name", "lm", "l_addr", "l_ld"])
+ self.assertCountEqual(child.attrib.keys(), ["name", "lm", "l_addr", "l_ld"])
def libraries_svr4_has_correct_load_addr(self):
xml_root = self.get_libraries_svr4_xml()
diff --git a/lldb/test/API/tools/lldb-server/test/test_lldbgdbserverutils.py b/lldb/test/API/tools/lldb-server/test/test_lldbgdbserverutils.py
index 6a6fd02..9c9a73c 100644
--- a/lldb/test/API/tools/lldb-server/test/test_lldbgdbserverutils.py
+++ b/lldb/test/API/tools/lldb-server/test/test_lldbgdbserverutils.py
@@ -1,10 +1,10 @@
-import unittest2
+import unittest
import re
from lldbgdbserverutils import *
-class TestLldbGdbServerUtils(unittest2.TestCase):
+class TestLldbGdbServerUtils(unittest.TestCase):
def test_entry_exact_payload_match(self):
entry = GdbRemoteEntry(is_send_to_remote=False, exact_payload="$OK#9a")
entry.assert_match(self, "$OK#9a")
diff --git a/lldb/tools/lldb-dap/Breakpoint.cpp b/lldb/tools/lldb-dap/Breakpoint.cpp
new file mode 100644
index 0000000..0c33d4b
--- /dev/null
+++ b/lldb/tools/lldb-dap/Breakpoint.cpp
@@ -0,0 +1,76 @@
+//===-- Breakpoint.cpp ----------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "Breakpoint.h"
+#include "DAP.h"
+#include "JSONUtils.h"
+#include "llvm/ADT/StringExtras.h"
+
+using namespace lldb_dap;
+
+void Breakpoint::SetCondition() { bp.SetCondition(condition.c_str()); }
+
+void Breakpoint::SetHitCondition() {
+ uint64_t hitCount = 0;
+ if (llvm::to_integer(hitCondition, hitCount))
+ bp.SetIgnoreCount(hitCount - 1);
+}
+
+void Breakpoint::CreateJsonObject(llvm::json::Object &object) {
+ // Each breakpoint location is treated as a separate breakpoint for VS code.
+ // They don't have the notion of a single breakpoint with multiple locations.
+ if (!bp.IsValid())
+ return;
+ object.try_emplace("verified", bp.GetNumResolvedLocations() > 0);
+ object.try_emplace("id", bp.GetID());
+ // VS Code DAP doesn't currently allow one breakpoint to have multiple
+ // locations so we just report the first one. If we report all locations
+ // then the IDE starts showing the wrong line numbers and locations for
+ // other source file and line breakpoints in the same file.
+
+ // Below we search for the first resolved location in a breakpoint and report
+ // this as the breakpoint location since it will have a complete location
+ // that is at least loaded in the current process.
+ lldb::SBBreakpointLocation bp_loc;
+ const auto num_locs = bp.GetNumLocations();
+ for (size_t i = 0; i < num_locs; ++i) {
+ bp_loc = bp.GetLocationAtIndex(i);
+ if (bp_loc.IsResolved())
+ break;
+ }
+ // If not locations are resolved, use the first location.
+ if (!bp_loc.IsResolved())
+ bp_loc = bp.GetLocationAtIndex(0);
+ auto bp_addr = bp_loc.GetAddress();
+
+ if (bp_addr.IsValid()) {
+ std::string formatted_addr =
+ "0x" + llvm::utohexstr(bp_addr.GetLoadAddress(g_dap.target));
+ object.try_emplace("instructionReference", formatted_addr);
+ auto line_entry = bp_addr.GetLineEntry();
+ const auto line = line_entry.GetLine();
+ if (line != UINT32_MAX)
+ object.try_emplace("line", line);
+ const auto column = line_entry.GetColumn();
+ if (column != 0)
+ object.try_emplace("column", column);
+ object.try_emplace("source", CreateSource(line_entry));
+ }
+}
+
+bool Breakpoint::MatchesName(const char *name) { return bp.MatchesName(name); }
+
+void Breakpoint::SetBreakpoint() {
+ // See comments in BreakpointBase::GetBreakpointLabel() for details of why
+ // we add a label to our breakpoints.
+ bp.AddName(GetBreakpointLabel());
+ if (!condition.empty())
+ SetCondition();
+ if (!hitCondition.empty())
+ SetHitCondition();
+}
diff --git a/lldb/tools/lldb-dap/Breakpoint.h b/lldb/tools/lldb-dap/Breakpoint.h
new file mode 100644
index 0000000..47a9d9c
--- /dev/null
+++ b/lldb/tools/lldb-dap/Breakpoint.h
@@ -0,0 +1,33 @@
+//===-- Breakpoint.h --------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLDB_TOOLS_LLDB_DAP_BREAKPOINT_H
+#define LLDB_TOOLS_LLDB_DAP_BREAKPOINT_H
+
+#include "BreakpointBase.h"
+
+namespace lldb_dap {
+
+struct Breakpoint : public BreakpointBase {
+ // The LLDB breakpoint associated wit this source breakpoint
+ lldb::SBBreakpoint bp;
+
+ Breakpoint() = default;
+ Breakpoint(const llvm::json::Object &obj) : BreakpointBase(obj){};
+ Breakpoint(lldb::SBBreakpoint bp) : bp(bp) {}
+
+ void SetCondition() override;
+ void SetHitCondition() override;
+ void CreateJsonObject(llvm::json::Object &object) override;
+
+ bool MatchesName(const char *name);
+ void SetBreakpoint();
+};
+} // namespace lldb_dap
+
+#endif
diff --git a/lldb/tools/lldb-dap/BreakpointBase.cpp b/lldb/tools/lldb-dap/BreakpointBase.cpp
index fb4b27f..519729f 100644
--- a/lldb/tools/lldb-dap/BreakpointBase.cpp
+++ b/lldb/tools/lldb-dap/BreakpointBase.cpp
@@ -8,306 +8,13 @@
#include "BreakpointBase.h"
#include "DAP.h"
-#include "JSONUtils.h"
#include "llvm/ADT/StringExtras.h"
using namespace lldb_dap;
BreakpointBase::BreakpointBase(const llvm::json::Object &obj)
: condition(std::string(GetString(obj, "condition"))),
- hitCondition(std::string(GetString(obj, "hitCondition"))),
- logMessage(std::string(GetString(obj, "logMessage"))) {}
-
-void BreakpointBase::SetCondition() { bp.SetCondition(condition.c_str()); }
-
-void BreakpointBase::SetHitCondition() {
- uint64_t hitCount = 0;
- if (llvm::to_integer(hitCondition, hitCount))
- bp.SetIgnoreCount(hitCount - 1);
-}
-
-lldb::SBError BreakpointBase::AppendLogMessagePart(llvm::StringRef part,
- bool is_expr) {
- if (is_expr) {
- logMessageParts.emplace_back(part, is_expr);
- } else {
- std::string formatted;
- lldb::SBError error = FormatLogText(part, formatted);
- if (error.Fail())
- return error;
- logMessageParts.emplace_back(formatted, is_expr);
- }
- return lldb::SBError();
-}
-
-// TODO: consolidate this code with the implementation in
-// FormatEntity::ParseInternal().
-lldb::SBError BreakpointBase::FormatLogText(llvm::StringRef text,
- std::string &formatted) {
- lldb::SBError error;
- while (!text.empty()) {
- size_t backslash_pos = text.find_first_of('\\');
- if (backslash_pos == std::string::npos) {
- formatted += text.str();
- return error;
- }
-
- formatted += text.substr(0, backslash_pos).str();
- // Skip the characters before and including '\'.
- text = text.drop_front(backslash_pos + 1);
-
- if (text.empty()) {
- error.SetErrorString(
- "'\\' character was not followed by another character");
- return error;
- }
-
- const char desens_char = text[0];
- text = text.drop_front(); // Skip the desensitized char character
- switch (desens_char) {
- case 'a':
- formatted.push_back('\a');
- break;
- case 'b':
- formatted.push_back('\b');
- break;
- case 'f':
- formatted.push_back('\f');
- break;
- case 'n':
- formatted.push_back('\n');
- break;
- case 'r':
- formatted.push_back('\r');
- break;
- case 't':
- formatted.push_back('\t');
- break;
- case 'v':
- formatted.push_back('\v');
- break;
- case '\'':
- formatted.push_back('\'');
- break;
- case '\\':
- formatted.push_back('\\');
- break;
- case '0':
- // 1 to 3 octal chars
- {
- if (text.empty()) {
- error.SetErrorString("missing octal number following '\\0'");
- return error;
- }
-
- // Make a string that can hold onto the initial zero char, up to 3
- // octal digits, and a terminating NULL.
- char oct_str[5] = {0, 0, 0, 0, 0};
-
- size_t i;
- for (i = 0;
- i < text.size() && i < 4 && (text[i] >= '0' && text[i] <= '7');
- ++i) {
- oct_str[i] = text[i];
- }
-
- text = text.drop_front(i);
- unsigned long octal_value = ::strtoul(oct_str, nullptr, 8);
- if (octal_value <= UINT8_MAX) {
- formatted.push_back((char)octal_value);
- } else {
- error.SetErrorString("octal number is larger than a single byte");
- return error;
- }
- }
- break;
-
- case 'x': {
- if (text.empty()) {
- error.SetErrorString("missing hex number following '\\x'");
- return error;
- }
- // hex number in the text
- if (isxdigit(text[0])) {
- // Make a string that can hold onto two hex chars plus a
- // NULL terminator
- char hex_str[3] = {0, 0, 0};
- hex_str[0] = text[0];
-
- text = text.drop_front();
-
- if (!text.empty() && isxdigit(text[0])) {
- hex_str[1] = text[0];
- text = text.drop_front();
- }
-
- unsigned long hex_value = strtoul(hex_str, nullptr, 16);
- if (hex_value <= UINT8_MAX) {
- formatted.push_back((char)hex_value);
- } else {
- error.SetErrorString("hex number is larger than a single byte");
- return error;
- }
- } else {
- formatted.push_back(desens_char);
- }
- break;
- }
-
- default:
- // Just desensitize any other character by just printing what came
- // after the '\'
- formatted.push_back(desens_char);
- break;
- }
- }
- return error;
-}
-
-// logMessage will be divided into array of LogMessagePart as two kinds:
-// 1. raw print text message, and
-// 2. interpolated expression for evaluation which is inside matching curly
-// braces.
-//
-// The function tries to parse logMessage into a list of LogMessageParts
-// for easy later access in BreakpointHitCallback.
-void BreakpointBase::SetLogMessage() {
- logMessageParts.clear();
-
- // Contains unmatched open curly braces indices.
- std::vector<int> unmatched_curly_braces;
-
- // Contains all matched curly braces in logMessage.
- // Loop invariant: matched_curly_braces_ranges are sorted by start index in
- // ascending order without any overlap between them.
- std::vector<std::pair<int, int>> matched_curly_braces_ranges;
-
- lldb::SBError error;
- // Part1 - parse matched_curly_braces_ranges.
- // locating all curly braced expression ranges in logMessage.
- // The algorithm takes care of nested and imbalanced curly braces.
- for (size_t i = 0; i < logMessage.size(); ++i) {
- if (logMessage[i] == '{') {
- unmatched_curly_braces.push_back(i);
- } else if (logMessage[i] == '}') {
- if (unmatched_curly_braces.empty())
- // Nothing to match.
- continue;
-
- int last_unmatched_index = unmatched_curly_braces.back();
- unmatched_curly_braces.pop_back();
-
- // Erase any matched ranges included in the new match.
- while (!matched_curly_braces_ranges.empty()) {
- assert(matched_curly_braces_ranges.back().first !=
- last_unmatched_index &&
- "How can a curley brace be matched twice?");
- if (matched_curly_braces_ranges.back().first < last_unmatched_index)
- break;
-
- // This is a nested range let's earse it.
- assert((size_t)matched_curly_braces_ranges.back().second < i);
- matched_curly_braces_ranges.pop_back();
- }
-
- // Assert invariant.
- assert(matched_curly_braces_ranges.empty() ||
- matched_curly_braces_ranges.back().first < last_unmatched_index);
- matched_curly_braces_ranges.emplace_back(last_unmatched_index, i);
- }
- }
-
- // Part2 - parse raw text and expresions parts.
- // All expression ranges have been parsed in matched_curly_braces_ranges.
- // The code below uses matched_curly_braces_ranges to divide logMessage
- // into raw text parts and expression parts.
- int last_raw_text_start = 0;
- for (const std::pair<int, int> &curly_braces_range :
- matched_curly_braces_ranges) {
- // Raw text before open curly brace.
- assert(curly_braces_range.first >= last_raw_text_start);
- size_t raw_text_len = curly_braces_range.first - last_raw_text_start;
- if (raw_text_len > 0) {
- error = AppendLogMessagePart(
- llvm::StringRef(logMessage.c_str() + last_raw_text_start,
- raw_text_len),
- /*is_expr=*/false);
- if (error.Fail()) {
- NotifyLogMessageError(error.GetCString());
- return;
- }
- }
-
- // Expression between curly braces.
- assert(curly_braces_range.second > curly_braces_range.first);
- size_t expr_len = curly_braces_range.second - curly_braces_range.first - 1;
- error = AppendLogMessagePart(
- llvm::StringRef(logMessage.c_str() + curly_braces_range.first + 1,
- expr_len),
- /*is_expr=*/true);
- if (error.Fail()) {
- NotifyLogMessageError(error.GetCString());
- return;
- }
-
- last_raw_text_start = curly_braces_range.second + 1;
- }
- // Trailing raw text after close curly brace.
- assert(last_raw_text_start >= 0);
- if (logMessage.size() > (size_t)last_raw_text_start) {
- error = AppendLogMessagePart(
- llvm::StringRef(logMessage.c_str() + last_raw_text_start,
- logMessage.size() - last_raw_text_start),
- /*is_expr=*/false);
- if (error.Fail()) {
- NotifyLogMessageError(error.GetCString());
- return;
- }
- }
-
- bp.SetCallback(BreakpointBase::BreakpointHitCallback, this);
-}
-
-void BreakpointBase::NotifyLogMessageError(llvm::StringRef error) {
- std::string message = "Log message has error: ";
- message += error;
- g_dap.SendOutput(OutputType::Console, message);
-}
-
-/*static*/
-bool BreakpointBase::BreakpointHitCallback(
- void *baton, lldb::SBProcess &process, lldb::SBThread &thread,
- lldb::SBBreakpointLocation &location) {
- if (!baton)
- return true;
-
- BreakpointBase *bp = (BreakpointBase *)baton;
- lldb::SBFrame frame = thread.GetSelectedFrame();
-
- std::string output;
- for (const BreakpointBase::LogMessagePart &messagePart :
- bp->logMessageParts) {
- if (messagePart.is_expr) {
- // Try local frame variables first before fall back to expression
- // evaluation
- const std::string &expr_str = messagePart.text;
- const char *expr = expr_str.c_str();
- lldb::SBValue value =
- frame.GetValueForVariablePath(expr, lldb::eDynamicDontRunTarget);
- if (value.GetError().Fail())
- value = frame.EvaluateExpression(expr);
- output += VariableDescription(value).display_value;
- } else {
- output += messagePart.text;
- }
- }
- if (!output.empty() && output.back() != '\n')
- output.push_back('\n'); // Ensure log message has line break.
- g_dap.SendOutput(OutputType::Console, output.c_str());
-
- // Do not stop.
- return false;
-}
+ hitCondition(std::string(GetString(obj, "hitCondition"))) {}
void BreakpointBase::UpdateBreakpoint(const BreakpointBase &request_bp) {
if (condition != request_bp.condition) {
@@ -318,10 +25,6 @@ void BreakpointBase::UpdateBreakpoint(const BreakpointBase &request_bp) {
hitCondition = request_bp.hitCondition;
SetHitCondition();
}
- if (logMessage != request_bp.logMessage) {
- logMessage = request_bp.logMessage;
- SetLogMessage();
- }
}
const char *BreakpointBase::GetBreakpointLabel() {
diff --git a/lldb/tools/lldb-dap/BreakpointBase.h b/lldb/tools/lldb-dap/BreakpointBase.h
index 41787f7..5a04bb2 100644
--- a/lldb/tools/lldb-dap/BreakpointBase.h
+++ b/lldb/tools/lldb-dap/BreakpointBase.h
@@ -9,7 +9,6 @@
#ifndef LLDB_TOOLS_LLDB_DAP_BREAKPOINTBASE_H
#define LLDB_TOOLS_LLDB_DAP_BREAKPOINTBASE_H
-#include "JSONUtils.h"
#include "lldb/API/SBBreakpoint.h"
#include "llvm/Support/JSON.h"
#include <string>
@@ -18,44 +17,24 @@
namespace lldb_dap {
struct BreakpointBase {
- // logMessage part can be either a raw text or an expression.
- struct LogMessagePart {
- LogMessagePart(llvm::StringRef text, bool is_expr)
- : text(text), is_expr(is_expr) {}
- std::string text;
- bool is_expr;
- };
+
// An optional expression for conditional breakpoints.
std::string condition;
// An optional expression that controls how many hits of the breakpoint are
// ignored. The backend is expected to interpret the expression as needed
std::string hitCondition;
- // If this attribute exists and is non-empty, the backend must not 'break'
- // (stop) but log the message instead. Expressions within {} are
- // interpolated.
- std::string logMessage;
- std::vector<LogMessagePart> logMessageParts;
- // The LLDB breakpoint associated wit this source breakpoint
- lldb::SBBreakpoint bp;
BreakpointBase() = default;
BreakpointBase(const llvm::json::Object &obj);
+ virtual ~BreakpointBase() = default;
- void SetCondition();
- void SetHitCondition();
- void SetLogMessage();
- void UpdateBreakpoint(const BreakpointBase &request_bp);
+ virtual void SetCondition() = 0;
+ virtual void SetHitCondition() = 0;
+ virtual void CreateJsonObject(llvm::json::Object &object) = 0;
- // Format \param text and return formatted text in \param formatted.
- // \return any formatting failures.
- lldb::SBError FormatLogText(llvm::StringRef text, std::string &formatted);
- lldb::SBError AppendLogMessagePart(llvm::StringRef part, bool is_expr);
- void NotifyLogMessageError(llvm::StringRef error);
+ void UpdateBreakpoint(const BreakpointBase &request_bp);
static const char *GetBreakpointLabel();
- static bool BreakpointHitCallback(void *baton, lldb::SBProcess &process,
- lldb::SBThread &thread,
- lldb::SBBreakpointLocation &location);
};
} // namespace lldb_dap
diff --git a/lldb/tools/lldb-dap/CMakeLists.txt b/lldb/tools/lldb-dap/CMakeLists.txt
index 554567eb..f8c0e4e 100644
--- a/lldb/tools/lldb-dap/CMakeLists.txt
+++ b/lldb/tools/lldb-dap/CMakeLists.txt
@@ -24,6 +24,7 @@ tablegen(LLVM Options.inc -gen-opt-parser-defs)
add_public_tablegen_target(LLDBDAPOptionsTableGen)
add_lldb_tool(lldb-dap
lldb-dap.cpp
+ Breakpoint.cpp
BreakpointBase.cpp
ExceptionBreakpoint.cpp
FifoFiles.cpp
diff --git a/lldb/tools/lldb-dap/FunctionBreakpoint.cpp b/lldb/tools/lldb-dap/FunctionBreakpoint.cpp
index d4bdb97..21743bf 100644
--- a/lldb/tools/lldb-dap/FunctionBreakpoint.cpp
+++ b/lldb/tools/lldb-dap/FunctionBreakpoint.cpp
@@ -12,21 +12,13 @@
namespace lldb_dap {
FunctionBreakpoint::FunctionBreakpoint(const llvm::json::Object &obj)
- : BreakpointBase(obj), functionName(std::string(GetString(obj, "name"))) {}
+ : Breakpoint(obj), functionName(std::string(GetString(obj, "name"))) {}
void FunctionBreakpoint::SetBreakpoint() {
if (functionName.empty())
return;
bp = g_dap.target.BreakpointCreateByName(functionName.c_str());
- // See comments in BreakpointBase::GetBreakpointLabel() for details of why
- // we add a label to our breakpoints.
- bp.AddName(GetBreakpointLabel());
- if (!condition.empty())
- SetCondition();
- if (!hitCondition.empty())
- SetHitCondition();
- if (!logMessage.empty())
- SetLogMessage();
+ Breakpoint::SetBreakpoint();
}
} // namespace lldb_dap
diff --git a/lldb/tools/lldb-dap/FunctionBreakpoint.h b/lldb/tools/lldb-dap/FunctionBreakpoint.h
index fc23e94..b15ff19 100644
--- a/lldb/tools/lldb-dap/FunctionBreakpoint.h
+++ b/lldb/tools/lldb-dap/FunctionBreakpoint.h
@@ -9,11 +9,11 @@
#ifndef LLDB_TOOLS_LLDB_DAP_FUNCTIONBREAKPOINT_H
#define LLDB_TOOLS_LLDB_DAP_FUNCTIONBREAKPOINT_H
-#include "BreakpointBase.h"
+#include "Breakpoint.h"
namespace lldb_dap {
-struct FunctionBreakpoint : public BreakpointBase {
+struct FunctionBreakpoint : public Breakpoint {
std::string functionName;
FunctionBreakpoint() = default;
diff --git a/lldb/tools/lldb-dap/JSONUtils.cpp b/lldb/tools/lldb-dap/JSONUtils.cpp
index a8b438d..878449a 100644
--- a/lldb/tools/lldb-dap/JSONUtils.cpp
+++ b/lldb/tools/lldb-dap/JSONUtils.cpp
@@ -364,54 +364,14 @@ llvm::json::Value CreateScope(const llvm::StringRef name,
// },
// "required": [ "verified" ]
// }
-llvm::json::Value CreateBreakpoint(lldb::SBBreakpoint &bp,
+llvm::json::Value CreateBreakpoint(BreakpointBase *bp,
std::optional<llvm::StringRef> request_path,
std::optional<uint32_t> request_line,
std::optional<uint32_t> request_column) {
- // Each breakpoint location is treated as a separate breakpoint for VS code.
- // They don't have the notion of a single breakpoint with multiple locations.
llvm::json::Object object;
- if (!bp.IsValid())
- return llvm::json::Value(std::move(object));
-
- object.try_emplace("verified", bp.GetNumResolvedLocations() > 0);
- object.try_emplace("id", bp.GetID());
- // VS Code DAP doesn't currently allow one breakpoint to have multiple
- // locations so we just report the first one. If we report all locations
- // then the IDE starts showing the wrong line numbers and locations for
- // other source file and line breakpoints in the same file.
-
- // Below we search for the first resolved location in a breakpoint and report
- // this as the breakpoint location since it will have a complete location
- // that is at least loaded in the current process.
- lldb::SBBreakpointLocation bp_loc;
- const auto num_locs = bp.GetNumLocations();
- for (size_t i = 0; i < num_locs; ++i) {
- bp_loc = bp.GetLocationAtIndex(i);
- if (bp_loc.IsResolved())
- break;
- }
- // If not locations are resolved, use the first location.
- if (!bp_loc.IsResolved())
- bp_loc = bp.GetLocationAtIndex(0);
- auto bp_addr = bp_loc.GetAddress();
-
if (request_path)
object.try_emplace("source", CreateSource(*request_path));
-
- if (bp_addr.IsValid()) {
- std::string formatted_addr =
- "0x" + llvm::utohexstr(bp_addr.GetLoadAddress(g_dap.target));
- object.try_emplace("instructionReference", formatted_addr);
- auto line_entry = bp_addr.GetLineEntry();
- const auto line = line_entry.GetLine();
- if (line != UINT32_MAX)
- object.try_emplace("line", line);
- const auto column = line_entry.GetColumn();
- if (column != 0)
- object.try_emplace("column", column);
- object.try_emplace("source", CreateSource(line_entry));
- }
+ bp->CreateJsonObject(object);
// We try to add request_line as a fallback
if (request_line)
object.try_emplace("line", *request_line);
@@ -506,7 +466,7 @@ llvm::json::Value CreateModule(lldb::SBModule &module) {
return llvm::json::Value(std::move(object));
}
-void AppendBreakpoint(lldb::SBBreakpoint &bp, llvm::json::Array &breakpoints,
+void AppendBreakpoint(BreakpointBase *bp, llvm::json::Array &breakpoints,
std::optional<llvm::StringRef> request_path,
std::optional<uint32_t> request_line) {
breakpoints.emplace_back(CreateBreakpoint(bp, request_path, request_line));
diff --git a/lldb/tools/lldb-dap/JSONUtils.h b/lldb/tools/lldb-dap/JSONUtils.h
index 6233854..1515f5b 100644
--- a/lldb/tools/lldb-dap/JSONUtils.h
+++ b/lldb/tools/lldb-dap/JSONUtils.h
@@ -9,6 +9,7 @@
#ifndef LLDB_TOOLS_LLDB_DAP_JSONUTILS_H
#define LLDB_TOOLS_LLDB_DAP_JSONUTILS_H
+#include "BreakpointBase.h"
#include "DAPForward.h"
#include "lldb/API/SBModule.h"
#include "llvm/ADT/StringRef.h"
@@ -191,7 +192,7 @@ void FillResponse(const llvm::json::Object &request,
/// provided by the setBreakpoints request are returned to the IDE as a
/// fallback.
void AppendBreakpoint(
- lldb::SBBreakpoint &bp, llvm::json::Array &breakpoints,
+ BreakpointBase *bp, llvm::json::Array &breakpoints,
std::optional<llvm::StringRef> request_path = std::nullopt,
std::optional<uint32_t> request_line = std::nullopt);
@@ -223,7 +224,7 @@ void AppendBreakpoint(
/// A "Breakpoint" JSON object with that follows the formal JSON
/// definition outlined by Microsoft.
llvm::json::Value
-CreateBreakpoint(lldb::SBBreakpoint &bp,
+CreateBreakpoint(BreakpointBase *bp,
std::optional<llvm::StringRef> request_path = std::nullopt,
std::optional<uint32_t> request_line = std::nullopt,
std::optional<uint32_t> request_column = std::nullopt);
diff --git a/lldb/tools/lldb-dap/SourceBreakpoint.cpp b/lldb/tools/lldb-dap/SourceBreakpoint.cpp
index 3bd83c0..f5dd134 100644
--- a/lldb/tools/lldb-dap/SourceBreakpoint.cpp
+++ b/lldb/tools/lldb-dap/SourceBreakpoint.cpp
@@ -12,22 +12,308 @@
namespace lldb_dap {
SourceBreakpoint::SourceBreakpoint(const llvm::json::Object &obj)
- : BreakpointBase(obj), line(GetUnsigned(obj, "line", 0)),
- column(GetUnsigned(obj, "column", 0)) {}
+ : Breakpoint(obj), logMessage(std::string(GetString(obj, "logMessage"))),
+ line(GetUnsigned(obj, "line", 0)), column(GetUnsigned(obj, "column", 0)) {
+}
void SourceBreakpoint::SetBreakpoint(const llvm::StringRef source_path) {
lldb::SBFileSpecList module_list;
bp = g_dap.target.BreakpointCreateByLocation(source_path.str().c_str(), line,
column, 0, module_list);
- // See comments in BreakpointBase::GetBreakpointLabel() for details of why
- // we add a label to our breakpoints.
- bp.AddName(GetBreakpointLabel());
- if (!condition.empty())
- SetCondition();
- if (!hitCondition.empty())
- SetHitCondition();
if (!logMessage.empty())
SetLogMessage();
+ Breakpoint::SetBreakpoint();
+}
+
+void SourceBreakpoint::UpdateBreakpoint(const SourceBreakpoint &request_bp) {
+ if (logMessage != request_bp.logMessage) {
+ logMessage = request_bp.logMessage;
+ SetLogMessage();
+ }
+ BreakpointBase::UpdateBreakpoint(request_bp);
+}
+
+lldb::SBError SourceBreakpoint::AppendLogMessagePart(llvm::StringRef part,
+ bool is_expr) {
+ if (is_expr) {
+ logMessageParts.emplace_back(part, is_expr);
+ } else {
+ std::string formatted;
+ lldb::SBError error = FormatLogText(part, formatted);
+ if (error.Fail())
+ return error;
+ logMessageParts.emplace_back(formatted, is_expr);
+ }
+ return lldb::SBError();
+}
+
+// TODO: consolidate this code with the implementation in
+// FormatEntity::ParseInternal().
+lldb::SBError SourceBreakpoint::FormatLogText(llvm::StringRef text,
+ std::string &formatted) {
+ lldb::SBError error;
+ while (!text.empty()) {
+ size_t backslash_pos = text.find_first_of('\\');
+ if (backslash_pos == std::string::npos) {
+ formatted += text.str();
+ return error;
+ }
+
+ formatted += text.substr(0, backslash_pos).str();
+ // Skip the characters before and including '\'.
+ text = text.drop_front(backslash_pos + 1);
+
+ if (text.empty()) {
+ error.SetErrorString(
+ "'\\' character was not followed by another character");
+ return error;
+ }
+
+ const char desens_char = text[0];
+ text = text.drop_front(); // Skip the desensitized char character
+ switch (desens_char) {
+ case 'a':
+ formatted.push_back('\a');
+ break;
+ case 'b':
+ formatted.push_back('\b');
+ break;
+ case 'f':
+ formatted.push_back('\f');
+ break;
+ case 'n':
+ formatted.push_back('\n');
+ break;
+ case 'r':
+ formatted.push_back('\r');
+ break;
+ case 't':
+ formatted.push_back('\t');
+ break;
+ case 'v':
+ formatted.push_back('\v');
+ break;
+ case '\'':
+ formatted.push_back('\'');
+ break;
+ case '\\':
+ formatted.push_back('\\');
+ break;
+ case '0':
+ // 1 to 3 octal chars
+ {
+ if (text.empty()) {
+ error.SetErrorString("missing octal number following '\\0'");
+ return error;
+ }
+
+ // Make a string that can hold onto the initial zero char, up to 3
+ // octal digits, and a terminating NULL.
+ char oct_str[5] = {0, 0, 0, 0, 0};
+
+ size_t i;
+ for (i = 0;
+ i < text.size() && i < 4 && (text[i] >= '0' && text[i] <= '7');
+ ++i) {
+ oct_str[i] = text[i];
+ }
+
+ text = text.drop_front(i);
+ unsigned long octal_value = ::strtoul(oct_str, nullptr, 8);
+ if (octal_value <= UINT8_MAX) {
+ formatted.push_back((char)octal_value);
+ } else {
+ error.SetErrorString("octal number is larger than a single byte");
+ return error;
+ }
+ }
+ break;
+
+ case 'x': {
+ if (text.empty()) {
+ error.SetErrorString("missing hex number following '\\x'");
+ return error;
+ }
+ // hex number in the text
+ if (isxdigit(text[0])) {
+ // Make a string that can hold onto two hex chars plus a
+ // NULL terminator
+ char hex_str[3] = {0, 0, 0};
+ hex_str[0] = text[0];
+
+ text = text.drop_front();
+
+ if (!text.empty() && isxdigit(text[0])) {
+ hex_str[1] = text[0];
+ text = text.drop_front();
+ }
+
+ unsigned long hex_value = strtoul(hex_str, nullptr, 16);
+ if (hex_value <= UINT8_MAX) {
+ formatted.push_back((char)hex_value);
+ } else {
+ error.SetErrorString("hex number is larger than a single byte");
+ return error;
+ }
+ } else {
+ formatted.push_back(desens_char);
+ }
+ break;
+ }
+
+ default:
+ // Just desensitize any other character by just printing what came
+ // after the '\'
+ formatted.push_back(desens_char);
+ break;
+ }
+ }
+ return error;
+}
+
+// logMessage will be divided into array of LogMessagePart as two kinds:
+// 1. raw print text message, and
+// 2. interpolated expression for evaluation which is inside matching curly
+// braces.
+//
+// The function tries to parse logMessage into a list of LogMessageParts
+// for easy later access in BreakpointHitCallback.
+void SourceBreakpoint::SetLogMessage() {
+ logMessageParts.clear();
+
+ // Contains unmatched open curly braces indices.
+ std::vector<int> unmatched_curly_braces;
+
+ // Contains all matched curly braces in logMessage.
+ // Loop invariant: matched_curly_braces_ranges are sorted by start index in
+ // ascending order without any overlap between them.
+ std::vector<std::pair<int, int>> matched_curly_braces_ranges;
+
+ lldb::SBError error;
+ // Part1 - parse matched_curly_braces_ranges.
+ // locating all curly braced expression ranges in logMessage.
+ // The algorithm takes care of nested and imbalanced curly braces.
+ for (size_t i = 0; i < logMessage.size(); ++i) {
+ if (logMessage[i] == '{') {
+ unmatched_curly_braces.push_back(i);
+ } else if (logMessage[i] == '}') {
+ if (unmatched_curly_braces.empty())
+ // Nothing to match.
+ continue;
+
+ int last_unmatched_index = unmatched_curly_braces.back();
+ unmatched_curly_braces.pop_back();
+
+ // Erase any matched ranges included in the new match.
+ while (!matched_curly_braces_ranges.empty()) {
+ assert(matched_curly_braces_ranges.back().first !=
+ last_unmatched_index &&
+ "How can a curley brace be matched twice?");
+ if (matched_curly_braces_ranges.back().first < last_unmatched_index)
+ break;
+
+ // This is a nested range let's earse it.
+ assert((size_t)matched_curly_braces_ranges.back().second < i);
+ matched_curly_braces_ranges.pop_back();
+ }
+
+ // Assert invariant.
+ assert(matched_curly_braces_ranges.empty() ||
+ matched_curly_braces_ranges.back().first < last_unmatched_index);
+ matched_curly_braces_ranges.emplace_back(last_unmatched_index, i);
+ }
+ }
+
+ // Part2 - parse raw text and expresions parts.
+ // All expression ranges have been parsed in matched_curly_braces_ranges.
+ // The code below uses matched_curly_braces_ranges to divide logMessage
+ // into raw text parts and expression parts.
+ int last_raw_text_start = 0;
+ for (const std::pair<int, int> &curly_braces_range :
+ matched_curly_braces_ranges) {
+ // Raw text before open curly brace.
+ assert(curly_braces_range.first >= last_raw_text_start);
+ size_t raw_text_len = curly_braces_range.first - last_raw_text_start;
+ if (raw_text_len > 0) {
+ error = AppendLogMessagePart(
+ llvm::StringRef(logMessage.c_str() + last_raw_text_start,
+ raw_text_len),
+ /*is_expr=*/false);
+ if (error.Fail()) {
+ NotifyLogMessageError(error.GetCString());
+ return;
+ }
+ }
+
+ // Expression between curly braces.
+ assert(curly_braces_range.second > curly_braces_range.first);
+ size_t expr_len = curly_braces_range.second - curly_braces_range.first - 1;
+ error = AppendLogMessagePart(
+ llvm::StringRef(logMessage.c_str() + curly_braces_range.first + 1,
+ expr_len),
+ /*is_expr=*/true);
+ if (error.Fail()) {
+ NotifyLogMessageError(error.GetCString());
+ return;
+ }
+
+ last_raw_text_start = curly_braces_range.second + 1;
+ }
+ // Trailing raw text after close curly brace.
+ assert(last_raw_text_start >= 0);
+ if (logMessage.size() > (size_t)last_raw_text_start) {
+ error = AppendLogMessagePart(
+ llvm::StringRef(logMessage.c_str() + last_raw_text_start,
+ logMessage.size() - last_raw_text_start),
+ /*is_expr=*/false);
+ if (error.Fail()) {
+ NotifyLogMessageError(error.GetCString());
+ return;
+ }
+ }
+
+ bp.SetCallback(BreakpointHitCallback, this);
+}
+
+void SourceBreakpoint::NotifyLogMessageError(llvm::StringRef error) {
+ std::string message = "Log message has error: ";
+ message += error;
+ g_dap.SendOutput(OutputType::Console, message);
+}
+
+/*static*/
+bool SourceBreakpoint::BreakpointHitCallback(
+ void *baton, lldb::SBProcess &process, lldb::SBThread &thread,
+ lldb::SBBreakpointLocation &location) {
+ if (!baton)
+ return true;
+
+ SourceBreakpoint *bp = (SourceBreakpoint *)baton;
+ lldb::SBFrame frame = thread.GetSelectedFrame();
+
+ std::string output;
+ for (const SourceBreakpoint::LogMessagePart &messagePart :
+ bp->logMessageParts) {
+ if (messagePart.is_expr) {
+ // Try local frame variables first before fall back to expression
+ // evaluation
+ const std::string &expr_str = messagePart.text;
+ const char *expr = expr_str.c_str();
+ lldb::SBValue value =
+ frame.GetValueForVariablePath(expr, lldb::eDynamicDontRunTarget);
+ if (value.GetError().Fail())
+ value = frame.EvaluateExpression(expr);
+ output += VariableDescription(value).display_value;
+ } else {
+ output += messagePart.text;
+ }
+ }
+ if (!output.empty() && output.back() != '\n')
+ output.push_back('\n'); // Ensure log message has line break.
+ g_dap.SendOutput(OutputType::Console, output.c_str());
+
+ // Do not stop.
+ return false;
}
} // namespace lldb_dap
diff --git a/lldb/tools/lldb-dap/SourceBreakpoint.h b/lldb/tools/lldb-dap/SourceBreakpoint.h
index f4b54a4..aa3fbe6 100644
--- a/lldb/tools/lldb-dap/SourceBreakpoint.h
+++ b/lldb/tools/lldb-dap/SourceBreakpoint.h
@@ -9,21 +9,45 @@
#ifndef LLDB_TOOLS_LLDB_DAP_SOURCEBREAKPOINT_H
#define LLDB_TOOLS_LLDB_DAP_SOURCEBREAKPOINT_H
-#include "BreakpointBase.h"
+#include "Breakpoint.h"
#include "llvm/ADT/StringRef.h"
namespace lldb_dap {
-struct SourceBreakpoint : public BreakpointBase {
+struct SourceBreakpoint : public Breakpoint {
+ // logMessage part can be either a raw text or an expression.
+ struct LogMessagePart {
+ LogMessagePart(llvm::StringRef text, bool is_expr)
+ : text(text), is_expr(is_expr) {}
+ std::string text;
+ bool is_expr;
+ };
+ // If this attribute exists and is non-empty, the backend must not 'break'
+ // (stop) but log the message instead. Expressions within {} are
+ // interpolated.
+ std::string logMessage;
+ std::vector<LogMessagePart> logMessageParts;
uint32_t line; ///< The source line of the breakpoint or logpoint
uint32_t column; ///< An optional source column of the breakpoint
- SourceBreakpoint() : BreakpointBase(), line(0), column(0) {}
+ SourceBreakpoint() : Breakpoint(), line(0), column(0) {}
SourceBreakpoint(const llvm::json::Object &obj);
// Set this breakpoint in LLDB as a new breakpoint
void SetBreakpoint(const llvm::StringRef source_path);
+ void UpdateBreakpoint(const SourceBreakpoint &request_bp);
+
+ void SetLogMessage();
+ // Format \param text and return formatted text in \param formatted.
+ // \return any formatting failures.
+ lldb::SBError FormatLogText(llvm::StringRef text, std::string &formatted);
+ lldb::SBError AppendLogMessagePart(llvm::StringRef part, bool is_expr);
+ void NotifyLogMessageError(llvm::StringRef error);
+
+ static bool BreakpointHitCallback(void *baton, lldb::SBProcess &process,
+ lldb::SBThread &thread,
+ lldb::SBBreakpointLocation &location);
};
inline bool operator<(const SourceBreakpoint &lhs,
diff --git a/lldb/tools/lldb-dap/lldb-dap.cpp b/lldb/tools/lldb-dap/lldb-dap.cpp
index 01494dc..6702234 100644
--- a/lldb/tools/lldb-dap/lldb-dap.cpp
+++ b/lldb/tools/lldb-dap/lldb-dap.cpp
@@ -525,7 +525,8 @@ void EventThreadFunction() {
if (event_mask & lldb::SBTarget::eBroadcastBitBreakpointChanged) {
auto event_type =
lldb::SBBreakpoint::GetBreakpointEventTypeFromEvent(event);
- auto bp = lldb::SBBreakpoint::GetBreakpointFromEvent(event);
+ auto bp =
+ Breakpoint(lldb::SBBreakpoint::GetBreakpointFromEvent(event));
// If the breakpoint was originated from the IDE, it will have the
// BreakpointBase::GetBreakpointLabel() label attached. Regardless
// of wether the locations were added or removed, the breakpoint
@@ -541,7 +542,7 @@ void EventThreadFunction() {
// mapped. Note that CreateBreakpoint doesn't apply source mapping.
// Besides, the current implementation of VSCode ignores the
// "source" element of breakpoint events.
- llvm::json::Value source_bp = CreateBreakpoint(bp);
+ llvm::json::Value source_bp = CreateBreakpoint(&bp);
source_bp.getAsObject()->erase("source");
body.try_emplace("breakpoint", source_bp);
@@ -2345,7 +2346,7 @@ void request_setBreakpoints(const llvm::json::Object &request) {
existing_source_bps->second.find(src_bp.line);
if (existing_bp != existing_source_bps->second.end()) {
existing_bp->second.UpdateBreakpoint(src_bp);
- AppendBreakpoint(existing_bp->second.bp, response_breakpoints, path,
+ AppendBreakpoint(&existing_bp->second, response_breakpoints, path,
src_bp.line);
continue;
}
@@ -2354,7 +2355,7 @@ void request_setBreakpoints(const llvm::json::Object &request) {
g_dap.source_breakpoints[path][src_bp.line] = src_bp;
SourceBreakpoint &new_bp = g_dap.source_breakpoints[path][src_bp.line];
new_bp.SetBreakpoint(path.data());
- AppendBreakpoint(new_bp.bp, response_breakpoints, path, new_bp.line);
+ AppendBreakpoint(&new_bp, response_breakpoints, path, new_bp.line);
}
}
}
@@ -2567,7 +2568,7 @@ void request_setFunctionBreakpoints(const llvm::json::Object &request) {
// handled it here and we don't need to set a new breakpoint below.
request_bps.erase(request_pos);
// Add this breakpoint info to the response
- AppendBreakpoint(pair.second.bp, response_breakpoints);
+ AppendBreakpoint(&pair.second, response_breakpoints);
}
}
// Remove any breakpoints that are no longer in our list
@@ -2581,7 +2582,7 @@ void request_setFunctionBreakpoints(const llvm::json::Object &request) {
g_dap.function_breakpoints[pair.first()] = std::move(pair.second);
FunctionBreakpoint &new_bp = g_dap.function_breakpoints[pair.first()];
new_bp.SetBreakpoint();
- AppendBreakpoint(new_bp.bp, response_breakpoints);
+ AppendBreakpoint(&new_bp, response_breakpoints);
}
llvm::json::Object body;
@@ -3582,8 +3583,8 @@ void request__testGetTargetBreakpoints(const llvm::json::Object &request) {
FillResponse(request, response);
llvm::json::Array response_breakpoints;
for (uint32_t i = 0; g_dap.target.GetBreakpointAtIndex(i).IsValid(); ++i) {
- auto bp = g_dap.target.GetBreakpointAtIndex(i);
- AppendBreakpoint(bp, response_breakpoints);
+ auto bp = Breakpoint(g_dap.target.GetBreakpointAtIndex(i));
+ AppendBreakpoint(&bp, response_breakpoints);
}
llvm::json::Object body;
body.try_emplace("breakpoints", std::move(response_breakpoints));
diff --git a/lldb/unittests/ScriptInterpreter/Python/PythonTestSuite.cpp b/lldb/unittests/ScriptInterpreter/Python/PythonTestSuite.cpp
index 7f3359f..5f0cc4c 100644
--- a/lldb/unittests/ScriptInterpreter/Python/PythonTestSuite.cpp
+++ b/lldb/unittests/ScriptInterpreter/Python/PythonTestSuite.cpp
@@ -218,6 +218,14 @@ bool lldb_private::python::SWIGBridge::LLDBSwigPythonCallCommandObject(
return false;
}
+bool lldb_private::python::SWIGBridge::LLDBSwigPythonCallParsedCommandObject(
+ PyObject *implementor, lldb::DebuggerSP debugger,
+ StructuredDataImpl &args_impl,
+ lldb_private::CommandReturnObject &cmd_retobj,
+ lldb::ExecutionContextRefSP exe_ctx_ref_sp) {
+ return false;
+}
+
bool lldb_private::python::SWIGBridge::LLDBSwigPythonCallModuleInit(
const char *python_module_name, const char *session_dictionary_name,
lldb::DebuggerSP debugger) {
diff --git a/lldb/unittests/SymbolFile/DWARF/CMakeLists.txt b/lldb/unittests/SymbolFile/DWARF/CMakeLists.txt
index 4a37ece..d5b0be7 100644
--- a/lldb/unittests/SymbolFile/DWARF/CMakeLists.txt
+++ b/lldb/unittests/SymbolFile/DWARF/CMakeLists.txt
@@ -1,5 +1,6 @@
add_lldb_unittest(SymbolFileDWARFTests
DWARFASTParserClangTests.cpp
+ DWARFDebugNamesIndexTest.cpp
DWARFDIETest.cpp
DWARFIndexCachingTest.cpp
DWARFUnitTest.cpp
diff --git a/lldb/unittests/SymbolFile/DWARF/DWARFDebugNamesIndexTest.cpp b/lldb/unittests/SymbolFile/DWARF/DWARFDebugNamesIndexTest.cpp
new file mode 100644
index 0000000..e56e628
--- /dev/null
+++ b/lldb/unittests/SymbolFile/DWARF/DWARFDebugNamesIndexTest.cpp
@@ -0,0 +1,208 @@
+//===-- DWARFDIETest.cpp ----------------------------------------------=---===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "Plugins/SymbolFile/DWARF/DWARFDIE.h"
+#include "Plugins/SymbolFile/DWARF/DWARFDebugInfo.h"
+#include "Plugins/SymbolFile/DWARF/DWARFDeclContext.h"
+#include "Plugins/SymbolFile/DWARF/DebugNamesDWARFIndex.h"
+#include "TestingSupport/Symbol/YAMLModuleTester.h"
+#include "llvm/ADT/STLExtras.h"
+#include "gmock/gmock.h"
+#include "gtest/gtest.h"
+
+using namespace lldb;
+using namespace lldb_private;
+using namespace lldb_private::plugin::dwarf;
+using StringRef = llvm::StringRef;
+
+static void
+check_num_matches(DebugNamesDWARFIndex &index, int expected_num_matches,
+ llvm::ArrayRef<DWARFDeclContext::Entry> ctx_entries) {
+ DWARFDeclContext ctx(ctx_entries);
+ int num_matches = 0;
+
+ index.GetFullyQualifiedType(ctx, [&](DWARFDIE die) {
+ num_matches++;
+ return true;
+ });
+ ASSERT_EQ(num_matches, expected_num_matches);
+}
+
+static DWARFDeclContext::Entry make_entry(const char *c) {
+ return DWARFDeclContext::Entry(dwarf::DW_TAG_class_type, c);
+}
+
+TEST(DWARFDebugNamesIndexTest, FullyQualifiedQueryWithIDXParent) {
+ const char *yamldata = R"(
+--- !ELF
+FileHeader:
+ Class: ELFCLASS64
+ Data: ELFDATA2LSB
+ Type: ET_EXEC
+ Machine: EM_386
+DWARF:
+ debug_str:
+ - '1'
+ - '2'
+ - '3'
+ debug_abbrev:
+ - Table:
+ # We intentionally don't nest types in debug_info: if the nesting is not
+ # inferred from debug_names, we want the test to fail.
+ - Code: 0x1
+ Tag: DW_TAG_compile_unit
+ Children: DW_CHILDREN_yes
+ - Code: 0x2
+ Tag: DW_TAG_class_type
+ Children: DW_CHILDREN_no
+ Attributes:
+ - Attribute: DW_AT_name
+ Form: DW_FORM_strp
+ debug_info:
+ - Version: 4
+ AddrSize: 8
+ Entries:
+ - AbbrCode: 0x1
+ - AbbrCode: 0x2
+ Values:
+ - Value: 0x0 # Name "1"
+ - AbbrCode: 0x2
+ Values:
+ - Value: 0x2 # Name "2"
+ - AbbrCode: 0x2
+ Values:
+ - Value: 0x4 # Name "3"
+ - AbbrCode: 0x0
+ debug_names:
+ Abbreviations:
+ - Code: 0x11
+ Tag: DW_TAG_class_type
+ Indices:
+ - Idx: DW_IDX_parent
+ Form: DW_FORM_flag_present
+ - Idx: DW_IDX_die_offset
+ Form: DW_FORM_ref4
+ - Code: 0x22
+ Tag: DW_TAG_class_type
+ Indices:
+ - Idx: DW_IDX_parent
+ Form: DW_FORM_ref4
+ - Idx: DW_IDX_die_offset
+ Form: DW_FORM_ref4
+ Entries:
+ - Name: 0x0 # strp to Name1
+ Code: 0x11
+ Values:
+ - 0xc # Die offset to entry named "1"
+ - Name: 0x2 # strp to Name2
+ Code: 0x22
+ Values:
+ - 0x0 # Parent = First entry ("1")
+ - 0x11 # Die offset to entry named "1:2"
+ - Name: 0x4 # strp to Name3
+ Code: 0x22
+ Values:
+ - 0x6 # Parent = Second entry ("1::2")
+ - 0x16 # Die offset to entry named "1::2::3"
+ - Name: 0x4 # strp to Name3
+ Code: 0x11
+ Values:
+ - 0x16 # Die offset to entry named "3"
+)";
+
+ YAMLModuleTester t(yamldata);
+ auto *symbol_file =
+ llvm::cast<SymbolFileDWARF>(t.GetModule()->GetSymbolFile());
+ auto *index = static_cast<DebugNamesDWARFIndex *>(symbol_file->getIndex());
+ ASSERT_NE(index, nullptr);
+
+ check_num_matches(*index, 1, {make_entry("1")});
+ check_num_matches(*index, 1, {make_entry("2"), make_entry("1")});
+ check_num_matches(*index, 1,
+ {make_entry("3"), make_entry("2"), make_entry("1")});
+ check_num_matches(*index, 0, {make_entry("2")});
+ check_num_matches(*index, 1, {make_entry("3")});
+}
+
+TEST(DWARFDebugNamesIndexTest, FullyQualifiedQueryWithoutIDXParent) {
+ const char *yamldata = R"(
+--- !ELF
+FileHeader:
+ Class: ELFCLASS64
+ Data: ELFDATA2LSB
+ Type: ET_EXEC
+ Machine: EM_386
+DWARF:
+ debug_str:
+ - '1'
+ - '2'
+ debug_abbrev:
+ - Table:
+ - Code: 0x1
+ Tag: DW_TAG_compile_unit
+ Children: DW_CHILDREN_yes
+ - Code: 0x2
+ Tag: DW_TAG_class_type
+ Children: DW_CHILDREN_yes
+ Attributes:
+ - Attribute: DW_AT_name
+ Form: DW_FORM_strp
+ - Code: 0x3
+ Tag: DW_TAG_class_type
+ Children: DW_CHILDREN_no
+ Attributes:
+ - Attribute: DW_AT_name
+ Form: DW_FORM_strp
+ debug_info:
+ - Version: 4
+ AddrSize: 8
+ Entries:
+ - AbbrCode: 0x1
+ - AbbrCode: 0x2
+ Values:
+ - Value: 0x0 # Name "1"
+ - AbbrCode: 0x3
+ Values:
+ - Value: 0x2 # Name "2"
+ - AbbrCode: 0x0
+ - AbbrCode: 0x3
+ Values:
+ - Value: 0x2 # Name "2"
+ - AbbrCode: 0x0
+ debug_names:
+ Abbreviations:
+ - Code: 0x1
+ Tag: DW_TAG_class_type
+ Indices:
+ - Idx: DW_IDX_die_offset
+ Form: DW_FORM_ref4
+ Entries:
+ - Name: 0x0 # strp to Name1
+ Code: 0x1
+ Values:
+ - 0xc # Die offset to entry named "1"
+ - Name: 0x2 # strp to Name2
+ Code: 0x1
+ Values:
+ - 0x11 # Die offset to entry named "1::2"
+ - Name: 0x2 # strp to Name2
+ Code: 0x1
+ Values:
+ - 0x17 # Die offset to entry named "2"
+)";
+
+ YAMLModuleTester t(yamldata);
+ auto *symbol_file =
+ llvm::cast<SymbolFileDWARF>(t.GetModule()->GetSymbolFile());
+ auto *index = static_cast<DebugNamesDWARFIndex *>(symbol_file->getIndex());
+ ASSERT_NE(index, nullptr);
+
+ check_num_matches(*index, 1, {make_entry("1")});
+ check_num_matches(*index, 1, {make_entry("2"), make_entry("1")});
+ check_num_matches(*index, 1, {make_entry("2")});
+}
diff --git a/llvm/CMakeLists.txt b/llvm/CMakeLists.txt
index 81f2753..a760a19 100644
--- a/llvm/CMakeLists.txt
+++ b/llvm/CMakeLists.txt
@@ -1256,8 +1256,10 @@ if( LLVM_INCLUDE_TESTS )
get_property(LLVM_ALL_LIT_DEPENDS GLOBAL PROPERTY LLVM_ALL_LIT_DEPENDS)
get_property(LLVM_ALL_ADDITIONAL_TEST_DEPENDS
GLOBAL PROPERTY LLVM_ALL_ADDITIONAL_TEST_DEPENDS)
- add_custom_target(test-depends
- DEPENDS ${LLVM_ALL_LIT_DEPENDS} ${LLVM_ALL_ADDITIONAL_TEST_DEPENDS})
+ add_custom_target(test-depends)
+ if(LLVM_ALL_LIT_DEPENDS OR LLVM_ALL_ADDITIONAL_TEST_DEPENDS)
+ add_dependencies(test-depends ${LLVM_ALL_LIT_DEPENDS} ${LLVM_ALL_ADDITIONAL_TEST_DEPENDS})
+ endif()
set_target_properties(test-depends PROPERTIES FOLDER "Tests")
add_dependencies(check-all test-depends)
endif()
diff --git a/llvm/docs/AMDGPUUsage.rst b/llvm/docs/AMDGPUUsage.rst
index ddd5fd4..0c588c8 100644
--- a/llvm/docs/AMDGPUUsage.rst
+++ b/llvm/docs/AMDGPUUsage.rst
@@ -567,7 +567,7 @@ greater than or equal to the version in which the processor was added to the gen
- ``v_dot2_f32_f16``
- ``gfx10.1-generic`` ``amdgcn`` - ``gfx1010`` - xnack - Absolute flat - The following instructions are
+ ``gfx10-1-generic`` ``amdgcn`` - ``gfx1010`` - xnack - Absolute flat - The following instructions are
- ``gfx1011`` - wavefrontsize64 scratch not available on ``gfx1011``
- ``gfx1012`` - cumode and ``gfx1012``
- ``gfx1013``
@@ -586,7 +586,7 @@ greater than or equal to the version in which the processor was added to the gen
``gfx1013``
- ``gfx10.3-generic`` ``amdgcn`` - ``gfx1030`` - wavefrontsize64 - Absolute flat No restrictions.
+ ``gfx10-3-generic`` ``amdgcn`` - ``gfx1030`` - wavefrontsize64 - Absolute flat No restrictions.
- ``gfx1031`` - cumode scratch
- ``gfx1032``
- ``gfx1033``
@@ -1860,8 +1860,8 @@ The AMDGPU backend uses the following ELF header:
*reserved* 0x04f Reserved.
*reserved* 0x050 Reserved.
``EF_AMDGPU_MACH_AMDGCN_GFX9_GENERIC`` 0x051 ``gfx9-generic``
- ``EF_AMDGPU_MACH_AMDGCN_GFX10_1_GENERIC`` 0x052 ``gfx10.1-generic``
- ``EF_AMDGPU_MACH_AMDGCN_GFX10_3_GENERIC`` 0x053 ``gfx10.3-generic``
+ ``EF_AMDGPU_MACH_AMDGCN_GFX10_1_GENERIC`` 0x052 ``gfx10-1-generic``
+ ``EF_AMDGPU_MACH_AMDGCN_GFX10_3_GENERIC`` 0x053 ``gfx10-3-generic``
``EF_AMDGPU_MACH_AMDGCN_GFX11_GENERIC`` 0x054 ``gfx11-generic``
*reserved* 0x055 Reserved.
========================================== ========== =============================
@@ -5515,7 +5515,10 @@ additional 256 bytes to the kernel_code_entry_byte_offset. This addition
facilitates the incorporation of a prologue to the kernel entry to handle cases
where code designed for kernarg preloading is executed on hardware equipped with
incompatible firmware. If hardware has compatible firmware the 256 bytes at the
-start of the kernel entry will be skipped.
+start of the kernel entry will be skipped. Additionally, the compiler backend
+may insert a trap instruction at the start of the kernel prologue to manage
+situations where kernarg preloading is attempted on hardware with incompatible
+firmware.
.. _amdgpu-amdhsa-kernel-prolog:
@@ -12288,8 +12291,8 @@ table :ref:`amdgpu-amdhsa-memory-model-code-sequences-gfx10-gfx11-table`.
before invalidating
the caches.
- 3. buffer_gl0_inv;
- buffer_gl1_inv
+ 3. buffer_gl1_inv;
+ buffer_gl0_inv
- Must happen before
any following
@@ -12318,8 +12321,8 @@ table :ref:`amdgpu-amdhsa-memory-model-code-sequences-gfx10-gfx11-table`.
before invalidating
the caches.
- 3. buffer_gl0_inv;
- buffer_gl1_inv
+ 3. buffer_gl1_inv;
+ buffer_gl0_inv
- Must happen before
any following
@@ -12425,8 +12428,8 @@ table :ref:`amdgpu-amdhsa-memory-model-code-sequences-gfx10-gfx11-table`.
invalidating the
caches.
- 3. buffer_gl0_inv;
- buffer_gl1_inv
+ 3. buffer_gl1_inv;
+ buffer_gl0_inv
- Must happen before
any following
@@ -12456,8 +12459,8 @@ table :ref:`amdgpu-amdhsa-memory-model-code-sequences-gfx10-gfx11-table`.
invalidating the
caches.
- 3. buffer_gl0_inv;
- buffer_gl1_inv
+ 3. buffer_gl1_inv;
+ buffer_gl0_inv
- Must happen before
any following
@@ -12652,8 +12655,8 @@ table :ref:`amdgpu-amdhsa-memory-model-code-sequences-gfx10-gfx11-table`.
the
fence-paired-atomic.
- 2. buffer_gl0_inv;
- buffer_gl1_inv
+ 2. buffer_gl1_inv;
+ buffer_gl0_inv
- Must happen before any
following global/generic
@@ -13366,8 +13369,8 @@ table :ref:`amdgpu-amdhsa-memory-model-code-sequences-gfx10-gfx11-table`.
invalidating the
caches.
- 4. buffer_gl0_inv;
- buffer_gl1_inv
+ 4. buffer_gl1_inv;
+ buffer_gl0_inv
- Must happen before
any following
@@ -13441,8 +13444,8 @@ table :ref:`amdgpu-amdhsa-memory-model-code-sequences-gfx10-gfx11-table`.
invalidating the
caches.
- 4. buffer_gl0_inv;
- buffer_gl1_inv
+ 4. buffer_gl1_inv;
+ buffer_gl0_inv
- Must happen before
any following
@@ -13669,8 +13672,8 @@ table :ref:`amdgpu-amdhsa-memory-model-code-sequences-gfx10-gfx11-table`.
requirements of
release.
- 2. buffer_gl0_inv;
- buffer_gl1_inv
+ 2. buffer_gl1_inv;
+ buffer_gl0_inv
- Must happen before
any following
diff --git a/llvm/docs/CodingStandards.rst b/llvm/docs/CodingStandards.rst
index 7ba20c0..63df5af 100644
--- a/llvm/docs/CodingStandards.rst
+++ b/llvm/docs/CodingStandards.rst
@@ -73,7 +73,10 @@ Each toolchain provides a good reference for what it accepts:
* libstdc++: https://gcc.gnu.org/onlinedocs/libstdc++/manual/status.html#status.iso.2017
-* MSVC: https://msdn.microsoft.com/en-us/library/hh567368.aspx
+* MSVC: https://learn.microsoft.com/cpp/overview/visual-cpp-language-conformance
+
+Additionally, there are compiler comparison tables of supported C++ features on
+`cppreference.com <https://en.cppreference.com/w/cpp/compiler_support/17>`_.
C++ Standard Library
diff --git a/llvm/include/llvm/ADT/APFixedPoint.h b/llvm/include/llvm/ADT/APFixedPoint.h
index b0c5108..0c014e7 100644
--- a/llvm/include/llvm/ADT/APFixedPoint.h
+++ b/llvm/include/llvm/ADT/APFixedPoint.h
@@ -260,6 +260,7 @@ public:
static APFixedPoint getMax(const FixedPointSemantics &Sema);
static APFixedPoint getMin(const FixedPointSemantics &Sema);
+ static APFixedPoint getEpsilon(const FixedPointSemantics &Sema);
/// Given a floating point semantic, return the next floating point semantic
/// with a larger exponent and larger or equal mantissa.
diff --git a/llvm/include/llvm/ADT/BitVector.h b/llvm/include/llvm/ADT/BitVector.h
index e0de1af..0eaa77b6 100644
--- a/llvm/include/llvm/ADT/BitVector.h
+++ b/llvm/include/llvm/ADT/BitVector.h
@@ -42,7 +42,7 @@ template <typename BitVectorT> class const_set_bits_iterator_impl {
public:
using iterator_category = std::forward_iterator_tag;
- using difference_type = void;
+ using difference_type = std::ptrdiff_t;
using value_type = int;
using pointer = value_type*;
using reference = value_type&;
diff --git a/llvm/include/llvm/Analysis/TargetLibraryInfo.h b/llvm/include/llvm/Analysis/TargetLibraryInfo.h
index daf1d8e..46f31f9 100644
--- a/llvm/include/llvm/Analysis/TargetLibraryInfo.h
+++ b/llvm/include/llvm/Analysis/TargetLibraryInfo.h
@@ -129,7 +129,8 @@ public:
MASSV, // IBM MASS vector library.
SVML, // Intel short vector math library.
SLEEFGNUABI, // SLEEF - SIMD Library for Evaluating Elementary Functions.
- ArmPL // Arm Performance Libraries.
+ ArmPL, // Arm Performance Libraries.
+ AMDLIBM // AMD Math Vector library.
};
TargetLibraryInfoImpl();
diff --git a/llvm/include/llvm/Analysis/ValueTracking.h b/llvm/include/llvm/Analysis/ValueTracking.h
index 06f94f5..f0d0ee5 100644
--- a/llvm/include/llvm/Analysis/ValueTracking.h
+++ b/llvm/include/llvm/Analysis/ValueTracking.h
@@ -197,6 +197,12 @@ unsigned ComputeMaxSignificantBits(const Value *Op, const DataLayout &DL,
Intrinsic::ID getIntrinsicForCallSite(const CallBase &CB,
const TargetLibraryInfo *TLI);
+/// Given an exploded icmp instruction, return true if the comparison only
+/// checks the sign bit. If it only checks the sign bit, set TrueIfSigned if
+/// the result of the comparison is true when the input value is signed.
+bool isSignBitCheck(ICmpInst::Predicate Pred, const APInt &RHS,
+ bool &TrueIfSigned);
+
/// Returns a pair of values, which if passed to llvm.is.fpclass, returns the
/// same result as an fcmp with the given operands.
///
diff --git a/llvm/include/llvm/Analysis/VecFuncs.def b/llvm/include/llvm/Analysis/VecFuncs.def
index 07edf68..394e4a0 100644
--- a/llvm/include/llvm/Analysis/VecFuncs.def
+++ b/llvm/include/llvm/Analysis/VecFuncs.def
@@ -1067,6 +1067,199 @@ TLI_DEFINE_VECFUNC("tgammaf", "armpl_vtgammaq_f32", FIXED(4), NOMASK, "_ZGV_LLVM
TLI_DEFINE_VECFUNC("tgamma", "armpl_svtgamma_f64_x", SCALABLE(2), MASKED, "_ZGVsMxv")
TLI_DEFINE_VECFUNC("tgammaf", "armpl_svtgamma_f32_x", SCALABLE(4), MASKED, "_ZGVsMxv")
+#elif defined(TLI_DEFINE_AMDLIBM_VECFUNCS)
+TLI_DEFINE_VECFUNC("sinf", "amd_vrs16_sinf", FIXED(16), NOMASK, "_ZGV_LLVM_N16v")
+TLI_DEFINE_VECFUNC("sinf", "amd_vrs8_sinf", FIXED(8), NOMASK, "_ZGV_LLVM_N8v")
+TLI_DEFINE_VECFUNC("sinf", "amd_vrs4_sinf", FIXED(4), NOMASK, "_ZGV_LLVM_N4v")
+TLI_DEFINE_VECFUNC("sin", "amd_vrd8_sin", FIXED(8), NOMASK, "_ZGV_LLVM_N8v")
+TLI_DEFINE_VECFUNC("sin", "amd_vrd4_sin", FIXED(4), NOMASK, "_ZGV_LLVM_N4v")
+TLI_DEFINE_VECFUNC("sin", "amd_vrd2_sin", FIXED(2), NOMASK, "_ZGV_LLVM_N2v")
+
+TLI_DEFINE_VECFUNC("llvm.sin.f32", "amd_vrs16_sinf", FIXED(16), NOMASK, "_ZGV_LLVM_N16v")
+TLI_DEFINE_VECFUNC("llvm.sin.f32", "amd_vrs8_sinf", FIXED(8), NOMASK, "_ZGV_LLVM_N8v")
+TLI_DEFINE_VECFUNC("llvm.sin.f32", "amd_vrs4_sinf", FIXED(4), NOMASK, "_ZGV_LLVM_N4v")
+TLI_DEFINE_VECFUNC("llvm.sin.f64", "amd_vrd8_sin", FIXED(8), NOMASK, "_ZGV_LLVM_N8v")
+TLI_DEFINE_VECFUNC("llvm.sin.f64", "amd_vrd4_sin", FIXED(4), NOMASK, "_ZGV_LLVM_N4v")
+TLI_DEFINE_VECFUNC("llvm.sin.f64", "amd_vrd2_sin", FIXED(2), NOMASK, "_ZGV_LLVM_N2v")
+
+TLI_DEFINE_VECFUNC("cosf", "amd_vrs16_cosf", FIXED(16), NOMASK, "_ZGV_LLVM_N16v")
+TLI_DEFINE_VECFUNC("cosf", "amd_vrs8_cosf", FIXED(8), NOMASK, "_ZGV_LLVM_N8v")
+TLI_DEFINE_VECFUNC("cosf", "amd_vrs4_cosf", FIXED(4), NOMASK, "_ZGV_LLVM_N4v")
+TLI_DEFINE_VECFUNC("cos", "amd_vrd8_cos", FIXED(8), NOMASK, "_ZGV_LLVM_N8v")
+TLI_DEFINE_VECFUNC("cos", "amd_vrd4_cos", FIXED(4), NOMASK, "_ZGV_LLVM_N4v")
+TLI_DEFINE_VECFUNC("cos", "amd_vrd2_cos", FIXED(2), NOMASK, "_ZGV_LLVM_N2v")
+
+TLI_DEFINE_VECFUNC("llvm.cos.f32", "amd_vrs16_cosf", FIXED(16), NOMASK, "_ZGV_LLVM_N16v")
+TLI_DEFINE_VECFUNC("llvm.cos.f32", "amd_vrs8_cosf", FIXED(8), NOMASK, "_ZGV_LLVM_N8v")
+TLI_DEFINE_VECFUNC("llvm.cos.f32", "amd_vrs4_cosf", FIXED(4), NOMASK, "_ZGV_LLVM_N4v")
+TLI_DEFINE_VECFUNC("llvm.cos.f64", "amd_vrd8_cos", FIXED(8), NOMASK, "_ZGV_LLVM_N8v")
+TLI_DEFINE_VECFUNC("llvm.cos.f64", "amd_vrd4_cos", FIXED(4), NOMASK, "_ZGV_LLVM_N4v")
+TLI_DEFINE_VECFUNC("llvm.cos.f64", "amd_vrd2_cos", FIXED(2), NOMASK, "_ZGV_LLVM_N2v")
+
+TLI_DEFINE_VECFUNC("expf", "amd_vrs16_expf", FIXED(16), NOMASK, "_ZGV_LLVM_N16v")
+TLI_DEFINE_VECFUNC("expf", "amd_vrs8_expf", FIXED(8), NOMASK, "_ZGV_LLVM_N8v")
+TLI_DEFINE_VECFUNC("expf", "amd_vrs4_expf", FIXED(4), NOMASK, "_ZGV_LLVM_N4v")
+TLI_DEFINE_VECFUNC("exp", "amd_vrd2_exp", FIXED(2), NOMASK, "_ZGV_LLVM_N2v")
+TLI_DEFINE_VECFUNC("exp", "amd_vrd4_exp", FIXED(4), NOMASK, "_ZGV_LLVM_N4v")
+TLI_DEFINE_VECFUNC("exp", "amd_vrd8_exp", FIXED(8), NOMASK, "_ZGV_LLVM_N8v")
+
+TLI_DEFINE_VECFUNC("__expf_finite", "amd_vrs16_expf", FIXED(16), NOMASK, "_ZGV_LLVM_N16v")
+TLI_DEFINE_VECFUNC("__expf_finite", "amd_vrs8_expf", FIXED(8), NOMASK, "_ZGV_LLVM_N8v")
+TLI_DEFINE_VECFUNC("__expf_finite", "amd_vrs4_expf", FIXED(4), NOMASK, "_ZGV_LLVM_N4v")
+TLI_DEFINE_VECFUNC("__exp_finite", "amd_vrd2_exp", FIXED(2), NOMASK, "_ZGV_LLVM_N2v")
+TLI_DEFINE_VECFUNC("__exp_finite", "amd_vrd4_exp", FIXED(4), NOMASK, "_ZGV_LLVM_N4v")
+TLI_DEFINE_VECFUNC("__exp_finite", "amd_vrd8_exp", FIXED(8), NOMASK, "_ZGV_LLVM_N8v")
+
+TLI_DEFINE_VECFUNC("llvm.exp.f32", "amd_vrs16_expf", FIXED(16), NOMASK, "_ZGV_LLVM_N16v")
+TLI_DEFINE_VECFUNC("llvm.exp.f32", "amd_vrs8_expf", FIXED(8), NOMASK, "_ZGV_LLVM_N8v")
+TLI_DEFINE_VECFUNC("llvm.exp.f32", "amd_vrs4_expf", FIXED(4), NOMASK, "_ZGV_LLVM_N4v")
+TLI_DEFINE_VECFUNC("llvm.exp.f64", "amd_vrd2_exp", FIXED(2), NOMASK, "_ZGV_LLVM_N2v")
+TLI_DEFINE_VECFUNC("llvm.exp.f64", "amd_vrd4_exp", FIXED(4), NOMASK, "_ZGV_LLVM_N4v")
+TLI_DEFINE_VECFUNC("llvm.exp.f64", "amd_vrd8_exp", FIXED(8), NOMASK, "_ZGV_LLVM_N8v")
+
+TLI_DEFINE_VECFUNC("exp2f", "amd_vrs16_exp2f", FIXED(16), NOMASK, "_ZGV_LLVM_N16v")
+TLI_DEFINE_VECFUNC("exp2f", "amd_vrs8_exp2f", FIXED(8), NOMASK, "_ZGV_LLVM_N8v")
+TLI_DEFINE_VECFUNC("exp2f", "amd_vrs4_exp2f", FIXED(4), NOMASK, "_ZGV_LLVM_N4v")
+TLI_DEFINE_VECFUNC("exp2", "amd_vrd2_exp2", FIXED(2), NOMASK, "_ZGV_LLVM_N2v")
+TLI_DEFINE_VECFUNC("exp2", "amd_vrd4_exp2", FIXED(4), NOMASK, "_ZGV_LLVM_N4v")
+TLI_DEFINE_VECFUNC("exp2", "amd_vrd8_exp2", FIXED(8), NOMASK, "_ZGV_LLVM_N8v")
+
+TLI_DEFINE_VECFUNC("__exp2f_finite", "amd_vrs16_exp2f", FIXED(16), NOMASK, "_ZGV_LLVM_N16v")
+TLI_DEFINE_VECFUNC("__exp2f_finite", "amd_vrs8_exp2f", FIXED(8), NOMASK, "_ZGV_LLVM_N8v")
+TLI_DEFINE_VECFUNC("__exp2f_finite", "amd_vrs4_exp2f", FIXED(4), NOMASK, "_ZGV_LLVM_N4v")
+TLI_DEFINE_VECFUNC("__exp2_finite", "amd_vrd2_exp2", FIXED(2), NOMASK, "_ZGV_LLVM_N2v")
+TLI_DEFINE_VECFUNC("__exp2_finite", "amd_vrd4_exp2", FIXED(4), NOMASK, "_ZGV_LLVM_N4v")
+TLI_DEFINE_VECFUNC("__exp2_finite", "amd_vrd8_exp2", FIXED(8), NOMASK, "_ZGV_LLVM_N8v")
+
+TLI_DEFINE_VECFUNC("llvm.exp2.f32", "amd_vrs16_exp2f", FIXED(16), NOMASK, "_ZGV_LLVM_N16v")
+TLI_DEFINE_VECFUNC("llvm.exp2.f32", "amd_vrs8_exp2f", FIXED(8), NOMASK, "_ZGV_LLVM_N8v")
+TLI_DEFINE_VECFUNC("llvm.exp2.f32", "amd_vrs4_exp2f", FIXED(4), NOMASK, "_ZGV_LLVM_N4v")
+TLI_DEFINE_VECFUNC("llvm.exp2.f64", "amd_vrd2_exp2", FIXED(2), NOMASK, "_ZGV_LLVM_N2v")
+TLI_DEFINE_VECFUNC("llvm.exp2.f64", "amd_vrd4_exp2", FIXED(4), NOMASK, "_ZGV_LLVM_N4v")
+TLI_DEFINE_VECFUNC("llvm.exp2.f64", "amd_vrd8_exp2", FIXED(8), NOMASK, "_ZGV_LLVM_N8v")
+
+TLI_DEFINE_VECFUNC("powf", "amd_vrs16_powf", FIXED(16), NOMASK, "_ZGV_LLVM_N16vv")
+TLI_DEFINE_VECFUNC("powf", "amd_vrs8_powf", FIXED(8), NOMASK, "_ZGV_LLVM_N8vv")
+TLI_DEFINE_VECFUNC("powf", "amd_vrs4_powf", FIXED(4), NOMASK, "_ZGV_LLVM_N4vv")
+TLI_DEFINE_VECFUNC("pow", "amd_vrd2_pow", FIXED(2), NOMASK, "_ZGV_LLVM_N2vv")
+TLI_DEFINE_VECFUNC("pow", "amd_vrd4_pow", FIXED(4), NOMASK, "_ZGV_LLVM_N4vv")
+TLI_DEFINE_VECFUNC("pow", "amd_vrd8_pow", FIXED(8), NOMASK, "_ZGV_LLVM_N8vv")
+
+TLI_DEFINE_VECFUNC("__powf_finite", "amd_vrs16_powf", FIXED(16), NOMASK, "_ZGV_LLVM_N16vv")
+TLI_DEFINE_VECFUNC("__powf_finite", "amd_vrs8_powf", FIXED(8), NOMASK, "_ZGV_LLVM_N8vv")
+TLI_DEFINE_VECFUNC("__powf_finite", "amd_vrs4_powf", FIXED(4), NOMASK, "_ZGV_LLVM_N4vv")
+TLI_DEFINE_VECFUNC("__pow_finite", "amd_vrd2_pow", FIXED(2), NOMASK, "_ZGV_LLVM_N2vv")
+TLI_DEFINE_VECFUNC("__pow_finite", "amd_vrd4_pow", FIXED(4), NOMASK, "_ZGV_LLVM_N4vv")
+TLI_DEFINE_VECFUNC("__pow_finite", "amd_vrd8_pow", FIXED(8), NOMASK, "_ZGV_LLVM_N8vv")
+
+TLI_DEFINE_VECFUNC("llvm.pow.f32", "amd_vrs16_powf", FIXED(16), NOMASK, "_ZGV_LLVM_N16vv")
+TLI_DEFINE_VECFUNC("llvm.pow.f32", "amd_vrs8_powf", FIXED(8), NOMASK, "_ZGV_LLVM_N8vv")
+TLI_DEFINE_VECFUNC("llvm.pow.f32", "amd_vrs4_powf", FIXED(4), NOMASK, "_ZGV_LLVM_N4vv")
+TLI_DEFINE_VECFUNC("llvm.pow.f64", "amd_vrd2_pow", FIXED(2), NOMASK, "_ZGV_LLVM_N2vv")
+TLI_DEFINE_VECFUNC("llvm.pow.f64", "amd_vrd4_pow", FIXED(4), NOMASK, "_ZGV_LLVM_N4vv")
+TLI_DEFINE_VECFUNC("llvm.pow.f64", "amd_vrd8_pow", FIXED(8), NOMASK, "_ZGV_LLVM_N8vv")
+
+TLI_DEFINE_VECFUNC("logf", "amd_vrs16_logf", FIXED(16), NOMASK, "_ZGV_LLVM_N16v")
+TLI_DEFINE_VECFUNC("logf", "amd_vrs8_logf", FIXED(8), NOMASK, "_ZGV_LLVM_N8v")
+TLI_DEFINE_VECFUNC("logf", "amd_vrs4_logf", FIXED(4), NOMASK, "_ZGV_LLVM_N4v")
+TLI_DEFINE_VECFUNC("log", "amd_vrd2_log", FIXED(2), NOMASK, "_ZGV_LLVM_N2v")
+TLI_DEFINE_VECFUNC("log", "amd_vrd4_log", FIXED(4), NOMASK, "_ZGV_LLVM_N4v")
+TLI_DEFINE_VECFUNC("log", "amd_vrd8_log", FIXED(8), NOMASK, "_ZGV_LLVM_N8v")
+
+TLI_DEFINE_VECFUNC("__logf_finite", "amd_vrs16_logf", FIXED(16), NOMASK, "_ZGV_LLVM_N16v")
+TLI_DEFINE_VECFUNC("__logf_finite", "amd_vrs8_logf", FIXED(8), NOMASK, "_ZGV_LLVM_N8v")
+TLI_DEFINE_VECFUNC("__logf_finite", "amd_vrs4_logf", FIXED(4), NOMASK, "_ZGV_LLVM_N4v")
+TLI_DEFINE_VECFUNC("__log_finite", "amd_vrd2_log", FIXED(2), NOMASK, "_ZGV_LLVM_N2v")
+TLI_DEFINE_VECFUNC("__log_finite", "amd_vrd4_log", FIXED(4), NOMASK, "_ZGV_LLVM_N4v")
+TLI_DEFINE_VECFUNC("__log_finite", "amd_vrd8_log", FIXED(8), NOMASK, "_ZGV_LLVM_N8v")
+
+TLI_DEFINE_VECFUNC("llvm.log.f32", "amd_vrs16_logf", FIXED(16), NOMASK, "_ZGV_LLVM_N16v")
+TLI_DEFINE_VECFUNC("llvm.log.f32", "amd_vrs8_logf", FIXED(8), NOMASK, "_ZGV_LLVM_N8v")
+TLI_DEFINE_VECFUNC("llvm.log.f32", "amd_vrs4_logf", FIXED(4), NOMASK, "_ZGV_LLVM_N4v")
+TLI_DEFINE_VECFUNC("llvm.log.f64", "amd_vrd2_log", FIXED(2), NOMASK, "_ZGV_LLVM_N2v")
+TLI_DEFINE_VECFUNC("llvm.log.f64", "amd_vrd4_log", FIXED(4), NOMASK, "_ZGV_LLVM_N4v")
+TLI_DEFINE_VECFUNC("llvm.log.f64", "amd_vrd8_log", FIXED(8), NOMASK, "_ZGV_LLVM_N8v")
+
+TLI_DEFINE_VECFUNC("log2f", "amd_vrs16_log2f", FIXED(16), NOMASK, "_ZGV_LLVM_N16v")
+TLI_DEFINE_VECFUNC("log2f", "amd_vrs8_log2f", FIXED(8), NOMASK, "_ZGV_LLVM_N8v")
+TLI_DEFINE_VECFUNC("log2f", "amd_vrs4_log2f", FIXED(4), NOMASK, "_ZGV_LLVM_N4v")
+TLI_DEFINE_VECFUNC("log2", "amd_vrd2_log2", FIXED(2), NOMASK, "_ZGV_LLVM_N2v")
+TLI_DEFINE_VECFUNC("log2", "amd_vrd4_log2", FIXED(4), NOMASK, "_ZGV_LLVM_N4v")
+TLI_DEFINE_VECFUNC("log2", "amd_vrd8_log2", FIXED(8), NOMASK, "_ZGV_LLVM_N8v")
+
+TLI_DEFINE_VECFUNC("__log2f_finite", "amd_vrs16_log2f", FIXED(16), NOMASK, "_ZGV_LLVM_N16v")
+TLI_DEFINE_VECFUNC("__log2f_finite", "amd_vrs8_log2f", FIXED(8), NOMASK, "_ZGV_LLVM_N8v")
+TLI_DEFINE_VECFUNC("__log2f_finite", "amd_vrs4_log2f", FIXED(4), NOMASK, "_ZGV_LLVM_N4v")
+TLI_DEFINE_VECFUNC("__log2_finite", "amd_vrd2_log2", FIXED(2), NOMASK, "_ZGV_LLVM_N2v")
+TLI_DEFINE_VECFUNC("__log2_finite", "amd_vrd4_log2", FIXED(4), NOMASK, "_ZGV_LLVM_N4v")
+TLI_DEFINE_VECFUNC("__log2_finite", "amd_vrd8_log2", FIXED(8), NOMASK, "_ZGV_LLVM_N8v")
+
+TLI_DEFINE_VECFUNC("llvm.log2.f32", "amd_vrs16_log2f", FIXED(16), NOMASK, "_ZGV_LLVM_N16v")
+TLI_DEFINE_VECFUNC("llvm.log2.f32", "amd_vrs8_log2f", FIXED(8), NOMASK, "_ZGV_LLVM_N8v")
+TLI_DEFINE_VECFUNC("llvm.log2.f32", "amd_vrs4_log2f", FIXED(4), NOMASK, "_ZGV_LLVM_N4v")
+TLI_DEFINE_VECFUNC("llvm.log2.f64", "amd_vrd2_log2", FIXED(2), NOMASK, "_ZGV_LLVM_N2v")
+TLI_DEFINE_VECFUNC("llvm.log2.f64", "amd_vrd4_log2", FIXED(4), NOMASK, "_ZGV_LLVM_N4v")
+TLI_DEFINE_VECFUNC("llvm.log2.f64", "amd_vrd8_log2", FIXED(8), NOMASK, "_ZGV_LLVM_N8v")
+
+TLI_DEFINE_VECFUNC("log10f", "amd_vrs16_log10f", FIXED(16), NOMASK, "_ZGV_LLVM_N16v")
+TLI_DEFINE_VECFUNC("log10f", "amd_vrs8_log10f", FIXED(8), NOMASK, "_ZGV_LLVM_N8v")
+TLI_DEFINE_VECFUNC("log10f", "amd_vrs4_log10f", FIXED(4), NOMASK, "_ZGV_LLVM_N4v")
+
+TLI_DEFINE_VECFUNC("__log10f_finite", "amd_vrs16_log10f", FIXED(16), NOMASK, "_ZGV_LLVM_N16v")
+TLI_DEFINE_VECFUNC("__log10f_finite", "amd_vrs8_log10f", FIXED(8), NOMASK, "_ZGV_LLVM_N8v")
+TLI_DEFINE_VECFUNC("__log10f_finite", "amd_vrs4_log10f", FIXED(4), NOMASK, "_ZGV_LLVM_N4v")
+
+TLI_DEFINE_VECFUNC("llvm.log10.f32", "amd_vrs16_log10f", FIXED(16), NOMASK, "_ZGV_LLVM_N16v")
+TLI_DEFINE_VECFUNC("llvm.log10.f32", "amd_vrs8_log10f", FIXED(8), NOMASK, "_ZGV_LLVM_N8v")
+TLI_DEFINE_VECFUNC("llvm.log10.f32", "amd_vrs4_log10f", FIXED(4), NOMASK, "_ZGV_LLVM_N4v")
+
+TLI_DEFINE_VECFUNC("erff", "amd_vrs4_erff", FIXED(4), NOMASK, "_ZGV_LLVM_N4v")
+TLI_DEFINE_VECFUNC("erff", "amd_vrs8_erff", FIXED(8), NOMASK, "_ZGV_LLVM_N8v")
+TLI_DEFINE_VECFUNC("erff", "amd_vrs16_erff", FIXED(16), NOMASK, "_ZGV_LLVM_N16v")
+TLI_DEFINE_VECFUNC("erf", "amd_vrd2_erf", FIXED(2), NOMASK, "_ZGV_LLVM_N2v")
+TLI_DEFINE_VECFUNC("erf", "amd_vrd4_erf", FIXED(4), NOMASK, "_ZGV_LLVM_N4v")
+TLI_DEFINE_VECFUNC("erf", "amd_vrd8_erf", FIXED(8), NOMASK, "_ZGV_LLVM_N8v")
+
+TLI_DEFINE_VECFUNC("exp10", "amd_vrd2_exp10", FIXED(2), NOMASK, "_ZGV_LLVM_N2v")
+TLI_DEFINE_VECFUNC("exp10f", "amd_vrs4_exp10f", FIXED(4), NOMASK, "_ZGV_LLVM_N4v")
+
+TLI_DEFINE_VECFUNC("expm1", "amd_vrd2_expm1", FIXED(2), NOMASK, "_ZGV_LLVM_N2v")
+TLI_DEFINE_VECFUNC("expm1f", "amd_vrs4_expm1f", FIXED(4), NOMASK, "_ZGV_LLVM_N4v")
+
+TLI_DEFINE_VECFUNC("log1p", "amd_vrd2_log1p", FIXED(2), NOMASK, "_ZGV_LLVM_N2v")
+TLI_DEFINE_VECFUNC("log1pf", "amd_vrs4_log1pf", FIXED(4), NOMASK, "_ZGV_LLVM_N4v")
+
+TLI_DEFINE_VECFUNC("tan", "amd_vrd2_tan", FIXED(2), NOMASK, "_ZGV_LLVM_N2v")
+TLI_DEFINE_VECFUNC("tan", "amd_vrd4_tan", FIXED(4), NOMASK, "_ZGV_LLVM_N4v")
+TLI_DEFINE_VECFUNC("tan", "amd_vrd8_tan", FIXED(8), NOMASK, "_ZGV_LLVM_N8v")
+TLI_DEFINE_VECFUNC("tanf", "amd_vrs4_tanf", FIXED(4), NOMASK, "_ZGV_LLVM_N4v")
+TLI_DEFINE_VECFUNC("tanf", "amd_vrs8_tanf", FIXED(8), NOMASK, "_ZGV_LLVM_N8v")
+TLI_DEFINE_VECFUNC("tanf", "amd_vrs16_tanf", FIXED(16), NOMASK, "_ZGV_LLVM_N16v")
+
+TLI_DEFINE_VECFUNC("asin", "amd_vrd8_asin", FIXED(8), NOMASK, "_ZGV_LLVM_N8v")
+TLI_DEFINE_VECFUNC("asinf", "amd_vrs4_asinf", FIXED(4), NOMASK, "_ZGV_LLVM_N4v")
+TLI_DEFINE_VECFUNC("asinf", "amd_vrs8_asinf", FIXED(8), NOMASK, "_ZGV_LLVM_N8v")
+TLI_DEFINE_VECFUNC("asinf", "amd_vrs16_asinf", FIXED(16), NOMASK, "_ZGV_LLVM_N16v")
+
+TLI_DEFINE_VECFUNC("acosf", "amd_vrs4_acosf", FIXED(4), NOMASK, "_ZGV_LLVM_N4v")
+TLI_DEFINE_VECFUNC("acosf", "amd_vrs8_acosf", FIXED(8), NOMASK, "_ZGV_LLVM_N8v")
+
+TLI_DEFINE_VECFUNC("atan", "amd_vrd2_atan", FIXED(2), NOMASK, "_ZGV_LLVM_N2v")
+TLI_DEFINE_VECFUNC("atan", "amd_vrd4_atan", FIXED(4), NOMASK, "_ZGV_LLVM_N4v")
+TLI_DEFINE_VECFUNC("atan", "amd_vrd8_atan", FIXED(8), NOMASK, "_ZGV_LLVM_N8v")
+TLI_DEFINE_VECFUNC("atanf", "amd_vrs4_atanf", FIXED(4), NOMASK, "_ZGV_LLVM_N4v")
+TLI_DEFINE_VECFUNC("atanf", "amd_vrs8_atanf", FIXED(8), NOMASK, "_ZGV_LLVM_N8v")
+TLI_DEFINE_VECFUNC("atanf", "amd_vrs16_atanf", FIXED(16), NOMASK, "_ZGV_LLVM_N16v")
+
+TLI_DEFINE_VECFUNC("coshf", "amd_vrs4_coshf", FIXED(4), NOMASK, "_ZGV_LLVM_N4v")
+TLI_DEFINE_VECFUNC("coshf", "amd_vrs8_coshf", FIXED(8), NOMASK, "_ZGV_LLVM_N8v")
+
+TLI_DEFINE_VECFUNC("tanhf", "amd_vrs4_tanhf", FIXED(4), NOMASK, "_ZGV_LLVM_N4v")
+TLI_DEFINE_VECFUNC("tanhf", "amd_vrs8_tanhf", FIXED(8), NOMASK, "_ZGV_LLVM_N8v")
+
+TLI_DEFINE_VECFUNC("cbrt", "amd_vrd2_cbrt", FIXED(2), NOMASK, "_ZGV_LLVM_N2v")
+TLI_DEFINE_VECFUNC("cbrtf", "amd_vrs4_cbrtf", FIXED(4), NOMASK, "_ZGV_LLVM_N4v")
+
#else
#error "Must choose which vector library functions are to be defined."
#endif
@@ -1087,3 +1280,4 @@ TLI_DEFINE_VECFUNC("tgammaf", "armpl_svtgamma_f32_x", SCALABLE(4), MASKED, "_ZGV
#undef TLI_DEFINE_SLEEFGNUABI_SCALABLE_VECFUNCS
#undef TLI_DEFINE_MASSV_VECFUNCS_NAMES
#undef TLI_DEFINE_ARMPL_VECFUNCS
+#undef TLI_DEFINE_AMDLIBM_VECFUNCS
diff --git a/llvm/include/llvm/CodeGen/AccelTable.h b/llvm/include/llvm/CodeGen/AccelTable.h
index e6a6616..6ee817a 100644
--- a/llvm/include/llvm/CodeGen/AccelTable.h
+++ b/llvm/include/llvm/CodeGen/AccelTable.h
@@ -275,11 +275,6 @@ struct DenseMapInfo<OffsetAndUnitID> : DenseMapInfo<OffsetAndUnitID::Base> {};
/// emitDWARF5AccelTable function.
class DWARF5AccelTableData : public AccelTableData {
public:
- struct AttributeEncoding {
- dwarf::Index Index;
- dwarf::Form Form;
- };
-
static uint32_t hash(StringRef Name) { return caseFoldingDjbHash(Name); }
DWARF5AccelTableData(const DIE &Die, const uint32_t UnitID,
@@ -289,7 +284,7 @@ public:
const unsigned DieTag, const unsigned UnitID,
const bool IsTU = false)
: OffsetVal(DieOffset), ParentOffset(DefiningParentOffset),
- DieTag(DieTag), UnitID(UnitID), IsTU(IsTU) {}
+ DieTag(DieTag), AbbrevNumber(0), IsTU(IsTU), UnitID(UnitID) {}
#ifndef NDEBUG
void print(raw_ostream &OS) const override;
@@ -330,6 +325,12 @@ public:
return OffsetAndUnitID(*ParentOffset, getUnitID());
}
+ /// Sets AbbrevIndex for an Entry.
+ void setAbbrevNumber(uint16_t AbbrevNum) { AbbrevNumber = AbbrevNum; }
+
+ /// Returns AbbrevIndex for an Entry.
+ uint16_t getAbbrevNumber() const { return AbbrevNumber; }
+
/// If `Die` has a non-null parent and the parent is not a declaration,
/// return its offset.
static std::optional<uint64_t> getDefiningParentDieOffset(const DIE &Die);
@@ -338,12 +339,42 @@ protected:
std::variant<const DIE *, uint64_t> OffsetVal;
std::optional<uint64_t> ParentOffset;
uint32_t DieTag : 16;
- uint32_t UnitID : 15;
+ uint32_t AbbrevNumber : 15;
uint32_t IsTU : 1;
-
+ uint32_t UnitID;
uint64_t order() const override { return getDieOffset(); }
};
+class DebugNamesAbbrev : public FoldingSetNode {
+public:
+ uint32_t DieTag;
+ uint32_t Number;
+ struct AttributeEncoding {
+ dwarf::Index Index;
+ dwarf::Form Form;
+ };
+ DebugNamesAbbrev(uint32_t DieTag) : DieTag(DieTag) {}
+ /// Add attribute encoding to an abbreviation.
+ void addAttribute(const DebugNamesAbbrev::AttributeEncoding &Attr) {
+ AttrVect.push_back(Attr);
+ }
+ /// Set abbreviation tag index.
+ void setNumber(uint32_t AbbrevNumber) { Number = AbbrevNumber; }
+ /// Get abbreviation tag index.
+ uint32_t getNumber() const { return Number; }
+ /// Get DIE Tag.
+ uint32_t getDieTag() const { return DieTag; }
+ /// Used to gather unique data for the abbreviation folding set.
+ void Profile(FoldingSetNodeID &ID) const;
+ /// Returns attributes for an abbreviation.
+ const SmallVector<AttributeEncoding, 1> &getAttributes() const {
+ return AttrVect;
+ }
+
+private:
+ SmallVector<AttributeEncoding, 1> AttrVect;
+};
+
struct TypeUnitMetaInfo {
// Symbol for start of the TU section or signature if this is SplitDwarf.
std::variant<MCSymbol *, uint64_t> LabelOrSignature;
@@ -358,7 +389,7 @@ class DWARF5AccelTable : public AccelTable<DWARF5AccelTableData> {
public:
struct UnitIndexAndEncoding {
unsigned Index;
- DWARF5AccelTableData::AttributeEncoding Encoding;
+ DebugNamesAbbrev::AttributeEncoding Encoding;
};
/// Returns type units that were constructed.
const TUVectorTy &getTypeUnitsSymbols() { return TUSymbolsOrHashes; }
diff --git a/llvm/include/llvm/CodeGen/GlobalISel/LegalizerInfo.h b/llvm/include/llvm/CodeGen/GlobalISel/LegalizerInfo.h
index 5fd80e5..637c2c7 100644
--- a/llvm/include/llvm/CodeGen/GlobalISel/LegalizerInfo.h
+++ b/llvm/include/llvm/CodeGen/GlobalISel/LegalizerInfo.h
@@ -1084,7 +1084,7 @@ public:
},
[=](const LegalityQuery &Query) {
LLT T = Query.Types[LargeTypeIdx];
- if (T.isVector() && T.getElementType().isPointer())
+ if (T.isPointerVector())
T = T.changeElementType(LLT::scalar(T.getScalarSizeInBits()));
return std::make_pair(TypeIdx, T);
});
diff --git a/llvm/include/llvm/CodeGen/ISDOpcodes.h b/llvm/include/llvm/CodeGen/ISDOpcodes.h
index 349d128..8cb0bc9 100644
--- a/llvm/include/llvm/CodeGen/ISDOpcodes.h
+++ b/llvm/include/llvm/CodeGen/ISDOpcodes.h
@@ -1179,6 +1179,12 @@ enum NodeType {
/// counter-like register (or other high accuracy low latency clock source).
READCYCLECOUNTER,
+ /// READSTEADYCOUNTER - This corresponds to the readfixedcounter intrinsic.
+ /// It has the same semantics as the READCYCLECOUNTER implementation except
+ /// that the result is the content of the architecture-specific fixed
+ /// frequency counter suitable for measuring elapsed time.
+ READSTEADYCOUNTER,
+
/// HANDLENODE node - Used as a handle for various purposes.
HANDLENODE,
diff --git a/llvm/include/llvm/CodeGen/SelectionDAG.h b/llvm/include/llvm/CodeGen/SelectionDAG.h
index 886ec0b..7bb12d8 100644
--- a/llvm/include/llvm/CodeGen/SelectionDAG.h
+++ b/llvm/include/llvm/CodeGen/SelectionDAG.h
@@ -668,6 +668,8 @@ public:
bool isTarget = false);
SDValue getShiftAmountConstant(uint64_t Val, EVT VT, const SDLoc &DL,
bool LegalTypes = true);
+ SDValue getShiftAmountConstant(const APInt &Val, EVT VT, const SDLoc &DL,
+ bool LegalTypes = true);
SDValue getVectorIdxConstant(uint64_t Val, const SDLoc &DL,
bool isTarget = false);
diff --git a/llvm/include/llvm/CodeGenTypes/LowLevelType.h b/llvm/include/llvm/CodeGenTypes/LowLevelType.h
index cc33152..5a16cff 100644
--- a/llvm/include/llvm/CodeGenTypes/LowLevelType.h
+++ b/llvm/include/llvm/CodeGenTypes/LowLevelType.h
@@ -134,15 +134,17 @@ public:
explicit LLT(MVT VT);
- constexpr bool isValid() const { return IsScalar || RawData != 0; }
+ constexpr bool isValid() const { return IsScalar || IsPointer || IsVector; }
constexpr bool isScalar() const { return IsScalar; }
- constexpr bool isPointer() const {
- return isValid() && IsPointer && !IsVector;
- }
+ constexpr bool isPointer() const { return IsPointer && !IsVector; }
+
+ constexpr bool isPointerVector() const { return IsPointer && IsVector; }
+
+ constexpr bool isPointerOrPointerVector() const { return IsPointer; }
- constexpr bool isVector() const { return isValid() && IsVector; }
+ constexpr bool isVector() const { return IsVector; }
/// Returns the number of elements in a vector LLT. Must only be called on
/// vector types.
@@ -209,7 +211,7 @@ public:
/// but the new element size. Otherwise, return the new element type. Invalid
/// for pointer types. For pointer types, use changeElementType.
constexpr LLT changeElementSize(unsigned NewEltSize) const {
- assert(!getScalarType().isPointer() &&
+ assert(!isPointerOrPointerVector() &&
"invalid to directly change element size for pointers");
return isVector() ? LLT::vector(getElementCount(), NewEltSize)
: LLT::scalar(NewEltSize);
diff --git a/llvm/include/llvm/Frontend/Driver/CodeGenOptions.h b/llvm/include/llvm/Frontend/Driver/CodeGenOptions.h
index 0b1d924..0180670 100644
--- a/llvm/include/llvm/Frontend/Driver/CodeGenOptions.h
+++ b/llvm/include/llvm/Frontend/Driver/CodeGenOptions.h
@@ -29,7 +29,8 @@ enum class VectorLibrary {
SVML, // Intel short vector math library.
SLEEF, // SLEEF SIMD Library for Evaluating Elementary Functions.
Darwin_libsystem_m, // Use Darwin's libsystem_m vector functions.
- ArmPL // Arm Performance Libraries.
+ ArmPL, // Arm Performance Libraries.
+ AMDLIBM // AMD vector math library.
};
TargetLibraryInfoImpl *createTLII(llvm::Triple &TargetTriple,
diff --git a/llvm/include/llvm/IR/Intrinsics.td b/llvm/include/llvm/IR/Intrinsics.td
index 3c19c7b..4becdd7 100644
--- a/llvm/include/llvm/IR/Intrinsics.td
+++ b/llvm/include/llvm/IR/Intrinsics.td
@@ -870,6 +870,8 @@ def int_pcmarker : DefaultAttrsIntrinsic<[], [llvm_i32_ty]>;
def int_readcyclecounter : DefaultAttrsIntrinsic<[llvm_i64_ty]>;
+def int_readsteadycounter : DefaultAttrsIntrinsic<[llvm_i64_ty]>;
+
// The assume intrinsic is marked InaccessibleMemOnly so that proper control
// dependencies will be maintained.
def int_assume : DefaultAttrsIntrinsic<
diff --git a/llvm/include/llvm/Object/COFFImportFile.h b/llvm/include/llvm/Object/COFFImportFile.h
index 23c3e6a..402ded0 100644
--- a/llvm/include/llvm/Object/COFFImportFile.h
+++ b/llvm/include/llvm/Object/COFFImportFile.h
@@ -135,10 +135,20 @@ struct COFFShortExport {
}
};
-Error writeImportLibrary(StringRef ImportName, StringRef Path,
- ArrayRef<COFFShortExport> Exports,
- ArrayRef<COFFShortExport> NativeExports,
- COFF::MachineTypes Machine, bool MinGW);
+/// Writes a COFF import library containing entries described by the Exports
+/// array.
+///
+/// For hybrid targets such as ARM64EC, additional native entry points can be
+/// exposed using the NativeExports parameter. When NativeExports is used, the
+/// output import library will expose these native ARM64 imports alongside the
+/// entries described in the Exports array. Such a library can be used for
+/// linking both ARM64EC and pure ARM64 objects, and the linker will pick only
+/// the exports relevant to the target platform. For non-hybrid targets,
+/// the NativeExports parameter should not be used.
+Error writeImportLibrary(
+ StringRef ImportName, StringRef Path, ArrayRef<COFFShortExport> Exports,
+ COFF::MachineTypes Machine, bool MinGW,
+ ArrayRef<COFFShortExport> NativeExports = std::nullopt);
} // namespace object
} // namespace llvm
diff --git a/llvm/include/llvm/Passes/CodeGenPassBuilder.h b/llvm/include/llvm/Passes/CodeGenPassBuilder.h
index fa6dbd4..80bbfb7 100644
--- a/llvm/include/llvm/Passes/CodeGenPassBuilder.h
+++ b/llvm/include/llvm/Passes/CodeGenPassBuilder.h
@@ -7,8 +7,7 @@
//===----------------------------------------------------------------------===//
/// \file
///
-/// Interfaces for registering analysis passes, producing common pass manager
-/// configurations, and parsing of pass pipelines.
+/// Interfaces for producing common pass manager configurations.
///
//===----------------------------------------------------------------------===//
@@ -137,16 +136,6 @@ public:
raw_pwrite_stream &Out, raw_pwrite_stream *DwoOut,
CodeGenFileType FileType) const;
- void registerModuleAnalyses(ModuleAnalysisManager &) const;
- void registerFunctionAnalyses(FunctionAnalysisManager &) const;
- void registerMachineFunctionAnalyses(MachineFunctionAnalysisManager &) const;
-
- void registerAnalyses(MachineFunctionAnalysisManager &MFAM) const {
- registerModuleAnalyses(*MFAM.MAM);
- registerFunctionAnalyses(*MFAM.FAM);
- registerMachineFunctionAnalyses(MFAM);
- }
-
PassInstrumentationCallbacks *getPassInstrumentationCallbacks() const {
return PIC;
}
@@ -239,14 +228,6 @@ protected:
CGPassBuilderOption Opt;
PassInstrumentationCallbacks *PIC;
- /// Target override these hooks to parse target-specific analyses.
- void registerTargetAnalysis(ModuleAnalysisManager &) const {}
- void registerTargetAnalysis(FunctionAnalysisManager &) const {}
- void registerTargetAnalysis(MachineFunctionAnalysisManager &) const {}
- std::pair<StringRef, bool> getTargetPassNameFromLegacyName(StringRef) const {
- return {"", false};
- }
-
template <typename TMC> TMC &getTM() const { return static_cast<TMC &>(TM); }
CodeGenOptLevel getOptLevel() const { return TM.getOptLevel(); }
@@ -577,52 +558,6 @@ Error CodeGenPassBuilder<Derived>::verifyStartStop(
return Error::success();
}
-static inline AAManager registerAAAnalyses() {
- AAManager AA;
-
- // The order in which these are registered determines their priority when
- // being queried.
-
- // Basic AliasAnalysis support.
- // Add TypeBasedAliasAnalysis before BasicAliasAnalysis so that
- // BasicAliasAnalysis wins if they disagree. This is intended to help
- // support "obvious" type-punning idioms.
- AA.registerFunctionAnalysis<TypeBasedAA>();
- AA.registerFunctionAnalysis<ScopedNoAliasAA>();
- AA.registerFunctionAnalysis<BasicAA>();
-
- return AA;
-}
-
-template <typename Derived>
-void CodeGenPassBuilder<Derived>::registerModuleAnalyses(
- ModuleAnalysisManager &MAM) const {
-#define MODULE_ANALYSIS(NAME, CREATE_PASS) \
- MAM.registerPass([&] { return CREATE_PASS; });
-#include "MachinePassRegistry.def"
- derived().registerTargetAnalysis(MAM);
-}
-
-template <typename Derived>
-void CodeGenPassBuilder<Derived>::registerFunctionAnalyses(
- FunctionAnalysisManager &FAM) const {
- FAM.registerPass([this] { return registerAAAnalyses(); });
-
-#define FUNCTION_ANALYSIS(NAME, CREATE_PASS) \
- FAM.registerPass([&] { return CREATE_PASS; });
-#include "MachinePassRegistry.def"
- derived().registerTargetAnalysis(FAM);
-}
-
-template <typename Derived>
-void CodeGenPassBuilder<Derived>::registerMachineFunctionAnalyses(
- MachineFunctionAnalysisManager &MFAM) const {
-#define MACHINE_FUNCTION_ANALYSIS(NAME, CREATE_PASS) \
- MFAM.registerPass([&] { return CREATE_PASS; });
-#include "MachinePassRegistry.def"
- derived().registerTargetAnalysis(MFAM);
-}
-
template <typename Derived>
void CodeGenPassBuilder<Derived>::addISelPasses(AddIRPass &addPass) const {
derived().addGlobalMergePass(addPass);
diff --git a/llvm/include/llvm/ProfileData/Coverage/CoverageMapping.h b/llvm/include/llvm/ProfileData/Coverage/CoverageMapping.h
index 88ec60c..e3b3942 100644
--- a/llvm/include/llvm/ProfileData/Coverage/CoverageMapping.h
+++ b/llvm/include/llvm/ProfileData/Coverage/CoverageMapping.h
@@ -23,6 +23,7 @@
#include "llvm/ADT/iterator.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Object/BuildID.h"
+#include "llvm/ProfileData/Coverage/MCDCTypes.h"
#include "llvm/ProfileData/InstrProf.h"
#include "llvm/Support/Alignment.h"
#include "llvm/Support/Compiler.h"
@@ -37,7 +38,6 @@
#include <sstream>
#include <string>
#include <system_error>
-#include <tuple>
#include <utility>
#include <vector>
@@ -249,19 +249,6 @@ struct CounterMappingRegion {
MCDCBranchRegion
};
- using MCDCConditionID = unsigned int;
- struct MCDCParameters {
- /// Byte Index of Bitmap Coverage Object for a Decision Region.
- unsigned BitmapIdx = 0;
-
- /// Number of Conditions used for a Decision Region.
- unsigned NumConditions = 0;
-
- /// IDs used to represent a branch region and other branch regions
- /// evaluated based on True and False branches.
- MCDCConditionID ID = 0, TrueID = 0, FalseID = 0;
- };
-
/// Primary Counter that is also used for Branch Regions (TrueCount).
Counter Count;
@@ -269,7 +256,25 @@ struct CounterMappingRegion {
Counter FalseCount;
/// Parameters used for Modified Condition/Decision Coverage
- MCDCParameters MCDCParams;
+ mcdc::Parameters MCDCParams;
+
+ template <class MaybeConstInnerParameters, class MaybeConstMCDCParameters>
+ static auto &getParams(MaybeConstMCDCParameters &MCDCParams) {
+ using InnerParameters =
+ typename std::remove_const<MaybeConstInnerParameters>::type;
+ MaybeConstInnerParameters *Params =
+ std::get_if<InnerParameters>(&MCDCParams);
+ assert(Params && "InnerParameters unavailable");
+ return *Params;
+ }
+
+ const auto &getDecisionParams() const {
+ return getParams<const mcdc::DecisionParameters>(MCDCParams);
+ }
+
+ const auto &getBranchParams() const {
+ return getParams<const mcdc::BranchParameters>(MCDCParams);
+ }
unsigned FileID = 0;
unsigned ExpandedFileID = 0;
@@ -284,19 +289,20 @@ struct CounterMappingRegion {
LineStart(LineStart), ColumnStart(ColumnStart), LineEnd(LineEnd),
ColumnEnd(ColumnEnd), Kind(Kind) {}
- CounterMappingRegion(Counter Count, Counter FalseCount,
- MCDCParameters MCDCParams, unsigned FileID,
+ CounterMappingRegion(Counter Count, Counter FalseCount, unsigned FileID,
unsigned ExpandedFileID, unsigned LineStart,
unsigned ColumnStart, unsigned LineEnd,
- unsigned ColumnEnd, RegionKind Kind)
+ unsigned ColumnEnd, RegionKind Kind,
+ const mcdc::Parameters &MCDCParams = std::monostate())
: Count(Count), FalseCount(FalseCount), MCDCParams(MCDCParams),
FileID(FileID), ExpandedFileID(ExpandedFileID), LineStart(LineStart),
ColumnStart(ColumnStart), LineEnd(LineEnd), ColumnEnd(ColumnEnd),
Kind(Kind) {}
- CounterMappingRegion(MCDCParameters MCDCParams, unsigned FileID,
- unsigned LineStart, unsigned ColumnStart,
- unsigned LineEnd, unsigned ColumnEnd, RegionKind Kind)
+ CounterMappingRegion(const mcdc::DecisionParameters &MCDCParams,
+ unsigned FileID, unsigned LineStart,
+ unsigned ColumnStart, unsigned LineEnd,
+ unsigned ColumnEnd, RegionKind Kind)
: MCDCParams(MCDCParams), FileID(FileID), LineStart(LineStart),
ColumnStart(ColumnStart), LineEnd(LineEnd), ColumnEnd(ColumnEnd),
Kind(Kind) {}
@@ -333,26 +339,20 @@ struct CounterMappingRegion {
static CounterMappingRegion
makeBranchRegion(Counter Count, Counter FalseCount, unsigned FileID,
unsigned LineStart, unsigned ColumnStart, unsigned LineEnd,
- unsigned ColumnEnd) {
- return CounterMappingRegion(Count, FalseCount, MCDCParameters(), FileID, 0,
- LineStart, ColumnStart, LineEnd, ColumnEnd,
- BranchRegion);
+ unsigned ColumnEnd,
+ const mcdc::Parameters &MCDCParams = std::monostate()) {
+ return CounterMappingRegion(
+ Count, FalseCount, FileID, 0, LineStart, ColumnStart, LineEnd,
+ ColumnEnd,
+ (std::get_if<mcdc::BranchParameters>(&MCDCParams) ? MCDCBranchRegion
+ : BranchRegion),
+ MCDCParams);
}
static CounterMappingRegion
- makeBranchRegion(Counter Count, Counter FalseCount, MCDCParameters MCDCParams,
- unsigned FileID, unsigned LineStart, unsigned ColumnStart,
- unsigned LineEnd, unsigned ColumnEnd) {
- return CounterMappingRegion(Count, FalseCount, MCDCParams, FileID, 0,
- LineStart, ColumnStart, LineEnd, ColumnEnd,
- MCDCParams.ID == 0 ? BranchRegion
- : MCDCBranchRegion);
- }
-
- static CounterMappingRegion
- makeDecisionRegion(MCDCParameters MCDCParams, unsigned FileID,
- unsigned LineStart, unsigned ColumnStart, unsigned LineEnd,
- unsigned ColumnEnd) {
+ makeDecisionRegion(const mcdc::DecisionParameters &MCDCParams,
+ unsigned FileID, unsigned LineStart, unsigned ColumnStart,
+ unsigned LineEnd, unsigned ColumnEnd) {
return CounterMappingRegion(MCDCParams, FileID, LineStart, ColumnStart,
LineEnd, ColumnEnd, MCDCDecisionRegion);
}
@@ -407,17 +407,20 @@ private:
LineColPairMap CondLoc;
public:
- MCDCRecord(CounterMappingRegion Region, TestVectors TV,
- TVPairMap IndependencePairs, BoolVector Folded, CondIDMap PosToID,
- LineColPairMap CondLoc)
- : Region(Region), TV(TV), IndependencePairs(IndependencePairs),
- Folded(Folded), PosToID(PosToID), CondLoc(CondLoc){};
+ MCDCRecord(const CounterMappingRegion &Region, TestVectors &&TV,
+ TVPairMap &&IndependencePairs, BoolVector &&Folded,
+ CondIDMap &&PosToID, LineColPairMap &&CondLoc)
+ : Region(Region), TV(std::move(TV)),
+ IndependencePairs(std::move(IndependencePairs)),
+ Folded(std::move(Folded)), PosToID(std::move(PosToID)),
+ CondLoc(std::move(CondLoc)){};
CounterMappingRegion getDecisionRegion() const { return Region; }
unsigned getNumConditions() const {
- assert(Region.MCDCParams.NumConditions != 0 &&
+ unsigned NumConditions = Region.getDecisionParams().NumConditions;
+ assert(NumConditions != 0 &&
"In MC/DC, NumConditions should never be zero!");
- return Region.MCDCParams.NumConditions;
+ return NumConditions;
}
unsigned getNumTestVectors() const { return TV.size(); }
bool isCondFolded(unsigned Condition) const { return Folded[Condition]; }
@@ -614,7 +617,9 @@ struct FunctionRecord {
FunctionRecord(FunctionRecord &&FR) = default;
FunctionRecord &operator=(FunctionRecord &&) = default;
- void pushMCDCRecord(MCDCRecord Record) { MCDCRecords.push_back(Record); }
+ void pushMCDCRecord(MCDCRecord &&Record) {
+ MCDCRecords.push_back(std::move(Record));
+ }
void pushRegion(CounterMappingRegion Region, uint64_t Count,
uint64_t FalseCount) {
diff --git a/llvm/include/llvm/ProfileData/Coverage/MCDCTypes.h b/llvm/include/llvm/ProfileData/Coverage/MCDCTypes.h
new file mode 100644
index 0000000..51f528b
--- /dev/null
+++ b/llvm/include/llvm/ProfileData/Coverage/MCDCTypes.h
@@ -0,0 +1,54 @@
+//===- MCDCTypes.h - Types related to MC/DC Coverage ------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Types related to MC/DC Coverage.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_PROFILEDATA_COVERAGE_MCDCTYPES_H
+#define LLVM_PROFILEDATA_COVERAGE_MCDCTYPES_H
+
+#include <array>
+#include <variant>
+
+namespace llvm::coverage::mcdc {
+
+/// The ID for MCDCBranch.
+using ConditionID = int16_t;
+using ConditionIDs = std::array<ConditionID, 2>;
+
+struct DecisionParameters {
+ /// Byte Index of Bitmap Coverage Object for a Decision Region.
+ unsigned BitmapIdx;
+
+ /// Number of Conditions used for a Decision Region.
+ uint16_t NumConditions;
+
+ DecisionParameters() = delete;
+ DecisionParameters(unsigned BitmapIdx, unsigned NumConditions)
+ : BitmapIdx(BitmapIdx), NumConditions(NumConditions) {}
+};
+
+struct BranchParameters {
+ /// IDs used to represent a branch region and other branch regions
+ /// evaluated based on True and False branches.
+ ConditionID ID;
+ ConditionIDs Conds;
+
+ BranchParameters() = delete;
+ BranchParameters(ConditionID ID, const ConditionIDs &Conds)
+ : ID(ID), Conds(Conds) {}
+};
+
+/// The type of MC/DC-specific parameters.
+using Parameters =
+ std::variant<std::monostate, DecisionParameters, BranchParameters>;
+
+} // namespace llvm::coverage::mcdc
+
+#endif // LLVM_PROFILEDATA_COVERAGE_MCDCTYPES_H
diff --git a/llvm/include/llvm/ProfileData/InstrProf.h b/llvm/include/llvm/ProfileData/InstrProf.h
index aa08e94..a928ba6 100644
--- a/llvm/include/llvm/ProfileData/InstrProf.h
+++ b/llvm/include/llvm/ProfileData/InstrProf.h
@@ -449,6 +449,17 @@ private:
return "** External Symbol **";
}
+ // Returns the canonial name of the given PGOName. In a canonical name, all
+ // suffixes that begins with "." except ".__uniq." are stripped.
+ // FIXME: Unify this with `FunctionSamples::getCanonicalFnName`.
+ static StringRef getCanonicalName(StringRef PGOName);
+
+ // Add the function into the symbol table, by creating the following
+ // map entries:
+ // name-set = {PGOFuncName} + {getCanonicalName(PGOFuncName)} if the canonical
+ // name is different from pgo name
+ // - In MD5NameMap: <MD5Hash(name), name> for name in name-set
+ // - In MD5FuncMap: <MD5Hash(name), &F> for name in name-set
Error addFuncWithName(Function &F, StringRef PGOFuncName);
// If the symtab is created by a series of calls to \c addFuncName, \c
diff --git a/llvm/include/llvm/Support/ExponentialBackoff.h b/llvm/include/llvm/Support/ExponentialBackoff.h
new file mode 100644
index 0000000..8208a74
--- /dev/null
+++ b/llvm/include/llvm/Support/ExponentialBackoff.h
@@ -0,0 +1,65 @@
+//===- llvm/Support/ExponentialBackoff.h ------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines a helper class for implementing exponential backoff.
+//
+//===----------------------------------------------------------------------===//
+#ifndef LLVM_EXPONENTIALBACKOFF_H
+#define LLVM_EXPONENTIALBACKOFF_H
+
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/Support/Error.h"
+#include <chrono>
+#include <random>
+
+namespace llvm {
+
+/// A class to help implement exponential backoff.
+///
+/// Example usage:
+/// \code
+/// ExponentialBackoff Backoff(10s);
+/// do {
+/// if (tryToDoSomething())
+/// return ItWorked;
+/// } while (Backoff.waitForNextAttempt());
+/// return Timeout;
+/// \endcode
+class ExponentialBackoff {
+public:
+ using duration = std::chrono::steady_clock::duration;
+ using time_point = std::chrono::steady_clock::time_point;
+
+ /// \param Timeout the maximum wall time this should run for starting when
+ /// this object is constructed.
+ /// \param MinWait the minimum amount of time `waitForNextAttempt` will sleep
+ /// for.
+ /// \param MaxWait the maximum amount of time `waitForNextAttempt` will sleep
+ /// for.
+ ExponentialBackoff(duration Timeout,
+ duration MinWait = std::chrono::milliseconds(10),
+ duration MaxWait = std::chrono::milliseconds(500))
+ : MinWait(MinWait), MaxWait(MaxWait),
+ EndTime(std::chrono::steady_clock::now() + Timeout) {}
+
+ /// Blocks while waiting for the next attempt.
+ /// \returns true if you should try again, false if the timeout has been
+ /// reached.
+ bool waitForNextAttempt();
+
+private:
+ duration MinWait;
+ duration MaxWait;
+ time_point EndTime;
+ std::random_device RandDev;
+ int64_t CurrentMultiplier = 1;
+};
+
+} // end namespace llvm
+
+#endif // LLVM_EXPONENTIALBACKOFF_H
diff --git a/llvm/include/llvm/Support/TargetOpcodes.def b/llvm/include/llvm/Support/TargetOpcodes.def
index abb2370..42cb854 100644
--- a/llvm/include/llvm/Support/TargetOpcodes.def
+++ b/llvm/include/llvm/Support/TargetOpcodes.def
@@ -352,6 +352,9 @@ HANDLE_TARGET_OPCODE(G_INTRINSIC_ROUNDEVEN)
/// INTRINSIC readcyclecounter
HANDLE_TARGET_OPCODE(G_READCYCLECOUNTER)
+/// INTRINSIC readsteadycounter
+HANDLE_TARGET_OPCODE(G_READSTEADYCOUNTER)
+
/// Generic load (including anyext load)
HANDLE_TARGET_OPCODE(G_LOAD)
diff --git a/llvm/include/llvm/Target/GenericOpcodes.td b/llvm/include/llvm/Target/GenericOpcodes.td
index 2c73b67..19197f5 100644
--- a/llvm/include/llvm/Target/GenericOpcodes.td
+++ b/llvm/include/llvm/Target/GenericOpcodes.td
@@ -1101,6 +1101,12 @@ def G_READCYCLECOUNTER : GenericInstruction {
let hasSideEffects = true;
}
+def G_READSTEADYCOUNTER : GenericInstruction {
+ let OutOperandList = (outs type0:$dst);
+ let InOperandList = (ins);
+ let hasSideEffects = true;
+}
+
//------------------------------------------------------------------------------
// Memory ops
//------------------------------------------------------------------------------
diff --git a/llvm/include/llvm/Target/GlobalISel/SelectionDAGCompat.td b/llvm/include/llvm/Target/GlobalISel/SelectionDAGCompat.td
index 6bc1942..b1f3c50 100644
--- a/llvm/include/llvm/Target/GlobalISel/SelectionDAGCompat.td
+++ b/llvm/include/llvm/Target/GlobalISel/SelectionDAGCompat.td
@@ -168,6 +168,7 @@ def : GINodeEquiv<G_FMAXNUM_IEEE, fmaxnum_ieee>;
def : GINodeEquiv<G_FMAXIMUM, fmaximum>;
def : GINodeEquiv<G_FMINIMUM, fminimum>;
def : GINodeEquiv<G_READCYCLECOUNTER, readcyclecounter>;
+def : GINodeEquiv<G_READSTEADYCOUNTER, readsteadycounter>;
def : GINodeEquiv<G_ROTR, rotr>;
def : GINodeEquiv<G_ROTL, rotl>;
def : GINodeEquiv<G_LROUND, lround>;
diff --git a/llvm/include/llvm/Target/TargetSelectionDAG.td b/llvm/include/llvm/Target/TargetSelectionDAG.td
index 2236035..5f8bf0d 100644
--- a/llvm/include/llvm/Target/TargetSelectionDAG.td
+++ b/llvm/include/llvm/Target/TargetSelectionDAG.td
@@ -657,6 +657,9 @@ def prefetch : SDNode<"ISD::PREFETCH" , SDTPrefetch,
def readcyclecounter : SDNode<"ISD::READCYCLECOUNTER", SDTIntLeaf,
[SDNPHasChain, SDNPSideEffect]>;
+def readsteadycounter : SDNode<"ISD::READSTEADYCOUNTER", SDTIntLeaf,
+ [SDNPHasChain, SDNPSideEffect]>;
+
def membarrier : SDNode<"ISD::MEMBARRIER", SDTNone,
[SDNPHasChain, SDNPSideEffect]>;
diff --git a/llvm/include/llvm/TargetParser/ARMTargetParser.h b/llvm/include/llvm/TargetParser/ARMTargetParser.h
index c42d66f..ec38171 100644
--- a/llvm/include/llvm/TargetParser/ARMTargetParser.h
+++ b/llvm/include/llvm/TargetParser/ARMTargetParser.h
@@ -258,6 +258,7 @@ uint64_t parseArchExt(StringRef ArchExt);
ArchKind parseCPUArch(StringRef CPU);
ProfileKind parseArchProfile(StringRef Arch);
unsigned parseArchVersion(StringRef Arch);
+unsigned parseArchMinorVersion(StringRef Arch);
void fillValidCPUArchList(SmallVectorImpl<StringRef> &Values);
StringRef computeDefaultTargetABI(const Triple &TT, StringRef CPU);
diff --git a/llvm/include/llvm/Transforms/InstCombine/InstCombiner.h b/llvm/include/llvm/Transforms/InstCombine/InstCombiner.h
index 950cc8c..9309043 100644
--- a/llvm/include/llvm/Transforms/InstCombine/InstCombiner.h
+++ b/llvm/include/llvm/Transforms/InstCombine/InstCombiner.h
@@ -170,45 +170,6 @@ public:
}
}
- /// Given an exploded icmp instruction, return true if the comparison only
- /// checks the sign bit. If it only checks the sign bit, set TrueIfSigned if
- /// the result of the comparison is true when the input value is signed.
- static bool isSignBitCheck(ICmpInst::Predicate Pred, const APInt &RHS,
- bool &TrueIfSigned) {
- switch (Pred) {
- case ICmpInst::ICMP_SLT: // True if LHS s< 0
- TrueIfSigned = true;
- return RHS.isZero();
- case ICmpInst::ICMP_SLE: // True if LHS s<= -1
- TrueIfSigned = true;
- return RHS.isAllOnes();
- case ICmpInst::ICMP_SGT: // True if LHS s> -1
- TrueIfSigned = false;
- return RHS.isAllOnes();
- case ICmpInst::ICMP_SGE: // True if LHS s>= 0
- TrueIfSigned = false;
- return RHS.isZero();
- case ICmpInst::ICMP_UGT:
- // True if LHS u> RHS and RHS == sign-bit-mask - 1
- TrueIfSigned = true;
- return RHS.isMaxSignedValue();
- case ICmpInst::ICMP_UGE:
- // True if LHS u>= RHS and RHS == sign-bit-mask (2^7, 2^15, 2^31, etc)
- TrueIfSigned = true;
- return RHS.isMinSignedValue();
- case ICmpInst::ICMP_ULT:
- // True if LHS u< RHS and RHS == sign-bit-mask (2^7, 2^15, 2^31, etc)
- TrueIfSigned = false;
- return RHS.isMinSignedValue();
- case ICmpInst::ICMP_ULE:
- // True if LHS u<= RHS and RHS == sign-bit-mask - 1
- TrueIfSigned = false;
- return RHS.isMaxSignedValue();
- default:
- return false;
- }
- }
-
/// Add one to a Constant
static Constant *AddOne(Constant *C) {
return ConstantExpr::getAdd(C, ConstantInt::get(C->getType(), 1));
diff --git a/llvm/lib/Analysis/DomConditionCache.cpp b/llvm/lib/Analysis/DomConditionCache.cpp
index 3dad0c2..274f3ff 100644
--- a/llvm/lib/Analysis/DomConditionCache.cpp
+++ b/llvm/lib/Analysis/DomConditionCache.cpp
@@ -43,7 +43,7 @@ static void findAffectedValues(Value *Cond,
if (!Visited.insert(V).second)
continue;
- ICmpInst::Predicate Pred;
+ CmpInst::Predicate Pred;
Value *A, *B;
// Only recurse into and/or if it matches the top-level and/or type.
if (TopLevelIsAnd ? match(V, m_LogicalAnd(m_Value(A), m_Value(B)))
@@ -66,7 +66,17 @@ static void findAffectedValues(Value *Cond,
// A > C3 && A < C4.
if (match(A, m_Add(m_Value(X), m_ConstantInt())))
AddAffected(X);
+ // Handle icmp slt/sgt (bitcast X to int), 0/-1, which is supported by
+ // computeKnownFPClass().
+ if ((Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_SGT) &&
+ match(A, m_ElementWiseBitCast(m_Value(X))))
+ Affected.push_back(X);
}
+ } else if (match(Cond, m_CombineOr(m_FCmp(Pred, m_Value(A), m_Constant()),
+ m_Intrinsic<Intrinsic::is_fpclass>(
+ m_Value(A), m_Constant())))) {
+ // Handle patterns that computeKnownFPClass() support.
+ AddAffected(A);
}
}
}
diff --git a/llvm/lib/Analysis/IVDescriptors.cpp b/llvm/lib/Analysis/IVDescriptors.cpp
index 1aa324c..055f121 100644
--- a/llvm/lib/Analysis/IVDescriptors.cpp
+++ b/llvm/lib/Analysis/IVDescriptors.cpp
@@ -76,7 +76,7 @@ static Instruction *lookThroughAnd(PHINode *Phi, Type *&RT,
// Matches either I & 2^x-1 or 2^x-1 & I. If we find a match, we update RT
// with a new integer type of the corresponding bit width.
- if (match(J, m_c_And(m_Instruction(I), m_APInt(M)))) {
+ if (match(J, m_And(m_Instruction(I), m_APInt(M)))) {
int32_t Bits = (*M + 1).exactLogBase2();
if (Bits > 0) {
RT = IntegerType::get(Phi->getContext(), Bits);
diff --git a/llvm/lib/Analysis/InstructionSimplify.cpp b/llvm/lib/Analysis/InstructionSimplify.cpp
index 51e258d..08050be 100644
--- a/llvm/lib/Analysis/InstructionSimplify.cpp
+++ b/llvm/lib/Analysis/InstructionSimplify.cpp
@@ -39,6 +39,7 @@
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
+#include "llvm/IR/Statepoint.h"
#include "llvm/Support/KnownBits.h"
#include <algorithm>
#include <optional>
@@ -3031,21 +3032,20 @@ static Value *simplifyICmpWithConstant(CmpInst::Predicate Pred, Value *LHS,
Type *ITy = getCompareTy(RHS); // The return type.
Value *X;
+ const APInt *C;
+ if (!match(RHS, m_APIntAllowUndef(C)))
+ return nullptr;
+
// Sign-bit checks can be optimized to true/false after unsigned
// floating-point casts:
// icmp slt (bitcast (uitofp X)), 0 --> false
// icmp sgt (bitcast (uitofp X)), -1 --> true
if (match(LHS, m_ElementWiseBitCast(m_UIToFP(m_Value(X))))) {
- if (Pred == ICmpInst::ICMP_SLT && match(RHS, m_Zero()))
- return ConstantInt::getFalse(ITy);
- if (Pred == ICmpInst::ICMP_SGT && match(RHS, m_AllOnes()))
- return ConstantInt::getTrue(ITy);
+ bool TrueIfSigned;
+ if (isSignBitCheck(Pred, *C, TrueIfSigned))
+ return ConstantInt::getBool(ITy, !TrueIfSigned);
}
- const APInt *C;
- if (!match(RHS, m_APIntAllowUndef(C)))
- return nullptr;
-
// Rule out tautological comparisons (eg., ult 0 or uge 0).
ConstantRange RHS_CR = ConstantRange::makeExactICmpRegion(Pred, *C);
if (RHS_CR.isEmptySet())
@@ -3246,8 +3246,8 @@ static bool trySimplifyICmpWithAdds(CmpInst::Predicate Pred, Value *LHS,
Value *X;
const APInt *C1, *C2;
- if (!match(LHS, m_c_Add(m_Value(X), m_APInt(C1))) ||
- !match(RHS, m_c_Add(m_Specific(X), m_APInt(C2))))
+ if (!match(LHS, m_Add(m_Value(X), m_APInt(C1))) ||
+ !match(RHS, m_Add(m_Specific(X), m_APInt(C2))))
return false;
return (C1->slt(*C2) && C1->isNonNegative()) ||
@@ -6847,6 +6847,27 @@ static Value *simplifyIntrinsic(CallBase *Call, Value *Callee,
}
case Intrinsic::experimental_constrained_ldexp:
return simplifyLdexp(Args[0], Args[1], Q, true);
+ case Intrinsic::experimental_gc_relocate: {
+ GCRelocateInst &GCR = *cast<GCRelocateInst>(Call);
+ Value *DerivedPtr = GCR.getDerivedPtr();
+ Value *BasePtr = GCR.getBasePtr();
+
+ // Undef is undef, even after relocation.
+ if (isa<UndefValue>(DerivedPtr) || isa<UndefValue>(BasePtr)) {
+ return UndefValue::get(GCR.getType());
+ }
+
+ if (auto *PT = dyn_cast<PointerType>(GCR.getType())) {
+ // For now, the assumption is that the relocation of null will be null
+ // for most any collector. If this ever changes, a corresponding hook
+ // should be added to GCStrategy and this code should check it first.
+ if (isa<ConstantPointerNull>(DerivedPtr)) {
+ // Use null-pointer of gc_relocate's type to replace it.
+ return ConstantPointerNull::get(PT);
+ }
+ }
+ return nullptr;
+ }
default:
return nullptr;
}
diff --git a/llvm/lib/Analysis/TargetLibraryInfo.cpp b/llvm/lib/Analysis/TargetLibraryInfo.cpp
index 25951d2..710762a 100644
--- a/llvm/lib/Analysis/TargetLibraryInfo.cpp
+++ b/llvm/lib/Analysis/TargetLibraryInfo.cpp
@@ -37,7 +37,9 @@ static cl::opt<TargetLibraryInfoImpl::VectorLibrary> ClVectorLibrary(
clEnumValN(TargetLibraryInfoImpl::SLEEFGNUABI, "sleefgnuabi",
"SIMD Library for Evaluating Elementary Functions"),
clEnumValN(TargetLibraryInfoImpl::ArmPL, "ArmPL",
- "Arm Performance Libraries")));
+ "Arm Performance Libraries"),
+ clEnumValN(TargetLibraryInfoImpl::AMDLIBM, "AMDLIBM",
+ "AMD vector math library")));
StringLiteral const TargetLibraryInfoImpl::StandardNames[LibFunc::NumLibFuncs] =
{
@@ -1273,6 +1275,16 @@ void TargetLibraryInfoImpl::addVectorizableFunctionsFromVecLib(
}
break;
}
+ case AMDLIBM: {
+ const VecDesc VecFuncs[] = {
+#define TLI_DEFINE_AMDLIBM_VECFUNCS
+#define TLI_DEFINE_VECFUNC(SCAL, VEC, VF, MASK, VABI_PREFIX) \
+ {SCAL, VEC, VF, MASK, VABI_PREFIX},
+#include "llvm/Analysis/VecFuncs.def"
+ };
+ addVectorizableFunctions(VecFuncs);
+ break;
+ }
case NoLibrary:
break;
}
diff --git a/llvm/lib/Analysis/ValueTracking.cpp b/llvm/lib/Analysis/ValueTracking.cpp
index 0e40a02..cc1d5b7 100644
--- a/llvm/lib/Analysis/ValueTracking.cpp
+++ b/llvm/lib/Analysis/ValueTracking.cpp
@@ -3810,6 +3810,45 @@ void KnownFPClass::propagateCanonicalizingSrc(const KnownFPClass &Src,
propagateNaN(Src, /*PreserveSign=*/true);
}
+/// Given an exploded icmp instruction, return true if the comparison only
+/// checks the sign bit. If it only checks the sign bit, set TrueIfSigned if
+/// the result of the comparison is true when the input value is signed.
+bool llvm::isSignBitCheck(ICmpInst::Predicate Pred, const APInt &RHS,
+ bool &TrueIfSigned) {
+ switch (Pred) {
+ case ICmpInst::ICMP_SLT: // True if LHS s< 0
+ TrueIfSigned = true;
+ return RHS.isZero();
+ case ICmpInst::ICMP_SLE: // True if LHS s<= -1
+ TrueIfSigned = true;
+ return RHS.isAllOnes();
+ case ICmpInst::ICMP_SGT: // True if LHS s> -1
+ TrueIfSigned = false;
+ return RHS.isAllOnes();
+ case ICmpInst::ICMP_SGE: // True if LHS s>= 0
+ TrueIfSigned = false;
+ return RHS.isZero();
+ case ICmpInst::ICMP_UGT:
+ // True if LHS u> RHS and RHS == sign-bit-mask - 1
+ TrueIfSigned = true;
+ return RHS.isMaxSignedValue();
+ case ICmpInst::ICMP_UGE:
+ // True if LHS u>= RHS and RHS == sign-bit-mask (2^7, 2^15, 2^31, etc)
+ TrueIfSigned = true;
+ return RHS.isMinSignedValue();
+ case ICmpInst::ICMP_ULT:
+ // True if LHS u< RHS and RHS == sign-bit-mask (2^7, 2^15, 2^31, etc)
+ TrueIfSigned = false;
+ return RHS.isMinSignedValue();
+ case ICmpInst::ICMP_ULE:
+ // True if LHS u<= RHS and RHS == sign-bit-mask - 1
+ TrueIfSigned = false;
+ return RHS.isMaxSignedValue();
+ default:
+ return false;
+ }
+}
+
/// Returns a pair of values, which if passed to llvm.is.fpclass, returns the
/// same result as an fcmp with the given operands.
std::pair<Value *, FPClassTest> llvm::fcmpToClassTest(FCmpInst::Predicate Pred,
@@ -4225,9 +4264,62 @@ llvm::fcmpImpliesClass(CmpInst::Predicate Pred, const Function &F, Value *LHS,
return fcmpImpliesClass(Pred, F, LHS, *ConstRHS, LookThroughSrc);
}
-static FPClassTest computeKnownFPClassFromAssumes(const Value *V,
- const SimplifyQuery &Q) {
- FPClassTest KnownFromAssume = fcAllFlags;
+static void computeKnownFPClassFromCond(const Value *V, Value *Cond,
+ bool CondIsTrue,
+ const Instruction *CxtI,
+ KnownFPClass &KnownFromContext) {
+ CmpInst::Predicate Pred;
+ Value *LHS;
+ uint64_t ClassVal = 0;
+ const APFloat *CRHS;
+ const APInt *RHS;
+ if (match(Cond, m_FCmp(Pred, m_Value(LHS), m_APFloat(CRHS)))) {
+ auto [CmpVal, MaskIfTrue, MaskIfFalse] = fcmpImpliesClass(
+ Pred, *CxtI->getParent()->getParent(), LHS, *CRHS, LHS != V);
+ if (CmpVal == V)
+ KnownFromContext.knownNot(~(CondIsTrue ? MaskIfTrue : MaskIfFalse));
+ } else if (match(Cond, m_Intrinsic<Intrinsic::is_fpclass>(
+ m_Value(LHS), m_ConstantInt(ClassVal)))) {
+ FPClassTest Mask = static_cast<FPClassTest>(ClassVal);
+ KnownFromContext.knownNot(CondIsTrue ? ~Mask : Mask);
+ } else if (match(Cond, m_ICmp(Pred, m_ElementWiseBitCast(m_Value(LHS)),
+ m_APInt(RHS)))) {
+ bool TrueIfSigned;
+ if (!isSignBitCheck(Pred, *RHS, TrueIfSigned))
+ return;
+ if (TrueIfSigned == CondIsTrue)
+ KnownFromContext.signBitMustBeOne();
+ else
+ KnownFromContext.signBitMustBeZero();
+ }
+}
+
+static KnownFPClass computeKnownFPClassFromContext(const Value *V,
+ const SimplifyQuery &Q) {
+ KnownFPClass KnownFromContext;
+
+ if (!Q.CxtI)
+ return KnownFromContext;
+
+ if (Q.DC && Q.DT) {
+ // Handle dominating conditions.
+ for (BranchInst *BI : Q.DC->conditionsFor(V)) {
+ Value *Cond = BI->getCondition();
+
+ BasicBlockEdge Edge0(BI->getParent(), BI->getSuccessor(0));
+ if (Q.DT->dominates(Edge0, Q.CxtI->getParent()))
+ computeKnownFPClassFromCond(V, Cond, /*CondIsTrue=*/true, Q.CxtI,
+ KnownFromContext);
+
+ BasicBlockEdge Edge1(BI->getParent(), BI->getSuccessor(1));
+ if (Q.DT->dominates(Edge1, Q.CxtI->getParent()))
+ computeKnownFPClassFromCond(V, Cond, /*CondIsTrue=*/false, Q.CxtI,
+ KnownFromContext);
+ }
+ }
+
+ if (!Q.AC)
+ return KnownFromContext;
// Try to restrict the floating-point classes based on information from
// assumptions.
@@ -4235,9 +4327,8 @@ static FPClassTest computeKnownFPClassFromAssumes(const Value *V,
if (!AssumeVH)
continue;
CallInst *I = cast<CallInst>(AssumeVH);
- const Function *F = I->getFunction();
- assert(F == Q.CxtI->getParent()->getParent() &&
+ assert(I->getFunction() == Q.CxtI->getParent()->getParent() &&
"Got assumption for the wrong function!");
assert(I->getCalledFunction()->getIntrinsicID() == Intrinsic::assume &&
"must be an assume intrinsic");
@@ -4245,25 +4336,11 @@ static FPClassTest computeKnownFPClassFromAssumes(const Value *V,
if (!isValidAssumeForContext(I, Q.CxtI, Q.DT))
continue;
- CmpInst::Predicate Pred;
- Value *LHS, *RHS;
- uint64_t ClassVal = 0;
- if (match(I->getArgOperand(0), m_FCmp(Pred, m_Value(LHS), m_Value(RHS)))) {
- const APFloat *CRHS;
- if (match(RHS, m_APFloat(CRHS))) {
- auto [CmpVal, MaskIfTrue, MaskIfFalse] =
- fcmpImpliesClass(Pred, *F, LHS, *CRHS, LHS != V);
- if (CmpVal == V)
- KnownFromAssume &= MaskIfTrue;
- }
- } else if (match(I->getArgOperand(0),
- m_Intrinsic<Intrinsic::is_fpclass>(
- m_Value(LHS), m_ConstantInt(ClassVal)))) {
- KnownFromAssume &= static_cast<FPClassTest>(ClassVal);
- }
+ computeKnownFPClassFromCond(V, I->getArgOperand(0), /*CondIsTrue=*/true,
+ Q.CxtI, KnownFromContext);
}
- return KnownFromAssume;
+ return KnownFromContext;
}
void computeKnownFPClass(const Value *V, const APInt &DemandedElts,
@@ -4371,10 +4448,8 @@ void computeKnownFPClass(const Value *V, const APInt &DemandedElts,
KnownNotFromFlags |= fcInf;
}
- if (Q.AC) {
- FPClassTest AssumedClasses = computeKnownFPClassFromAssumes(V, Q);
- KnownNotFromFlags |= ~AssumedClasses;
- }
+ KnownFPClass AssumedClasses = computeKnownFPClassFromContext(V, Q);
+ KnownNotFromFlags |= ~AssumedClasses.KnownFPClasses;
// We no longer need to find out about these bits from inputs if we can
// assume this from flags/attributes.
@@ -4382,6 +4457,12 @@ void computeKnownFPClass(const Value *V, const APInt &DemandedElts,
auto ClearClassesFromFlags = make_scope_exit([=, &Known] {
Known.knownNot(KnownNotFromFlags);
+ if (!Known.SignBit && AssumedClasses.SignBit) {
+ if (*AssumedClasses.SignBit)
+ Known.signBitMustBeOne();
+ else
+ Known.signBitMustBeZero();
+ }
});
if (!Op)
@@ -5283,7 +5364,8 @@ void computeKnownFPClass(const Value *V, const APInt &DemandedElts,
bool First = true;
- for (Value *IncValue : P->incoming_values()) {
+ for (const Use &U : P->operands()) {
+ Value *IncValue = U.get();
// Skip direct self references.
if (IncValue == P)
continue;
@@ -5292,8 +5374,10 @@ void computeKnownFPClass(const Value *V, const APInt &DemandedElts,
// Recurse, but cap the recursion to two levels, because we don't want
// to waste time spinning around in loops. We need at least depth 2 to
// detect known sign bits.
- computeKnownFPClass(IncValue, DemandedElts, InterestedClasses, KnownSrc,
- PhiRecursionLimit, Q);
+ computeKnownFPClass(
+ IncValue, DemandedElts, InterestedClasses, KnownSrc,
+ PhiRecursionLimit,
+ Q.getWithInstruction(P->getIncomingBlock(U)->getTerminator()));
if (First) {
Known = KnownSrc;
diff --git a/llvm/lib/CodeGen/AsmPrinter/AccelTable.cpp b/llvm/lib/CodeGen/AsmPrinter/AccelTable.cpp
index 1024aab..230d7ad 100644
--- a/llvm/lib/CodeGen/AsmPrinter/AccelTable.cpp
+++ b/llvm/lib/CodeGen/AsmPrinter/AccelTable.cpp
@@ -208,8 +208,13 @@ class Dwarf5AccelTableWriter : public AccelTableWriter {
};
Header Header;
- DenseMap<uint32_t, SmallVector<DWARF5AccelTableData::AttributeEncoding, 3>>
- Abbreviations;
+ /// FoldingSet that uniques the abbreviations.
+ FoldingSet<DebugNamesAbbrev> AbbreviationsSet;
+ /// Vector containing DebugNames abbreviations for iteration in order.
+ SmallVector<DebugNamesAbbrev *, 5> AbbreviationsVector;
+ /// The bump allocator to use when creating DIEAbbrev objects in the uniqued
+ /// storage container.
+ BumpPtrAllocator Alloc;
ArrayRef<std::variant<MCSymbol *, uint64_t>> CompUnits;
ArrayRef<std::variant<MCSymbol *, uint64_t>> TypeUnits;
llvm::function_ref<std::optional<DWARF5AccelTable::UnitIndexAndEncoding>(
@@ -234,7 +239,7 @@ class Dwarf5AccelTableWriter : public AccelTableWriter {
void emitEntry(
const DWARF5AccelTableData &Entry,
const DenseMap<OffsetAndUnitID, MCSymbol *> &DIEOffsetToAccelEntryLabel,
- DenseSet<MCSymbol *> &EmittedAccelEntrySymbols) const;
+ DenseSet<MCSymbol *> &EmittedAccelEntrySymbols);
void emitData();
public:
@@ -370,7 +375,7 @@ void AppleAccelTableWriter::emit() const {
DWARF5AccelTableData::DWARF5AccelTableData(const DIE &Die,
const uint32_t UnitID,
const bool IsTU)
- : OffsetVal(&Die), DieTag(Die.getTag()), UnitID(UnitID), IsTU(IsTU) {}
+ : OffsetVal(&Die), DieTag(Die.getTag()), IsTU(IsTU), UnitID(UnitID) {}
void Dwarf5AccelTableWriter::Header::emit(Dwarf5AccelTableWriter &Ctx) {
assert(CompUnitCount > 0 && "Index must have at least one CU.");
@@ -409,51 +414,6 @@ DWARF5AccelTableData::getDefiningParentDieOffset(const DIE &Die) {
return {};
}
-enum IdxParentEncoding : uint8_t {
- NoIndexedParent = 0, /// Parent information present but parent isn't indexed.
- Ref4 = 1, /// Parent information present and parent is indexed.
- NoParent = 2, /// Parent information missing.
-};
-
-static uint32_t constexpr NumBitsIdxParent = 2;
-
-uint8_t encodeIdxParent(const std::optional<dwarf::Form> MaybeParentForm) {
- if (!MaybeParentForm)
- return NoParent;
- switch (*MaybeParentForm) {
- case dwarf::Form::DW_FORM_flag_present:
- return NoIndexedParent;
- case dwarf::Form::DW_FORM_ref4:
- return Ref4;
- default:
- // This is not crashing on bad input: we should only reach this if the
- // internal compiler logic is faulty; see getFormForIdxParent.
- llvm_unreachable("Bad form for IDX_parent");
- }
-}
-
-static uint32_t constexpr ParentBitOffset = dwarf::DW_IDX_type_hash;
-static uint32_t constexpr TagBitOffset = ParentBitOffset + NumBitsIdxParent;
-static uint32_t getTagFromAbbreviationTag(const uint32_t AbbrvTag) {
- return AbbrvTag >> TagBitOffset;
-}
-
-/// Constructs a unique AbbrevTag that captures what a DIE accesses.
-/// Using this tag we can emit a unique abbreviation for each DIE.
-static uint32_t constructAbbreviationTag(
- const unsigned Tag,
- const std::optional<DWARF5AccelTable::UnitIndexAndEncoding> &EntryRet,
- std::optional<dwarf::Form> MaybeParentForm) {
- uint32_t AbbrvTag = 0;
- if (EntryRet)
- AbbrvTag |= 1 << EntryRet->Encoding.Index;
- AbbrvTag |= 1 << dwarf::DW_IDX_die_offset;
- AbbrvTag |= 1 << dwarf::DW_IDX_parent;
- AbbrvTag |= encodeIdxParent(MaybeParentForm) << ParentBitOffset;
- AbbrvTag |= Tag << TagBitOffset;
- return AbbrvTag;
-}
-
static std::optional<dwarf::Form>
getFormForIdxParent(const DenseSet<OffsetAndUnitID> &IndexedOffsets,
std::optional<OffsetAndUnitID> ParentOffset) {
@@ -467,26 +427,42 @@ getFormForIdxParent(const DenseSet<OffsetAndUnitID> &IndexedOffsets,
return dwarf::Form::DW_FORM_flag_present;
}
+void DebugNamesAbbrev::Profile(FoldingSetNodeID &ID) const {
+ ID.AddInteger(DieTag);
+ for (const DebugNamesAbbrev::AttributeEncoding &Enc : AttrVect) {
+ ID.AddInteger(Enc.Index);
+ ID.AddInteger(Enc.Form);
+ }
+}
+
void Dwarf5AccelTableWriter::populateAbbrevsMap() {
for (auto &Bucket : Contents.getBuckets()) {
for (auto *Hash : Bucket) {
for (auto *Value : Hash->getValues<DWARF5AccelTableData *>()) {
std::optional<DWARF5AccelTable::UnitIndexAndEncoding> EntryRet =
getIndexForEntry(*Value);
- unsigned Tag = Value->getDieTag();
std::optional<dwarf::Form> MaybeParentForm = getFormForIdxParent(
IndexedOffsets, Value->getParentDieOffsetAndUnitID());
- uint32_t AbbrvTag =
- constructAbbreviationTag(Tag, EntryRet, MaybeParentForm);
- if (Abbreviations.count(AbbrvTag) == 0) {
- SmallVector<DWARF5AccelTableData::AttributeEncoding, 3> UA;
- if (EntryRet)
- UA.push_back(EntryRet->Encoding);
- UA.push_back({dwarf::DW_IDX_die_offset, dwarf::DW_FORM_ref4});
- if (MaybeParentForm)
- UA.push_back({dwarf::DW_IDX_parent, *MaybeParentForm});
- Abbreviations.try_emplace(AbbrvTag, UA);
+ DebugNamesAbbrev Abbrev(Value->getDieTag());
+ if (EntryRet)
+ Abbrev.addAttribute(EntryRet->Encoding);
+ Abbrev.addAttribute({dwarf::DW_IDX_die_offset, dwarf::DW_FORM_ref4});
+ if (MaybeParentForm)
+ Abbrev.addAttribute({dwarf::DW_IDX_parent, *MaybeParentForm});
+ FoldingSetNodeID ID;
+ Abbrev.Profile(ID);
+ void *InsertPos;
+ if (DebugNamesAbbrev *Existing =
+ AbbreviationsSet.FindNodeOrInsertPos(ID, InsertPos)) {
+ Value->setAbbrevNumber(Existing->getNumber());
+ continue;
}
+ DebugNamesAbbrev *NewAbbrev =
+ new (Alloc) DebugNamesAbbrev(std::move(Abbrev));
+ AbbreviationsVector.push_back(NewAbbrev);
+ NewAbbrev->setNumber(AbbreviationsVector.size());
+ AbbreviationsSet.InsertNode(NewAbbrev, InsertPos);
+ Value->setAbbrevNumber(NewAbbrev->getNumber());
}
}
}
@@ -536,14 +512,13 @@ void Dwarf5AccelTableWriter::emitStringOffsets() const {
void Dwarf5AccelTableWriter::emitAbbrevs() const {
Asm->OutStreamer->emitLabel(AbbrevStart);
- for (const auto &Abbrev : Abbreviations) {
+ for (const DebugNamesAbbrev *Abbrev : AbbreviationsVector) {
Asm->OutStreamer->AddComment("Abbrev code");
- uint32_t Tag = getTagFromAbbreviationTag(Abbrev.first);
- assert(Tag != 0);
- Asm->emitULEB128(Abbrev.first);
- Asm->OutStreamer->AddComment(dwarf::TagString(Tag));
- Asm->emitULEB128(Tag);
- for (const auto &AttrEnc : Abbrev.second) {
+ Asm->emitULEB128(Abbrev->getNumber());
+ Asm->OutStreamer->AddComment(dwarf::TagString(Abbrev->getDieTag()));
+ Asm->emitULEB128(Abbrev->getDieTag());
+ for (const DebugNamesAbbrev::AttributeEncoding &AttrEnc :
+ Abbrev->getAttributes()) {
Asm->emitULEB128(AttrEnc.Index, dwarf::IndexString(AttrEnc.Index).data());
Asm->emitULEB128(AttrEnc.Form,
dwarf::FormEncodingString(AttrEnc.Form).data());
@@ -558,21 +533,15 @@ void Dwarf5AccelTableWriter::emitAbbrevs() const {
void Dwarf5AccelTableWriter::emitEntry(
const DWARF5AccelTableData &Entry,
const DenseMap<OffsetAndUnitID, MCSymbol *> &DIEOffsetToAccelEntryLabel,
- DenseSet<MCSymbol *> &EmittedAccelEntrySymbols) const {
+ DenseSet<MCSymbol *> &EmittedAccelEntrySymbols) {
+ unsigned AbbrevIndex = Entry.getAbbrevNumber() - 1;
+ assert(AbbrevIndex < AbbreviationsVector.size() &&
+ "Entry abbrev index is outside of abbreviations vector range.");
+ DebugNamesAbbrev *Abbrev = AbbreviationsVector[AbbrevIndex];
std::optional<DWARF5AccelTable::UnitIndexAndEncoding> EntryRet =
getIndexForEntry(Entry);
std::optional<OffsetAndUnitID> MaybeParentOffset =
Entry.getParentDieOffsetAndUnitID();
- std::optional<dwarf::Form> MaybeParentForm =
- getFormForIdxParent(IndexedOffsets, MaybeParentOffset);
- uint32_t AbbrvTag =
- constructAbbreviationTag(Entry.getDieTag(), EntryRet, MaybeParentForm);
- auto AbbrevIt = Abbreviations.find(AbbrvTag);
- assert(AbbrevIt != Abbreviations.end() &&
- "Why wasn't this abbrev generated?");
- assert(getTagFromAbbreviationTag(AbbrevIt->first) == Entry.getDieTag() &&
- "Invalid Tag");
-
auto EntrySymbolIt =
DIEOffsetToAccelEntryLabel.find(Entry.getDieOffsetAndUnitID());
assert(EntrySymbolIt != DIEOffsetToAccelEntryLabel.end());
@@ -584,9 +553,10 @@ void Dwarf5AccelTableWriter::emitEntry(
if (EmittedAccelEntrySymbols.insert(EntrySymbol).second)
Asm->OutStreamer->emitLabel(EntrySymbol);
- Asm->emitULEB128(AbbrevIt->first, "Abbreviation code");
+ Asm->emitULEB128(Entry.getAbbrevNumber(), "Abbreviation code");
- for (const auto &AttrEnc : AbbrevIt->second) {
+ for (const DebugNamesAbbrev::AttributeEncoding &AttrEnc :
+ Abbrev->getAttributes()) {
Asm->OutStreamer->AddComment(dwarf::IndexString(AttrEnc.Index));
switch (AttrEnc.Index) {
case dwarf::DW_IDX_compile_unit:
diff --git a/llvm/lib/CodeGen/BasicBlockSections.cpp b/llvm/lib/CodeGen/BasicBlockSections.cpp
index eb3f9e7..09e45ea 100644
--- a/llvm/lib/CodeGen/BasicBlockSections.cpp
+++ b/llvm/lib/CodeGen/BasicBlockSections.cpp
@@ -208,9 +208,14 @@ assignSections(MachineFunction &MF,
if (I != FuncClusterInfo.end()) {
MBB.setSectionID(I->second.ClusterID);
} else {
- // BB goes into the special cold section if it is not specified in the
- // cluster info map.
- MBB.setSectionID(MBBSectionID::ColdSectionID);
+ const TargetInstrInfo &TII =
+ *MBB.getParent()->getSubtarget().getInstrInfo();
+
+ if (TII.isMBBSafeToSplitToCold(MBB)) {
+ // BB goes into the special cold section if it is not specified in the
+ // cluster info map.
+ MBB.setSectionID(MBBSectionID::ColdSectionID);
+ }
}
}
diff --git a/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp b/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
index c1d8e89..311dd9d 100644
--- a/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
@@ -1885,6 +1885,8 @@ unsigned IRTranslator::getSimpleIntrinsicOpcode(Intrinsic::ID ID) {
return TargetOpcode::G_INTRINSIC_TRUNC;
case Intrinsic::readcyclecounter:
return TargetOpcode::G_READCYCLECOUNTER;
+ case Intrinsic::readsteadycounter:
+ return TargetOpcode::G_READSTEADYCOUNTER;
case Intrinsic::ptrmask:
return TargetOpcode::G_PTRMASK;
case Intrinsic::lrint:
diff --git a/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp b/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp
index 464ff08..e39fdae 100644
--- a/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp
@@ -1716,8 +1716,7 @@ Register LegalizerHelper::coerceToScalar(Register Val) {
Register NewVal = Val;
assert(Ty.isVector());
- LLT EltTy = Ty.getElementType();
- if (EltTy.isPointer())
+ if (Ty.isPointerVector())
NewVal = MIRBuilder.buildPtrToInt(NewTy, NewVal).getReg(0);
return MIRBuilder.buildBitcast(NewTy, NewVal).getReg(0);
}
@@ -7964,7 +7963,7 @@ LegalizerHelper::LegalizeResult LegalizerHelper::lowerSelect(MachineInstr &MI) {
auto [DstReg, DstTy, MaskReg, MaskTy, Op1Reg, Op1Ty, Op2Reg, Op2Ty] =
MI.getFirst4RegLLTs();
- bool IsEltPtr = DstTy.getScalarType().isPointer();
+ bool IsEltPtr = DstTy.isPointerOrPointerVector();
if (IsEltPtr) {
LLT ScalarPtrTy = LLT::scalar(DstTy.getScalarSizeInBits());
LLT NewTy = DstTy.changeElementType(ScalarPtrTy);
diff --git a/llvm/lib/CodeGen/GlobalISel/MachineIRBuilder.cpp b/llvm/lib/CodeGen/GlobalISel/MachineIRBuilder.cpp
index a5827c2..d58b628 100644
--- a/llvm/lib/CodeGen/GlobalISel/MachineIRBuilder.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/MachineIRBuilder.cpp
@@ -199,7 +199,7 @@ void MachineIRBuilder::validateShiftOp(const LLT Res, const LLT Op0,
MachineInstrBuilder
MachineIRBuilder::buildPtrAdd(const DstOp &Res, const SrcOp &Op0,
const SrcOp &Op1, std::optional<unsigned> Flags) {
- assert(Res.getLLTTy(*getMRI()).getScalarType().isPointer() &&
+ assert(Res.getLLTTy(*getMRI()).isPointerOrPointerVector() &&
Res.getLLTTy(*getMRI()) == Op0.getLLTTy(*getMRI()) && "type mismatch");
assert(Op1.getLLTTy(*getMRI()).getScalarType().isScalar() && "invalid offset type");
diff --git a/llvm/lib/CodeGen/IntrinsicLowering.cpp b/llvm/lib/CodeGen/IntrinsicLowering.cpp
index 61920a0..fe450cb 100644
--- a/llvm/lib/CodeGen/IntrinsicLowering.cpp
+++ b/llvm/lib/CodeGen/IntrinsicLowering.cpp
@@ -312,6 +312,12 @@ void IntrinsicLowering::LowerIntrinsicCall(CallInst *CI) {
CI->replaceAllUsesWith(ConstantInt::get(Type::getInt64Ty(Context), 0));
break;
}
+ case Intrinsic::readsteadycounter: {
+ errs() << "WARNING: this target does not support the llvm.readsteadycounter"
+ << " intrinsic. It is being lowered to a constant 0\n";
+ CI->replaceAllUsesWith(ConstantInt::get(Type::getInt64Ty(Context), 0));
+ break;
+ }
case Intrinsic::dbg_declare:
case Intrinsic::dbg_label:
diff --git a/llvm/lib/CodeGen/MachineVerifier.cpp b/llvm/lib/CodeGen/MachineVerifier.cpp
index c65e917..2632b5b 100644
--- a/llvm/lib/CodeGen/MachineVerifier.cpp
+++ b/llvm/lib/CodeGen/MachineVerifier.cpp
@@ -1288,10 +1288,10 @@ void MachineVerifier::verifyPreISelGenericInstruction(const MachineInstr *MI) {
if (!DstTy.isValid() || !PtrTy.isValid() || !OffsetTy.isValid())
break;
- if (!PtrTy.getScalarType().isPointer())
+ if (!PtrTy.isPointerOrPointerVector())
report("gep first operand must be a pointer", MI);
- if (OffsetTy.getScalarType().isPointer())
+ if (OffsetTy.isPointerOrPointerVector())
report("gep offset operand must not be a pointer", MI);
// TODO: Is the offset allowed to be a scalar with a vector?
@@ -1304,7 +1304,7 @@ void MachineVerifier::verifyPreISelGenericInstruction(const MachineInstr *MI) {
if (!DstTy.isValid() || !SrcTy.isValid() || !MaskTy.isValid())
break;
- if (!DstTy.getScalarType().isPointer())
+ if (!DstTy.isPointerOrPointerVector())
report("ptrmask result type must be a pointer", MI);
if (!MaskTy.getScalarType().isScalar())
@@ -1330,15 +1330,13 @@ void MachineVerifier::verifyPreISelGenericInstruction(const MachineInstr *MI) {
if (!DstTy.isValid() || !SrcTy.isValid())
break;
- LLT DstElTy = DstTy.getScalarType();
- LLT SrcElTy = SrcTy.getScalarType();
- if (DstElTy.isPointer() || SrcElTy.isPointer())
+ if (DstTy.isPointerOrPointerVector() || SrcTy.isPointerOrPointerVector())
report("Generic extend/truncate can not operate on pointers", MI);
verifyVectorElementMatch(DstTy, SrcTy, MI);
- unsigned DstSize = DstElTy.getSizeInBits();
- unsigned SrcSize = SrcElTy.getSizeInBits();
+ unsigned DstSize = DstTy.getScalarSizeInBits();
+ unsigned SrcSize = SrcTy.getScalarSizeInBits();
switch (MI->getOpcode()) {
default:
if (DstSize <= SrcSize)
diff --git a/llvm/lib/CodeGen/SelectOptimize.cpp b/llvm/lib/CodeGen/SelectOptimize.cpp
index 31c4b63..5609f48 100644
--- a/llvm/lib/CodeGen/SelectOptimize.cpp
+++ b/llvm/lib/CodeGen/SelectOptimize.cpp
@@ -621,6 +621,12 @@ void SelectOptimizeImpl::convertProfitableSIGroups(SelectGroups &ProfSIGroups) {
SelectLike LastSI = ASI.back();
BasicBlock *StartBlock = SI.getI()->getParent();
BasicBlock::iterator SplitPt = ++(BasicBlock::iterator(LastSI.getI()));
+ // With RemoveDIs turned off, SplitPt can be a dbg.* intrinsic. With
+ // RemoveDIs turned on, SplitPt would instead point to the next
+ // instruction. To match existing dbg.* intrinsic behaviour with RemoveDIs,
+ // tell splitBasicBlock that we want to include any DPValues attached to
+ // SplitPt in the splice.
+ SplitPt.setHeadBit(true);
BasicBlock *EndBlock = StartBlock->splitBasicBlock(SplitPt, "select.end");
BFI->setBlockFreq(EndBlock, BFI->getBlockFreq(StartBlock));
// Delete the unconditional branch that was just created by the split.
diff --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index d3cd9b1..bdd2336 100644
--- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -9252,7 +9252,7 @@ SDValue DAGCombiner::MatchLoadCombine(SDNode *N) {
// Transfer chain users from old loads to the new load.
for (LoadSDNode *L : Loads)
- DAG.ReplaceAllUsesOfValueWith(SDValue(L, 1), SDValue(NewLoad.getNode(), 1));
+ DAG.makeEquivalentMemoryOrdering(L, NewLoad);
if (!NeedsBswap)
return NewLoad;
@@ -12971,12 +12971,12 @@ SDValue DAGCombiner::CombineExtLoad(SDNode *N) {
SDValue BasePtr = LN0->getBasePtr();
for (unsigned Idx = 0; Idx < NumSplits; Idx++) {
const unsigned Offset = Idx * Stride;
- const Align Align = commonAlignment(LN0->getAlign(), Offset);
- SDValue SplitLoad = DAG.getExtLoad(
- ExtType, SDLoc(LN0), SplitDstVT, LN0->getChain(), BasePtr,
- LN0->getPointerInfo().getWithOffset(Offset), SplitSrcVT, Align,
- LN0->getMemOperand()->getFlags(), LN0->getAAInfo());
+ SDValue SplitLoad =
+ DAG.getExtLoad(ExtType, SDLoc(LN0), SplitDstVT, LN0->getChain(),
+ BasePtr, LN0->getPointerInfo().getWithOffset(Offset),
+ SplitSrcVT, LN0->getOriginalAlign(),
+ LN0->getMemOperand()->getFlags(), LN0->getAAInfo());
BasePtr = DAG.getMemBasePlusOffset(BasePtr, TypeSize::getFixed(Stride), DL);
@@ -14382,7 +14382,6 @@ SDValue DAGCombiner::reduceLoadWidth(SDNode *N) {
DAG.getDataLayout().isBigEndian() ? AdjustBigEndianShift(ShAmt) : ShAmt;
uint64_t PtrOff = PtrAdjustmentInBits / 8;
- Align NewAlign = commonAlignment(LN0->getAlign(), PtrOff);
SDLoc DL(LN0);
// The original load itself didn't wrap, so an offset within it doesn't.
SDNodeFlags Flags;
@@ -14394,13 +14393,14 @@ SDValue DAGCombiner::reduceLoadWidth(SDNode *N) {
SDValue Load;
if (ExtType == ISD::NON_EXTLOAD)
Load = DAG.getLoad(VT, DL, LN0->getChain(), NewPtr,
- LN0->getPointerInfo().getWithOffset(PtrOff), NewAlign,
+ LN0->getPointerInfo().getWithOffset(PtrOff),
+ LN0->getOriginalAlign(),
LN0->getMemOperand()->getFlags(), LN0->getAAInfo());
else
Load = DAG.getExtLoad(ExtType, DL, VT, LN0->getChain(), NewPtr,
LN0->getPointerInfo().getWithOffset(PtrOff), ExtVT,
- NewAlign, LN0->getMemOperand()->getFlags(),
- LN0->getAAInfo());
+ LN0->getOriginalAlign(),
+ LN0->getMemOperand()->getFlags(), LN0->getAAInfo());
// Replace the old load's chain with the new load's chain.
WorklistRemover DeadNodes(*this);
diff --git a/llvm/lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp b/llvm/lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp
index 6cf5408..4172fbc 100644
--- a/llvm/lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp
@@ -249,7 +249,8 @@ void FunctionLoweringInfo::set(const Function &fn, MachineFunction &mf,
"WinEHPrepare failed to remove PHIs from imaginary BBs");
continue;
}
- if (isa<FuncletPadInst>(PadInst))
+ if (isa<FuncletPadInst>(PadInst) &&
+ Personality != EHPersonality::Wasm_CXX)
assert(&*BB.begin() == PadInst && "WinEHPrepare failed to demote PHIs");
}
diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
index 892bfbd..252b6e9 100644
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
@@ -1127,8 +1127,9 @@ void SelectionDAGLegalize::LegalizeOp(SDNode *Node) {
Action = TargetLowering::Custom;
break;
case ISD::READCYCLECOUNTER:
- // READCYCLECOUNTER returns an i64, even if type legalization might have
- // expanded that to several smaller types.
+ case ISD::READSTEADYCOUNTER:
+ // READCYCLECOUNTER and READSTEADYCOUNTER return a i64, even if type
+ // legalization might have expanded that to several smaller types.
Action = TLI.getOperationAction(Node->getOpcode(), MVT::i64);
break;
case ISD::READ_REGISTER:
@@ -3080,6 +3081,7 @@ bool SelectionDAGLegalize::ExpandNode(SDNode *Node) {
Results.push_back(Node->getOperand(0));
break;
case ISD::READCYCLECOUNTER:
+ case ISD::READSTEADYCOUNTER:
// If the target didn't expand this, just return 'zero' and preserve the
// chain.
Results.append(Node->getNumValues() - 1,
diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
index 39b7e06..a4ba261 100644
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
@@ -2648,7 +2648,8 @@ void DAGTypeLegalizer::ExpandIntegerResult(SDNode *N, unsigned ResNo) {
case ISD::LLRINT: ExpandIntRes_XROUND_XRINT(N, Lo, Hi); break;
case ISD::LOAD: ExpandIntRes_LOAD(cast<LoadSDNode>(N), Lo, Hi); break;
case ISD::MUL: ExpandIntRes_MUL(N, Lo, Hi); break;
- case ISD::READCYCLECOUNTER: ExpandIntRes_READCYCLECOUNTER(N, Lo, Hi); break;
+ case ISD::READCYCLECOUNTER:
+ case ISD::READSTEADYCOUNTER: ExpandIntRes_READCOUNTER(N, Lo, Hi); break;
case ISD::SDIV: ExpandIntRes_SDIV(N, Lo, Hi); break;
case ISD::SIGN_EXTEND: ExpandIntRes_SIGN_EXTEND(N, Lo, Hi); break;
case ISD::SIGN_EXTEND_INREG: ExpandIntRes_SIGN_EXTEND_INREG(N, Lo, Hi); break;
@@ -2824,25 +2825,26 @@ void DAGTypeLegalizer::ExpandShiftByConstant(SDNode *N, const APInt &Amt,
EVT NVT = InL.getValueType();
unsigned VTBits = N->getValueType(0).getSizeInBits();
unsigned NVTBits = NVT.getSizeInBits();
- EVT ShTy = N->getOperand(1).getValueType();
if (N->getOpcode() == ISD::SHL) {
if (Amt.uge(VTBits)) {
Lo = Hi = DAG.getConstant(0, DL, NVT);
} else if (Amt.ugt(NVTBits)) {
Lo = DAG.getConstant(0, DL, NVT);
- Hi = DAG.getNode(ISD::SHL, DL,
- NVT, InL, DAG.getConstant(Amt - NVTBits, DL, ShTy));
+ Hi = DAG.getNode(ISD::SHL, DL, NVT, InL,
+ DAG.getShiftAmountConstant(Amt - NVTBits, NVT, DL));
} else if (Amt == NVTBits) {
Lo = DAG.getConstant(0, DL, NVT);
Hi = InL;
} else {
- Lo = DAG.getNode(ISD::SHL, DL, NVT, InL, DAG.getConstant(Amt, DL, ShTy));
- Hi = DAG.getNode(ISD::OR, DL, NVT,
- DAG.getNode(ISD::SHL, DL, NVT, InH,
- DAG.getConstant(Amt, DL, ShTy)),
- DAG.getNode(ISD::SRL, DL, NVT, InL,
- DAG.getConstant(-Amt + NVTBits, DL, ShTy)));
+ Lo = DAG.getNode(ISD::SHL, DL, NVT, InL,
+ DAG.getShiftAmountConstant(Amt, NVT, DL));
+ Hi = DAG.getNode(
+ ISD::OR, DL, NVT,
+ DAG.getNode(ISD::SHL, DL, NVT, InH,
+ DAG.getShiftAmountConstant(Amt, NVT, DL)),
+ DAG.getNode(ISD::SRL, DL, NVT, InL,
+ DAG.getShiftAmountConstant(-Amt + NVTBits, NVT, DL)));
}
return;
}
@@ -2851,19 +2853,21 @@ void DAGTypeLegalizer::ExpandShiftByConstant(SDNode *N, const APInt &Amt,
if (Amt.uge(VTBits)) {
Lo = Hi = DAG.getConstant(0, DL, NVT);
} else if (Amt.ugt(NVTBits)) {
- Lo = DAG.getNode(ISD::SRL, DL,
- NVT, InH, DAG.getConstant(Amt - NVTBits, DL, ShTy));
+ Lo = DAG.getNode(ISD::SRL, DL, NVT, InH,
+ DAG.getShiftAmountConstant(Amt - NVTBits, NVT, DL));
Hi = DAG.getConstant(0, DL, NVT);
} else if (Amt == NVTBits) {
Lo = InH;
Hi = DAG.getConstant(0, DL, NVT);
} else {
- Lo = DAG.getNode(ISD::OR, DL, NVT,
- DAG.getNode(ISD::SRL, DL, NVT, InL,
- DAG.getConstant(Amt, DL, ShTy)),
- DAG.getNode(ISD::SHL, DL, NVT, InH,
- DAG.getConstant(-Amt + NVTBits, DL, ShTy)));
- Hi = DAG.getNode(ISD::SRL, DL, NVT, InH, DAG.getConstant(Amt, DL, ShTy));
+ Lo = DAG.getNode(
+ ISD::OR, DL, NVT,
+ DAG.getNode(ISD::SRL, DL, NVT, InL,
+ DAG.getShiftAmountConstant(Amt, NVT, DL)),
+ DAG.getNode(ISD::SHL, DL, NVT, InH,
+ DAG.getShiftAmountConstant(-Amt + NVTBits, NVT, DL)));
+ Hi = DAG.getNode(ISD::SRL, DL, NVT, InH,
+ DAG.getShiftAmountConstant(Amt, NVT, DL));
}
return;
}
@@ -2871,23 +2875,25 @@ void DAGTypeLegalizer::ExpandShiftByConstant(SDNode *N, const APInt &Amt,
assert(N->getOpcode() == ISD::SRA && "Unknown shift!");
if (Amt.uge(VTBits)) {
Hi = Lo = DAG.getNode(ISD::SRA, DL, NVT, InH,
- DAG.getConstant(NVTBits - 1, DL, ShTy));
+ DAG.getShiftAmountConstant(NVTBits - 1, NVT, DL));
} else if (Amt.ugt(NVTBits)) {
Lo = DAG.getNode(ISD::SRA, DL, NVT, InH,
- DAG.getConstant(Amt - NVTBits, DL, ShTy));
+ DAG.getShiftAmountConstant(Amt - NVTBits, NVT, DL));
Hi = DAG.getNode(ISD::SRA, DL, NVT, InH,
- DAG.getConstant(NVTBits - 1, DL, ShTy));
+ DAG.getShiftAmountConstant(NVTBits - 1, NVT, DL));
} else if (Amt == NVTBits) {
Lo = InH;
Hi = DAG.getNode(ISD::SRA, DL, NVT, InH,
- DAG.getConstant(NVTBits - 1, DL, ShTy));
+ DAG.getShiftAmountConstant(NVTBits - 1, NVT, DL));
} else {
- Lo = DAG.getNode(ISD::OR, DL, NVT,
- DAG.getNode(ISD::SRL, DL, NVT, InL,
- DAG.getConstant(Amt, DL, ShTy)),
- DAG.getNode(ISD::SHL, DL, NVT, InH,
- DAG.getConstant(-Amt + NVTBits, DL, ShTy)));
- Hi = DAG.getNode(ISD::SRA, DL, NVT, InH, DAG.getConstant(Amt, DL, ShTy));
+ Lo = DAG.getNode(
+ ISD::OR, DL, NVT,
+ DAG.getNode(ISD::SRL, DL, NVT, InL,
+ DAG.getShiftAmountConstant(Amt, NVT, DL)),
+ DAG.getNode(ISD::SHL, DL, NVT, InH,
+ DAG.getShiftAmountConstant(-Amt + NVTBits, NVT, DL)));
+ Hi = DAG.getNode(ISD::SRA, DL, NVT, InH,
+ DAG.getShiftAmountConstant(Amt, NVT, DL));
}
}
@@ -4026,8 +4032,8 @@ void DAGTypeLegalizer::ExpandIntRes_MUL(SDNode *N,
Lo, Hi);
}
-void DAGTypeLegalizer::ExpandIntRes_READCYCLECOUNTER(SDNode *N, SDValue &Lo,
- SDValue &Hi) {
+void DAGTypeLegalizer::ExpandIntRes_READCOUNTER(SDNode *N, SDValue &Lo,
+ SDValue &Hi) {
SDLoc DL(N);
EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0));
SDVTList VTs = DAG.getVTList(NVT, NVT, MVT::Other);
diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h b/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h
index 09f0bca..9114987 100644
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h
@@ -439,7 +439,7 @@ private:
void ExpandIntRes_CTPOP (SDNode *N, SDValue &Lo, SDValue &Hi);
void ExpandIntRes_CTTZ (SDNode *N, SDValue &Lo, SDValue &Hi);
void ExpandIntRes_LOAD (LoadSDNode *N, SDValue &Lo, SDValue &Hi);
- void ExpandIntRes_READCYCLECOUNTER (SDNode *N, SDValue &Lo, SDValue &Hi);
+ void ExpandIntRes_READCOUNTER (SDNode *N, SDValue &Lo, SDValue &Hi);
void ExpandIntRes_SIGN_EXTEND (SDNode *N, SDValue &Lo, SDValue &Hi);
void ExpandIntRes_SIGN_EXTEND_INREG (SDNode *N, SDValue &Lo, SDValue &Hi);
void ExpandIntRes_TRUNCATE (SDNode *N, SDValue &Lo, SDValue &Hi);
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
index 55eee78..421bb51 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
@@ -1734,6 +1734,12 @@ SDValue SelectionDAG::getShiftAmountConstant(uint64_t Val, EVT VT,
return getConstant(Val, DL, ShiftVT);
}
+SDValue SelectionDAG::getShiftAmountConstant(const APInt &Val, EVT VT,
+ const SDLoc &DL, bool LegalTypes) {
+ assert(Val.ult(VT.getScalarSizeInBits()) && "Out of range shift");
+ return getShiftAmountConstant(Val.getZExtValue(), VT, DL, LegalTypes);
+}
+
SDValue SelectionDAG::getVectorIdxConstant(uint64_t Val, const SDLoc &DL,
bool isTarget) {
return getConstant(Val, DL, TLI->getVectorIdxTy(getDataLayout()), isTarget);
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
index 5ce1013..28664b2 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
@@ -6781,6 +6781,14 @@ void SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I,
DAG.setRoot(Res.getValue(1));
return;
}
+ case Intrinsic::readsteadycounter: {
+ SDValue Op = getRoot();
+ Res = DAG.getNode(ISD::READSTEADYCOUNTER, sdl,
+ DAG.getVTList(MVT::i64, MVT::Other), Op);
+ setValue(&I, Res);
+ DAG.setRoot(Res.getValue(1));
+ return;
+ }
case Intrinsic::bitreverse:
setValue(&I, DAG.getNode(ISD::BITREVERSE, sdl,
getValue(I.getArgOperand(0)).getValueType(),
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGDumper.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGDumper.cpp
index a28d834..0fbd999 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGDumper.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGDumper.cpp
@@ -104,6 +104,7 @@ std::string SDNode::getOperationName(const SelectionDAG *G) const {
case ISD::ATOMIC_STORE: return "AtomicStore";
case ISD::PCMARKER: return "PCMarker";
case ISD::READCYCLECOUNTER: return "ReadCycleCounter";
+ case ISD::READSTEADYCOUNTER: return "ReadSteadyCounter";
case ISD::SRCVALUE: return "SrcValue";
case ISD::MDNODE_SDNODE: return "MDNode";
case ISD::EntryToken: return "EntryToken";
diff --git a/llvm/lib/CodeGen/SelectionDAG/StatepointLowering.cpp b/llvm/lib/CodeGen/SelectionDAG/StatepointLowering.cpp
index 1320479..d7f4d1c 100644
--- a/llvm/lib/CodeGen/SelectionDAG/StatepointLowering.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/StatepointLowering.cpp
@@ -1287,7 +1287,7 @@ void SelectionDAGBuilder::visitGCRelocate(const GCRelocateInst &Relocate) {
if (SD.isUndef() && SD.getValueType().getSizeInBits() <= 64) {
// Lowering relocate(undef) as arbitrary constant. Current constant value
// is chosen such that it's unlikely to be a valid pointer.
- setValue(&Relocate, DAG.getTargetConstant(0xFEFEFEFE, SDLoc(SD), MVT::i64));
+ setValue(&Relocate, DAG.getConstant(0xFEFEFEFE, SDLoc(SD), MVT::i64));
return;
}
diff --git a/llvm/lib/CodeGen/TargetLoweringBase.cpp b/llvm/lib/CodeGen/TargetLoweringBase.cpp
index d8302ba..646c0c3 100644
--- a/llvm/lib/CodeGen/TargetLoweringBase.cpp
+++ b/llvm/lib/CodeGen/TargetLoweringBase.cpp
@@ -123,7 +123,7 @@ void TargetLoweringBase::InitLibcalls(const Triple &TT) {
setLibcallCallingConv((RTLIB::Libcall)LC, CallingConv::C);
// Use the f128 variants of math functions on x86_64
- if (TT.getArch() == Triple::ArchType::x86_64) {
+ if (TT.getArch() == Triple::ArchType::x86_64 && TT.isGNUEnvironment()) {
setLibcallName(RTLIB::REM_F128, "fmodf128");
setLibcallName(RTLIB::FMA_F128, "fmaf128");
setLibcallName(RTLIB::SQRT_F128, "sqrtf128");
@@ -964,6 +964,9 @@ void TargetLoweringBase::initActions() {
// Most targets also ignore the @llvm.readcyclecounter intrinsic.
setOperationAction(ISD::READCYCLECOUNTER, MVT::i64, Expand);
+ // Most targets also ignore the @llvm.readsteadycounter intrinsic.
+ setOperationAction(ISD::READSTEADYCOUNTER, MVT::i64, Expand);
+
// ConstantFP nodes default to expand. Targets can either change this to
// Legal, in which case all fp constants are legal, or use isFPImmLegal()
// to optimize expansions for certain constants.
diff --git a/llvm/lib/CodeGen/TargetPassConfig.cpp b/llvm/lib/CodeGen/TargetPassConfig.cpp
index e82f14e..2ed39a5 100644
--- a/llvm/lib/CodeGen/TargetPassConfig.cpp
+++ b/llvm/lib/CodeGen/TargetPassConfig.cpp
@@ -918,7 +918,7 @@ void TargetPassConfig::addPassesToHandleExceptions() {
// on catchpads and cleanuppads because it does not outline them into
// funclets. Catchswitch blocks are not lowered in SelectionDAG, so we
// should remove PHIs there.
- addPass(createWinEHPass(/*DemoteCatchSwitchPHIOnly=*/false));
+ addPass(createWinEHPass(/*DemoteCatchSwitchPHIOnly=*/true));
addPass(createWasmEHPass());
break;
case ExceptionHandling::None:
diff --git a/llvm/lib/CodeGen/TypePromotion.cpp b/llvm/lib/CodeGen/TypePromotion.cpp
index 053caf5..48ad8de 100644
--- a/llvm/lib/CodeGen/TypePromotion.cpp
+++ b/llvm/lib/CodeGen/TypePromotion.cpp
@@ -359,22 +359,21 @@ bool TypePromotionImpl::isSafeWrap(Instruction *I) {
if (!OverflowConst.isNonPositive())
return false;
+ SafeWrap.insert(I);
+
// Using C1 = OverflowConst and C2 = ICmpConst, we can either prove that:
// zext(x) + sext(C1) <u zext(C2) if C1 < 0 and C1 >s C2
// zext(x) + sext(C1) <u sext(C2) if C1 < 0 and C1 <=s C2
if (OverflowConst.sgt(ICmpConst)) {
LLVM_DEBUG(dbgs() << "IR Promotion: Allowing safe overflow for sext "
<< "const of " << *I << "\n");
- SafeWrap.insert(I);
- return true;
- } else {
- LLVM_DEBUG(dbgs() << "IR Promotion: Allowing safe overflow for sext "
- << "const of " << *I << " and " << *CI << "\n");
- SafeWrap.insert(I);
- SafeWrap.insert(CI);
return true;
}
- return false;
+
+ LLVM_DEBUG(dbgs() << "IR Promotion: Allowing safe overflow for sext "
+ << "const of " << *I << " and " << *CI << "\n");
+ SafeWrap.insert(CI);
+ return true;
}
bool TypePromotionImpl::shouldPromote(Value *V) {
@@ -937,6 +936,8 @@ bool TypePromotionImpl::run(Function &F, const TargetMachine *TM,
return 0;
EVT PromotedVT = TLI->getTypeToTransformTo(*Ctx, SrcVT);
+ if (TLI->isSExtCheaperThanZExt(SrcVT, PromotedVT))
+ return 0;
if (RegisterBitWidth < PromotedVT.getFixedSizeInBits()) {
LLVM_DEBUG(dbgs() << "IR Promotion: Couldn't find target register "
<< "for promoted type\n");
diff --git a/llvm/lib/Frontend/Driver/CodeGenOptions.cpp b/llvm/lib/Frontend/Driver/CodeGenOptions.cpp
index 96c5b19..2d74a91 100644
--- a/llvm/lib/Frontend/Driver/CodeGenOptions.cpp
+++ b/llvm/lib/Frontend/Driver/CodeGenOptions.cpp
@@ -46,6 +46,10 @@ TargetLibraryInfoImpl *createTLII(llvm::Triple &TargetTriple,
TLII->addVectorizableFunctionsFromVecLib(TargetLibraryInfoImpl::ArmPL,
TargetTriple);
break;
+ case VectorLibrary::AMDLIBM:
+ TLII->addVectorizableFunctionsFromVecLib(TargetLibraryInfoImpl::AMDLIBM,
+ TargetTriple);
+ break;
default:
break;
}
diff --git a/llvm/lib/IR/AsmWriter.cpp b/llvm/lib/IR/AsmWriter.cpp
index c6ef332..0ae720e 100644
--- a/llvm/lib/IR/AsmWriter.cpp
+++ b/llvm/lib/IR/AsmWriter.cpp
@@ -1406,6 +1406,99 @@ static void WriteOptimizationInfo(raw_ostream &Out, const User *U) {
}
}
+static void WriteAPFloatInternal(raw_ostream &Out, const APFloat &APF) {
+ if (&APF.getSemantics() == &APFloat::IEEEsingle() ||
+ &APF.getSemantics() == &APFloat::IEEEdouble()) {
+ // We would like to output the FP constant value in exponential notation,
+ // but we cannot do this if doing so will lose precision. Check here to
+ // make sure that we only output it in exponential format if we can parse
+ // the value back and get the same value.
+ //
+ bool ignored;
+ bool isDouble = &APF.getSemantics() == &APFloat::IEEEdouble();
+ bool isInf = APF.isInfinity();
+ bool isNaN = APF.isNaN();
+
+ if (!isInf && !isNaN) {
+ double Val = APF.convertToDouble();
+ SmallString<128> StrVal;
+ APF.toString(StrVal, 6, 0, false);
+ // Check to make sure that the stringized number is not some string like
+ // "Inf" or NaN, that atof will accept, but the lexer will not. Check
+ // that the string matches the "[-+]?[0-9]" regex.
+ //
+ assert((isDigit(StrVal[0]) ||
+ ((StrVal[0] == '-' || StrVal[0] == '+') && isDigit(StrVal[1]))) &&
+ "[-+]?[0-9] regex does not match!");
+ // Reparse stringized version!
+ if (APFloat(APFloat::IEEEdouble(), StrVal).convertToDouble() == Val) {
+ Out << StrVal;
+ return;
+ }
+ }
+
+ // Otherwise we could not reparse it to exactly the same value, so we must
+ // output the string in hexadecimal format! Note that loading and storing
+ // floating point types changes the bits of NaNs on some hosts, notably
+ // x86, so we must not use these types.
+ static_assert(sizeof(double) == sizeof(uint64_t),
+ "assuming that double is 64 bits!");
+ APFloat apf = APF;
+
+ // Floats are represented in ASCII IR as double, convert.
+ // FIXME: We should allow 32-bit hex float and remove this.
+ if (!isDouble) {
+ // A signaling NaN is quieted on conversion, so we need to recreate the
+ // expected value after convert (quiet bit of the payload is clear).
+ bool IsSNAN = apf.isSignaling();
+ apf.convert(APFloat::IEEEdouble(), APFloat::rmNearestTiesToEven,
+ &ignored);
+ if (IsSNAN) {
+ APInt Payload = apf.bitcastToAPInt();
+ apf =
+ APFloat::getSNaN(APFloat::IEEEdouble(), apf.isNegative(), &Payload);
+ }
+ }
+
+ Out << format_hex(apf.bitcastToAPInt().getZExtValue(), 0, /*Upper=*/true);
+ return;
+ }
+
+ // Either half, bfloat or some form of long double.
+ // These appear as a magic letter identifying the type, then a
+ // fixed number of hex digits.
+ Out << "0x";
+ APInt API = APF.bitcastToAPInt();
+ if (&APF.getSemantics() == &APFloat::x87DoubleExtended()) {
+ Out << 'K';
+ Out << format_hex_no_prefix(API.getHiBits(16).getZExtValue(), 4,
+ /*Upper=*/true);
+ Out << format_hex_no_prefix(API.getLoBits(64).getZExtValue(), 16,
+ /*Upper=*/true);
+ } else if (&APF.getSemantics() == &APFloat::IEEEquad()) {
+ Out << 'L';
+ Out << format_hex_no_prefix(API.getLoBits(64).getZExtValue(), 16,
+ /*Upper=*/true);
+ Out << format_hex_no_prefix(API.getHiBits(64).getZExtValue(), 16,
+ /*Upper=*/true);
+ } else if (&APF.getSemantics() == &APFloat::PPCDoubleDouble()) {
+ Out << 'M';
+ Out << format_hex_no_prefix(API.getLoBits(64).getZExtValue(), 16,
+ /*Upper=*/true);
+ Out << format_hex_no_prefix(API.getHiBits(64).getZExtValue(), 16,
+ /*Upper=*/true);
+ } else if (&APF.getSemantics() == &APFloat::IEEEhalf()) {
+ Out << 'H';
+ Out << format_hex_no_prefix(API.getZExtValue(), 4,
+ /*Upper=*/true);
+ } else if (&APF.getSemantics() == &APFloat::BFloat()) {
+ Out << 'R';
+ Out << format_hex_no_prefix(API.getZExtValue(), 4,
+ /*Upper=*/true);
+ } else
+ llvm_unreachable("Unsupported floating point type");
+}
+
static void WriteConstantInternal(raw_ostream &Out, const Constant *CV,
AsmWriterContext &WriterCtx) {
if (const ConstantInt *CI = dyn_cast<ConstantInt>(CV)) {
@@ -1418,94 +1511,7 @@ static void WriteConstantInternal(raw_ostream &Out, const Constant *CV,
}
if (const ConstantFP *CFP = dyn_cast<ConstantFP>(CV)) {
- const APFloat &APF = CFP->getValueAPF();
- if (&APF.getSemantics() == &APFloat::IEEEsingle() ||
- &APF.getSemantics() == &APFloat::IEEEdouble()) {
- // We would like to output the FP constant value in exponential notation,
- // but we cannot do this if doing so will lose precision. Check here to
- // make sure that we only output it in exponential format if we can parse
- // the value back and get the same value.
- //
- bool ignored;
- bool isDouble = &APF.getSemantics() == &APFloat::IEEEdouble();
- bool isInf = APF.isInfinity();
- bool isNaN = APF.isNaN();
- if (!isInf && !isNaN) {
- double Val = APF.convertToDouble();
- SmallString<128> StrVal;
- APF.toString(StrVal, 6, 0, false);
- // Check to make sure that the stringized number is not some string like
- // "Inf" or NaN, that atof will accept, but the lexer will not. Check
- // that the string matches the "[-+]?[0-9]" regex.
- //
- assert((isDigit(StrVal[0]) || ((StrVal[0] == '-' || StrVal[0] == '+') &&
- isDigit(StrVal[1]))) &&
- "[-+]?[0-9] regex does not match!");
- // Reparse stringized version!
- if (APFloat(APFloat::IEEEdouble(), StrVal).convertToDouble() == Val) {
- Out << StrVal;
- return;
- }
- }
- // Otherwise we could not reparse it to exactly the same value, so we must
- // output the string in hexadecimal format! Note that loading and storing
- // floating point types changes the bits of NaNs on some hosts, notably
- // x86, so we must not use these types.
- static_assert(sizeof(double) == sizeof(uint64_t),
- "assuming that double is 64 bits!");
- APFloat apf = APF;
- // Floats are represented in ASCII IR as double, convert.
- // FIXME: We should allow 32-bit hex float and remove this.
- if (!isDouble) {
- // A signaling NaN is quieted on conversion, so we need to recreate the
- // expected value after convert (quiet bit of the payload is clear).
- bool IsSNAN = apf.isSignaling();
- apf.convert(APFloat::IEEEdouble(), APFloat::rmNearestTiesToEven,
- &ignored);
- if (IsSNAN) {
- APInt Payload = apf.bitcastToAPInt();
- apf = APFloat::getSNaN(APFloat::IEEEdouble(), apf.isNegative(),
- &Payload);
- }
- }
- Out << format_hex(apf.bitcastToAPInt().getZExtValue(), 0, /*Upper=*/true);
- return;
- }
-
- // Either half, bfloat or some form of long double.
- // These appear as a magic letter identifying the type, then a
- // fixed number of hex digits.
- Out << "0x";
- APInt API = APF.bitcastToAPInt();
- if (&APF.getSemantics() == &APFloat::x87DoubleExtended()) {
- Out << 'K';
- Out << format_hex_no_prefix(API.getHiBits(16).getZExtValue(), 4,
- /*Upper=*/true);
- Out << format_hex_no_prefix(API.getLoBits(64).getZExtValue(), 16,
- /*Upper=*/true);
- return;
- } else if (&APF.getSemantics() == &APFloat::IEEEquad()) {
- Out << 'L';
- Out << format_hex_no_prefix(API.getLoBits(64).getZExtValue(), 16,
- /*Upper=*/true);
- Out << format_hex_no_prefix(API.getHiBits(64).getZExtValue(), 16,
- /*Upper=*/true);
- } else if (&APF.getSemantics() == &APFloat::PPCDoubleDouble()) {
- Out << 'M';
- Out << format_hex_no_prefix(API.getLoBits(64).getZExtValue(), 16,
- /*Upper=*/true);
- Out << format_hex_no_prefix(API.getHiBits(64).getZExtValue(), 16,
- /*Upper=*/true);
- } else if (&APF.getSemantics() == &APFloat::IEEEhalf()) {
- Out << 'H';
- Out << format_hex_no_prefix(API.getZExtValue(), 4,
- /*Upper=*/true);
- } else if (&APF.getSemantics() == &APFloat::BFloat()) {
- Out << 'R';
- Out << format_hex_no_prefix(API.getZExtValue(), 4,
- /*Upper=*/true);
- } else
- llvm_unreachable("Unsupported floating point type");
+ WriteAPFloatInternal(Out, CFP->getValueAPF());
return;
}
diff --git a/llvm/lib/IR/BasicBlock.cpp b/llvm/lib/IR/BasicBlock.cpp
index fe9d0d0..bf02eba 100644
--- a/llvm/lib/IR/BasicBlock.cpp
+++ b/llvm/lib/IR/BasicBlock.cpp
@@ -34,7 +34,7 @@ cl::opt<bool>
UseNewDbgInfoFormat("experimental-debuginfo-iterators",
cl::desc("Enable communicating debuginfo positions "
"through iterators, eliminating intrinsics"),
- cl::init(false));
+ cl::init(true));
DPMarker *BasicBlock::createMarker(Instruction *I) {
assert(IsNewDbgInfoFormat &&
diff --git a/llvm/lib/IR/DebugInfo.cpp b/llvm/lib/IR/DebugInfo.cpp
index d8c1b0d..eaa5cb3 100644
--- a/llvm/lib/IR/DebugInfo.cpp
+++ b/llvm/lib/IR/DebugInfo.cpp
@@ -2214,10 +2214,6 @@ bool AssignmentTrackingPass::runOnFunction(Function &F) {
if (F.hasFnAttribute(Attribute::OptimizeNone))
return /*Changed*/ false;
- // FIXME: https://github.com/llvm/llvm-project/issues/76545
- if (F.hasFnAttribute(Attribute::SanitizeHWAddress))
- return /*Changed*/ false;
-
bool Changed = false;
auto *DL = &F.getParent()->getDataLayout();
// Collect a map of {backing storage : dbg.declares} (currently "backing
diff --git a/llvm/lib/IR/IntrinsicInst.cpp b/llvm/lib/IR/IntrinsicInst.cpp
index 7a3b708..5050091 100644
--- a/llvm/lib/IR/IntrinsicInst.cpp
+++ b/llvm/lib/IR/IntrinsicInst.cpp
@@ -623,7 +623,7 @@ bool VPIntrinsic::canIgnoreVectorLengthParam() const {
if (EC.isScalable()) {
// Compare vscale patterns
uint64_t VScaleFactor;
- if (match(VLParam, m_c_Mul(m_ConstantInt(VScaleFactor), m_VScale())))
+ if (match(VLParam, m_Mul(m_VScale(), m_ConstantInt(VScaleFactor))))
return VScaleFactor >= EC.getKnownMinValue();
return (EC.getKnownMinValue() == 1) && match(VLParam, m_VScale());
}
diff --git a/llvm/lib/Object/COFFImportFile.cpp b/llvm/lib/Object/COFFImportFile.cpp
index 9175c3e..f6f6cf2 100644
--- a/llvm/lib/Object/COFFImportFile.cpp
+++ b/llvm/lib/Object/COFFImportFile.cpp
@@ -625,8 +625,8 @@ NewArchiveMember ObjectFactory::createWeakExternal(StringRef Sym,
Error writeImportLibrary(StringRef ImportName, StringRef Path,
ArrayRef<COFFShortExport> Exports,
- ArrayRef<COFFShortExport> NativeExports,
- MachineTypes Machine, bool MinGW) {
+ MachineTypes Machine, bool MinGW,
+ ArrayRef<COFFShortExport> NativeExports) {
MachineTypes NativeMachine =
isArm64EC(Machine) ? IMAGE_FILE_MACHINE_ARM64 : Machine;
diff --git a/llvm/lib/Object/ELFObjectFile.cpp b/llvm/lib/Object/ELFObjectFile.cpp
index 01949c6..33be481 100644
--- a/llvm/lib/Object/ELFObjectFile.cpp
+++ b/llvm/lib/Object/ELFObjectFile.cpp
@@ -519,9 +519,9 @@ StringRef ELFObjectFileBase::getAMDGPUCPUName() const {
case ELF::EF_AMDGPU_MACH_AMDGCN_GFX9_GENERIC:
return "gfx9-generic";
case ELF::EF_AMDGPU_MACH_AMDGCN_GFX10_1_GENERIC:
- return "gfx10.1-generic";
+ return "gfx10-1-generic";
case ELF::EF_AMDGPU_MACH_AMDGCN_GFX10_3_GENERIC:
- return "gfx10.3-generic";
+ return "gfx10-3-generic";
case ELF::EF_AMDGPU_MACH_AMDGCN_GFX11_GENERIC:
return "gfx11-generic";
default:
diff --git a/llvm/lib/ProfileData/Coverage/CoverageMapping.cpp b/llvm/lib/ProfileData/Coverage/CoverageMapping.cpp
index eb0996e..ddce758 100644
--- a/llvm/lib/ProfileData/Coverage/CoverageMapping.cpp
+++ b/llvm/lib/ProfileData/Coverage/CoverageMapping.cpp
@@ -234,6 +234,7 @@ class MCDCRecordProcessor {
/// Decision Region to which the ExecutedTestVectorBitmap applies.
const CounterMappingRegion &Region;
+ const mcdc::DecisionParameters &DecisionParams;
/// Array of branch regions corresponding each conditions in the boolean
/// expression.
@@ -244,8 +245,8 @@ class MCDCRecordProcessor {
unsigned BitmapIdx;
- /// Mapping of a condition ID to its corresponding branch region.
- llvm::DenseMap<unsigned, const CounterMappingRegion *> Map;
+ /// Mapping of a condition ID to its corresponding branch params.
+ llvm::DenseMap<unsigned, mcdc::ConditionIDs> CondsMap;
/// Vector used to track whether a condition is constant folded.
MCDCRecord::BoolVector Folded;
@@ -261,57 +262,54 @@ public:
MCDCRecordProcessor(const BitVector &Bitmap,
const CounterMappingRegion &Region,
ArrayRef<const CounterMappingRegion *> Branches)
- : Bitmap(Bitmap), Region(Region), Branches(Branches),
- NumConditions(Region.MCDCParams.NumConditions),
- BitmapIdx(Region.MCDCParams.BitmapIdx * CHAR_BIT),
+ : Bitmap(Bitmap), Region(Region),
+ DecisionParams(Region.getDecisionParams()), Branches(Branches),
+ NumConditions(DecisionParams.NumConditions),
+ BitmapIdx(DecisionParams.BitmapIdx * CHAR_BIT),
Folded(NumConditions, false), IndependencePairs(NumConditions) {}
private:
- void recordTestVector(MCDCRecord::TestVector &TV, unsigned Index,
- MCDCRecord::CondState Result) {
- if (!Bitmap[BitmapIdx + Index])
- return;
-
- // Copy the completed test vector to the vector of testvectors.
- ExecVectors.push_back(TV);
-
- // The final value (T,F) is equal to the last non-dontcare state on the
- // path (in a short-circuiting system).
- ExecVectors.back().push_back(Result);
- }
-
// Walk the binary decision diagram and try assigning both false and true to
// each node. When a terminal node (ID == 0) is reached, fill in the value in
// the truth table.
- void buildTestVector(MCDCRecord::TestVector &TV, unsigned ID,
+ void buildTestVector(MCDCRecord::TestVector &TV, mcdc::ConditionID ID,
unsigned Index) {
- const CounterMappingRegion *Branch = Map[ID];
+ assert((Index & (1 << ID)) == 0);
+
+ for (auto MCDCCond : {MCDCRecord::MCDC_False, MCDCRecord::MCDC_True}) {
+ static_assert(MCDCRecord::MCDC_False == 0);
+ static_assert(MCDCRecord::MCDC_True == 1);
+ Index |= MCDCCond << ID;
+ TV[ID] = MCDCCond;
+ auto NextID = CondsMap[ID][MCDCCond];
+ if (NextID >= 0) {
+ buildTestVector(TV, NextID, Index);
+ continue;
+ }
- TV[ID - 1] = MCDCRecord::MCDC_False;
- if (Branch->MCDCParams.FalseID > 0)
- buildTestVector(TV, Branch->MCDCParams.FalseID, Index);
- else
- recordTestVector(TV, Index, MCDCRecord::MCDC_False);
+ if (!Bitmap[BitmapIdx + Index])
+ continue;
- Index |= 1 << (ID - 1);
- TV[ID - 1] = MCDCRecord::MCDC_True;
- if (Branch->MCDCParams.TrueID > 0)
- buildTestVector(TV, Branch->MCDCParams.TrueID, Index);
- else
- recordTestVector(TV, Index, MCDCRecord::MCDC_True);
+ // Copy the completed test vector to the vector of testvectors.
+ ExecVectors.push_back(TV);
+
+ // The final value (T,F) is equal to the last non-dontcare state on the
+ // path (in a short-circuiting system).
+ ExecVectors.back().push_back(MCDCCond);
+ }
// Reset back to DontCare.
- TV[ID - 1] = MCDCRecord::MCDC_DontCare;
+ TV[ID] = MCDCRecord::MCDC_DontCare;
}
/// Walk the bits in the bitmap. A bit set to '1' indicates that the test
/// vector at the corresponding index was executed during a test run.
void findExecutedTestVectors() {
// Walk the binary decision diagram to enumerate all possible test vectors.
- // We start at the root node (ID == 1) with all values being DontCare.
+ // We start at the root node (ID == 0) with all values being DontCare.
// `Index` encodes the bitmask of true values and is initially 0.
MCDCRecord::TestVector TV(NumConditions, MCDCRecord::MCDC_DontCare);
- buildTestVector(TV, 1, 0);
+ buildTestVector(TV, 0, 0);
}
// Find an independence pair for each condition:
@@ -371,8 +369,9 @@ public:
// - Record whether the condition is constant folded so that we exclude it
// from being measured.
for (const auto *B : Branches) {
- Map[B->MCDCParams.ID] = B;
- PosToID[I] = B->MCDCParams.ID - 1;
+ const auto &BranchParams = B->getBranchParams();
+ CondsMap[BranchParams.ID] = BranchParams.Conds;
+ PosToID[I] = BranchParams.ID;
CondLoc[I] = B->startLoc();
Folded[I++] = (B->Count.isZero() && B->FalseCount.isZero());
}
@@ -385,9 +384,9 @@ public:
findIndependencePairs();
// Record Test vectors, executed vectors, and independence pairs.
- MCDCRecord Res(Region, ExecVectors, IndependencePairs, Folded, PosToID,
- CondLoc);
- return Res;
+ return MCDCRecord(Region, std::move(ExecVectors),
+ std::move(IndependencePairs), std::move(Folded),
+ std::move(PosToID), std::move(CondLoc));
}
};
@@ -492,10 +491,12 @@ static unsigned getMaxBitmapSize(const CounterMappingContext &Ctx,
// Note that `<=` is used insted of `<`, because `BitmapIdx == 0` is valid
// and `MaxBitmapIdx is `unsigned`. `BitmapIdx` is unique in the record.
for (const auto &Region : reverse(Record.MappingRegions)) {
- if (Region.Kind == CounterMappingRegion::MCDCDecisionRegion &&
- MaxBitmapIdx <= Region.MCDCParams.BitmapIdx) {
- MaxBitmapIdx = Region.MCDCParams.BitmapIdx;
- NumConditions = Region.MCDCParams.NumConditions;
+ if (Region.Kind != CounterMappingRegion::MCDCDecisionRegion)
+ continue;
+ const auto &DecisionParams = Region.getDecisionParams();
+ if (MaxBitmapIdx <= DecisionParams.BitmapIdx) {
+ MaxBitmapIdx = DecisionParams.BitmapIdx;
+ NumConditions = DecisionParams.NumConditions;
}
}
unsigned SizeInBits = llvm::alignTo(uint64_t(1) << NumConditions, CHAR_BIT);
@@ -515,6 +516,7 @@ private:
const CounterMappingRegion *DecisionRegion;
/// They are reflected from DecisionRegion for convenience.
+ mcdc::DecisionParameters DecisionParams;
LineColPair DecisionStartLoc;
LineColPair DecisionEndLoc;
@@ -524,7 +526,7 @@ private:
/// IDs that are stored in MCDCBranches
/// Complete when all IDs (1 to NumConditions) are met.
- DenseSet<CounterMappingRegion::MCDCConditionID> ConditionIDs;
+ DenseSet<mcdc::ConditionID> ConditionIDs;
/// Set of IDs of Expansion(s) that are relevant to DecisionRegion
/// and its children (via expansions).
@@ -533,7 +535,9 @@ private:
DenseSet<unsigned> ExpandedFileIDs;
DecisionRecord(const CounterMappingRegion &Decision)
- : DecisionRegion(&Decision), DecisionStartLoc(Decision.startLoc()),
+ : DecisionRegion(&Decision),
+ DecisionParams(Decision.getDecisionParams()),
+ DecisionStartLoc(Decision.startLoc()),
DecisionEndLoc(Decision.endLoc()) {
assert(Decision.Kind == CounterMappingRegion::MCDCDecisionRegion);
}
@@ -561,21 +565,21 @@ private:
Result addBranch(const CounterMappingRegion &Branch) {
assert(Branch.Kind == CounterMappingRegion::MCDCBranchRegion);
- auto ConditionID = Branch.MCDCParams.ID;
- assert(ConditionID > 0 && "ConditionID should begin with 1");
+ auto ConditionID = Branch.getBranchParams().ID;
+ assert(ConditionID >= 0 && "ConditionID should be positive");
if (ConditionIDs.contains(ConditionID) ||
- ConditionID > DecisionRegion->MCDCParams.NumConditions)
+ ConditionID >= DecisionParams.NumConditions)
return NotProcessed;
if (!this->dominates(Branch))
return NotProcessed;
- assert(MCDCBranches.size() < DecisionRegion->MCDCParams.NumConditions);
+ assert(MCDCBranches.size() < DecisionParams.NumConditions);
- // Put `ID=1` in front of `MCDCBranches` for convenience
+ // Put `ID=0` in front of `MCDCBranches` for convenience
// even if `MCDCBranches` is not topological.
- if (ConditionID == 1)
+ if (ConditionID == 0)
MCDCBranches.insert(MCDCBranches.begin(), &Branch);
else
MCDCBranches.push_back(&Branch);
@@ -584,9 +588,8 @@ private:
ConditionIDs.insert(ConditionID);
// `Completed` when `MCDCBranches` is full
- return (MCDCBranches.size() == DecisionRegion->MCDCParams.NumConditions
- ? Completed
- : Processed);
+ return (MCDCBranches.size() == DecisionParams.NumConditions ? Completed
+ : Processed);
}
/// Record Expansion if it is relevant to this Decision.
@@ -761,7 +764,7 @@ Error CoverageMapping::loadFunctionRecord(
}
// Save the MC/DC Record so that it can be visualized later.
- Function.pushMCDCRecord(*Record);
+ Function.pushMCDCRecord(std::move(*Record));
}
// Don't create records for (filenames, function) pairs we've already seen.
diff --git a/llvm/lib/ProfileData/Coverage/CoverageMappingReader.cpp b/llvm/lib/ProfileData/Coverage/CoverageMappingReader.cpp
index ac8e6b5..d328460 100644
--- a/llvm/lib/ProfileData/Coverage/CoverageMappingReader.cpp
+++ b/llvm/lib/ProfileData/Coverage/CoverageMappingReader.cpp
@@ -244,7 +244,10 @@ Error RawCoverageMappingReader::readMappingRegionsSubArray(
unsigned LineStart = 0;
for (size_t I = 0; I < NumRegions; ++I) {
Counter C, C2;
- uint64_t BIDX = 0, NC = 0, ID = 0, TID = 0, FID = 0;
+ uint64_t BIDX, NC;
+ // They are stored as internal values plus 1 (min is -1)
+ uint64_t ID1, TID1, FID1;
+ mcdc::Parameters Params;
CounterMappingRegion::RegionKind Kind = CounterMappingRegion::CodeRegion;
// Read the combined counter + region kind.
@@ -302,19 +305,29 @@ Error RawCoverageMappingReader::readMappingRegionsSubArray(
return Err;
if (auto Err = readCounter(C2))
return Err;
- if (auto Err = readIntMax(ID, std::numeric_limits<unsigned>::max()))
+ if (auto Err = readIntMax(ID1, std::numeric_limits<int16_t>::max()))
return Err;
- if (auto Err = readIntMax(TID, std::numeric_limits<unsigned>::max()))
+ if (auto Err = readIntMax(TID1, std::numeric_limits<int16_t>::max()))
return Err;
- if (auto Err = readIntMax(FID, std::numeric_limits<unsigned>::max()))
+ if (auto Err = readIntMax(FID1, std::numeric_limits<int16_t>::max()))
return Err;
+ if (ID1 == 0)
+ return make_error<CoverageMapError>(
+ coveragemap_error::malformed,
+ "MCDCConditionID shouldn't be zero");
+ Params = mcdc::BranchParameters{
+ static_cast<int16_t>(static_cast<int16_t>(ID1) - 1),
+ {static_cast<int16_t>(static_cast<int16_t>(FID1) - 1),
+ static_cast<int16_t>(static_cast<int16_t>(TID1) - 1)}};
break;
case CounterMappingRegion::MCDCDecisionRegion:
Kind = CounterMappingRegion::MCDCDecisionRegion;
if (auto Err = readIntMax(BIDX, std::numeric_limits<unsigned>::max()))
return Err;
- if (auto Err = readIntMax(NC, std::numeric_limits<unsigned>::max()))
+ if (auto Err = readIntMax(NC, std::numeric_limits<int16_t>::max()))
return Err;
+ Params = mcdc::DecisionParameters{static_cast<unsigned>(BIDX),
+ static_cast<uint16_t>(NC)};
break;
default:
return make_error<CoverageMapError>(coveragemap_error::malformed,
@@ -370,13 +383,8 @@ Error RawCoverageMappingReader::readMappingRegionsSubArray(
});
auto CMR = CounterMappingRegion(
- C, C2,
- CounterMappingRegion::MCDCParameters{
- static_cast<unsigned>(BIDX), static_cast<unsigned>(NC),
- static_cast<unsigned>(ID), static_cast<unsigned>(TID),
- static_cast<unsigned>(FID)},
- InferredFileID, ExpandedFileID, LineStart, ColumnStart,
- LineStart + NumLines, ColumnEnd, Kind);
+ C, C2, InferredFileID, ExpandedFileID, LineStart, ColumnStart,
+ LineStart + NumLines, ColumnEnd, Kind, Params);
if (CMR.startLoc() > CMR.endLoc())
return make_error<CoverageMapError>(
coveragemap_error::malformed,
diff --git a/llvm/lib/ProfileData/Coverage/CoverageMappingWriter.cpp b/llvm/lib/ProfileData/Coverage/CoverageMappingWriter.cpp
index 27727f2..5036bde 100644
--- a/llvm/lib/ProfileData/Coverage/CoverageMappingWriter.cpp
+++ b/llvm/lib/ProfileData/Coverage/CoverageMappingWriter.cpp
@@ -213,6 +213,7 @@ void CoverageMappingWriter::write(raw_ostream &OS) {
}
Counter Count = Minimizer.adjust(I->Count);
Counter FalseCount = Minimizer.adjust(I->FalseCount);
+ bool ParamsShouldBeNull = true;
switch (I->Kind) {
case CounterMappingRegion::CodeRegion:
case CounterMappingRegion::GapRegion:
@@ -251,16 +252,29 @@ void CoverageMappingWriter::write(raw_ostream &OS) {
OS);
writeCounter(MinExpressions, Count, OS);
writeCounter(MinExpressions, FalseCount, OS);
- encodeULEB128(unsigned(I->MCDCParams.ID), OS);
- encodeULEB128(unsigned(I->MCDCParams.TrueID), OS);
- encodeULEB128(unsigned(I->MCDCParams.FalseID), OS);
+ {
+ // They are written as internal values plus 1.
+ const auto &BranchParams = I->getBranchParams();
+ ParamsShouldBeNull = false;
+ assert(BranchParams.ID >= 0);
+ unsigned ID1 = BranchParams.ID + 1;
+ unsigned TID1 = BranchParams.Conds[true] + 1;
+ unsigned FID1 = BranchParams.Conds[false] + 1;
+ encodeULEB128(ID1, OS);
+ encodeULEB128(TID1, OS);
+ encodeULEB128(FID1, OS);
+ }
break;
case CounterMappingRegion::MCDCDecisionRegion:
encodeULEB128(unsigned(I->Kind)
<< Counter::EncodingCounterTagAndExpansionRegionTagBits,
OS);
- encodeULEB128(unsigned(I->MCDCParams.BitmapIdx), OS);
- encodeULEB128(unsigned(I->MCDCParams.NumConditions), OS);
+ {
+ const auto &DecisionParams = I->getDecisionParams();
+ ParamsShouldBeNull = false;
+ encodeULEB128(static_cast<unsigned>(DecisionParams.BitmapIdx), OS);
+ encodeULEB128(static_cast<unsigned>(DecisionParams.NumConditions), OS);
+ }
break;
}
assert(I->LineStart >= PrevLineStart);
@@ -270,6 +284,9 @@ void CoverageMappingWriter::write(raw_ostream &OS) {
encodeULEB128(I->LineEnd - I->LineStart, OS);
encodeULEB128(I->ColumnEnd, OS);
PrevLineStart = I->LineStart;
+ assert((!ParamsShouldBeNull || std::get_if<0>(&I->MCDCParams)) &&
+ "MCDCParams should be empty");
+ (void)ParamsShouldBeNull;
}
// Ensure that all file ids have at least one mapping region.
assert(CurrentFileID == (VirtualFileMapping.size() - 1));
diff --git a/llvm/lib/ProfileData/InstrProf.cpp b/llvm/lib/ProfileData/InstrProf.cpp
index d26004e..2eeeff9 100644
--- a/llvm/lib/ProfileData/InstrProf.cpp
+++ b/llvm/lib/ProfileData/InstrProf.cpp
@@ -517,35 +517,46 @@ Error InstrProfSymtab::create(StringRef NameStrings) {
std::bind(&InstrProfSymtab::addFuncName, this, std::placeholders::_1));
}
-Error InstrProfSymtab::addFuncWithName(Function &F, StringRef PGOFuncName) {
- if (Error E = addFuncName(PGOFuncName))
- return E;
- MD5FuncMap.emplace_back(Function::getGUID(PGOFuncName), &F);
+StringRef InstrProfSymtab::getCanonicalName(StringRef PGOName) {
// In ThinLTO, local function may have been promoted to global and have
// suffix ".llvm." added to the function name. We need to add the
// stripped function name to the symbol table so that we can find a match
// from profile.
//
- // We may have other suffixes similar as ".llvm." which are needed to
- // be stripped before the matching, but ".__uniq." suffix which is used
- // to differentiate internal linkage functions in different modules
- // should be kept. Now this is the only suffix with the pattern ".xxx"
- // which is kept before matching.
+ // ".__uniq." suffix is used to differentiate internal linkage functions in
+ // different modules and should be kept. This is the only suffix with the
+ // pattern ".xxx" which is kept before matching, other suffixes similar as
+ // ".llvm." will be stripped.
const std::string UniqSuffix = ".__uniq.";
- auto pos = PGOFuncName.find(UniqSuffix);
- // Search '.' after ".__uniq." if ".__uniq." exists, otherwise
- // search '.' from the beginning.
- if (pos != std::string::npos)
+ size_t pos = PGOName.find(UniqSuffix);
+ if (pos != StringRef::npos)
pos += UniqSuffix.length();
else
pos = 0;
- pos = PGOFuncName.find('.', pos);
- if (pos != std::string::npos && pos != 0) {
- StringRef OtherFuncName = PGOFuncName.substr(0, pos);
- if (Error E = addFuncName(OtherFuncName))
+
+ // Search '.' after ".__uniq." if ".__uniq." exists, otherwise search '.' from
+ // the beginning.
+ pos = PGOName.find('.', pos);
+ if (pos != StringRef::npos && pos != 0)
+ return PGOName.substr(0, pos);
+
+ return PGOName;
+}
+
+Error InstrProfSymtab::addFuncWithName(Function &F, StringRef PGOFuncName) {
+ auto mapName = [&](StringRef Name) -> Error {
+ if (Error E = addFuncName(Name))
return E;
- MD5FuncMap.emplace_back(Function::getGUID(OtherFuncName), &F);
- }
+ MD5FuncMap.emplace_back(Function::getGUID(Name), &F);
+ return Error::success();
+ };
+ if (Error E = mapName(PGOFuncName))
+ return E;
+
+ StringRef CanonicalFuncName = getCanonicalName(PGOFuncName);
+ if (CanonicalFuncName != PGOFuncName)
+ return mapName(CanonicalFuncName);
+
return Error::success();
}
diff --git a/llvm/lib/Support/APFixedPoint.cpp b/llvm/lib/Support/APFixedPoint.cpp
index 3eea01b..249c4f1 100644
--- a/llvm/lib/Support/APFixedPoint.cpp
+++ b/llvm/lib/Support/APFixedPoint.cpp
@@ -129,6 +129,12 @@ APFixedPoint APFixedPoint::getMin(const FixedPointSemantics &Sema) {
return APFixedPoint(Val, Sema);
}
+APFixedPoint APFixedPoint::getEpsilon(const FixedPointSemantics &Sema) {
+ APSInt Val(Sema.getWidth(), !Sema.isSigned());
+ Val.setBit(/*BitPosition=*/0);
+ return APFixedPoint(Val, Sema);
+}
+
bool FixedPointSemantics::fitsInFloatSemantics(
const fltSemantics &FloatSema) const {
// A fixed point semantic fits in a floating point semantic if the maximum
diff --git a/llvm/lib/Support/CMakeLists.txt b/llvm/lib/Support/CMakeLists.txt
index e19223f..1f2d824 100644
--- a/llvm/lib/Support/CMakeLists.txt
+++ b/llvm/lib/Support/CMakeLists.txt
@@ -176,6 +176,7 @@ add_llvm_component_library(LLVMSupport
ELFAttributes.cpp
Error.cpp
ErrorHandling.cpp
+ ExponentialBackoff.cpp
ExtensibleRTTI.cpp
FileCollector.cpp
FileUtilities.cpp
diff --git a/llvm/lib/Support/DeltaAlgorithm.cpp b/llvm/lib/Support/DeltaAlgorithm.cpp
index 341de24..d763cde 100644
--- a/llvm/lib/Support/DeltaAlgorithm.cpp
+++ b/llvm/lib/Support/DeltaAlgorithm.cpp
@@ -83,9 +83,9 @@ bool DeltaAlgorithm::Search(const changeset_ty &Changes,
if (Sets.size() > 2) {
// FIXME: This is really slow.
changeset_ty Complement;
- std::set_difference(
- Changes.begin(), Changes.end(), it->begin(), it->end(),
- std::insert_iterator<changeset_ty>(Complement, Complement.begin()));
+ std::set_difference(Changes.begin(), Changes.end(), it->begin(),
+ it->end(),
+ std::inserter(Complement, Complement.begin()));
if (GetTestResult(Complement)) {
changesetlist_ty ComplementSets;
ComplementSets.insert(ComplementSets.end(), Sets.begin(), it);
diff --git a/llvm/lib/Support/ExponentialBackoff.cpp b/llvm/lib/Support/ExponentialBackoff.cpp
new file mode 100644
index 0000000..7e68cf6
--- /dev/null
+++ b/llvm/lib/Support/ExponentialBackoff.cpp
@@ -0,0 +1,29 @@
+//===- llvm/Support/ExponentialBackoff.h ------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/Support/ExponentialBackoff.h"
+#include <thread>
+
+using namespace llvm;
+
+bool ExponentialBackoff::waitForNextAttempt() {
+ auto Now = std::chrono::steady_clock::now();
+ if (Now >= EndTime)
+ return false;
+
+ duration CurMaxWait = std::min(MinWait * CurrentMultiplier, MaxWait);
+ std::uniform_int_distribution<uint64_t> Dist(MinWait.count(),
+ CurMaxWait.count());
+ // Use random_device directly instead of a PRNG as uniform_int_distribution
+ // often only takes a few samples anyway.
+ duration WaitDuration = std::min(duration(Dist(RandDev)), EndTime - Now);
+ if (CurMaxWait < MaxWait)
+ CurrentMultiplier *= 2;
+ std::this_thread::sleep_for(WaitDuration);
+ return true;
+}
diff --git a/llvm/lib/Support/LockFileManager.cpp b/llvm/lib/Support/LockFileManager.cpp
index a2b0fe8c..34c7a16 100644
--- a/llvm/lib/Support/LockFileManager.cpp
+++ b/llvm/lib/Support/LockFileManager.cpp
@@ -11,6 +11,7 @@
#include "llvm/ADT/StringExtras.h"
#include "llvm/Support/Errc.h"
#include "llvm/Support/ErrorOr.h"
+#include "llvm/Support/ExponentialBackoff.h"
#include "llvm/Support/FileSystem.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/Process.h"
@@ -20,7 +21,6 @@
#include <chrono>
#include <ctime>
#include <memory>
-#include <random>
#include <sys/stat.h>
#include <sys/types.h>
#include <system_error>
@@ -295,29 +295,15 @@ LockFileManager::waitForUnlock(const unsigned MaxSeconds) {
return Res_Success;
// Since we don't yet have an event-based method to wait for the lock file,
- // implement randomized exponential backoff, similar to Ethernet collision
+ // use randomized exponential backoff, similar to Ethernet collision
// algorithm. This improves performance on machines with high core counts
// when the file lock is heavily contended by multiple clang processes
- const unsigned long MinWaitDurationMS = 10;
- const unsigned long MaxWaitMultiplier = 50; // 500ms max wait
- unsigned long WaitMultiplier = 1;
- unsigned long ElapsedTimeSeconds = 0;
+ using namespace std::chrono_literals;
+ ExponentialBackoff Backoff(std::chrono::seconds(MaxSeconds), 10ms, 500ms);
- std::random_device Device;
- std::default_random_engine Engine(Device());
-
- auto StartTime = std::chrono::steady_clock::now();
-
- do {
+ // Wait first as this is only called when the lock is known to be held.
+ while (Backoff.waitForNextAttempt()) {
// FIXME: implement event-based waiting
-
- // Sleep for the designated interval, to allow the owning process time to
- // finish up and remove the lock file.
- std::uniform_int_distribution<unsigned long> Distribution(1,
- WaitMultiplier);
- unsigned long WaitDurationMS = MinWaitDurationMS * Distribution(Engine);
- std::this_thread::sleep_for(std::chrono::milliseconds(WaitDurationMS));
-
if (sys::fs::access(LockFileName.c_str(), sys::fs::AccessMode::Exist) ==
errc::no_such_file_or_directory) {
// If the original file wasn't created, somone thought the lock was dead.
@@ -329,17 +315,7 @@ LockFileManager::waitForUnlock(const unsigned MaxSeconds) {
// If the process owning the lock died without cleaning up, just bail out.
if (!processStillExecuting((*Owner).first, (*Owner).second))
return Res_OwnerDied;
-
- WaitMultiplier *= 2;
- if (WaitMultiplier > MaxWaitMultiplier) {
- WaitMultiplier = MaxWaitMultiplier;
- }
-
- ElapsedTimeSeconds = std::chrono::duration_cast<std::chrono::seconds>(
- std::chrono::steady_clock::now() - StartTime)
- .count();
-
- } while (ElapsedTimeSeconds < MaxSeconds);
+ }
// Give up.
return Res_Timeout;
diff --git a/llvm/lib/Target/AArch64/AArch64.td b/llvm/lib/Target/AArch64/AArch64.td
index 3377fcf..169b00e 100644
--- a/llvm/lib/Target/AArch64/AArch64.td
+++ b/llvm/lib/Target/AArch64/AArch64.td
@@ -837,6 +837,7 @@ include "AArch64SchedA64FX.td"
include "AArch64SchedThunderX3T110.td"
include "AArch64SchedTSV110.td"
include "AArch64SchedAmpere1.td"
+include "AArch64SchedAmpere1B.td"
include "AArch64SchedNeoverseN1.td"
include "AArch64SchedNeoverseN2.td"
include "AArch64SchedNeoverseV1.td"
@@ -1555,7 +1556,7 @@ def ProcessorFeatures {
FeatureMTE, FeatureSSBS, FeatureRandGen,
FeatureSB, FeatureSM4, FeatureSHA2,
FeatureSHA3, FeatureAES, FeatureCSSC,
- FeatureWFxT];
+ FeatureWFxT, FeatureFullFP16];
// ETE and TRBE are future architecture extensions. We temporarily enable them
// by default for users targeting generic AArch64. The extensions do not
@@ -1723,7 +1724,7 @@ def : ProcessorModel<"ampere1", Ampere1Model, ProcessorFeatures.Ampere1,
def : ProcessorModel<"ampere1a", Ampere1Model, ProcessorFeatures.Ampere1A,
[TuneAmpere1A]>;
-def : ProcessorModel<"ampere1b", Ampere1Model, ProcessorFeatures.Ampere1B,
+def : ProcessorModel<"ampere1b", Ampere1BModel, ProcessorFeatures.Ampere1B,
[TuneAmpere1B]>;
//===----------------------------------------------------------------------===//
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index a3b7e31..8c5a4cd 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -12935,42 +12935,74 @@ static SDValue NormalizeBuildVector(SDValue Op,
return DAG.getBuildVector(VT, dl, Ops);
}
-static SDValue ConstantBuildVector(SDValue Op, SelectionDAG &DAG) {
+static SDValue ConstantBuildVector(SDValue Op, SelectionDAG &DAG,
+ const AArch64Subtarget *ST) {
EVT VT = Op.getValueType();
+ assert((VT.getSizeInBits() == 64 || VT.getSizeInBits() == 128) &&
+ "Expected a legal NEON vector");
APInt DefBits(VT.getSizeInBits(), 0);
APInt UndefBits(VT.getSizeInBits(), 0);
BuildVectorSDNode *BVN = cast<BuildVectorSDNode>(Op.getNode());
if (resolveBuildVector(BVN, DefBits, UndefBits)) {
- SDValue NewOp;
- if ((NewOp = tryAdvSIMDModImm64(AArch64ISD::MOVIedit, Op, DAG, DefBits)) ||
- (NewOp = tryAdvSIMDModImm32(AArch64ISD::MOVIshift, Op, DAG, DefBits)) ||
- (NewOp = tryAdvSIMDModImm321s(AArch64ISD::MOVImsl, Op, DAG, DefBits)) ||
- (NewOp = tryAdvSIMDModImm16(AArch64ISD::MOVIshift, Op, DAG, DefBits)) ||
- (NewOp = tryAdvSIMDModImm8(AArch64ISD::MOVI, Op, DAG, DefBits)) ||
- (NewOp = tryAdvSIMDModImmFP(AArch64ISD::FMOV, Op, DAG, DefBits)))
- return NewOp;
-
- DefBits = ~DefBits;
- if ((NewOp = tryAdvSIMDModImm32(AArch64ISD::MVNIshift, Op, DAG, DefBits)) ||
- (NewOp = tryAdvSIMDModImm321s(AArch64ISD::MVNImsl, Op, DAG, DefBits)) ||
- (NewOp = tryAdvSIMDModImm16(AArch64ISD::MVNIshift, Op, DAG, DefBits)))
- return NewOp;
-
- DefBits = UndefBits;
- if ((NewOp = tryAdvSIMDModImm64(AArch64ISD::MOVIedit, Op, DAG, DefBits)) ||
- (NewOp = tryAdvSIMDModImm32(AArch64ISD::MOVIshift, Op, DAG, DefBits)) ||
- (NewOp = tryAdvSIMDModImm321s(AArch64ISD::MOVImsl, Op, DAG, DefBits)) ||
- (NewOp = tryAdvSIMDModImm16(AArch64ISD::MOVIshift, Op, DAG, DefBits)) ||
- (NewOp = tryAdvSIMDModImm8(AArch64ISD::MOVI, Op, DAG, DefBits)) ||
- (NewOp = tryAdvSIMDModImmFP(AArch64ISD::FMOV, Op, DAG, DefBits)))
- return NewOp;
+ auto TryMOVIWithBits = [&](APInt DefBits) {
+ SDValue NewOp;
+ if ((NewOp =
+ tryAdvSIMDModImm64(AArch64ISD::MOVIedit, Op, DAG, DefBits)) ||
+ (NewOp =
+ tryAdvSIMDModImm32(AArch64ISD::MOVIshift, Op, DAG, DefBits)) ||
+ (NewOp =
+ tryAdvSIMDModImm321s(AArch64ISD::MOVImsl, Op, DAG, DefBits)) ||
+ (NewOp =
+ tryAdvSIMDModImm16(AArch64ISD::MOVIshift, Op, DAG, DefBits)) ||
+ (NewOp = tryAdvSIMDModImm8(AArch64ISD::MOVI, Op, DAG, DefBits)) ||
+ (NewOp = tryAdvSIMDModImmFP(AArch64ISD::FMOV, Op, DAG, DefBits)))
+ return NewOp;
+
+ APInt NotDefBits = ~DefBits;
+ if ((NewOp = tryAdvSIMDModImm32(AArch64ISD::MVNIshift, Op, DAG,
+ NotDefBits)) ||
+ (NewOp = tryAdvSIMDModImm321s(AArch64ISD::MVNImsl, Op, DAG,
+ NotDefBits)) ||
+ (NewOp =
+ tryAdvSIMDModImm16(AArch64ISD::MVNIshift, Op, DAG, NotDefBits)))
+ return NewOp;
+ return SDValue();
+ };
+ if (SDValue R = TryMOVIWithBits(DefBits))
+ return R;
+ if (SDValue R = TryMOVIWithBits(UndefBits))
+ return R;
- DefBits = ~UndefBits;
- if ((NewOp = tryAdvSIMDModImm32(AArch64ISD::MVNIshift, Op, DAG, DefBits)) ||
- (NewOp = tryAdvSIMDModImm321s(AArch64ISD::MVNImsl, Op, DAG, DefBits)) ||
- (NewOp = tryAdvSIMDModImm16(AArch64ISD::MVNIshift, Op, DAG, DefBits)))
- return NewOp;
+ // See if a fneg of the constant can be materialized with a MOVI, etc
+ auto TryWithFNeg = [&](APInt DefBits, MVT FVT) {
+ // FNegate each sub-element of the constant
+ assert(VT.getSizeInBits() % FVT.getScalarSizeInBits() == 0);
+ APInt Neg = APInt::getHighBitsSet(FVT.getSizeInBits(), 1)
+ .zext(VT.getSizeInBits());
+ APInt NegBits(VT.getSizeInBits(), 0);
+ unsigned NumElts = VT.getSizeInBits() / FVT.getScalarSizeInBits();
+ for (unsigned i = 0; i < NumElts; i++)
+ NegBits |= Neg << (FVT.getScalarSizeInBits() * i);
+ NegBits = DefBits ^ NegBits;
+
+ // Try to create the new constants with MOVI, and if so generate a fneg
+ // for it.
+ if (SDValue NewOp = TryMOVIWithBits(NegBits)) {
+ SDLoc DL(Op);
+ MVT VFVT = NumElts == 1 ? FVT : MVT::getVectorVT(FVT, NumElts);
+ return DAG.getNode(
+ AArch64ISD::NVCAST, DL, VT,
+ DAG.getNode(ISD::FNEG, DL, VFVT,
+ DAG.getNode(AArch64ISD::NVCAST, DL, VFVT, NewOp)));
+ }
+ return SDValue();
+ };
+ SDValue R;
+ if ((R = TryWithFNeg(DefBits, MVT::f32)) ||
+ (R = TryWithFNeg(DefBits, MVT::f64)) ||
+ (ST->hasFullFP16() && (R = TryWithFNeg(DefBits, MVT::f16))))
+ return R;
}
return SDValue();
@@ -13019,7 +13051,7 @@ SDValue AArch64TargetLowering::LowerBUILD_VECTOR(SDValue Op,
return Op;
}
- if (SDValue V = ConstantBuildVector(Op, DAG))
+ if (SDValue V = ConstantBuildVector(Op, DAG, Subtarget))
return V;
// Scan through the operands to find some interesting properties we can
@@ -13244,7 +13276,7 @@ SDValue AArch64TargetLowering::LowerBUILD_VECTOR(SDValue Op,
ConstantValueAPInt = C->getAPIntValue().zextOrTrunc(BitSize);
if (!isNullConstant(ConstantValue) && !isNullFPConstant(ConstantValue) &&
!ConstantValueAPInt.isAllOnes()) {
- Val = ConstantBuildVector(Val, DAG);
+ Val = ConstantBuildVector(Val, DAG, Subtarget);
if (!Val)
// Otherwise, materialize the constant and splat it.
Val = DAG.getNode(AArch64ISD::DUP, dl, VT, ConstantValue);
@@ -23145,9 +23177,12 @@ static SDValue performDUPCombine(SDNode *N,
}
/// Get rid of unnecessary NVCASTs (that don't change the type).
-static SDValue performNVCASTCombine(SDNode *N) {
+static SDValue performNVCASTCombine(SDNode *N, SelectionDAG &DAG) {
if (N->getValueType(0) == N->getOperand(0).getValueType())
return N->getOperand(0);
+ if (N->getOperand(0).getOpcode() == AArch64ISD::NVCAST)
+ return DAG.getNode(AArch64ISD::NVCAST, SDLoc(N), N->getValueType(0),
+ N->getOperand(0).getOperand(0));
return SDValue();
}
@@ -24141,7 +24176,7 @@ SDValue AArch64TargetLowering::PerformDAGCombine(SDNode *N,
case AArch64ISD::DUPLANE128:
return performDupLane128Combine(N, DAG);
case AArch64ISD::NVCAST:
- return performNVCASTCombine(N);
+ return performNVCASTCombine(N, DAG);
case AArch64ISD::SPLICE:
return performSpliceCombine(N, DAG);
case AArch64ISD::UUNPKLO:
diff --git a/llvm/lib/Target/AArch64/AArch64InstrInfo.td b/llvm/lib/Target/AArch64/AArch64InstrInfo.td
index 9c3a692..8c2a852 100644
--- a/llvm/lib/Target/AArch64/AArch64InstrInfo.td
+++ b/llvm/lib/Target/AArch64/AArch64InstrInfo.td
@@ -7595,13 +7595,17 @@ defm USHR : SIMDVectorRShiftBHSD<1, 0b00000, "ushr", AArch64vlshr>;
defm USRA : SIMDVectorRShiftBHSDTied<1, 0b00010, "usra",
TriOpFrag<(add_and_or_is_add node:$LHS, (AArch64vlshr node:$MHS, node:$RHS))> >;
+def VImm0080: PatLeaf<(AArch64movi_shift (i32 128), (i32 0))>;
+def VImm00008000: PatLeaf<(AArch64movi_shift (i32 128), (i32 8))>;
+def VImm0000000080000000: PatLeaf<(AArch64NvCast (v2f64 (fneg (AArch64NvCast (v4i32 (AArch64movi_shift (i32 128), (i32 24)))))))>;
+
// RADDHN patterns for when RSHRN shifts by half the size of the vector element
-def : Pat<(v8i8 (trunc (AArch64vlshr (add (v8i16 V128:$Vn), (AArch64movi_shift (i32 128), (i32 0))), (i32 8)))),
+def : Pat<(v8i8 (trunc (AArch64vlshr (add (v8i16 V128:$Vn), VImm0080), (i32 8)))),
(RADDHNv8i16_v8i8 V128:$Vn, (v8i16 (MOVIv2d_ns (i32 0))))>;
-def : Pat<(v4i16 (trunc (AArch64vlshr (add (v4i32 V128:$Vn), (AArch64movi_shift (i32 128), (i32 8))), (i32 16)))),
+def : Pat<(v4i16 (trunc (AArch64vlshr (add (v4i32 V128:$Vn), VImm00008000), (i32 16)))),
(RADDHNv4i32_v4i16 V128:$Vn, (v4i32 (MOVIv2d_ns (i32 0))))>;
let AddedComplexity = 5 in
-def : Pat<(v2i32 (trunc (AArch64vlshr (add (v2i64 V128:$Vn), (AArch64dup (i64 2147483648))), (i32 32)))),
+def : Pat<(v2i32 (trunc (AArch64vlshr (add (v2i64 V128:$Vn), VImm0000000080000000), (i32 32)))),
(RADDHNv2i64_v2i32 V128:$Vn, (v2i64 (MOVIv2d_ns (i32 0))))>;
def : Pat<(v8i8 (int_aarch64_neon_rshrn (v8i16 V128:$Vn), (i32 8))),
(RADDHNv8i16_v8i8 V128:$Vn, (v8i16 (MOVIv2d_ns (i32 0))))>;
@@ -7613,20 +7617,20 @@ def : Pat<(v2i32 (int_aarch64_neon_rshrn (v2i64 V128:$Vn), (i32 32))),
// RADDHN2 patterns for when RSHRN shifts by half the size of the vector element
def : Pat<(v16i8 (concat_vectors
(v8i8 V64:$Vd),
- (v8i8 (trunc (AArch64vlshr (add (v8i16 V128:$Vn), (AArch64movi_shift (i32 128), (i32 0))), (i32 8)))))),
+ (v8i8 (trunc (AArch64vlshr (add (v8i16 V128:$Vn), VImm0080), (i32 8)))))),
(RADDHNv8i16_v16i8
(INSERT_SUBREG (IMPLICIT_DEF), V64:$Vd, dsub), V128:$Vn,
(v8i16 (MOVIv2d_ns (i32 0))))>;
def : Pat<(v8i16 (concat_vectors
(v4i16 V64:$Vd),
- (v4i16 (trunc (AArch64vlshr (add (v4i32 V128:$Vn), (AArch64movi_shift (i32 128), (i32 8))), (i32 16)))))),
+ (v4i16 (trunc (AArch64vlshr (add (v4i32 V128:$Vn), VImm00008000), (i32 16)))))),
(RADDHNv4i32_v8i16
(INSERT_SUBREG (IMPLICIT_DEF), V64:$Vd, dsub), V128:$Vn,
(v4i32 (MOVIv2d_ns (i32 0))))>;
let AddedComplexity = 5 in
def : Pat<(v4i32 (concat_vectors
(v2i32 V64:$Vd),
- (v2i32 (trunc (AArch64vlshr (add (v2i64 V128:$Vn), (AArch64dup (i64 2147483648))), (i32 32)))))),
+ (v2i32 (trunc (AArch64vlshr (add (v2i64 V128:$Vn), VImm0000000080000000), (i32 32)))))),
(RADDHNv2i64_v4i32
(INSERT_SUBREG (IMPLICIT_DEF), V64:$Vd, dsub), V128:$Vn,
(v2i64 (MOVIv2d_ns (i32 0))))>;
diff --git a/llvm/lib/Target/AArch64/AArch64SchedA53.td b/llvm/lib/Target/AArch64/AArch64SchedA53.td
index 3e4168f..c714bad 100644
--- a/llvm/lib/Target/AArch64/AArch64SchedA53.td
+++ b/llvm/lib/Target/AArch64/AArch64SchedA53.td
@@ -29,7 +29,7 @@ def CortexA53Model : SchedMachineModel {
list<Predicate> UnsupportedFeatures = !listconcat(SVEUnsupported.F,
PAUnsupported.F,
SMEUnsupported.F,
- [HasMTE]);
+ [HasMTE, HasCSSC]);
}
diff --git a/llvm/lib/Target/AArch64/AArch64SchedA57.td b/llvm/lib/Target/AArch64/AArch64SchedA57.td
index 277ec77..ebbc3b7 100644
--- a/llvm/lib/Target/AArch64/AArch64SchedA57.td
+++ b/llvm/lib/Target/AArch64/AArch64SchedA57.td
@@ -34,7 +34,7 @@ def CortexA57Model : SchedMachineModel {
list<Predicate> UnsupportedFeatures = !listconcat(SVEUnsupported.F,
PAUnsupported.F,
SMEUnsupported.F,
- [HasMTE]);
+ [HasMTE, HasCSSC]);
}
//===----------------------------------------------------------------------===//
diff --git a/llvm/lib/Target/AArch64/AArch64SchedA64FX.td b/llvm/lib/Target/AArch64/AArch64SchedA64FX.td
index 7edce4b..d6fe84a 100644
--- a/llvm/lib/Target/AArch64/AArch64SchedA64FX.td
+++ b/llvm/lib/Target/AArch64/AArch64SchedA64FX.td
@@ -22,7 +22,8 @@ def A64FXModel : SchedMachineModel {
list<Predicate> UnsupportedFeatures = !listconcat(SMEUnsupported.F, SVEUnsupported.F,
[HasMTE, HasMatMulInt8, HasBF16,
- HasPAuth, HasPAuthLR, HasCPA]);
+ HasPAuth, HasPAuthLR, HasCPA,
+ HasCSSC]);
let FullInstRWOverlapCheck = 0;
}
diff --git a/llvm/lib/Target/AArch64/AArch64SchedAmpere1B.td b/llvm/lib/Target/AArch64/AArch64SchedAmpere1B.td
new file mode 100644
index 0000000..9c4f000
--- /dev/null
+++ b/llvm/lib/Target/AArch64/AArch64SchedAmpere1B.td
@@ -0,0 +1,1149 @@
+//=- AArch64SchedAmpere1B.td - Ampere-1B scheduling def -----*- tablegen -*-=//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the machine model for the Ampere Computing Ampere-1B to
+// support instruction scheduling and other instruction cost heuristics.
+//
+//===----------------------------------------------------------------------===//
+
+// The Ampere-1B core is an out-of-order micro-architecture. The front
+// end has branch prediction, with a 10-cycle recovery time from a
+// mispredicted branch. Instructions coming out of the front end are
+// decoded into internal micro-ops (uops).
+
+def Ampere1BModel : SchedMachineModel {
+ let IssueWidth = 12; // Maximum micro-ops dispatch rate.
+ let MicroOpBufferSize = 192; // micro-op re-order buffer size
+ let LoadLatency = 3; // Optimistic load latency
+ let MispredictPenalty = 10; // Branch mispredict penalty
+ let LoopMicroOpBufferSize = 32; // Instruction queue size
+ let CompleteModel = 1;
+
+ list<Predicate> UnsupportedFeatures = !listconcat(SVEUnsupported.F,
+ SMEUnsupported.F,
+ PAUnsupported.F);
+}
+
+let SchedModel = Ampere1BModel in {
+
+//===----------------------------------------------------------------------===//
+// Define each kind of processor resource and number available on Ampere-1B.
+
+def Ampere1BUnitA : ProcResource<2>; // integer single-cycle, branch, and flags r/w
+def Ampere1BUnitB : ProcResource<2>; // integer single-cycle, and complex shifts
+def Ampere1BUnitBS : ProcResource<1>; // integer multi-cycle
+def Ampere1BUnitL : ProcResource<2>; // load
+def Ampere1BUnitS : ProcResource<2>; // store address calculation
+def Ampere1BUnitX : ProcResource<1>; // FP and vector operations, and flag write
+def Ampere1BUnitY : ProcResource<1>; // FP and vector operations, and crypto
+def Ampere1BUnitZ : ProcResource<1>; // FP store data and FP-to-integer moves
+
+def Ampere1BUnitAB : ProcResGroup<[Ampere1BUnitA, Ampere1BUnitB]>;
+def Ampere1BUnitXY : ProcResGroup<[Ampere1BUnitX, Ampere1BUnitY]>;
+
+//===----------------------------------------------------------------------===//
+// Define customized scheduler read/write types specific to the Ampere-1.
+
+def Ampere1BWrite_1cyc_1A : SchedWriteRes<[Ampere1BUnitA]> {
+ let Latency = 1;
+ let NumMicroOps = 1;
+}
+
+def Ampere1BWrite_1cyc_2A : SchedWriteRes<[Ampere1BUnitA, Ampere1BUnitA]> {
+ let Latency = 1;
+ let NumMicroOps = 2;
+}
+
+def Ampere1BWrite_1cyc_1B : SchedWriteRes<[Ampere1BUnitB]> {
+ let Latency = 1;
+ let NumMicroOps = 1;
+}
+
+def Ampere1BWrite_1cyc_1BS : SchedWriteRes<[Ampere1BUnitBS]> {
+ let Latency = 1;
+ let NumMicroOps = 1;
+}
+
+def Ampere1BWrite_1cyc_1BS_1B : SchedWriteRes<[Ampere1BUnitBS, Ampere1BUnitB]> {
+ let Latency = 1;
+ let NumMicroOps = 2;
+}
+
+def Ampere1BWrite_1cyc_1AB : SchedWriteRes<[Ampere1BUnitAB]> {
+ let Latency = 1;
+ let NumMicroOps = 1;
+}
+
+def Ampere1BWrite_1cyc_1AB_1A : SchedWriteRes<[Ampere1BUnitAB, Ampere1BUnitA]> {
+ let Latency = 1;
+ let NumMicroOps = 2;
+}
+
+def Ampere1BWrite_1cyc_1L : SchedWriteRes<[Ampere1BUnitL]> {
+ let Latency = 1;
+ let NumMicroOps = 1;
+}
+
+def Ampere1BWrite_1cyc_1S : SchedWriteRes<[Ampere1BUnitS]> {
+ let Latency = 1;
+ let NumMicroOps = 1;
+}
+
+def Ampere1BWrite_1cyc_2S : SchedWriteRes<[Ampere1BUnitS, Ampere1BUnitS]> {
+ let Latency = 1;
+ let NumMicroOps = 2;
+}
+
+def Ampere1BWrite_2cyc_1Y : SchedWriteRes<[Ampere1BUnitY]> {
+ let Latency = 2;
+ let NumMicroOps = 1;
+}
+
+def Ampere1BWrite_2cyc_2AB : SchedWriteRes<[Ampere1BUnitAB, Ampere1BUnitAB]> {
+ let Latency = 2;
+ let NumMicroOps = 2;
+}
+
+def Ampere1BWrite_2cyc_1B_1AB : SchedWriteRes<[Ampere1BUnitB, Ampere1BUnitAB]> {
+ let Latency = 2;
+ let NumMicroOps = 2;
+}
+
+def Ampere1BWrite_2cyc_1B_1S : SchedWriteRes<[Ampere1BUnitB, Ampere1BUnitS]> {
+ let Latency = 2;
+ let NumMicroOps = 2;
+}
+
+def Ampere1BWrite_2cyc_1B_1S_1AB : SchedWriteRes<[Ampere1BUnitB,
+ Ampere1BUnitS,
+ Ampere1BUnitAB]> {
+ let Latency = 2;
+ let NumMicroOps = 3;
+}
+
+def Ampere1BWrite_2cyc_1S_2Z : SchedWriteRes<[Ampere1BUnitS,
+ Ampere1BUnitZ,
+ Ampere1BUnitZ]> {
+ let Latency = 2;
+ let NumMicroOps = 3;
+}
+
+def Ampere1BWrite_2cyc_1XY : SchedWriteRes<[Ampere1BUnitXY]> {
+ let Latency = 2;
+ let NumMicroOps = 1;
+}
+
+def Ampere1BWrite_2cyc_1S_1Z : SchedWriteRes<[Ampere1BUnitS, Ampere1BUnitZ]> {
+ let Latency = 2;
+ let NumMicroOps = 2;
+}
+
+def Ampere1BWrite_3cyc_1BS : SchedWriteRes<[Ampere1BUnitBS]> {
+ let Latency = 3;
+ let NumMicroOps = 1;
+}
+
+def Ampere1BWrite_3cyc_1L : SchedWriteRes<[Ampere1BUnitL]> {
+ let Latency = 3;
+ let NumMicroOps = 1;
+}
+
+def Ampere1BWrite_3cyc_1X : SchedWriteRes<[Ampere1BUnitX]> {
+ let Latency = 3;
+ let NumMicroOps = 1;
+}
+
+def Ampere1BWrite_3cyc_1XY : SchedWriteRes<[Ampere1BUnitXY]> {
+ let Latency = 3;
+ let NumMicroOps = 1;
+}
+
+def Ampere1BWrite_3cyc_1Z : SchedWriteRes<[Ampere1BUnitZ]> {
+ let Latency = 3;
+ let NumMicroOps = 1;
+}
+
+def Ampere1BWrite_3cyc_1S_1Z : SchedWriteRes<[Ampere1BUnitS,
+ Ampere1BUnitZ]> {
+ let Latency = 3;
+ let NumMicroOps = 2;
+}
+
+def Ampere1BWrite_3cyc_1S_2Z : SchedWriteRes<[Ampere1BUnitS,
+ Ampere1BUnitZ, Ampere1BUnitZ]> {
+ let Latency = 3;
+ let NumMicroOps = 3;
+}
+
+def Ampere1BWrite_3cyc_2S_2Z : SchedWriteRes<[Ampere1BUnitS, Ampere1BUnitS,
+ Ampere1BUnitZ, Ampere1BUnitZ]> {
+ let Latency = 3;
+ let NumMicroOps = 4;
+}
+
+def Ampere1BWrite_4cyc_1BS_1AB : SchedWriteRes<[Ampere1BUnitBS, Ampere1BUnitAB]> {
+ let Latency = 4;
+ let NumMicroOps = 2;
+}
+
+def Ampere1BWrite_4cyc_1L : SchedWriteRes<[Ampere1BUnitL]> {
+ let Latency = 4;
+ let NumMicroOps = 1;
+}
+
+def Ampere1BWrite_4cyc_2L : SchedWriteRes<[Ampere1BUnitL, Ampere1BUnitL]> {
+ let Latency = 4;
+ let NumMicroOps = 2;
+}
+
+def Ampere1BWrite_4cyc_1L_1B : SchedWriteRes<[Ampere1BUnitL, Ampere1BUnitB]> {
+ let Latency = 4;
+ let NumMicroOps = 2;
+}
+
+def Ampere1BWrite_4cyc_1X : SchedWriteRes<[Ampere1BUnitX]> {
+ let Latency = 4;
+ let NumMicroOps = 1;
+}
+
+def Ampere1BWrite_4cyc_1XY : SchedWriteRes<[Ampere1BUnitXY]> {
+ let Latency = 4;
+ let NumMicroOps = 1;
+}
+
+def Ampere1BWrite_4cyc_2XY : SchedWriteRes<[Ampere1BUnitXY, Ampere1BUnitXY]> {
+ let Latency = 4;
+ let NumMicroOps = 2;
+}
+
+def Ampere1BWrite_5cyc_1BS : SchedWriteRes<[Ampere1BUnitBS]> {
+ let Latency = 5;
+ let NumMicroOps = 1;
+}
+
+def Ampere1BWrite_4cyc_1XY_1S_1Z : SchedWriteRes<[Ampere1BUnitXY,
+ Ampere1BUnitS,
+ Ampere1BUnitZ]> {
+ let Latency = 4;
+ let NumMicroOps = 3;
+}
+
+def Ampere1BWrite_4cyc_3S_3Z : SchedWriteRes<[Ampere1BUnitS, Ampere1BUnitS,
+ Ampere1BUnitS, Ampere1BUnitZ,
+ Ampere1BUnitZ, Ampere1BUnitZ]> {
+ let Latency = 4;
+ let NumMicroOps = 6;
+}
+
+def Ampere1BWrite_5cyc_4S_4Z : SchedWriteRes<[Ampere1BUnitS, Ampere1BUnitS,
+ Ampere1BUnitS, Ampere1BUnitS,
+ Ampere1BUnitZ, Ampere1BUnitZ,
+ Ampere1BUnitZ, Ampere1BUnitZ]> {
+ let Latency = 5;
+ let NumMicroOps = 8;
+}
+
+def Ampere1BWrite_5cyc_1L_1BS : SchedWriteRes<[Ampere1BUnitL,
+ Ampere1BUnitBS]> {
+ let Latency = 5;
+ let NumMicroOps = 2;
+}
+
+def Ampere1BWrite_5cyc_3L : SchedWriteRes<[Ampere1BUnitL,
+ Ampere1BUnitL,
+ Ampere1BUnitL]> {
+ let Latency = 5;
+ let NumMicroOps = 3;
+}
+
+def Ampere1BWrite_5cyc_4L : SchedWriteRes<[Ampere1BUnitL,
+ Ampere1BUnitL,
+ Ampere1BUnitL,
+ Ampere1BUnitL]> {
+ let Latency = 5;
+ let NumMicroOps = 4;
+}
+
+def Ampere1BWrite_5cyc_1X : SchedWriteRes<[Ampere1BUnitX]> {
+ let Latency = 5;
+ let NumMicroOps = 1;
+}
+
+def Ampere1BWrite_5cyc_2XY_2S_2Z : SchedWriteRes<[Ampere1BUnitXY, Ampere1BUnitXY,
+ Ampere1BUnitS, Ampere1BUnitS,
+ Ampere1BUnitZ, Ampere1BUnitZ]> {
+ let Latency = 5;
+ let NumMicroOps = 6;
+}
+
+def Ampere1BWrite_6cyc_1BS_1A : SchedWriteRes<[Ampere1BUnitBS, Ampere1BUnitA]> {
+ let Latency = 6;
+ let NumMicroOps = 2;
+}
+
+def Ampere1BWrite_6cyc_1BS_2A : SchedWriteRes<[Ampere1BUnitBS, Ampere1BUnitA,
+ Ampere1BUnitA]> {
+ let Latency = 6;
+ let NumMicroOps = 3;
+}
+
+def Ampere1BWrite_6cyc_1L_1XY : SchedWriteRes<[Ampere1BUnitL, Ampere1BUnitXY]> {
+ let Latency = 6;
+ let NumMicroOps = 2;
+}
+
+def Ampere1BWrite_6cyc_2L_2XY : SchedWriteRes<[Ampere1BUnitL, Ampere1BUnitL,
+ Ampere1BUnitXY, Ampere1BUnitXY]> {
+ let Latency = 6;
+ let NumMicroOps = 4;
+}
+
+def Ampere1BWrite_6cyc_1X : SchedWriteRes<[Ampere1BUnitX]> {
+ let Latency = 6;
+ let NumMicroOps = 2;
+}
+
+def Ampere1BWrite_6cyc_2XY : SchedWriteRes<[Ampere1BUnitXY, Ampere1BUnitXY]> {
+ let Latency = 6;
+ let NumMicroOps = 2;
+}
+
+def Ampere1BWrite_6cyc_3XY : SchedWriteRes<[Ampere1BUnitXY, Ampere1BUnitXY,
+ Ampere1BUnitXY]> {
+ let Latency = 6;
+ let NumMicroOps = 3;
+}
+
+def Ampere1BWrite_6cyc_2XY_2S_2Z : SchedWriteRes<[Ampere1BUnitXY, Ampere1BUnitXY,
+ Ampere1BUnitS, Ampere1BUnitS,
+ Ampere1BUnitZ, Ampere1BUnitZ]> {
+ let Latency = 6;
+ let NumMicroOps = 6;
+}
+
+def Ampere1BWrite_6cyc_3XY_3S_3Z : SchedWriteRes<[Ampere1BUnitXY, Ampere1BUnitXY, Ampere1BUnitXY,
+ Ampere1BUnitS, Ampere1BUnitS, Ampere1BUnitS,
+ Ampere1BUnitZ, Ampere1BUnitZ, Ampere1BUnitZ]> {
+ let Latency = 6;
+ let NumMicroOps = 9;
+}
+
+def Ampere1BWrite_7cyc_1BS_1XY : SchedWriteRes<[Ampere1BUnitBS, Ampere1BUnitXY]> {
+ let Latency = 7;
+ let NumMicroOps = 2;
+}
+
+def Ampere1BWrite_7cyc_1XY_1Z : SchedWriteRes<[Ampere1BUnitXY, Ampere1BUnitZ]> {
+ let Latency = 7;
+ let NumMicroOps = 2;
+}
+
+def Ampere1BWrite_7cyc_1X_1Z : SchedWriteRes<[Ampere1BUnitX, Ampere1BUnitZ]> {
+ let Latency = 7;
+ let NumMicroOps = 2;
+}
+
+def Ampere1BWrite_7cyc_3L_3XY : SchedWriteRes<[Ampere1BUnitL, Ampere1BUnitL,
+ Ampere1BUnitL, Ampere1BUnitXY,
+ Ampere1BUnitXY, Ampere1BUnitXY]> {
+ let Latency = 7;
+ let NumMicroOps = 6;
+}
+
+def Ampere1BWrite_7cyc_4L_4XY : SchedWriteRes<[Ampere1BUnitL, Ampere1BUnitL,
+ Ampere1BUnitL, Ampere1BUnitL,
+ Ampere1BUnitXY, Ampere1BUnitXY,
+ Ampere1BUnitXY, Ampere1BUnitXY]> {
+ let Latency = 7;
+ let NumMicroOps = 8;
+}
+
+def Ampere1BWrite_7cyc_4XY_4S_4Z : SchedWriteRes<[Ampere1BUnitXY, Ampere1BUnitXY,
+ Ampere1BUnitXY, Ampere1BUnitXY,
+ Ampere1BUnitS, Ampere1BUnitS,
+ Ampere1BUnitS, Ampere1BUnitS,
+ Ampere1BUnitZ, Ampere1BUnitZ,
+ Ampere1BUnitZ, Ampere1BUnitZ]> {
+ let Latency = 7;
+ let NumMicroOps = 12;
+}
+
+def Ampere1BWrite_8cyc_1BS_1L : SchedWriteRes<[Ampere1BUnitBS, Ampere1BUnitL]> {
+ let Latency = 8;
+ let NumMicroOps = 2;
+}
+
+def Ampere1BWrite_8cyc_1BS_1XY : SchedWriteRes<[Ampere1BUnitBS, Ampere1BUnitXY]> {
+ let Latency = 8;
+ let NumMicroOps = 2;
+}
+
+def Ampere1BWrite_8cyc_2L_3XY : SchedWriteRes<[Ampere1BUnitL, Ampere1BUnitL,
+ Ampere1BUnitXY, Ampere1BUnitXY,
+ Ampere1BUnitXY]> {
+ let Latency = 8;
+ let NumMicroOps = 5;
+}
+
+def Ampere1BWrite_8cyc_3L_3XY : SchedWriteRes<[Ampere1BUnitL, Ampere1BUnitL,
+ Ampere1BUnitL, Ampere1BUnitXY,
+ Ampere1BUnitXY, Ampere1BUnitXY]> {
+ let Latency = 8;
+ let NumMicroOps = 6;
+}
+
+def Ampere1BWrite_8cyc_4L_4XY : SchedWriteRes<[Ampere1BUnitL, Ampere1BUnitL,
+ Ampere1BUnitL, Ampere1BUnitL,
+ Ampere1BUnitXY, Ampere1BUnitXY,
+ Ampere1BUnitXY, Ampere1BUnitXY]> {
+ let Latency = 8;
+ let NumMicroOps = 8;
+}
+
+def Ampere1BWrite_8cyc_2XY : SchedWriteRes<[Ampere1BUnitXY, Ampere1BUnitXY]> {
+ let Latency = 8;
+ let NumMicroOps = 2;
+}
+
+def Ampere1BWrite_8cyc_4XY : SchedWriteRes<[Ampere1BUnitXY, Ampere1BUnitXY,
+ Ampere1BUnitXY, Ampere1BUnitXY]> {
+ let Latency = 8;
+ let NumMicroOps = 4;
+}
+
+def Ampere1BWrite_9cyc_6XY_4S_4Z : SchedWriteRes<[Ampere1BUnitXY, Ampere1BUnitXY,
+ Ampere1BUnitXY, Ampere1BUnitXY,
+ Ampere1BUnitXY, Ampere1BUnitXY,
+ Ampere1BUnitS, Ampere1BUnitS,
+ Ampere1BUnitS, Ampere1BUnitS,
+ Ampere1BUnitZ, Ampere1BUnitZ,
+ Ampere1BUnitZ, Ampere1BUnitZ]> {
+ let Latency = 9;
+ let NumMicroOps = 14;
+}
+
+def Ampere1BWrite_9cyc_1A_1BS_1X : SchedWriteRes<[Ampere1BUnitA, Ampere1BUnitBS, Ampere1BUnitX]> {
+ let Latency = 9;
+ let NumMicroOps = 3;
+}
+
+def Ampere1BWrite_9cyc_1A_1BS_1XY : SchedWriteRes<[Ampere1BUnitA, Ampere1BUnitBS, Ampere1BUnitXY]> {
+ let Latency = 9;
+ let NumMicroOps = 3;
+}
+
+def Ampere1BWrite_9cyc_3L_3XY : SchedWriteRes<[Ampere1BUnitL, Ampere1BUnitL,
+ Ampere1BUnitL, Ampere1BUnitXY,
+ Ampere1BUnitXY, Ampere1BUnitXY]> {
+ let Latency = 9;
+ let NumMicroOps = 6;
+}
+
+def Ampere1BWrite_9cyc_1X : SchedWriteRes<[Ampere1BUnitX]> {
+ let Latency = 9;
+ let NumMicroOps = 1;
+}
+
+def Ampere1BWrite_9cyc_3XY : SchedWriteRes<[Ampere1BUnitXY, Ampere1BUnitXY, Ampere1BUnitXY]> {
+ let Latency = 9;
+ let NumMicroOps = 3;
+}
+
+def Ampere1BWrite_10cyc_4L_8XY : SchedWriteRes<[Ampere1BUnitL, Ampere1BUnitL,
+ Ampere1BUnitL, Ampere1BUnitL,
+ Ampere1BUnitXY, Ampere1BUnitXY,
+ Ampere1BUnitXY, Ampere1BUnitXY]> {
+ let Latency = 10;
+ let NumMicroOps = 12;
+}
+
+def Ampere1BWrite_11cyc_1BS_2XY : SchedWriteRes<[Ampere1BUnitBS, Ampere1BUnitXY, Ampere1BUnitXY]> {
+ let Latency = 11;
+ let NumMicroOps = 3;
+}
+
+def Ampere1BWrite_11cyc_4L_8XY : SchedWriteRes<[Ampere1BUnitL, Ampere1BUnitL,
+ Ampere1BUnitL, Ampere1BUnitL,
+ Ampere1BUnitXY, Ampere1BUnitXY,
+ Ampere1BUnitXY, Ampere1BUnitXY]> {
+ let Latency = 11;
+ let NumMicroOps = 12;
+}
+
+def Ampere1BWrite_12cyc_1X : SchedWriteRes<[Ampere1BUnitX]> {
+ let Latency = 12;
+ let NumMicroOps = 1;
+}
+
+def Ampere1BWrite_13cyc_1BS_1X : SchedWriteRes<[Ampere1BUnitBS, Ampere1BUnitX]> {
+ let Latency = 13;
+ let NumMicroOps = 2;
+}
+
+def Ampere1BWrite_17cyc_1X : SchedWriteRes<[Ampere1BUnitX]> {
+ let Latency = 17;
+ let NumMicroOps = 1;
+}
+
+def Ampere1BWrite_19cyc_2BS_1X : SchedWriteRes<[Ampere1BUnitBS,
+ Ampere1BUnitBS,
+ Ampere1BUnitX]> {
+ let Latency = 13;
+ let NumMicroOps = 3;
+}
+
+def Ampere1BWrite_19cyc_1X : SchedWriteRes<[Ampere1BUnitX]> {
+ let Latency = 19;
+ let NumMicroOps = 1;
+}
+
+def Ampere1BWrite_21cyc_1X : SchedWriteRes<[Ampere1BUnitX]> {
+ let Latency = 21;
+ let NumMicroOps = 1;
+}
+
+def Ampere1BWrite_33cyc_1X : SchedWriteRes<[Ampere1BUnitX]> {
+ let Latency = 33;
+ let NumMicroOps = 1;
+}
+
+def Ampere1BWrite_39cyc_1X : SchedWriteRes<[Ampere1BUnitX]> {
+ let Latency = 39;
+ let NumMicroOps = 1;
+}
+
+def Ampere1BWrite_63cyc_1X : SchedWriteRes<[Ampere1BUnitX]> {
+ let Latency = 63;
+ let NumMicroOps = 1;
+}
+
+// For basic arithmetic, we have more flexibility for short shifts (LSL shift <= 4),
+// which are a single uop, and for extended registers, which have full flexibility
+// across Unit A or B for both uops.
+def Ampere1BWrite_Arith : SchedWriteVariant<[
+ SchedVar<RegExtendedPred, [Ampere1BWrite_2cyc_2AB]>,
+ SchedVar<IsCheapLSL, [Ampere1BWrite_1cyc_1AB]>,
+ SchedVar<NoSchedPred, [Ampere1BWrite_2cyc_1B_1AB]>]>;
+
+def Ampere1BWrite_ArithFlagsetting : SchedWriteVariant<[
+ SchedVar<RegExtendedPred, [Ampere1BWrite_2cyc_2AB]>,
+ SchedVar<IsCheapLSL, [Ampere1BWrite_1cyc_1AB]>,
+ SchedVar<NoSchedPred, [Ampere1BWrite_2cyc_1B_1AB]>]>;
+
+//===----------------------------------------------------------------------===//
+// Map the target-defined scheduler read/write resources and latencies for Ampere-1.
+// This provides a coarse model, which is then specialised below.
+
+def : WriteRes<WriteImm, [Ampere1BUnitAB]>; // MOVN, MOVZ
+def : WriteRes<WriteI, [Ampere1BUnitAB]>; // ALU
+def : WriteRes<WriteISReg, [Ampere1BUnitB, Ampere1BUnitAB]> {
+ let Latency = 2;
+ let NumMicroOps = 2;
+} // ALU of Shifted-Reg
+def : WriteRes<WriteIEReg, [Ampere1BUnitAB, Ampere1BUnitAB]> {
+ let Latency = 2;
+ let NumMicroOps = 2;
+} // ALU of Extended-Reg
+def : WriteRes<WriteExtr, [Ampere1BUnitB]>; // EXTR shifts a reg pair
+def : WriteRes<WriteIS, [Ampere1BUnitB]>; // Shift/Scale
+def : WriteRes<WriteID32, [Ampere1BUnitBS, Ampere1BUnitX]> {
+ let Latency = 13;
+} // 32-bit Divide
+def : WriteRes<WriteID64, [Ampere1BUnitBS, Ampere1BUnitX]> {
+ let Latency = 19;
+} // 64-bit Divide
+def : WriteRes<WriteIM32, [Ampere1BUnitBS]> {
+ let Latency = 3;
+} // 32-bit Multiply
+def : WriteRes<WriteIM64, [Ampere1BUnitBS, Ampere1BUnitAB]> {
+ let Latency = 3;
+} // 64-bit Multiply
+def : WriteRes<WriteBr, [Ampere1BUnitA]>;
+def : WriteRes<WriteBrReg, [Ampere1BUnitA, Ampere1BUnitA]>;
+def : WriteRes<WriteLD, [Ampere1BUnitL]> {
+ let Latency = 3;
+} // Load from base addr plus immediate offset
+def : WriteRes<WriteST, [Ampere1BUnitS]> {
+ let Latency = 1;
+} // Store to base addr plus immediate offset
+def : WriteRes<WriteSTP, [Ampere1BUnitS, Ampere1BUnitS]> {
+ let Latency = 1;
+ let NumMicroOps = 1;
+} // Store a register pair.
+def : WriteRes<WriteAdr, [Ampere1BUnitAB]>;
+def : WriteRes<WriteLDIdx, [Ampere1BUnitAB, Ampere1BUnitS]> {
+ let Latency = 3;
+ let NumMicroOps = 1;
+} // Load from a register index (maybe scaled).
+def : WriteRes<WriteSTIdx, [Ampere1BUnitS, Ampere1BUnitS]> {
+ let Latency = 1;
+ let NumMicroOps = 2;
+} // Store to a register index (maybe scaled).
+def : WriteRes<WriteF, [Ampere1BUnitXY]> {
+ let Latency = 2;
+} // General floating-point ops.
+def : WriteRes<WriteFCmp, [Ampere1BUnitX]> {
+ let Latency = 3;
+} // Floating-point compare.
+def : WriteRes<WriteFCvt, [Ampere1BUnitXY]> {
+ let Latency = 3;
+} // Float conversion.
+def : WriteRes<WriteFCopy, [Ampere1BUnitXY]> {
+} // Float-int register copy.
+def : WriteRes<WriteFImm, [Ampere1BUnitXY]> {
+ let Latency = 2;
+} // Float-int register copy.
+def : WriteRes<WriteFMul, [Ampere1BUnitXY]> {
+ let Latency = 4;
+} // Floating-point multiply.
+def : WriteRes<WriteFDiv, [Ampere1BUnitXY]> {
+ let Latency = 19;
+} // Floating-point division.
+def : WriteRes<WriteVd, [Ampere1BUnitXY]> {
+ let Latency = 3;
+} // 64bit Vector D ops.
+def : WriteRes<WriteVq, [Ampere1BUnitXY]> {
+ let Latency = 3;
+} // 128bit Vector Q ops.
+def : WriteRes<WriteVLD, [Ampere1BUnitL, Ampere1BUnitL]> {
+ let Latency = 4;
+} // Vector loads.
+def : WriteRes<WriteVST, [Ampere1BUnitS, Ampere1BUnitZ]> {
+ let Latency = 2;
+} // Vector stores.
+
+def : WriteRes<WriteAtomic, []> { let Unsupported = 1; }
+
+def : WriteRes<WriteSys, []> { let Latency = 1; }
+def : WriteRes<WriteBarrier, []> { let Latency = 1; }
+def : WriteRes<WriteHint, []> { let Latency = 1; }
+
+def : WriteRes<WriteLDHi, []> {
+ let Latency = 3;
+} // The second register of a load-pair: LDP,LDPSW,LDNP,LDXP,LDAXP
+
+// Forwarding logic.
+def : ReadAdvance<ReadI, 0>;
+def : ReadAdvance<ReadISReg, 0>;
+def : ReadAdvance<ReadIEReg, 0>;
+def : ReadAdvance<ReadIM, 0>;
+def : ReadAdvance<ReadIMA, 1, [WriteIM32, WriteIM64]>;
+def : ReadAdvance<ReadID, 0>;
+def : ReadAdvance<ReadExtrHi, 0>;
+def : ReadAdvance<ReadST, 0>;
+def : ReadAdvance<ReadAdrBase, 0>;
+def : ReadAdvance<ReadVLD, 0>;
+
+//===----------------------------------------------------------------------===//
+// Specialising the scheduling model further for Ampere-1B.
+
+def : InstRW<[Ampere1BWrite_1cyc_1AB], (instrs COPY)>;
+
+// Branch instructions
+def : InstRW<[Ampere1BWrite_1cyc_1A], (instrs Bcc, BL, RET)>;
+def : InstRW<[Ampere1BWrite_1cyc_1A],
+ (instrs CBZW, CBZX, CBNZW, CBNZX, TBZW, TBZX, TBNZW, TBNZX)>;
+def : InstRW<[Ampere1BWrite_1cyc_2A], (instrs BLR)>;
+
+// Common Short Sequence Compression (CSSC)
+def : InstRW<[Ampere1BWrite_1cyc_1AB], (instregex "^ABS[WX]")>;
+def : InstRW<[Ampere1BWrite_3cyc_1BS], (instregex "^CNT[WX]")>;
+def : InstRW<[Ampere1BWrite_1cyc_1B], (instregex "^CTZ[WX]")>;
+def : InstRW<[Ampere1BWrite_1cyc_1AB_1A], (instregex "^[SU](MAX|MIN)[WX]")>;
+
+// Cryptography instructions
+// -- AES encryption/decryption
+def : InstRW<[Ampere1BWrite_2cyc_1XY], (instregex "^AES[DE]")>;
+def : InstRW<[Ampere1BWrite_2cyc_1XY], (instregex "^AESI?MC")>;
+// -- Polynomial multiplication
+def : InstRW<[Ampere1BWrite_2cyc_1XY], (instregex "^PMUL", "^PMULL")>;
+// -- SHA-256 hash
+def : InstRW<[Ampere1BWrite_4cyc_1X], (instregex "^SHA256(H|H2)")>;
+// -- SHA-256 schedule update
+def : InstRW<[Ampere1BWrite_2cyc_1Y], (instregex "^SHA256SU[01]")>;
+// -- SHA-3 instructions
+def : InstRW<[Ampere1BWrite_2cyc_1XY],
+ (instregex "^BCAX", "^EOR3", "^RAX1", "^XAR")>;
+// -- SHA-512 hash
+def : InstRW<[Ampere1BWrite_4cyc_1X], (instregex "^SHA512(H|H2)")>;
+// -- SHA-512 schedule update
+def : InstRW<[Ampere1BWrite_2cyc_1Y], (instregex "^SHA512SU[01]")>;
+// -- SHA1 choose/majority/parity
+def : InstRW<[Ampere1BWrite_4cyc_1X], (instregex "^SHA1[CMP]")>;
+// -- SHA1 hash/schedule update
+def : InstRW<[Ampere1BWrite_2cyc_1Y], (instregex "^SHA1SU[01]")>;
+def : InstRW<[Ampere1BWrite_2cyc_1Y], (instregex "^SHA1H")>;
+// -- SM3 hash
+def : InstRW<[Ampere1BWrite_2cyc_1XY],
+ (instregex "^SM3PARTW[12]$", "^SM3SS1$", "^SM3TT[12][AB]$")>;
+def : InstRW<[Ampere1BWrite_4cyc_1X], (instrs SM4E, SM4ENCKEY)>;
+
+// FP and vector load instructions
+// -- Load 1-element structure to one/all lanes
+// ---- all lanes
+def : InstRW<[Ampere1BWrite_6cyc_1L_1XY],
+ (instregex "^LD1Rv(8b|4h|2s|16b|8h|4s|2d)")>;
+// ---- one lane
+def : InstRW<[Ampere1BWrite_6cyc_1L_1XY],
+ (instregex "^LD1i(8|16|32|64)")>;
+// -- Load 1-element structure to one/all lanes, 1D size
+def : InstRW<[Ampere1BWrite_4cyc_1L],
+ (instregex "^LD1Rv1d")>;
+// -- Load 1-element structures to 1 register
+def : InstRW<[Ampere1BWrite_4cyc_1L],
+ (instregex "^LD1Onev(8b|4h|2s|1d|16b|8h|4s|2d)")>;
+// -- Load 1-element structures to 2 registers
+def : InstRW<[Ampere1BWrite_4cyc_2L],
+ (instregex "^LD1Twov(8b|4h|2s|1d|16b|8h|4s|2d)")>;
+// -- Load 1-element structures to 3 registers
+def : InstRW<[Ampere1BWrite_5cyc_3L],
+ (instregex "^LD1Threev(8b|4h|2s|1d|16b|8h|4s|2d)")>;
+// -- Load 1-element structures to 4 registers
+def : InstRW<[Ampere1BWrite_5cyc_4L],
+ (instregex "^LD1Fourv(8b|4h|2s|1d|16b|8h|4s|2d)")>;
+// -- Load 2-element structure to all lanes of 2 registers, 1D size
+def : InstRW<[Ampere1BWrite_4cyc_2L],
+ (instregex "^LD2Rv1d")>;
+// -- Load 2-element structure to all lanes of 2 registers, other sizes
+def : InstRW<[Ampere1BWrite_6cyc_2L_2XY],
+ (instregex "^LD2Rv(8b|4h|2s|16b|8h|4s|2d)")>;
+// -- Load 2-element structure to one lane of 2 registers
+def : InstRW<[Ampere1BWrite_6cyc_2L_2XY],
+ (instregex "^LD2i(8|16|32|64)")>;
+// -- Load 2-element structures to 2 registers, 16B/8H/4S/2D size
+def : InstRW<[Ampere1BWrite_6cyc_2L_2XY],
+ (instregex "^LD2Twov(16b|8h|4s|2d)")>;
+// -- Load 2-element structures to 2 registers, 8B/4H/2S size
+def : InstRW<[Ampere1BWrite_8cyc_2L_3XY],
+ (instregex "^LD2Twov(8b|4h|2s)")>;
+// -- Load 3-element structure to all lanes of 3 registers, 1D size
+def : InstRW<[Ampere1BWrite_5cyc_3L],
+ (instregex "^LD3Rv1d")>;
+// -- Load 3-element structure to all lanes of 3 registers, other sizes
+def : InstRW<[Ampere1BWrite_7cyc_3L_3XY],
+ (instregex "^LD3Rv(8b|4h|2s|16b|8h|4s|2d)")>;
+// -- Load 3-element structure to one lane of 3 registers
+def : InstRW<[Ampere1BWrite_7cyc_3L_3XY],
+ (instregex "^LD3i(8|16|32|64)")>;
+// -- Load 3-element structures to 3 registers, 16B/8H/4S sizes
+def : InstRW<[Ampere1BWrite_8cyc_3L_3XY],
+ (instregex "^LD3Threev(16b|8h|4s)")>;
+// -- Load 3-element structures to 3 registers, 2D size
+def : InstRW<[Ampere1BWrite_7cyc_3L_3XY],
+ (instregex "^LD3Threev2d")>;
+// -- Load 3-element structures to 3 registers, 8B/4H/2S sizes
+def : InstRW<[Ampere1BWrite_9cyc_3L_3XY],
+ (instregex "^LD3Threev(8b|4h|2s)")>;
+// -- Load 4-element structure to all lanes of 4 registers, 1D size
+def : InstRW<[Ampere1BWrite_5cyc_4L],
+ (instregex "^LD4Rv1d")>;
+// -- Load 4-element structure to all lanes of 4 registers, other sizes
+def : InstRW<[Ampere1BWrite_7cyc_4L_4XY],
+ (instregex "^LD4Rv(8b|4h|2s|16b|8h|4s|2d)")>;
+// -- Load 4-element structure to one lane of 4 registers
+def : InstRW<[Ampere1BWrite_7cyc_4L_4XY],
+ (instregex "^LD4i(8|16|32|64)")>;
+// -- Load 4-element structures to 4 registers, 2D size
+def : InstRW<[Ampere1BWrite_8cyc_4L_4XY],
+ (instregex "^LD4Fourv2d")>;
+// -- Load 4-element structures to 4 registers, 2S size
+def : InstRW<[Ampere1BWrite_11cyc_4L_8XY],
+ (instregex "^LD4Fourv2s")>;
+// -- Load 4-element structures to 4 registers, other sizes
+def : InstRW<[Ampere1BWrite_10cyc_4L_8XY],
+ (instregex "^LD4Fourv(8b|4h|16b|8h|4s)")>;
+// -- Load pair, Q-form
+def : InstRW<[Ampere1BWrite_4cyc_2L], (instregex "LDN?PQ")>;
+// -- Load pair, S/D-form
+def : InstRW<[Ampere1BWrite_5cyc_1L_1BS], (instregex "LDN?P(S|D)")>;
+// -- Load register
+def : InstRW<[Ampere1BWrite_4cyc_1L], (instregex "LDU?R[BHSDQ]i")>;
+// -- Load register, sign-extended register
+def : InstRW<[Ampere1BWrite_4cyc_1L], (instregex "LDR[BHSDQ]ro(W|X)")>;
+
+// FP and vector store instructions
+// -- Store 1-element structure from one lane of 1 register
+def : InstRW<[Ampere1BWrite_4cyc_1XY_1S_1Z],
+ (instregex "^ST1i(8|16|32|64)")>;
+// -- Store 1-element structures from 1 register
+def : InstRW<[Ampere1BWrite_2cyc_1S_1Z],
+ (instregex "^ST1Onev(8b|4h|2s|1d|16b|8h|4s|2d)")>;
+// -- Store 1-element structures from 2 registers
+def : InstRW<[Ampere1BWrite_3cyc_2S_2Z],
+ (instregex "^ST1Twov(8b|4h|2s|1d|16b|8h|4s|2d)")>;
+// -- Store 1-element structures from 3 registers
+def : InstRW<[Ampere1BWrite_4cyc_3S_3Z],
+ (instregex "^ST1Threev(8b|4h|2s|1d|16b|8h|4s|2d)")>;
+// -- Store 1-element structures from 4 registers
+def : InstRW<[Ampere1BWrite_5cyc_4S_4Z],
+ (instregex "^ST1Fourv(8b|4h|2s|1d|16b|8h|4s|2d)")>;
+// -- Store 2-element structure from one lane of 2 registers
+def : InstRW<[Ampere1BWrite_5cyc_2XY_2S_2Z],
+ (instregex "^ST2i(8|16|32|64)")>;
+// -- Store 2-element structures from 2 registers, 16B/8H/4S/2D sizes
+def : InstRW<[Ampere1BWrite_5cyc_2XY_2S_2Z],
+ (instregex "^ST2Twov(16b|8h|4s|2d)")>;
+// -- Store 2-element structures from 2 registers, 8B/4H/2S sizes
+def : InstRW<[Ampere1BWrite_6cyc_2XY_2S_2Z],
+ (instregex "^ST2Twov(8b|4h|2s)")>;
+// -- Store 3-element structure from one lane of 3 registers
+def : InstRW<[Ampere1BWrite_6cyc_3XY_3S_3Z],
+ (instregex "^ST3i(8|16|32|64)")>;
+// -- Store 3-element structures from 3 registers
+def : InstRW<[Ampere1BWrite_6cyc_3XY_3S_3Z],
+ (instregex "^ST3Threev(8b|4h|2s|1d|16b|8h|4s|2d)")>;
+// -- Store 4-element structure from one lane of 4 registers
+def : InstRW<[Ampere1BWrite_7cyc_4XY_4S_4Z],
+ (instregex "^ST4i(8|16|32|64)")>;
+// -- Store 4-element structures from 4 registers, 16B/8H/4S sizes
+def : InstRW<[Ampere1BWrite_7cyc_4XY_4S_4Z],
+ (instregex "^ST4Fourv(16b|8h|4s)")>;
+// -- Store 4-element structures from 4 registers, 2D sizes
+def : InstRW<[Ampere1BWrite_7cyc_4XY_4S_4Z],
+ (instregex "^ST4Fourv2d")>;
+// -- Store 4-element structures from 4 registers, 8B/4H/2S sizes
+def : InstRW<[Ampere1BWrite_9cyc_6XY_4S_4Z],
+ (instregex "^ST4Fourv(8b|4h|2s)")>;
+// -- Store pair, Q-form
+def : InstRW<[Ampere1BWrite_3cyc_2S_2Z], (instregex "^STN?PQ")>;
+// -- Store pair, S/D-form
+def : InstRW<[Ampere1BWrite_3cyc_2S_2Z], (instregex "^STN?P[SD]")>;
+// -- Store register
+def : InstRW<[Ampere1BWrite_2cyc_1S_1Z], (instregex "^STU?R[BHSDQ](ui|i)")>;
+// -- Store register, sign-extended register offset
+def : InstRW<[Ampere1BWrite_2cyc_1S_1Z], (instregex "^STR[BHSDQ]ro[XW]")>;
+
+// FP data processing, bfloat16 format
+def : InstRW<[Ampere1BWrite_3cyc_1XY], (instrs BFCVT)>;
+def : InstRW<[Ampere1BWrite_8cyc_2XY], (instrs BFCVTN, BFCVTN2)>;
+def : InstRW<[Ampere1BWrite_2cyc_1XY], (instregex "^BFDOTv", "^BF16DOT")>;
+def : InstRW<[Ampere1BWrite_3cyc_1XY], (instrs BFMMLA)>;
+def : InstRW<[Ampere1BWrite_4cyc_1XY], (instregex "^BFMLAL")>;
+
+// FP data processing, scalar/vector, half precision
+def : InstRW<[Ampere1BWrite_3cyc_1XY], (instregex "^F(ABD|ABS)v.[fi]16")>;
+def : InstRW<[Ampere1BWrite_3cyc_1XY],
+ (instregex "^F(ADD|ADDP|CADD|NEG|NMUL|SUB)v.[fi]16")>;
+def : InstRW<[Ampere1BWrite_3cyc_1XY],
+ (instregex "^F(AC|CM)(EQ|GE|GT|LE|LT)v.[fi]16")>;
+def : InstRW<[Ampere1BWrite_3cyc_1XY],
+ (instregex "^F(AC|CM)(EQ|GE|GT|LE|LT)16")>;
+def : InstRW<[Ampere1BWrite_3cyc_1X],
+ (instregex "^FCMPE?H")>;
+def : InstRW<[Ampere1BWrite_9cyc_1A_1BS_1X],
+ (instregex "^FCCMPE?H")>;
+def : InstRW<[Ampere1BWrite_9cyc_1A_1BS_1XY],
+ (instregex "^FCSELH")>;
+def : InstRW<[Ampere1BWrite_3cyc_1XY], (instregex "^FCVT[AMNPZ][SU]v.[if]16")>;
+// Convert FP to integer, H-form
+def : InstRW<[Ampere1BWrite_3cyc_1XY], (instregex "^[SUd]CVTFv.[fi]16")>;
+// Convert to FP from GPR, H-form
+def : InstRW<[Ampere1BWrite_8cyc_1BS_1XY], (instregex "^[SU]CVTF_ZPmZ_[DSH]toH$")>;
+// Convert to FP from GPR, fixed-point, H-form
+def : InstRW<[Ampere1BWrite_11cyc_1BS_2XY], (instregex "^[SU]CVTF[SU][WX]Hri$")>;
+def : InstRW<[Ampere1BWrite_9cyc_1X], (instrs FDIVHrr)>;
+def : InstRW<[Ampere1BWrite_17cyc_1X], (instregex "^FDIVv.[if]16")>;
+def : InstRW<[Ampere1BWrite_3cyc_1XY], (instregex "^F(MAX|MIN)(NM)?P?v.[if]16")>;
+def : InstRW<[Ampere1BWrite_6cyc_2XY], (instregex "^F(MAX|MIN)(NM)?Vv4[if]16")>;
+def : InstRW<[Ampere1BWrite_9cyc_3XY], (instregex "^F(MAX|MIN)(NM)?Vv8[if]16")>;
+def : InstRW<[Ampere1BWrite_4cyc_1XY], (instregex "^FMULX?v.[if]16")>;
+def : InstRW<[Ampere1BWrite_4cyc_1XY], (instrs FMULX16)>;
+def : InstRW<[Ampere1BWrite_4cyc_1XY], (instregex "^FN?M(ADD|SUB)[H]rrr")>;
+def : InstRW<[Ampere1BWrite_4cyc_1XY], (instregex "^FML[AS]v.[if]16")>;
+def : InstRW<[Ampere1BWrite_3cyc_1XY], (instregex "^FRECPXv.[if]16")>;
+def : InstRW<[Ampere1BWrite_4cyc_1XY], (instregex "^F(RECP|RSQRT)S16")>;
+def : InstRW<[Ampere1BWrite_3cyc_1XY], (instregex "^FRINT[AIMNPXZ]v.[if]16")>;
+// FP square root, H-form
+def : InstRW<[Ampere1BWrite_21cyc_1X], (instrs FSQRTHr)>;
+// FP square root, vector-form, F16
+def : InstRW<[Ampere1BWrite_39cyc_1X], (instregex "^FSQRTv.f16")>;
+
+// FP data processing, scalar/vector, single/double precision
+def : InstRW<[Ampere1BWrite_3cyc_1XY], (instregex "^F(ABD|ABS)v.[fi](32|64)")>;
+def : InstRW<[Ampere1BWrite_3cyc_1XY],
+ (instregex "^F(ADD|ADDP|CADD|NEG|NMUL|SUB)v.[fi](32|64)")>;
+def : InstRW<[Ampere1BWrite_3cyc_1XY],
+ (instregex "^F(AC|CM)(EQ|GE|GT|LE|LT)v.[fi](32|64)")>;
+def : InstRW<[Ampere1BWrite_3cyc_1XY],
+ (instregex "^F(AC|CM)(EQ|GE|GT|LE|LT)(32|64)")>;
+def : InstRW<[Ampere1BWrite_3cyc_1X],
+ (instregex "^FCMPE?(S|D)")>;
+def : InstRW<[Ampere1BWrite_9cyc_1A_1BS_1X],
+ (instregex "^FCCMPE?(S|D)")>;
+def : InstRW<[Ampere1BWrite_9cyc_1A_1BS_1XY],
+ (instregex "^FCSEL(S|D)")>;
+def : InstRW<[Ampere1BWrite_3cyc_1XY], (instregex "^FCVT[AMNPZ][SU]v.[if](32|64)")>;
+// Convert FP to integer, S/D-form
+def : InstRW<[Ampere1BWrite_3cyc_1XY], (instregex "^[SUd]CVTFv.[fi](32|64)")>;
+// Convert to FP from GPR, S/D-form
+def : InstRW<[Ampere1BWrite_8cyc_1BS_1XY], (instregex "^[SU]CVTF_ZPmZ_[DSH]to[DS]$")>;
+// Convert to FP from GPR, fixed-point, S/D-form
+def : InstRW<[Ampere1BWrite_11cyc_1BS_2XY], (instregex "^[SU]CVTF[SU][WX][SD]ri$")>;
+def : InstRW<[Ampere1BWrite_19cyc_1X], (instregex "^FDIVv.[if](64)", "FDIVD")>;
+def : InstRW<[Ampere1BWrite_12cyc_1X], (instregex "^FDIVv.[if](32)", "FDIVS")>;
+def : InstRW<[Ampere1BWrite_3cyc_1XY], (instregex "^F(MAX|MIN)(NM)?P?v.[if](32|64)")>;
+def : InstRW<[Ampere1BWrite_6cyc_2XY], (instregex "^F(MAX|MIN)(NM)?Vv.[if](32|64)")>;
+def : InstRW<[Ampere1BWrite_4cyc_1XY], (instregex "^FMULX?v.[if](32|64)")>;
+def : InstRW<[Ampere1BWrite_4cyc_1XY], (instrs FMULX32, FMULX64)>;
+def : InstRW<[Ampere1BWrite_4cyc_1XY], (instrs FMULSrr, FNMULSrr)>;
+def : InstRW<[Ampere1BWrite_4cyc_1XY], (instrs FMULDrr, FNMULDrr)>;
+def : InstRW<[Ampere1BWrite_4cyc_1XY], (instregex "^FN?M(ADD|SUB)[SD]rrr")>;
+def : InstRW<[Ampere1BWrite_4cyc_1XY], (instregex "^FML[AS]v.[if](32|64)")>;
+def : InstRW<[Ampere1BWrite_3cyc_1XY], (instregex "^FRECPXv.[if](32|64)")>;
+def : InstRW<[Ampere1BWrite_3cyc_1XY], (instregex "^F(RECP|RSQRT)S(32|64)")>;
+def : InstRW<[Ampere1BWrite_3cyc_1XY], (instregex "^FRINT[AIMNPXZ]v.[if](32|64)")>;
+def : InstRW<[Ampere1BWrite_3cyc_1XY], (instregex "^FRINT(32|64)")>;
+def : InstRW<[Ampere1BWrite_63cyc_1X], (instregex "^FSQRTv.f64", "^FSQRTDr")>;
+def : InstRW<[Ampere1BWrite_33cyc_1X], (instregex "^FSQRTv.f32", "^FSQRTSr")>;
+
+// FP miscellaneous instructions
+def : InstRW<[Ampere1BWrite_7cyc_1XY_1Z], (instregex "^FCVT[AMNPZ][SU][SU][XW][HSD]r")>;
+def : InstRW<[Ampere1BWrite_3cyc_1XY], (instregex "^FCVT[HSD]Hr")>;
+def : InstRW<[Ampere1BWrite_3cyc_1XY], (instregex "^FCVT[HSD][SD]r")>;
+def : InstRW<[Ampere1BWrite_3cyc_1XY], (instregex "^FCVTLv")>;
+def : InstRW<[Ampere1BWrite_3cyc_1XY], (instregex "^FCVT(N|XN)v")>;
+def : InstRW<[Ampere1BWrite_7cyc_1X_1Z], (instrs FJCVTZS)>;
+def : InstRW<[Ampere1BWrite_5cyc_1BS], (instregex "^FMOV[HSD][WX]r")>;
+def : InstRW<[Ampere1BWrite_7cyc_1BS_1XY], (instregex "^FMOVDXHighr")>;
+def : InstRW<[Ampere1BWrite_2cyc_1XY], (instregex "^FMOV[HSD][ri]")>;
+def : InstRW<[Ampere1BWrite_5cyc_1X], (instregex "^FMOVXDHighr")>;
+def : InstRW<[Ampere1BWrite_3cyc_1Z], (instregex "^FMOV[WX][HSD]r")>;
+
+// Integer arithmetic and logical instructions
+def : InstRW<[Ampere1BWrite_1cyc_1A],
+ (instregex "ADC(W|X)r", "SBC(W|X)r")>;
+def : InstRW<[Ampere1BWrite_Arith],
+ (instregex "(ADD|AND|BIC|EON|EOR|ORN|ORR|SUB)[WX]r[sx]")>;
+def : InstRW<[Ampere1BWrite_1cyc_1AB],
+ (instregex "(ADD|AND|BIC|EON|EOR|ORN|ORR|SUB)[WX]r[ri]")>;
+def : InstRW<[Ampere1BWrite_ArithFlagsetting],
+ (instregex "(ADD|AND|BIC|SUB)S[WX]r[sx]")>;
+def : InstRW<[Ampere1BWrite_1cyc_1A],
+ (instregex "(ADD|AND|BIC|SUB)S[WX]r[ri]")>;
+def : InstRW<[Ampere1BWrite_1cyc_1A],
+ (instregex "(ADC|SBC)S[WX]r")>;
+def : InstRW<[Ampere1BWrite_1cyc_1A], (instrs RMIF)>;
+def : InstRW<[Ampere1BWrite_1cyc_1A],
+ (instregex "(CCMN|CCMP)(X|W)")>;
+def : InstRW<[Ampere1BWrite_1cyc_1A],
+ (instregex "(CSEL|CSINC|CSINV|CSNEG)(X|W)")>;
+def : InstRW<[Ampere1BWrite_13cyc_1BS_1X], (instrs SDIVWr, UDIVWr)>;
+def : InstRW<[Ampere1BWrite_19cyc_2BS_1X], (instrs SDIVXr, UDIVXr)>;
+def : InstRW<[Ampere1BWrite_3cyc_1BS],
+ (instregex "(S|U)MULHr")>;
+def : InstRW<[Ampere1BWrite_4cyc_1BS_1AB],
+ (instregex "(S|U)?M(ADD|SUB)L?r")>;
+
+// Integer load instructions
+def : InstRW<[Ampere1BWrite_3cyc_1L],
+ (instregex "(LDNP|LDP|LDPSW)(X|W)")>;
+def : InstRW<[Ampere1BWrite_3cyc_1L],
+ (instregex "LDR(B|D|H|Q|S)ui")>;
+def : InstRW<[Ampere1BWrite_3cyc_1L],
+ (instregex "LDR(D|Q|W|X)l")>;
+def : InstRW<[Ampere1BWrite_3cyc_1L],
+ (instregex "LDTR(B|H|W|X)i")>;
+def : InstRW<[Ampere1BWrite_3cyc_1L],
+ (instregex "LDTRS(BW|BX|HW|HX|W)i")>;
+def : InstRW<[Ampere1BWrite_3cyc_1L],
+ (instregex "LDUR(BB|HH|X|W)i")>;
+def : InstRW<[Ampere1BWrite_3cyc_1L],
+ (instregex "LDURS(BW|BX|HW|HX|W)i")>;
+def : InstRW<[Ampere1BWrite_3cyc_1L],
+ (instregex "LDR(HH|SHW|SHX|W|X)ro(W|X)")>;
+def : InstRW<[Ampere1BWrite_1cyc_1L],
+ (instrs PRFMl, PRFUMi, PRFUMi)>;
+def : InstRW<[Ampere1BWrite_1cyc_1L],
+ (instrs PRFMroW, PRFMroX)>;
+
+// Integer miscellaneous instructions
+def : InstRW<[Ampere1BWrite_1cyc_1A], (instrs ADR, ADRP)>;
+def : InstRW<[Ampere1BWrite_1cyc_1B], (instregex "EXTR(W|X)")>;
+def : InstRW<[Ampere1BWrite_1cyc_1B], (instregex "(S|U)?BFM(W|X)")>;
+def : InstRW<[Ampere1BWrite_3cyc_1BS], (instregex "^CRC32C?[BHWX]")>;
+def : InstRW<[Ampere1BWrite_1cyc_1B], (instregex "CLS(W|X)")>;
+def : InstRW<[Ampere1BWrite_1cyc_1A], (instrs SETF8, SETF16)>;
+def : InstRW<[Ampere1BWrite_1cyc_1AB],
+ (instrs MOVKWi, MOVKXi, MOVNWi, MOVNXi, MOVZWi, MOVZXi)>;
+def : InstRW<[Ampere1BWrite_1cyc_1B],
+ (instregex "(RBIT|REV|REV16)(W|X)r", "REV32Xr")>;
+def : InstRW<[Ampere1BWrite_1cyc_1B],
+ (instregex "(ASR|LSL|LSR|ROR)V(W|X)r")>;
+
+// Integer store instructions
+def : InstRW<[Ampere1BWrite_1cyc_2S], (instregex "STNP(X|W)i")>;
+def : InstRW<[Ampere1BWrite_1cyc_2S], (instrs STPXi)>;
+def : InstRW<[Ampere1BWrite_2cyc_1B_1S], (instrs STPWi)>;
+def : InstRW<[Ampere1BWrite_2cyc_1B_1S_1AB], (instregex "STP(W|X)(pre|post)")>;
+def : InstRW<[Ampere1BWrite_1cyc_1S], (instrs STTRBi, STTRHi, STTRWi, STTRXi)>;
+def : InstRW<[Ampere1BWrite_1cyc_1S], (instregex "STUR(BB|HH|X|W)i",
+ "STR(X|W)ui",
+ "STUR(BB|HH|X|W)i")>;
+def : InstRW<[Ampere1BWrite_1cyc_2S], (instrs STRWroX, STRXroX)>;
+def : InstRW<[Ampere1BWrite_1cyc_2S], (instrs STRWroW, STRXroW)>;
+
+// Memory tagging
+
+// Insert Random Tags
+def : InstRW<[Ampere1BWrite_1cyc_1BS_1B], (instrs IRG, IRGstack)>;
+// Load allocation tag
+def : InstRW<[Ampere1BWrite_4cyc_1L_1B], (instrs LDG, LDGM)>;
+// Store allocation tags
+def : InstRW<[Ampere1BWrite_1cyc_1S],
+ (instrs STGi, STGM, STGPreIndex, STGPostIndex)>;
+// Store allocation tags and pair of registers
+def : InstRW<[Ampere1BWrite_1cyc_2S],
+ (instrs STGPi, STGPpre, STGPpost)>;
+// Store allocation tags and zero data
+def : InstRW<[Ampere1BWrite_1cyc_1S],
+ (instrs STZGi, STZGM, STZGPreIndex, STZGPostIndex)>;
+// Store two tags
+def : InstRW<[Ampere1BWrite_1cyc_2S],
+ (instrs ST2Gi, ST2GPreIndex, ST2GPostIndex)>;
+// Store two tags and zero data
+def : InstRW<[Ampere1BWrite_1cyc_2S],
+ (instrs STZ2Gi, STZ2GPreIndex, STZ2GPostIndex)>;
+// Subtract Pointer
+def : InstRW<[Ampere1BWrite_1cyc_1AB], (instrs SUBP)>;
+// Subtract Pointer, flagset
+def : InstRW<[Ampere1BWrite_1cyc_1AB], (instrs SUBPS)>;
+// Insert Tag Mask
+def : InstRW<[Ampere1BWrite_1cyc_1AB], (instrs GMI)>;
+// Arithmetic, immediate to logical address tag
+def : InstRW<[Ampere1BWrite_1cyc_1B], (instrs ADDG, SUBG)>;
+
+// Pointer authentication
+def : InstRW<[Ampere1BWrite_5cyc_1BS], (instregex "^AUT")>;
+def : InstRW<[Ampere1BWrite_6cyc_1BS_1A],
+ (instregex "BRA(A|AZ|B|BZ)", "RETA(A|B)", "ERETA(A|B)")>;
+def : InstRW<[Ampere1BWrite_6cyc_1BS_2A],
+ (instrs BLRAA, BLRAAZ, BLRAB, BLRABZ)>;
+def : InstRW<[Ampere1BWrite_5cyc_1BS], (instregex "^PAC")>;
+def : InstRW<[Ampere1BWrite_8cyc_1BS_1L], (instregex "^LDRA(A|B)")>;
+def : InstRW<[Ampere1BWrite_1cyc_1B], (instrs XPACD, XPACI)>;
+
+// Vector integer instructions
+// -- absolute difference
+def : InstRW<[Ampere1BWrite_2cyc_1XY],
+ (instregex "^SABAv", "^SABALv", "^SABDv", "^SABDLv",
+ "^UABAv", "^UABALv", "^UABDv", "^UABDLv")>;
+// -- arithmetic
+def : InstRW<[Ampere1BWrite_2cyc_1XY],
+ (instregex "^ABSv", "^(ADD|SUB)v", "^SADDLv", "^SADDW", "SHADD",
+ "SHSUB", "^SRHADD", "^URHADD", "SSUBL", "SSUBW",
+ "^UADDLv", "^UADDW", "UHADD", "UHSUB", "USUBL", "USUBW")>;
+// -- arithmetic, horizontal, 16B
+def : InstRW<[Ampere1BWrite_8cyc_4XY],
+ (instregex "^ADDVv16i8v", "^SADDLVv16i8v", "^UADDLVv16i8v")>;
+def : InstRW<[Ampere1BWrite_8cyc_4XY],
+ (instregex "^[SU](MIN|MAX)Vv16i8v")>;
+// -- arithmetic, horizontal, 4H/4S
+def : InstRW<[Ampere1BWrite_4cyc_2XY],
+ (instregex "^[SU]?ADDL?V(v8i8|v4i16|v2i32)v")>;
+def : InstRW<[Ampere1BWrite_4cyc_2XY],
+ (instregex "^[SU](MIN|MAX)V(v4i16|v4i32)v")>;
+// -- arithmetic, horizontal, 8B/8H
+def : InstRW<[Ampere1BWrite_6cyc_3XY],
+ (instregex "^[SU]?ADDL?V(v8i16|v4i32)v")>;
+def : InstRW<[Ampere1BWrite_6cyc_3XY],
+ (instregex "^[SU](MIN|MAX)V(v8i8|v8i16)v")>;
+// -- arithmetic, narrowing
+def : InstRW<[Ampere1BWrite_6cyc_2XY], (instregex "(ADD|SUB)HNv.*")>;
+def : InstRW<[Ampere1BWrite_6cyc_2XY], (instregex "(RADD|RSUB)HNv.*")>;
+// -- arithmetic, pairwise
+def : InstRW<[Ampere1BWrite_2cyc_1XY],
+ (instregex "^ADDPv", "^SADALP", "^UADALP", "^SADDLPv", "^UADDLPv")>;
+// -- arithmetic, saturating
+def : InstRW<[Ampere1BWrite_2cyc_1XY],
+ (instregex "^SQADD", "^SQSUB", "^SUQADD", "^UQADD", "^UQSUB", "^USQADD")>;
+// -- bit count
+def : InstRW<[Ampere1BWrite_2cyc_1XY],
+ (instregex "^(CLS|CLZ|CNT)v")>;
+// -- compare
+def : InstRW<[Ampere1BWrite_2cyc_1XY],
+ (instregex "^CMEQv", "^CMGEv", "^CMGTv", "^CMLEv", "^CMLTv",
+ "^CMHIv", "^CMHSv")>;
+// -- compare non-zero
+def : InstRW<[Ampere1BWrite_2cyc_1XY], (instregex "^CMTSTv")>;
+// -- dot product
+def : InstRW<[Ampere1BWrite_3cyc_1XY], (instregex "^(S|SU|U|US)DOTv")>;
+// -- fp reciprocal estimate
+def : InstRW<[Ampere1BWrite_6cyc_1X], (instregex "^FRECPEv", "^FRSQRTEv")>;
+// -- integer reciprocal estimate
+def : InstRW<[Ampere1BWrite_2cyc_1XY], (instregex "^URECPEv", "^URSQRTEv")>;
+// -- logical
+def : InstRW<[Ampere1BWrite_2cyc_1XY],
+ (instregex "^ANDv", "^BICv", "^EORv", "^ORRv", "^ORNv", "^NOTv")>;
+// -- logical, narrowing
+def : InstRW<[Ampere1BWrite_6cyc_2XY],
+ (instregex "RSHRNv",
+ "SHRNv", "SQSHRNv", "SQSHRUNv",
+ "UQXTNv")>;
+// -- matrix multiply
+def : InstRW<[Ampere1BWrite_3cyc_1XY],
+ (instrs SMMLA, UMMLA, USMMLA)>;
+// -- max/min
+def : InstRW<[Ampere1BWrite_2cyc_1XY],
+ (instregex "^SMAXv", "^SMINv", "^UMAXv", "^UMINv")>;
+def : InstRW<[Ampere1BWrite_2cyc_1XY],
+ (instregex "^SMAXPv", "^SMINPv", "^UMAXPv", "^UMINPv")>;
+// -- move immediate
+def : InstRW<[Ampere1BWrite_2cyc_1XY], (instregex "^MOVIv", "^MVNIv")>;
+// -- multiply
+def : InstRW<[Ampere1BWrite_3cyc_1XY],
+ (instregex "MULv", "SMULLv", "UMULLv", "SQDMUL(H|L)v", "SQRDMULHv")>;
+// -- multiply accumulate
+def : InstRW<[Ampere1BWrite_3cyc_1XY],
+ (instregex "MLAv", "MLSv", "(S|U|SQD)(MLAL|MLSL)v", "SQRDML(A|S)Hv")>;
+// -- negation, saturating
+def : InstRW<[Ampere1BWrite_2cyc_1XY], (instregex "^SQABS", "^SQNEG")>;
+// -- reverse bits/bytes
+def : InstRW<[Ampere1BWrite_2cyc_1XY],
+ (instregex "^RBITv", "^REV16v", "^REV32v", "^REV64v")>;
+// -- shift
+def : InstRW<[Ampere1BWrite_2cyc_1XY], (instregex "^[SU]SHL(v16i8|v8i16|v4i32|v2i64)")>;
+// -- shift and accumulate
+def : InstRW<[Ampere1BWrite_2cyc_1XY],
+ (instregex "SRSRAv", "SSRAv", "URSRAv", "USRAv")>;
+// -- shift, saturating
+def : InstRW<[Ampere1BWrite_2cyc_1XY],
+ (instregex "^SQRSHLv", "^SQRSHRNv", "^SQRSHRUNv", "^SQSHL", "^SQSHLU",
+ "^SQXTNv", "^SQXTUNv", "^UQSHRNv", "UQRSHRNv", "^UQRSHL",
+ "^UQSHL")>;
+
+// Vector miscellaneous instructions
+// -- duplicate element
+def : InstRW<[Ampere1BWrite_2cyc_1XY], (instregex "^DUPv.+lane")>;
+// -- duplicate from GPR
+def : InstRW<[Ampere1BWrite_5cyc_1BS], (instregex "^DUPv.+gpr")>;
+// -- extract narrow
+def : InstRW<[Ampere1BWrite_2cyc_1XY], (instregex "^XTNv")>;
+// -- insert/extract element
+def : InstRW<[Ampere1BWrite_2cyc_1XY], (instregex "^EXTv", "^INSv.+lane")>;
+// -- move FP immediate
+def : InstRW<[Ampere1BWrite_2cyc_1XY], (instregex "^FMOVv")>;
+// -- move element to GPR
+def : InstRW<[Ampere1BWrite_5cyc_1X], (instregex "(S|U)MOVv")>;
+// -- move from GPR to any element
+def : InstRW<[Ampere1BWrite_7cyc_1BS_1XY], (instregex "^INSv.+gpr")>;
+// -- table lookup
+def : InstRW<[Ampere1BWrite_2cyc_1XY],
+ (instrs TBLv8i8One, TBLv16i8One, TBXv8i8One, TBXv16i8One)>;
+def : InstRW<[Ampere1BWrite_4cyc_2XY],
+ (instrs TBLv8i8Two, TBLv16i8Two, TBXv8i8Two, TBXv16i8Two)>;
+def : InstRW<[Ampere1BWrite_6cyc_3XY],
+ (instrs TBLv8i8Three, TBLv16i8Three, TBXv8i8Three, TBXv16i8Three)>;
+def : InstRW<[Ampere1BWrite_8cyc_4XY],
+ (instrs TBLv8i8Four, TBLv16i8Four, TBXv8i8Four, TBXv16i8Four)>;
+// -- transpose
+def : InstRW<[Ampere1BWrite_2cyc_1XY],
+ (instregex "^TRN1v", "^TRN2v", "^UZP1v", "^UZP2v")>;
+// -- zip/unzip
+def : InstRW<[Ampere1BWrite_2cyc_1XY], (instregex "^ZIP1v", "^ZIP2v")>;
+
+} // SchedModel = Ampere1BModel
diff --git a/llvm/lib/Target/AArch64/AArch64SchedCyclone.td b/llvm/lib/Target/AArch64/AArch64SchedCyclone.td
index 1ef3a2a..4832465 100644
--- a/llvm/lib/Target/AArch64/AArch64SchedCyclone.td
+++ b/llvm/lib/Target/AArch64/AArch64SchedCyclone.td
@@ -21,7 +21,7 @@ def CycloneModel : SchedMachineModel {
list<Predicate> UnsupportedFeatures = !listconcat(SVEUnsupported.F,
PAUnsupported.F,
SMEUnsupported.F,
- [HasMTE]);
+ [HasMTE, HasCSSC]);
}
//===----------------------------------------------------------------------===//
diff --git a/llvm/lib/Target/AArch64/AArch64SchedExynosM3.td b/llvm/lib/Target/AArch64/AArch64SchedExynosM3.td
index 2127a34..6fc4ec3 100644
--- a/llvm/lib/Target/AArch64/AArch64SchedExynosM3.td
+++ b/llvm/lib/Target/AArch64/AArch64SchedExynosM3.td
@@ -27,7 +27,7 @@ def ExynosM3Model : SchedMachineModel {
list<Predicate> UnsupportedFeatures = !listconcat(SVEUnsupported.F,
PAUnsupported.F,
SMEUnsupported.F,
- [HasMTE]);
+ [HasMTE, HasCSSC]);
}
//===----------------------------------------------------------------------===//
diff --git a/llvm/lib/Target/AArch64/AArch64SchedExynosM4.td b/llvm/lib/Target/AArch64/AArch64SchedExynosM4.td
index 83cf560..5163de2 100644
--- a/llvm/lib/Target/AArch64/AArch64SchedExynosM4.td
+++ b/llvm/lib/Target/AArch64/AArch64SchedExynosM4.td
@@ -27,7 +27,7 @@ def ExynosM4Model : SchedMachineModel {
list<Predicate> UnsupportedFeatures = !listconcat(SVEUnsupported.F,
PAUnsupported.F,
SMEUnsupported.F,
- [HasMTE]);
+ [HasMTE, HasCSSC]);
}
//===----------------------------------------------------------------------===//
diff --git a/llvm/lib/Target/AArch64/AArch64SchedExynosM5.td b/llvm/lib/Target/AArch64/AArch64SchedExynosM5.td
index 85058af8..2ccbe16 100644
--- a/llvm/lib/Target/AArch64/AArch64SchedExynosM5.td
+++ b/llvm/lib/Target/AArch64/AArch64SchedExynosM5.td
@@ -27,7 +27,7 @@ def ExynosM5Model : SchedMachineModel {
list<Predicate> UnsupportedFeatures = !listconcat(SVEUnsupported.F,
PAUnsupported.F,
SMEUnsupported.F,
- [HasMTE]);
+ [HasMTE, HasCSSC]);
}
//===----------------------------------------------------------------------===//
diff --git a/llvm/lib/Target/AArch64/AArch64SchedFalkor.td b/llvm/lib/Target/AArch64/AArch64SchedFalkor.td
index a765cd1..e9172e8 100644
--- a/llvm/lib/Target/AArch64/AArch64SchedFalkor.td
+++ b/llvm/lib/Target/AArch64/AArch64SchedFalkor.td
@@ -26,7 +26,7 @@ def FalkorModel : SchedMachineModel {
list<Predicate> UnsupportedFeatures = !listconcat(SVEUnsupported.F,
PAUnsupported.F,
SMEUnsupported.F,
- [HasMTE]);
+ [HasMTE, HasCSSC]);
// FIXME: Remove when all errors have been fixed.
let FullInstRWOverlapCheck = 0;
}
diff --git a/llvm/lib/Target/AArch64/AArch64SchedKryo.td b/llvm/lib/Target/AArch64/AArch64SchedKryo.td
index 3551066..258b34c 100644
--- a/llvm/lib/Target/AArch64/AArch64SchedKryo.td
+++ b/llvm/lib/Target/AArch64/AArch64SchedKryo.td
@@ -30,7 +30,7 @@ def KryoModel : SchedMachineModel {
list<Predicate> UnsupportedFeatures = !listconcat(SVEUnsupported.F,
PAUnsupported.F,
SMEUnsupported.F,
- [HasMTE]);
+ [HasMTE, HasCSSC]);
// FIXME: Remove when all errors have been fixed.
let FullInstRWOverlapCheck = 0;
}
diff --git a/llvm/lib/Target/AArch64/AArch64SchedNeoverseN1.td b/llvm/lib/Target/AArch64/AArch64SchedNeoverseN1.td
index 2ec9600..524fa33 100644
--- a/llvm/lib/Target/AArch64/AArch64SchedNeoverseN1.td
+++ b/llvm/lib/Target/AArch64/AArch64SchedNeoverseN1.td
@@ -25,7 +25,7 @@ def NeoverseN1Model : SchedMachineModel {
list<Predicate> UnsupportedFeatures = !listconcat(PAUnsupported.F,
SMEUnsupported.F,
SVEUnsupported.F,
- [HasMTE]);
+ [HasMTE, HasCSSC]);
}
//===----------------------------------------------------------------------===//
diff --git a/llvm/lib/Target/AArch64/AArch64SchedNeoverseN2.td b/llvm/lib/Target/AArch64/AArch64SchedNeoverseN2.td
index a6fab5e..8ec1249 100644
--- a/llvm/lib/Target/AArch64/AArch64SchedNeoverseN2.td
+++ b/llvm/lib/Target/AArch64/AArch64SchedNeoverseN2.td
@@ -19,7 +19,7 @@ def NeoverseN2Model : SchedMachineModel {
let CompleteModel = 1;
list<Predicate> UnsupportedFeatures = !listconcat(SMEUnsupported.F,
- [HasSVE2p1, HasPAuthLR, HasCPA]);
+ [HasSVE2p1, HasPAuthLR, HasCPA, HasCSSC]);
}
//===----------------------------------------------------------------------===//
diff --git a/llvm/lib/Target/AArch64/AArch64SchedNeoverseV1.td b/llvm/lib/Target/AArch64/AArch64SchedNeoverseV1.td
index 75fbb85..613db35 100644
--- a/llvm/lib/Target/AArch64/AArch64SchedNeoverseV1.td
+++ b/llvm/lib/Target/AArch64/AArch64SchedNeoverseV1.td
@@ -28,7 +28,8 @@ def NeoverseV1Model : SchedMachineModel {
list<Predicate> UnsupportedFeatures = !listconcat(SVE2Unsupported.F,
SMEUnsupported.F,
- [HasMTE, HasCPA]);
+ [HasMTE, HasCPA,
+ HasCSSC]);
}
//===----------------------------------------------------------------------===//
diff --git a/llvm/lib/Target/AArch64/AArch64SchedNeoverseV2.td b/llvm/lib/Target/AArch64/AArch64SchedNeoverseV2.td
index 658d7cd..e7de40f 100644
--- a/llvm/lib/Target/AArch64/AArch64SchedNeoverseV2.td
+++ b/llvm/lib/Target/AArch64/AArch64SchedNeoverseV2.td
@@ -22,7 +22,8 @@ def NeoverseV2Model : SchedMachineModel {
let CompleteModel = 1;
list<Predicate> UnsupportedFeatures = !listconcat(SMEUnsupported.F,
- [HasSVE2p1, HasCPA]);
+ [HasSVE2p1, HasCPA,
+ HasCSSC]);
}
//===----------------------------------------------------------------------===//
diff --git a/llvm/lib/Target/AArch64/AArch64SchedTSV110.td b/llvm/lib/Target/AArch64/AArch64SchedTSV110.td
index 9e5060f..0ae9a69 100644
--- a/llvm/lib/Target/AArch64/AArch64SchedTSV110.td
+++ b/llvm/lib/Target/AArch64/AArch64SchedTSV110.td
@@ -27,7 +27,7 @@ def TSV110Model : SchedMachineModel {
list<Predicate> UnsupportedFeatures = !listconcat(SVEUnsupported.F,
PAUnsupported.F,
SMEUnsupported.F,
- [HasMTE]);
+ [HasMTE, HasCSSC]);
}
// Define each kind of processor resource and number available on the TSV110,
diff --git a/llvm/lib/Target/AArch64/AArch64SchedThunderX.td b/llvm/lib/Target/AArch64/AArch64SchedThunderX.td
index e1536f2..8df3f56 100644
--- a/llvm/lib/Target/AArch64/AArch64SchedThunderX.td
+++ b/llvm/lib/Target/AArch64/AArch64SchedThunderX.td
@@ -28,7 +28,7 @@ def ThunderXT8XModel : SchedMachineModel {
list<Predicate> UnsupportedFeatures = !listconcat(SVEUnsupported.F,
PAUnsupported.F,
SMEUnsupported.F,
- [HasMTE]);
+ [HasMTE, HasCSSC]);
// FIXME: Remove when all errors have been fixed.
let FullInstRWOverlapCheck = 0;
}
diff --git a/llvm/lib/Target/AArch64/AArch64SchedThunderX2T99.td b/llvm/lib/Target/AArch64/AArch64SchedThunderX2T99.td
index 89faa92..ef4baa3 100644
--- a/llvm/lib/Target/AArch64/AArch64SchedThunderX2T99.td
+++ b/llvm/lib/Target/AArch64/AArch64SchedThunderX2T99.td
@@ -28,7 +28,7 @@ def ThunderX2T99Model : SchedMachineModel {
list<Predicate> UnsupportedFeatures = !listconcat(SVEUnsupported.F,
PAUnsupported.F,
SMEUnsupported.F,
- [HasMTE]);
+ [HasMTE, HasCSSC]);
// FIXME: Remove when all errors have been fixed.
let FullInstRWOverlapCheck = 0;
}
diff --git a/llvm/lib/Target/AArch64/AArch64SchedThunderX3T110.td b/llvm/lib/Target/AArch64/AArch64SchedThunderX3T110.td
index 8685554..796bd4b 100644
--- a/llvm/lib/Target/AArch64/AArch64SchedThunderX3T110.td
+++ b/llvm/lib/Target/AArch64/AArch64SchedThunderX3T110.td
@@ -27,7 +27,7 @@ def ThunderX3T110Model : SchedMachineModel {
list<Predicate> UnsupportedFeatures = !listconcat(SVEUnsupported.F,
PAUnsupported.F,
SMEUnsupported.F,
- [HasMTE]);
+ [HasMTE, HasCSSC]);
// FIXME: Remove when all errors have been fixed.
let FullInstRWOverlapCheck = 0;
}
diff --git a/llvm/lib/Target/AArch64/GISel/AArch64InstructionSelector.cpp b/llvm/lib/Target/AArch64/GISel/AArch64InstructionSelector.cpp
index 9d51a7f..ac80485 100644
--- a/llvm/lib/Target/AArch64/GISel/AArch64InstructionSelector.cpp
+++ b/llvm/lib/Target/AArch64/GISel/AArch64InstructionSelector.cpp
@@ -2091,7 +2091,7 @@ bool AArch64InstructionSelector::preISelLower(MachineInstr &I) {
case AArch64::G_DUP: {
// Convert the type from p0 to s64 to help selection.
LLT DstTy = MRI.getType(I.getOperand(0).getReg());
- if (!DstTy.getElementType().isPointer())
+ if (!DstTy.isPointerVector())
return false;
auto NewSrc = MIB.buildCopy(LLT::scalar(64), I.getOperand(1).getReg());
MRI.setType(I.getOperand(0).getReg(),
diff --git a/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp b/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp
index cbf5655..933f13d 100644
--- a/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp
+++ b/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp
@@ -343,10 +343,7 @@ AArch64LegalizerInfo::AArch64LegalizerInfo(const AArch64Subtarget &ST)
auto IsPtrVecPred = [=](const LegalityQuery &Query) {
const LLT &ValTy = Query.Types[0];
- if (!ValTy.isVector())
- return false;
- const LLT EltTy = ValTy.getElementType();
- return EltTy.isPointer() && EltTy.getAddressSpace() == 0;
+ return ValTy.isPointerVector() && ValTy.getAddressSpace() == 0;
};
getActionDefinitionsBuilder(G_LOAD)
@@ -521,7 +518,7 @@ AArch64LegalizerInfo::AArch64LegalizerInfo(const AArch64Subtarget &ST)
[=](const LegalityQuery &Query) {
const LLT &Ty = Query.Types[0];
const LLT &SrcTy = Query.Types[1];
- return Ty.isVector() && !SrcTy.getElementType().isPointer() &&
+ return Ty.isVector() && !SrcTy.isPointerVector() &&
Ty.getElementType() != SrcTy.getElementType();
},
0, 1)
@@ -555,7 +552,7 @@ AArch64LegalizerInfo::AArch64LegalizerInfo(const AArch64Subtarget &ST)
[=](const LegalityQuery &Query) {
const LLT &Ty = Query.Types[0];
const LLT &SrcTy = Query.Types[1];
- return Ty.isVector() && !SrcTy.getElementType().isPointer() &&
+ return Ty.isVector() && !SrcTy.isPointerVector() &&
Ty.getElementType() != SrcTy.getElementType();
},
0, 1)
@@ -615,9 +612,8 @@ AArch64LegalizerInfo::AArch64LegalizerInfo(const AArch64Subtarget &ST)
.lowerIf([=](const LegalityQuery &Query) {
LLT DstTy = Query.Types[0];
LLT SrcTy = Query.Types[1];
- return DstTy.isVector() && (SrcTy.getSizeInBits() > 128 ||
- (DstTy.getScalarSizeInBits() * 2 <
- SrcTy.getScalarSizeInBits()));
+ return DstTy.isVector() && SrcTy.getSizeInBits() > 128 &&
+ DstTy.getScalarSizeInBits() * 2 <= SrcTy.getScalarSizeInBits();
})
.alwaysLegal();
@@ -1649,7 +1645,7 @@ bool AArch64LegalizerInfo::legalizeLoadStore(
return true;
}
- if (!ValTy.isVector() || !ValTy.getElementType().isPointer() ||
+ if (!ValTy.isPointerVector() ||
ValTy.getElementType().getAddressSpace() != 0) {
LLVM_DEBUG(dbgs() << "Tried to do custom legalization on wrong load/store");
return false;
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUAsmPrinter.cpp b/llvm/lib/Target/AMDGPU/AMDGPUAsmPrinter.cpp
index 5777a7c..37a36b2 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUAsmPrinter.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUAsmPrinter.cpp
@@ -204,7 +204,8 @@ void AMDGPUAsmPrinter::emitFunctionBodyStart() {
if (MFI.getNumKernargPreloadedSGPRs() > 0) {
assert(AMDGPU::hasKernargPreload(STM));
- getTargetStreamer()->EmitKernargPreloadHeader(*getGlobalSTI());
+ getTargetStreamer()->EmitKernargPreloadHeader(*getGlobalSTI(),
+ STM.isAmdHsaOS());
}
}
diff --git a/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp b/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
index 97952de..0d3b158 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
@@ -239,6 +239,7 @@ static bool isRegisterVectorType(LLT Ty) {
EltSize == 128 || EltSize == 256;
}
+// TODO: replace all uses of isRegisterType with isRegisterClassType
static bool isRegisterType(LLT Ty) {
if (!isRegisterSize(Ty.getSizeInBits()))
return false;
@@ -258,6 +259,8 @@ static LegalityPredicate isRegisterType(unsigned TypeIdx) {
}
// RegisterType that doesn't have a corresponding RegClass.
+// TODO: Once `isRegisterType` is replaced with `isRegisterClassType` this
+// should be removed.
static LegalityPredicate isIllegalRegisterType(unsigned TypeIdx) {
return [=](const LegalityQuery &Query) {
LLT Ty = Query.Types[TypeIdx];
@@ -276,6 +279,82 @@ static LegalityPredicate elementTypeIsLegal(unsigned TypeIdx) {
};
}
+static const LLT S1 = LLT::scalar(1);
+static const LLT S8 = LLT::scalar(8);
+static const LLT S16 = LLT::scalar(16);
+static const LLT S32 = LLT::scalar(32);
+static const LLT S64 = LLT::scalar(64);
+static const LLT S96 = LLT::scalar(96);
+static const LLT S128 = LLT::scalar(128);
+static const LLT S160 = LLT::scalar(160);
+static const LLT S224 = LLT::scalar(224);
+static const LLT S256 = LLT::scalar(256);
+static const LLT S512 = LLT::scalar(512);
+static const LLT MaxScalar = LLT::scalar(MaxRegisterSize);
+
+static const LLT V2S8 = LLT::fixed_vector(2, 8);
+static const LLT V2S16 = LLT::fixed_vector(2, 16);
+static const LLT V4S16 = LLT::fixed_vector(4, 16);
+static const LLT V6S16 = LLT::fixed_vector(6, 16);
+static const LLT V8S16 = LLT::fixed_vector(8, 16);
+static const LLT V10S16 = LLT::fixed_vector(10, 16);
+static const LLT V12S16 = LLT::fixed_vector(12, 16);
+static const LLT V16S16 = LLT::fixed_vector(16, 16);
+
+static const LLT V2S32 = LLT::fixed_vector(2, 32);
+static const LLT V3S32 = LLT::fixed_vector(3, 32);
+static const LLT V4S32 = LLT::fixed_vector(4, 32);
+static const LLT V5S32 = LLT::fixed_vector(5, 32);
+static const LLT V6S32 = LLT::fixed_vector(6, 32);
+static const LLT V7S32 = LLT::fixed_vector(7, 32);
+static const LLT V8S32 = LLT::fixed_vector(8, 32);
+static const LLT V9S32 = LLT::fixed_vector(9, 32);
+static const LLT V10S32 = LLT::fixed_vector(10, 32);
+static const LLT V11S32 = LLT::fixed_vector(11, 32);
+static const LLT V12S32 = LLT::fixed_vector(12, 32);
+static const LLT V16S32 = LLT::fixed_vector(16, 32);
+static const LLT V32S32 = LLT::fixed_vector(32, 32);
+
+static const LLT V2S64 = LLT::fixed_vector(2, 64);
+static const LLT V3S64 = LLT::fixed_vector(3, 64);
+static const LLT V4S64 = LLT::fixed_vector(4, 64);
+static const LLT V5S64 = LLT::fixed_vector(5, 64);
+static const LLT V6S64 = LLT::fixed_vector(6, 64);
+static const LLT V7S64 = LLT::fixed_vector(7, 64);
+static const LLT V8S64 = LLT::fixed_vector(8, 64);
+static const LLT V16S64 = LLT::fixed_vector(16, 64);
+
+static const LLT V2S128 = LLT::fixed_vector(2, 128);
+static const LLT V4S128 = LLT::fixed_vector(4, 128);
+
+static std::initializer_list<LLT> AllScalarTypes = {S32, S64, S96, S128,
+ S160, S224, S256, S512};
+
+static std::initializer_list<LLT> AllS16Vectors{
+ V2S16, V4S16, V6S16, V8S16, V10S16, V12S16, V16S16, V2S128, V4S128};
+
+static std::initializer_list<LLT> AllS32Vectors = {
+ V2S32, V3S32, V4S32, V5S32, V6S32, V7S32, V8S32,
+ V9S32, V10S32, V11S32, V12S32, V16S32, V32S32};
+
+static std::initializer_list<LLT> AllS64Vectors = {V2S64, V3S64, V4S64, V5S64,
+ V6S64, V7S64, V8S64, V16S64};
+
+// Checks whether a type is in the list of legal register types.
+static bool isRegisterClassType(LLT Ty) {
+ if (Ty.isPointerOrPointerVector())
+ Ty = Ty.changeElementType(LLT::scalar(Ty.getScalarSizeInBits()));
+
+ return is_contained(AllS32Vectors, Ty) || is_contained(AllS64Vectors, Ty) ||
+ is_contained(AllScalarTypes, Ty) || is_contained(AllS16Vectors, Ty);
+}
+
+static LegalityPredicate isRegisterClassType(unsigned TypeIdx) {
+ return [TypeIdx](const LegalityQuery &Query) {
+ return isRegisterClassType(Query.Types[TypeIdx]);
+ };
+}
+
// If we have a truncating store or an extending load with a data size larger
// than 32-bits, we need to reduce to a 32-bit type.
static LegalityPredicate isWideScalarExtLoadTruncStore(unsigned TypeIdx) {
@@ -416,11 +495,10 @@ static bool loadStoreBitcastWorkaround(const LLT Ty) {
if (!Ty.isVector())
return true;
- LLT EltTy = Ty.getElementType();
- if (EltTy.isPointer())
+ if (Ty.isPointerVector())
return true;
- unsigned EltSize = EltTy.getSizeInBits();
+ unsigned EltSize = Ty.getScalarSizeInBits();
return EltSize != 32 && EltSize != 64;
}
@@ -578,52 +656,6 @@ AMDGPULegalizerInfo::AMDGPULegalizerInfo(const GCNSubtarget &ST_,
return LLT::pointer(AS, TM.getPointerSizeInBits(AS));
};
- const LLT S1 = LLT::scalar(1);
- const LLT S8 = LLT::scalar(8);
- const LLT S16 = LLT::scalar(16);
- const LLT S32 = LLT::scalar(32);
- const LLT S64 = LLT::scalar(64);
- const LLT S128 = LLT::scalar(128);
- const LLT S256 = LLT::scalar(256);
- const LLT S512 = LLT::scalar(512);
- const LLT MaxScalar = LLT::scalar(MaxRegisterSize);
-
- const LLT V2S8 = LLT::fixed_vector(2, 8);
- const LLT V2S16 = LLT::fixed_vector(2, 16);
- const LLT V4S16 = LLT::fixed_vector(4, 16);
-
- const LLT V2S32 = LLT::fixed_vector(2, 32);
- const LLT V3S32 = LLT::fixed_vector(3, 32);
- const LLT V4S32 = LLT::fixed_vector(4, 32);
- const LLT V5S32 = LLT::fixed_vector(5, 32);
- const LLT V6S32 = LLT::fixed_vector(6, 32);
- const LLT V7S32 = LLT::fixed_vector(7, 32);
- const LLT V8S32 = LLT::fixed_vector(8, 32);
- const LLT V9S32 = LLT::fixed_vector(9, 32);
- const LLT V10S32 = LLT::fixed_vector(10, 32);
- const LLT V11S32 = LLT::fixed_vector(11, 32);
- const LLT V12S32 = LLT::fixed_vector(12, 32);
- const LLT V13S32 = LLT::fixed_vector(13, 32);
- const LLT V14S32 = LLT::fixed_vector(14, 32);
- const LLT V15S32 = LLT::fixed_vector(15, 32);
- const LLT V16S32 = LLT::fixed_vector(16, 32);
- const LLT V32S32 = LLT::fixed_vector(32, 32);
-
- const LLT V2S64 = LLT::fixed_vector(2, 64);
- const LLT V3S64 = LLT::fixed_vector(3, 64);
- const LLT V4S64 = LLT::fixed_vector(4, 64);
- const LLT V5S64 = LLT::fixed_vector(5, 64);
- const LLT V6S64 = LLT::fixed_vector(6, 64);
- const LLT V7S64 = LLT::fixed_vector(7, 64);
- const LLT V8S64 = LLT::fixed_vector(8, 64);
- const LLT V16S64 = LLT::fixed_vector(16, 64);
-
- std::initializer_list<LLT> AllS32Vectors =
- {V2S32, V3S32, V4S32, V5S32, V6S32, V7S32, V8S32,
- V9S32, V10S32, V11S32, V12S32, V13S32, V14S32, V15S32, V16S32, V32S32};
- std::initializer_list<LLT> AllS64Vectors =
- {V2S64, V3S64, V4S64, V5S64, V6S64, V7S64, V8S64, V16S64};
-
const LLT GlobalPtr = GetAddrSpacePtr(AMDGPUAS::GLOBAL_ADDRESS);
const LLT ConstantPtr = GetAddrSpacePtr(AMDGPUAS::CONSTANT_ADDRESS);
const LLT Constant32Ptr = GetAddrSpacePtr(AMDGPUAS::CONSTANT_ADDRESS_32BIT);
@@ -836,10 +868,9 @@ AMDGPULegalizerInfo::AMDGPULegalizerInfo(const GCNSubtarget &ST_,
.scalarize(0);
getActionDefinitionsBuilder(G_BITCAST)
- // Don't worry about the size constraint.
- .legalIf(all(isRegisterType(0), isRegisterType(1)))
- .lower();
-
+ // Don't worry about the size constraint.
+ .legalIf(all(isRegisterClassType(0), isRegisterClassType(1)))
+ .lower();
getActionDefinitionsBuilder(G_CONSTANT)
.legalFor({S1, S32, S64, S16, GlobalPtr,
@@ -1957,6 +1988,8 @@ AMDGPULegalizerInfo::AMDGPULegalizerInfo(const GCNSubtarget &ST_,
getActionDefinitionsBuilder(G_READCYCLECOUNTER)
.legalFor({S64});
+ getActionDefinitionsBuilder(G_READSTEADYCOUNTER).legalFor({S64});
+
getActionDefinitionsBuilder(G_FENCE)
.alwaysLegal();
diff --git a/llvm/lib/Target/AMDGPU/AMDGPULowerKernelArguments.cpp b/llvm/lib/Target/AMDGPU/AMDGPULowerKernelArguments.cpp
index 015c7108..bc58407 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPULowerKernelArguments.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPULowerKernelArguments.cpp
@@ -145,7 +145,6 @@ static bool lowerKernelArguments(Function &F, const TargetMachine &TM) {
// Try to preload this argument into user SGPRs.
if (Arg.hasInRegAttr() && InPreloadSequence && ST.hasKernargPreload() &&
- !ST.needsKernargPreloadBackwardsCompatibility() &&
!Arg.getType()->isAggregateType())
if (PreloadInfo.tryAllocPreloadSGPRs(AllocSize, EltOffset,
LastExplicitArgOffset))
diff --git a/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp b/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp
index 5323e4f..b174d57 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp
@@ -4051,6 +4051,7 @@ AMDGPURegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
case AMDGPU::G_CONSTANT:
case AMDGPU::G_GLOBAL_VALUE:
case AMDGPU::G_BLOCK_ADDR:
+ case AMDGPU::G_READSTEADYCOUNTER:
case AMDGPU::G_READCYCLECOUNTER: {
unsigned Size = MRI.getType(MI.getOperand(0).getReg()).getSizeInBits();
OpdsMapping[0] = AMDGPU::getValueMapping(AMDGPU::SGPRRegBankID, Size);
diff --git a/llvm/lib/Target/AMDGPU/DSDIRInstructions.td b/llvm/lib/Target/AMDGPU/DSDIRInstructions.td
index 757845a..3ef765f 100644
--- a/llvm/lib/Target/AMDGPU/DSDIRInstructions.td
+++ b/llvm/lib/Target/AMDGPU/DSDIRInstructions.td
@@ -159,8 +159,8 @@ def : GCNPat <
// GFX11
//===----------------------------------------------------------------------===//
-multiclass DSDIR_Real_gfx11<bits<2> op,
- DSDIR_Pseudo lds = !cast<DSDIR_Pseudo>(NAME)> {
+multiclass DSDIR_Real_gfx11<bits<2> op> {
+ defvar lds = !cast<DSDIR_Pseudo>(NAME);
def _gfx11 : DSDIR_Real<lds, lds.InOperandList,
LDSDIR_getAsm<lds.is_direct>.ret,
SIEncodingFamily.GFX11>,
@@ -177,8 +177,8 @@ defm LDS_DIRECT_LOAD : DSDIR_Real_gfx11<0x1>;
// GFX12+
//===----------------------------------------------------------------------===//
-multiclass DSDIR_Real_gfx12<bits<2> op,
- DSDIR_Pseudo lds = !cast<DSDIR_Pseudo>(NAME)> {
+multiclass DSDIR_Real_gfx12<bits<2> op> {
+ defvar lds = !cast<DSDIR_Pseudo>(NAME);
def _gfx12 : DSDIR_Real<lds, lds.InOperandList,
VDSDIR_getAsm<lds.is_direct>.ret,
SIEncodingFamily.GFX12>,
diff --git a/llvm/lib/Target/AMDGPU/Disassembler/AMDGPUDisassembler.cpp b/llvm/lib/Target/AMDGPU/Disassembler/AMDGPUDisassembler.cpp
index 85377d0..b307865 100644
--- a/llvm/lib/Target/AMDGPU/Disassembler/AMDGPUDisassembler.cpp
+++ b/llvm/lib/Target/AMDGPU/Disassembler/AMDGPUDisassembler.cpp
@@ -178,8 +178,12 @@ static DecodeStatus decodeAV10(MCInst &Inst, unsigned Imm, uint64_t /* Addr */,
}
// Decoder for Src(9-bit encoding) registers only.
-#define DECODE_OPERAND_SRC_REG_9(RegClass, OpWidth) \
- DECODE_SrcOp(decodeOperand_##RegClass, 9, OpWidth, Imm, false, 0)
+template <AMDGPUDisassembler::OpWidthTy OpWidth>
+static DecodeStatus decodeSrcReg9(MCInst &Inst, unsigned Imm,
+ uint64_t /* Addr */,
+ const MCDisassembler *Decoder) {
+ return decodeSrcOp(Inst, 9, OpWidth, Imm, Imm, false, 0, Decoder);
+}
// Decoder for Src(9-bit encoding) AGPR, register number encoded in 9bits, set
// Imm{9} to 1 (set acc) and decode using 'enum10' from decodeSrcOp, registers
@@ -204,22 +208,29 @@ static DecodeStatus decodeSrcAV10(MCInst &Inst, unsigned Imm,
// will be decoded and InstPrinter will report warning. Immediate will be
// decoded into constant of size ImmWidth, should match width of immediate used
// by OperandType (important for floating point types).
-#define DECODE_OPERAND_SRC_REG_OR_IMM_9(RegClass, OpWidth, ImmWidth) \
- DECODE_SrcOp(decodeOperand_##RegClass##_Imm##ImmWidth, 9, OpWidth, Imm, \
- false, ImmWidth)
-
-#define DECODE_OPERAND_SRC_REG_OR_IMM_9_TYPED(Name, OpWidth, ImmWidth) \
- DECODE_SrcOp(decodeOperand_##Name, 9, OpWidth, Imm, false, ImmWidth)
+template <AMDGPUDisassembler::OpWidthTy OpWidth, unsigned ImmWidth>
+static DecodeStatus decodeSrcRegOrImm9(MCInst &Inst, unsigned Imm,
+ uint64_t /* Addr */,
+ const MCDisassembler *Decoder) {
+ return decodeSrcOp(Inst, 9, OpWidth, Imm, Imm, false, ImmWidth, Decoder);
+}
// Decoder for Src(9-bit encoding) AGPR or immediate. Set Imm{9} to 1 (set acc)
// and decode using 'enum10' from decodeSrcOp.
-#define DECODE_OPERAND_SRC_REG_OR_IMM_A9(RegClass, OpWidth, ImmWidth) \
- DECODE_SrcOp(decodeOperand_##RegClass##_Imm##ImmWidth, 9, OpWidth, \
- Imm | 512, false, ImmWidth)
+template <AMDGPUDisassembler::OpWidthTy OpWidth, unsigned ImmWidth>
+static DecodeStatus decodeSrcRegOrImmA9(MCInst &Inst, unsigned Imm,
+ uint64_t /* Addr */,
+ const MCDisassembler *Decoder) {
+ return decodeSrcOp(Inst, 9, OpWidth, Imm, Imm | 512, false, ImmWidth,
+ Decoder);
+}
-#define DECODE_OPERAND_SRC_REG_OR_IMM_DEFERRED_9(RegClass, OpWidth, ImmWidth) \
- DECODE_SrcOp(decodeOperand_##RegClass##_Deferred##_Imm##ImmWidth, 9, \
- OpWidth, Imm, true, ImmWidth)
+template <AMDGPUDisassembler::OpWidthTy OpWidth, unsigned ImmWidth>
+static DecodeStatus decodeSrcRegOrImmDeferred9(MCInst &Inst, unsigned Imm,
+ uint64_t /* Addr */,
+ const MCDisassembler *Decoder) {
+ return decodeSrcOp(Inst, 9, OpWidth, Imm, Imm, true, ImmWidth, Decoder);
+}
// Default decoders generated by tablegen: 'Decode<RegClass>RegisterClass'
// when RegisterClass is used as an operand. Most often used for destination
@@ -255,51 +266,6 @@ DECODE_OPERAND_REG_8(AReg_256)
DECODE_OPERAND_REG_8(AReg_512)
DECODE_OPERAND_REG_8(AReg_1024)
-// Decoders for register only source RegisterOperands that use use 9-bit Src
-// encoding: 'decodeOperand_<RegClass>'.
-
-DECODE_OPERAND_SRC_REG_9(VGPR_32, OPW32)
-DECODE_OPERAND_SRC_REG_9(VReg_64, OPW64)
-DECODE_OPERAND_SRC_REG_9(VReg_128, OPW128)
-DECODE_OPERAND_SRC_REG_9(VReg_256, OPW256)
-DECODE_OPERAND_SRC_REG_9(VRegOrLds_32, OPW32)
-
-// Decoders for register or immediate RegisterOperands that use 9-bit Src
-// encoding: 'decodeOperand_<RegClass>_Imm<ImmWidth>'.
-
-DECODE_OPERAND_SRC_REG_OR_IMM_9(SReg_64, OPW64, 64)
-DECODE_OPERAND_SRC_REG_OR_IMM_9(SReg_32, OPW32, 32)
-DECODE_OPERAND_SRC_REG_OR_IMM_9(SReg_32, OPW32, 16)
-DECODE_OPERAND_SRC_REG_OR_IMM_9(SRegOrLds_32, OPW32, 32)
-DECODE_OPERAND_SRC_REG_OR_IMM_9(VS_32_Lo128, OPW16, 16)
-DECODE_OPERAND_SRC_REG_OR_IMM_9(VS_32, OPW32, 16)
-DECODE_OPERAND_SRC_REG_OR_IMM_9(VS_32, OPW32, 32)
-DECODE_OPERAND_SRC_REG_OR_IMM_9(VS_64, OPW64, 64)
-DECODE_OPERAND_SRC_REG_OR_IMM_9(VS_64, OPW64, 32)
-DECODE_OPERAND_SRC_REG_OR_IMM_9(VReg_64, OPW64, 64)
-DECODE_OPERAND_SRC_REG_OR_IMM_9(VReg_64, OPW64, 32)
-DECODE_OPERAND_SRC_REG_OR_IMM_9(VReg_64, OPW64, 16)
-DECODE_OPERAND_SRC_REG_OR_IMM_9(VReg_128, OPW128, 32)
-DECODE_OPERAND_SRC_REG_OR_IMM_9(VReg_128, OPW128, 16)
-DECODE_OPERAND_SRC_REG_OR_IMM_9(VReg_256, OPW256, 64)
-DECODE_OPERAND_SRC_REG_OR_IMM_9(VReg_256, OPW256, 32)
-DECODE_OPERAND_SRC_REG_OR_IMM_9(VReg_512, OPW512, 32)
-DECODE_OPERAND_SRC_REG_OR_IMM_9(VReg_1024, OPW1024, 32)
-
-DECODE_OPERAND_SRC_REG_OR_IMM_9_TYPED(VS_32_ImmV2I16, OPW32, 32)
-DECODE_OPERAND_SRC_REG_OR_IMM_9_TYPED(VS_32_ImmV2F16, OPW32, 16)
-
-DECODE_OPERAND_SRC_REG_OR_IMM_A9(AReg_64, OPW64, 64)
-DECODE_OPERAND_SRC_REG_OR_IMM_A9(AReg_128, OPW128, 32)
-DECODE_OPERAND_SRC_REG_OR_IMM_A9(AReg_256, OPW256, 64)
-DECODE_OPERAND_SRC_REG_OR_IMM_A9(AReg_512, OPW512, 32)
-DECODE_OPERAND_SRC_REG_OR_IMM_A9(AReg_1024, OPW1024, 32)
-
-DECODE_OPERAND_SRC_REG_OR_IMM_DEFERRED_9(VS_32_Lo128, OPW16, 16)
-DECODE_OPERAND_SRC_REG_OR_IMM_DEFERRED_9(VS_32, OPW16, 16)
-DECODE_OPERAND_SRC_REG_OR_IMM_DEFERRED_9(VS_32, OPW32, 32)
-DECODE_OPERAND_SRC_REG_OR_IMM_DEFERRED_9(SReg_32, OPW32, 32)
-
static DecodeStatus DecodeVGPR_16RegisterClass(MCInst &Inst, unsigned Imm,
uint64_t /*Addr*/,
const MCDisassembler *Decoder) {
diff --git a/llvm/lib/Target/AMDGPU/EXPInstructions.td b/llvm/lib/Target/AMDGPU/EXPInstructions.td
index 4cfee7d..0a1e544 100644
--- a/llvm/lib/Target/AMDGPU/EXPInstructions.td
+++ b/llvm/lib/Target/AMDGPU/EXPInstructions.td
@@ -10,7 +10,7 @@
// EXP classes
//===----------------------------------------------------------------------===//
-class EXPCommon<bit row, bit done, string asm = ""> : InstSI<
+class EXPCommon<bit _row, bit _done, string asm = ""> : InstSI<
(outs),
(ins exp_tgt:$tgt,
ExpSrc0:$src0, ExpSrc1:$src1, ExpSrc2:$src2, ExpSrc3:$src3,
@@ -18,13 +18,16 @@ class EXPCommon<bit row, bit done, string asm = ""> : InstSI<
asm> {
let EXP = 1;
let EXP_CNT = 1;
- let mayLoad = done;
+ let mayLoad = _done;
let mayStore = 1;
let maybeAtomic = 0;
let UseNamedOperandTable = 1;
- let Uses = !if(row, [EXEC, M0], [EXEC]);
+ let Uses = !if(_row, [EXEC, M0], [EXEC]);
let SchedRW = [WriteExport];
let DisableWQM = 1;
+
+ bit row = _row;
+ bit done = _done;
}
class EXP_Pseudo<bit row, bit done>
@@ -34,17 +37,17 @@ class EXP_Pseudo<bit row, bit done>
}
// Real instruction with optional asm operands "compr" and "vm".
-class EXP_Real_ComprVM<bit done, string pseudo, int subtarget>
- : EXPCommon<0, done, "exp$tgt $src0, $src1, $src2, $src3"
- #!if(done, " done", "")#"$compr$vm">,
+class EXP_Real_ComprVM<string pseudo, int subtarget, EXP_Pseudo ps = !cast<EXP_Pseudo>(pseudo)>
+ : EXPCommon<0, ps.done, "exp$tgt $src0, $src1, $src2, $src3"
+ #!if(ps.done, " done", "")#"$compr$vm">,
SIMCInstr<pseudo, subtarget> {
let AsmMatchConverter = "cvtExp";
}
// Real instruction with optional asm operand "row_en".
-class EXP_Real_Row<bit row, bit done, string pseudo, int subtarget, string name = "exp">
- : EXPCommon<row, done, name#"$tgt $src0, $src1, $src2, $src3"
- #!if(done, " done", "")#!if(row, " row_en", "")>,
+class EXP_Real_Row<string pseudo, int subtarget, string name = "exp", EXP_Pseudo ps = !cast<EXP_Pseudo>(pseudo)>
+ : EXPCommon<ps.row, ps.done, name#"$tgt $src0, $src1, $src2, $src3"
+ #!if(ps.done, " done", "")#!if(ps.row, " row_en", "")>,
SIMCInstr<pseudo, subtarget> {
let AsmMatchConverter = "cvtExp";
}
@@ -63,82 +66,69 @@ def EXP_ROW_DONE : EXP_Pseudo<1, 1>;
} // let SubtargetPredicate = isNotGFX90APlus
//===----------------------------------------------------------------------===//
-// SI
+// SI, VI, GFX10.
//===----------------------------------------------------------------------===//
-class EXP_Real_si<bit _done, string pseudo>
- : EXP_Real_ComprVM<_done, pseudo, SIEncodingFamily.SI>, EXPe_ComprVM {
- let AssemblerPredicate = isGFX6GFX7;
- let DecoderNamespace = "GFX6GFX7";
- let done = _done;
+multiclass EXP_Real_si {
+ defvar ps = !cast<EXP_Pseudo>(NAME);
+ def _si : EXP_Real_ComprVM<NAME, SIEncodingFamily.SI>, EXPe_ComprVM {
+ let AssemblerPredicate = isGFX6GFX7;
+ let DecoderNamespace = "GFX6GFX7";
+ let done = ps.done;
+ }
}
-def EXP_si : EXP_Real_si<0, "EXP">;
-def EXP_DONE_si : EXP_Real_si<1, "EXP_DONE">;
-
-//===----------------------------------------------------------------------===//
-// VI
-//===----------------------------------------------------------------------===//
-
-class EXP_Real_vi<bit _done, string pseudo>
- : EXP_Real_ComprVM<_done, pseudo, SIEncodingFamily.VI>, EXPe_vi {
- let AssemblerPredicate = isGFX8GFX9;
- let SubtargetPredicate = isNotGFX90APlus;
- let DecoderNamespace = "GFX8";
- let done = _done;
+multiclass EXP_Real_vi {
+ defvar ps = !cast<EXP_Pseudo>(NAME);
+ def _vi : EXP_Real_ComprVM<NAME, SIEncodingFamily.VI>, EXPe_vi {
+ let AssemblerPredicate = isGFX8GFX9;
+ let SubtargetPredicate = isNotGFX90APlus;
+ let DecoderNamespace = "GFX8";
+ let done = ps.done;
+ }
}
-def EXP_vi : EXP_Real_vi<0, "EXP">;
-def EXP_DONE_vi : EXP_Real_vi<1, "EXP_DONE">;
-
-//===----------------------------------------------------------------------===//
-// GFX10
-//===----------------------------------------------------------------------===//
-
-class EXP_Real_gfx10<bit _done, string pseudo>
- : EXP_Real_ComprVM<_done, pseudo, SIEncodingFamily.GFX10>, EXPe_ComprVM {
- let AssemblerPredicate = isGFX10Only;
- let DecoderNamespace = "GFX10";
- let done = _done;
+multiclass EXP_Real_gfx10 {
+ defvar ps = !cast<EXP_Pseudo>(NAME);
+ def _gfx10 : EXP_Real_ComprVM<NAME, SIEncodingFamily.GFX10>, EXPe_ComprVM {
+ let AssemblerPredicate = isGFX10Only;
+ let DecoderNamespace = "GFX10";
+ let done = ps.done;
+ }
}
-def EXP_gfx10 : EXP_Real_gfx10<0, "EXP">;
-def EXP_DONE_gfx10 : EXP_Real_gfx10<1, "EXP_DONE">;
+defm EXP : EXP_Real_si, EXP_Real_vi, EXP_Real_gfx10;
+defm EXP_DONE : EXP_Real_si, EXP_Real_vi, EXP_Real_gfx10;
//===----------------------------------------------------------------------===//
-// GFX11
+// GFX11, GFX12.
//===----------------------------------------------------------------------===//
-class EXP_Real_gfx11<bit _row, bit _done, string pseudo>
- : EXP_Real_Row<_row, _done, pseudo, SIEncodingFamily.GFX11>, EXPe_Row {
- let AssemblerPredicate = isGFX11Only;
- let DecoderNamespace = "GFX11";
- let row = _row;
- let done = _done;
+multiclass EXP_Real_gfx11 {
+ defvar ps = !cast<EXP_Pseudo>(NAME);
+ def _gfx11 : EXP_Real_Row<NAME, SIEncodingFamily.GFX11>, EXPe_Row {
+ let AssemblerPredicate = isGFX11Only;
+ let DecoderNamespace = "GFX11";
+ let row = ps.row;
+ let done = ps.done;
+ }
}
-def EXP_gfx11 : EXP_Real_gfx11<0, 0, "EXP">;
-def EXP_DONE_gfx11 : EXP_Real_gfx11<0, 1, "EXP_DONE">;
-def EXP_ROW_gfx11 : EXP_Real_gfx11<1, 0, "EXP_ROW">;
-def EXP_ROW_DONE_gfx11 : EXP_Real_gfx11<1, 1, "EXP_ROW_DONE">;
-
-//===----------------------------------------------------------------------===//
-// GFX12+
-//===----------------------------------------------------------------------===//
-
-class VEXPORT_Real_gfx12<bit _row, bit _done, string pseudo>
- : EXP_Real_Row<_row, _done, pseudo, SIEncodingFamily.GFX12, "export">,
+multiclass VEXPORT_Real_gfx12 {
+ defvar ps = !cast<EXP_Pseudo>(NAME);
+ def _gfx12 : EXP_Real_Row<NAME, SIEncodingFamily.GFX12, "export">,
EXPe_Row, MnemonicAlias<"exp", "export">, Requires<[isGFX12Plus]> {
- let AssemblerPredicate = isGFX12Plus;
- let DecoderNamespace = "GFX12";
- let row = _row;
- let done = _done;
+ let AssemblerPredicate = isGFX12Only;
+ let DecoderNamespace = "GFX12";
+ let row = ps.row;
+ let done = ps.done;
+ }
}
-def EXPORT_gfx12 : VEXPORT_Real_gfx12<0, 0, "EXP">;
-def EXPORT_DONE_gfx12 : VEXPORT_Real_gfx12<0, 1, "EXP_DONE">;
-def EXPORT_ROW_gfx12 : VEXPORT_Real_gfx12<1, 0, "EXP_ROW">;
-def EXPORT_ROW_DONE_gfx12 : VEXPORT_Real_gfx12<1, 1, "EXP_ROW_DONE">;
+defm EXP : EXP_Real_gfx11, VEXPORT_Real_gfx12;
+defm EXP_DONE : EXP_Real_gfx11, VEXPORT_Real_gfx12;
+defm EXP_ROW : EXP_Real_gfx11, VEXPORT_Real_gfx12;
+defm EXP_ROW_DONE : EXP_Real_gfx11, VEXPORT_Real_gfx12;
//===----------------------------------------------------------------------===//
// EXP Patterns
diff --git a/llvm/lib/Target/AMDGPU/GCNProcessors.td b/llvm/lib/Target/AMDGPU/GCNProcessors.td
index 4671e03..f7e0919 100644
--- a/llvm/lib/Target/AMDGPU/GCNProcessors.td
+++ b/llvm/lib/Target/AMDGPU/GCNProcessors.td
@@ -258,12 +258,12 @@ def : ProcessorModel<"gfx1036", GFX10SpeedModel,
>;
// [gfx1010, gfx1011, gfx1012, gfx1013]
-def : ProcessorModel<"gfx10.1-generic", GFX10SpeedModel,
+def : ProcessorModel<"gfx10-1-generic", GFX10SpeedModel,
FeatureISAVersion10_1_Generic.Features
>;
// [gfx1030, gfx1031, gfx1032, gfx1033, gfx1034, gfx1035, gfx1036]
-def : ProcessorModel<"gfx10.3-generic", GFX10SpeedModel,
+def : ProcessorModel<"gfx10-3-generic", GFX10SpeedModel,
FeatureISAVersion10_3_Generic.Features
>;
diff --git a/llvm/lib/Target/AMDGPU/GCNSubtarget.h b/llvm/lib/Target/AMDGPU/GCNSubtarget.h
index b13b4f7..c8e1b15 100644
--- a/llvm/lib/Target/AMDGPU/GCNSubtarget.h
+++ b/llvm/lib/Target/AMDGPU/GCNSubtarget.h
@@ -1258,12 +1258,6 @@ public:
// \returns true if preloading kernel arguments is supported.
bool hasKernargPreload() const { return KernargPreload; }
- // \returns true if we need to generate backwards compatible code when
- // preloading kernel arguments.
- bool needsKernargPreloadBackwardsCompatibility() const {
- return hasKernargPreload() && !hasGFX940Insts();
- }
-
// \returns true if the target has split barriers feature
bool hasSplitBarriers() const { return getGeneration() >= GFX12; }
diff --git a/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUTargetStreamer.cpp b/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUTargetStreamer.cpp
index a25622c..4742b0b 100644
--- a/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUTargetStreamer.cpp
+++ b/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUTargetStreamer.cpp
@@ -782,18 +782,26 @@ bool AMDGPUTargetELFStreamer::EmitHSAMetadata(msgpack::Document &HSAMetadataDoc,
}
bool AMDGPUTargetAsmStreamer::EmitKernargPreloadHeader(
- const MCSubtargetInfo &STI) {
- for (int i = 0; i < 64; ++i) {
+ const MCSubtargetInfo &STI, bool TrapEnabled) {
+ const char *TrapInstr = TrapEnabled ? "\ts_trap 2" : "\ts_endpgm";
+ OS << TrapInstr
+ << " ; Trap with incompatible firmware that doesn't "
+ "support preloading kernel arguments.\n";
+ for (int i = 0; i < 63; ++i) {
OS << "\ts_nop 0\n";
}
return true;
}
bool AMDGPUTargetELFStreamer::EmitKernargPreloadHeader(
- const MCSubtargetInfo &STI) {
+ const MCSubtargetInfo &STI, bool TrapEnabled) {
const uint32_t Encoded_s_nop = 0xbf800000;
+ const uint32_t Encoded_s_trap = 0xbf920002;
+ const uint32_t Encoded_s_endpgm = 0xbf810000;
+ const uint32_t TrapInstr = TrapEnabled ? Encoded_s_trap : Encoded_s_endpgm;
MCStreamer &OS = getStreamer();
- for (int i = 0; i < 64; ++i) {
+ OS.emitInt32(TrapInstr);
+ for (int i = 0; i < 63; ++i) {
OS.emitInt32(Encoded_s_nop);
}
return true;
diff --git a/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUTargetStreamer.h b/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUTargetStreamer.h
index ad5f27a..5aa80ff 100644
--- a/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUTargetStreamer.h
+++ b/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUTargetStreamer.h
@@ -89,7 +89,8 @@ public:
virtual bool EmitCodeEnd(const MCSubtargetInfo &STI) { return true; }
/// \returns True on success, false on failure.
- virtual bool EmitKernargPreloadHeader(const MCSubtargetInfo &STI) {
+ virtual bool EmitKernargPreloadHeader(const MCSubtargetInfo &STI,
+ bool TrapEnabled) {
return true;
}
@@ -146,7 +147,8 @@ public:
bool EmitCodeEnd(const MCSubtargetInfo &STI) override;
/// \returns True on success, false on failure.
- bool EmitKernargPreloadHeader(const MCSubtargetInfo &STI) override;
+ bool EmitKernargPreloadHeader(const MCSubtargetInfo &STI,
+ bool TrapEnabled) override;
void EmitAmdhsaKernelDescriptor(
const MCSubtargetInfo &STI, StringRef KernelName,
@@ -200,7 +202,8 @@ public:
bool EmitCodeEnd(const MCSubtargetInfo &STI) override;
/// \returns True on success, false on failure.
- bool EmitKernargPreloadHeader(const MCSubtargetInfo &STI) override;
+ bool EmitKernargPreloadHeader(const MCSubtargetInfo &STI,
+ bool TrapEnabled) override;
void EmitAmdhsaKernelDescriptor(
const MCSubtargetInfo &STI, StringRef KernelName,
diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index a64a9e6..56f0e71 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -468,6 +468,10 @@ SITargetLowering::SITargetLowering(const TargetMachine &TM,
// On SI this is s_memtime and s_memrealtime on VI.
setOperationAction(ISD::READCYCLECOUNTER, MVT::i64, Legal);
+
+ if (Subtarget->hasSMemRealTime() ||
+ Subtarget->getGeneration() >= AMDGPUSubtarget::GFX11)
+ setOperationAction(ISD::READSTEADYCOUNTER, MVT::i64, Legal);
setOperationAction({ISD::TRAP, ISD::DEBUGTRAP}, MVT::Other, Custom);
if (Subtarget->has16BitInsts()) {
@@ -2826,8 +2830,7 @@ SDValue SITargetLowering::LowerFormalArguments(
if (IsEntryFunc) {
allocateSpecialEntryInputVGPRs(CCInfo, MF, *TRI, *Info);
allocateHSAUserSGPRs(CCInfo, MF, *TRI, *Info);
- if (IsKernel && Subtarget->hasKernargPreload() &&
- !Subtarget->needsKernargPreloadBackwardsCompatibility())
+ if (IsKernel && Subtarget->hasKernargPreload())
allocatePreloadKernArgSGPRs(CCInfo, ArgLocs, Ins, MF, *TRI, *Info);
allocateLDSKernelId(CCInfo, MF, *TRI, *Info);
diff --git a/llvm/lib/Target/AMDGPU/SIMemoryLegalizer.cpp b/llvm/lib/Target/AMDGPU/SIMemoryLegalizer.cpp
index 84b9330..f62e808 100644
--- a/llvm/lib/Target/AMDGPU/SIMemoryLegalizer.cpp
+++ b/llvm/lib/Target/AMDGPU/SIMemoryLegalizer.cpp
@@ -2030,8 +2030,11 @@ bool SIGfx10CacheControl::insertAcquire(MachineBasicBlock::iterator &MI,
switch (Scope) {
case SIAtomicScope::SYSTEM:
case SIAtomicScope::AGENT:
- BuildMI(MBB, MI, DL, TII->get(AMDGPU::BUFFER_GL0_INV));
+ // The order of invalidates matter here. We must invalidate "outer in"
+ // so L1 -> L0 to avoid L0 pulling in stale data from L1 when it is
+ // invalidated.
BuildMI(MBB, MI, DL, TII->get(AMDGPU::BUFFER_GL1_INV));
+ BuildMI(MBB, MI, DL, TII->get(AMDGPU::BUFFER_GL0_INV));
Changed = true;
break;
case SIAtomicScope::WORKGROUP:
diff --git a/llvm/lib/Target/AMDGPU/SIRegisterInfo.td b/llvm/lib/Target/AMDGPU/SIRegisterInfo.td
index aabb6c2..d4a1e8d1 100644
--- a/llvm/lib/Target/AMDGPU/SIRegisterInfo.td
+++ b/llvm/lib/Target/AMDGPU/SIRegisterInfo.td
@@ -1101,96 +1101,106 @@ class RegImmMatcher<string name> : AsmOperandClass {
let RenderMethod = "addRegOrImmOperands";
}
-class RegOrImmOperand <string RegisterClassName, string OperandTypeName,
- string decoderImmSize>
- : RegisterOperand<!cast<RegisterClass>(RegisterClassName)> {
+class RegOrImmOperand <RegisterClass RegClass, string OperandTypeName>
+ : RegisterOperand<RegClass> {
let OperandNamespace = "AMDGPU";
let OperandType = OperandTypeName;
let ParserMatchClass = RegImmMatcher<!subst("_Deferred", "", NAME)>;
- let DecoderMethod = "decodeOperand_" # RegisterClassName # decoderImmSize;
}
//===----------------------------------------------------------------------===//
// SSrc_* Operands with an SGPR or a 32-bit immediate
//===----------------------------------------------------------------------===//
-def SSrc_b16 : RegOrImmOperand <"SReg_32", "OPERAND_REG_IMM_INT16", "_Imm16">;
-def SSrc_f16 : RegOrImmOperand <"SReg_32", "OPERAND_REG_IMM_FP16", "_Imm16">;
-def SSrc_b32 : RegOrImmOperand <"SReg_32", "OPERAND_REG_IMM_INT32", "_Imm32">;
-def SSrc_f32 : RegOrImmOperand <"SReg_32", "OPERAND_REG_IMM_FP32", "_Imm32">;
-def SSrc_b64 : RegOrImmOperand <"SReg_64", "OPERAND_REG_IMM_INT64", "_Imm64">;
+class SrcRegOrImm9<RegisterClass regClass, string opWidth, string operandType,
+ int immWidth> : RegOrImmOperand<regClass, operandType> {
+ let DecoderMethod = "decodeSrcRegOrImm9<AMDGPUDisassembler::" # opWidth #
+ ", " # immWidth # ">";
+}
+
+def SSrc_b16 : SrcRegOrImm9 <SReg_32, "OPW32", "OPERAND_REG_IMM_INT16", 16>;
+def SSrc_f16 : SrcRegOrImm9 <SReg_32, "OPW32", "OPERAND_REG_IMM_FP16", 16>;
+def SSrc_b32 : SrcRegOrImm9 <SReg_32, "OPW32", "OPERAND_REG_IMM_INT32", 32>;
+def SSrc_f32 : SrcRegOrImm9 <SReg_32, "OPW32", "OPERAND_REG_IMM_FP32", 32>;
+def SSrc_b64 : SrcRegOrImm9 <SReg_64, "OPW64", "OPERAND_REG_IMM_INT64", 64>;
-def SSrcOrLds_b32 : RegOrImmOperand <"SRegOrLds_32", "OPERAND_REG_IMM_INT32", "_Imm32">;
+def SSrcOrLds_b32 : SrcRegOrImm9 <SRegOrLds_32, "OPW32", "OPERAND_REG_IMM_INT32", 32>;
//===----------------------------------------------------------------------===//
// SSrc_32_Deferred Operands with an SGPR or a 32-bit immediate for use with
// FMAMK/FMAAK
//===----------------------------------------------------------------------===//
-def SSrc_f32_Deferred : RegOrImmOperand<"SReg_32", "OPERAND_REG_IMM_FP32_DEFERRED", "_Deferred_Imm32">;
+class SrcRegOrImmDeferred9<RegisterClass regClass, string opWidth,
+ string operandType, int immWidth>
+ : RegOrImmOperand<regClass, operandType> {
+ let DecoderMethod = "decodeSrcRegOrImmDeferred9<AMDGPUDisassembler::" #
+ opWidth # ", " # immWidth # ">";
+}
+
+def SSrc_f32_Deferred : SrcRegOrImmDeferred9<SReg_32, "OPW32", "OPERAND_REG_IMM_FP32_DEFERRED", 32>;
//===----------------------------------------------------------------------===//
// SCSrc_* Operands with an SGPR or a inline constant
//===----------------------------------------------------------------------===//
-def SCSrc_b32 : RegOrImmOperand <"SReg_32", "OPERAND_REG_INLINE_C_INT32", "_Imm32">;
-def SCSrc_b64 : RegOrImmOperand <"SReg_64", "OPERAND_REG_INLINE_C_INT64", "_Imm64">;
+def SCSrc_b32 : SrcRegOrImm9 <SReg_32, "OPW32", "OPERAND_REG_INLINE_C_INT32", 32>;
+def SCSrc_b64 : SrcRegOrImm9 <SReg_64, "OPW64", "OPERAND_REG_INLINE_C_INT64", 64>;
//===----------------------------------------------------------------------===//
// VSrc_* Operands with an SGPR, VGPR or a 32-bit immediate
//===----------------------------------------------------------------------===//
// The current and temporary future default used case for VOP3.
-def VSrc_b16 : RegOrImmOperand <"VS_32", "OPERAND_REG_IMM_INT16", "_Imm16">;
-def VSrc_f16 : RegOrImmOperand <"VS_32", "OPERAND_REG_IMM_FP16", "_Imm16">;
+def VSrc_b16 : SrcRegOrImm9 <VS_32, "OPW32", "OPERAND_REG_IMM_INT16", 16>;
+def VSrc_f16 : SrcRegOrImm9 <VS_32, "OPW32", "OPERAND_REG_IMM_FP16", 16>;
// True16 VOP3 operands.
-def VSrcT_b16 : RegOrImmOperand <"VS_16", "OPERAND_REG_IMM_INT16", "_Imm16"> {
+def VSrcT_b16 : RegOrImmOperand <VS_16, "OPERAND_REG_IMM_INT16"> {
let EncoderMethod = "getMachineOpValueT16";
let DecoderMethod = "decodeOperand_VSrcT16";
}
-def VSrcT_f16 : RegOrImmOperand <"VS_16", "OPERAND_REG_IMM_FP16", "_Imm16"> {
+def VSrcT_f16 : RegOrImmOperand <VS_16, "OPERAND_REG_IMM_FP16"> {
let EncoderMethod = "getMachineOpValueT16";
let DecoderMethod = "decodeOperand_VSrcT16";
}
// True16 VOP1/2/C operands.
-def VSrcT_b16_Lo128 : RegOrImmOperand <"VS_16_Lo128", "OPERAND_REG_IMM_INT16", "_Imm16"> {
+def VSrcT_b16_Lo128 : RegOrImmOperand <VS_16_Lo128, "OPERAND_REG_IMM_INT16"> {
let EncoderMethod = "getMachineOpValueT16Lo128";
let DecoderMethod = "decodeOperand_VSrcT16_Lo128";
}
-def VSrcT_f16_Lo128 : RegOrImmOperand <"VS_16_Lo128", "OPERAND_REG_IMM_FP16", "_Imm16"> {
+def VSrcT_f16_Lo128 : RegOrImmOperand <VS_16_Lo128, "OPERAND_REG_IMM_FP16"> {
let EncoderMethod = "getMachineOpValueT16Lo128";
let DecoderMethod = "decodeOperand_VSrcT16_Lo128";
}
// The current and temporary future default used case for fake VOP1/2/C.
// For VOP1,2,C True16 instructions. _Lo128 use first 128 32-bit VGPRs only.
-def VSrcFake16_b16_Lo128 : RegOrImmOperand <"VS_32_Lo128", "OPERAND_REG_IMM_INT16", "_Imm16">;
-def VSrcFake16_f16_Lo128 : RegOrImmOperand <"VS_32_Lo128", "OPERAND_REG_IMM_FP16", "_Imm16">;
-
-def VSrc_b32 : RegOrImmOperand <"VS_32", "OPERAND_REG_IMM_INT32", "_Imm32">;
-def VSrc_f32 : RegOrImmOperand <"VS_32", "OPERAND_REG_IMM_FP32", "_Imm32">;
-def VSrc_v2b16 : RegOrImmOperand <"VS_32", "OPERAND_REG_IMM_V2INT16", "_ImmV2I16">;
-def VSrc_v2f16 : RegOrImmOperand <"VS_32", "OPERAND_REG_IMM_V2FP16", "_ImmV2F16">;
-def VSrc_b64 : RegOrImmOperand <"VS_64", "OPERAND_REG_IMM_INT64", "_Imm64">;
-def VSrc_f64 : RegOrImmOperand <"VS_64", "OPERAND_REG_IMM_FP64", "_Imm64"> {
+def VSrcFake16_b16_Lo128 : SrcRegOrImm9 <VS_32_Lo128, "OPW16", "OPERAND_REG_IMM_INT16", 16>;
+def VSrcFake16_f16_Lo128 : SrcRegOrImm9 <VS_32_Lo128, "OPW16", "OPERAND_REG_IMM_FP16", 16>;
+
+def VSrc_b32 : SrcRegOrImm9 <VS_32, "OPW32", "OPERAND_REG_IMM_INT32", 32>;
+def VSrc_f32 : SrcRegOrImm9 <VS_32, "OPW32", "OPERAND_REG_IMM_FP32", 32>;
+def VSrc_v2b16 : SrcRegOrImm9 <VS_32, "OPW32", "OPERAND_REG_IMM_V2INT16", 32>;
+def VSrc_v2f16 : SrcRegOrImm9 <VS_32, "OPW32", "OPERAND_REG_IMM_V2FP16", 16>;
+def VSrc_b64 : SrcRegOrImm9 <VS_64, "OPW64", "OPERAND_REG_IMM_INT64", 64>;
+def VSrc_f64 : SrcRegOrImm9 <VS_64, "OPW64", "OPERAND_REG_IMM_FP64", 64> {
let DecoderMethod = "decodeOperand_VSrc_f64";
}
-def VSrc_v2b32 : RegOrImmOperand <"VS_64", "OPERAND_REG_IMM_V2INT32", "_Imm32">;
-def VSrc_v2f32 : RegOrImmOperand <"VS_64", "OPERAND_REG_IMM_V2FP32", "_Imm32">;
+def VSrc_v2b32 : SrcRegOrImm9 <VS_64, "OPW64", "OPERAND_REG_IMM_V2INT32", 32>;
+def VSrc_v2f32 : SrcRegOrImm9 <VS_64, "OPW64", "OPERAND_REG_IMM_V2FP32", 32>;
//===----------------------------------------------------------------------===//
// VSrc_*_Deferred Operands with an SGPR, VGPR or a 32-bit immediate for use
// with FMAMK/FMAAK
//===----------------------------------------------------------------------===//
-def VSrc_f16_Deferred : RegOrImmOperand<"VS_32", "OPERAND_REG_IMM_FP16_DEFERRED", "_Deferred_Imm16">;
-def VSrc_f32_Deferred : RegOrImmOperand<"VS_32", "OPERAND_REG_IMM_FP32_DEFERRED", "_Deferred_Imm32">;
+def VSrc_f16_Deferred : SrcRegOrImmDeferred9<VS_32, "OPW16", "OPERAND_REG_IMM_FP16_DEFERRED", 16>;
+def VSrc_f32_Deferred : SrcRegOrImmDeferred9<VS_32, "OPW32", "OPERAND_REG_IMM_FP32_DEFERRED", 32>;
-def VSrcFake16_f16_Lo128_Deferred : RegOrImmOperand<"VS_32_Lo128",
- "OPERAND_REG_IMM_FP16_DEFERRED",
- "_Deferred_Imm16">;
+def VSrcFake16_f16_Lo128_Deferred
+ : SrcRegOrImmDeferred9<VS_32_Lo128, "OPW16", "OPERAND_REG_IMM_FP16_DEFERRED", 16>;
//===----------------------------------------------------------------------===//
// VRegSrc_* Operands with a VGPR
@@ -1198,25 +1208,15 @@ def VSrcFake16_f16_Lo128_Deferred : RegOrImmOperand<"VS_32_Lo128",
// This is for operands with the enum(9), VSrc encoding restriction,
// but only allows VGPRs.
-def VRegSrc_32 : RegisterOperand<VGPR_32> {
- let DecoderMethod = "decodeOperand_VGPR_32";
-}
-
-def VRegSrc_64 : RegisterOperand<VReg_64> {
- let DecoderMethod = "decodeOperand_VReg_64";
+class SrcReg9<RegisterClass regClass, string width> : RegisterOperand<regClass> {
+ let DecoderMethod = "decodeSrcReg9<AMDGPUDisassembler::" # width # ">";
}
-def VRegSrc_128 : RegisterOperand<VReg_128> {
- let DecoderMethod = "decodeOperand_VReg_128";
-}
-
-def VRegSrc_256 : RegisterOperand<VReg_256> {
- let DecoderMethod = "decodeOperand_VReg_256";
-}
-
-def VRegOrLdsSrc_32 : RegisterOperand<VRegOrLds_32> {
- let DecoderMethod = "decodeOperand_VRegOrLds_32";
-}
+def VRegSrc_32 : SrcReg9<VGPR_32, "OPW32">;
+def VRegSrc_64 : SrcReg9<VReg_64, "OPW64">;
+def VRegSrc_128: SrcReg9<VReg_128, "OPW128">;
+def VRegSrc_256: SrcReg9<VReg_256, "OPW256">;
+def VRegOrLdsSrc_32 : SrcReg9<VRegOrLds_32, "OPW32">;
//===----------------------------------------------------------------------===//
// VGPRSrc_*
@@ -1257,30 +1257,30 @@ def ARegSrc_32 : AVOperand<AGPR_32, "decodeSrcA9", "OPW32">;
// VCSrc_* Operands with an SGPR, VGPR or an inline constant
//===----------------------------------------------------------------------===//
-def VCSrc_b16 : RegOrImmOperand <"VS_32", "OPERAND_REG_INLINE_C_INT16", "_Imm16">;
-def VCSrc_f16 : RegOrImmOperand <"VS_32", "OPERAND_REG_INLINE_C_FP16", "_Imm16">;
-def VCSrc_b32 : RegOrImmOperand <"VS_32", "OPERAND_REG_INLINE_C_INT32", "_Imm32">;
-def VCSrc_f32 : RegOrImmOperand <"VS_32", "OPERAND_REG_INLINE_C_FP32", "_Imm32">;
-def VCSrc_v2b16 : RegOrImmOperand <"VS_32", "OPERAND_REG_INLINE_C_V2INT16", "_ImmV2I16">;
-def VCSrc_v2f16 : RegOrImmOperand <"VS_32", "OPERAND_REG_INLINE_C_V2FP16", "_ImmV2F16">;
+def VCSrc_b16 : SrcRegOrImm9 <VS_32, "OPW32", "OPERAND_REG_INLINE_C_INT16", 16>;
+def VCSrc_f16 : SrcRegOrImm9 <VS_32, "OPW32", "OPERAND_REG_INLINE_C_FP16", 16>;
+def VCSrc_b32 : SrcRegOrImm9 <VS_32, "OPW32", "OPERAND_REG_INLINE_C_INT32", 32>;
+def VCSrc_f32 : SrcRegOrImm9 <VS_32, "OPW32", "OPERAND_REG_INLINE_C_FP32", 32>;
+def VCSrc_v2b16 : SrcRegOrImm9 <VS_32, "OPW32", "OPERAND_REG_INLINE_C_V2INT16", 32>;
+def VCSrc_v2f16 : SrcRegOrImm9 <VS_32, "OPW32", "OPERAND_REG_INLINE_C_V2FP16", 16>;
//===----------------------------------------------------------------------===//
// VISrc_* Operands with a VGPR or an inline constant
//===----------------------------------------------------------------------===//
-def VISrc_64_f16 : RegOrImmOperand <"VReg_64", "OPERAND_REG_INLINE_C_FP16", "_Imm16">;
-def VISrc_64_b32 : RegOrImmOperand <"VReg_64", "OPERAND_REG_INLINE_C_INT32", "_Imm32">;
-def VISrc_64_f64 : RegOrImmOperand <"VReg_64", "OPERAND_REG_INLINE_C_FP64", "_Imm64">;
-def VISrc_128_f16 : RegOrImmOperand <"VReg_128", "OPERAND_REG_INLINE_C_FP16", "_Imm16">;
-def VISrc_128_b32 : RegOrImmOperand <"VReg_128", "OPERAND_REG_INLINE_C_INT32", "_Imm32">;
-def VISrc_128_f32 : RegOrImmOperand <"VReg_128", "OPERAND_REG_INLINE_C_FP32", "_Imm32">;
-def VISrc_256_b32 : RegOrImmOperand <"VReg_256", "OPERAND_REG_INLINE_C_INT32", "_Imm32">;
-def VISrc_256_f32 : RegOrImmOperand <"VReg_256", "OPERAND_REG_INLINE_C_FP32", "_Imm32">;
-def VISrc_256_f64 : RegOrImmOperand <"VReg_256", "OPERAND_REG_INLINE_C_FP64", "_Imm64">;
-def VISrc_512_b32 : RegOrImmOperand <"VReg_512", "OPERAND_REG_INLINE_C_INT32", "_Imm32">;
-def VISrc_512_f32 : RegOrImmOperand <"VReg_512", "OPERAND_REG_INLINE_C_FP32", "_Imm32">;
-def VISrc_1024_b32 : RegOrImmOperand <"VReg_1024", "OPERAND_REG_INLINE_C_INT32", "_Imm32">;
-def VISrc_1024_f32 : RegOrImmOperand <"VReg_1024", "OPERAND_REG_INLINE_C_FP32", "_Imm32">;
+def VISrc_64_f16 : SrcRegOrImm9 <VReg_64, "OPW64", "OPERAND_REG_INLINE_C_FP16", 16>;
+def VISrc_64_b32 : SrcRegOrImm9 <VReg_64, "OPW64", "OPERAND_REG_INLINE_C_INT32", 32>;
+def VISrc_64_f64 : SrcRegOrImm9 <VReg_64, "OPW64", "OPERAND_REG_INLINE_C_FP64", 64>;
+def VISrc_128_f16 : SrcRegOrImm9 <VReg_128, "OPW128", "OPERAND_REG_INLINE_C_FP16", 16>;
+def VISrc_128_b32 : SrcRegOrImm9 <VReg_128, "OPW128", "OPERAND_REG_INLINE_C_INT32", 32>;
+def VISrc_128_f32 : SrcRegOrImm9 <VReg_128, "OPW128", "OPERAND_REG_INLINE_C_FP32", 32>;
+def VISrc_256_b32 : SrcRegOrImm9 <VReg_256, "OPW256", "OPERAND_REG_INLINE_C_INT32", 32>;
+def VISrc_256_f32 : SrcRegOrImm9 <VReg_256, "OPW256", "OPERAND_REG_INLINE_C_FP32", 32>;
+def VISrc_256_f64 : SrcRegOrImm9 <VReg_256, "OPW256", "OPERAND_REG_INLINE_C_FP64", 64>;
+def VISrc_512_b32 : SrcRegOrImm9 <VReg_512, "OPW512", "OPERAND_REG_INLINE_C_INT32", 32>;
+def VISrc_512_f32 : SrcRegOrImm9 <VReg_512, "OPW512", "OPERAND_REG_INLINE_C_FP32", 32>;
+def VISrc_1024_b32 : SrcRegOrImm9 <VReg_1024, "OPW1024", "OPERAND_REG_INLINE_C_INT32", 32>;
+def VISrc_1024_f32 : SrcRegOrImm9 <VReg_1024, "OPW1024", "OPERAND_REG_INLINE_C_FP32", 32>;
//===----------------------------------------------------------------------===//
// AVSrc_*, AVDst_*, AVLdSt_* Operands with an AGPR or VGPR
@@ -1312,11 +1312,18 @@ def AVLdSt_160 : AVLdStOperand<AV_160, "OPW160">;
// ACSrc_* Operands with an AGPR or an inline constant
//===----------------------------------------------------------------------===//
-def AISrc_64_f64 : RegOrImmOperand <"AReg_64", "OPERAND_REG_INLINE_AC_FP64", "_Imm64">;
-def AISrc_128_f32 : RegOrImmOperand <"AReg_128", "OPERAND_REG_INLINE_AC_FP32", "_Imm32">;
-def AISrc_128_b32 : RegOrImmOperand <"AReg_128", "OPERAND_REG_INLINE_AC_INT32", "_Imm32">;
-def AISrc_256_f64 : RegOrImmOperand <"AReg_256", "OPERAND_REG_INLINE_AC_FP64", "_Imm64">;
-def AISrc_512_f32 : RegOrImmOperand <"AReg_512", "OPERAND_REG_INLINE_AC_FP32", "_Imm32">;
-def AISrc_512_b32 : RegOrImmOperand <"AReg_512", "OPERAND_REG_INLINE_AC_INT32", "_Imm32">;
-def AISrc_1024_f32 : RegOrImmOperand <"AReg_1024", "OPERAND_REG_INLINE_AC_FP32", "_Imm32">;
-def AISrc_1024_b32 : RegOrImmOperand <"AReg_1024", "OPERAND_REG_INLINE_AC_INT32", "_Imm32">;
+class SrcRegOrImmA9<RegisterClass regClass, string opWidth, string operandType,
+ int immWidth>
+ : RegOrImmOperand<regClass, operandType> {
+ let DecoderMethod = "decodeSrcRegOrImmA9<AMDGPUDisassembler::" # opWidth #
+ ", " # immWidth # ">";
+}
+
+def AISrc_64_f64 : SrcRegOrImmA9 <AReg_64, "OPW64", "OPERAND_REG_INLINE_AC_FP64", 64>;
+def AISrc_128_f32 : SrcRegOrImmA9 <AReg_128, "OPW128", "OPERAND_REG_INLINE_AC_FP32", 32>;
+def AISrc_128_b32 : SrcRegOrImmA9 <AReg_128, "OPW128", "OPERAND_REG_INLINE_AC_INT32", 32>;
+def AISrc_256_f64 : SrcRegOrImmA9 <AReg_256, "OPW256", "OPERAND_REG_INLINE_AC_FP64", 64>;
+def AISrc_512_f32 : SrcRegOrImmA9 <AReg_512, "OPW512", "OPERAND_REG_INLINE_AC_FP32", 32>;
+def AISrc_512_b32 : SrcRegOrImmA9 <AReg_512, "OPW512", "OPERAND_REG_INLINE_AC_INT32", 32>;
+def AISrc_1024_f32 : SrcRegOrImmA9 <AReg_1024, "OPW1024", "OPERAND_REG_INLINE_AC_FP32", 32>;
+def AISrc_1024_b32 : SrcRegOrImmA9 <AReg_1024, "OPW1024", "OPERAND_REG_INLINE_AC_INT32", 32>;
diff --git a/llvm/lib/Target/AMDGPU/SMInstructions.td b/llvm/lib/Target/AMDGPU/SMInstructions.td
index f309696..29651a8 100644
--- a/llvm/lib/Target/AMDGPU/SMInstructions.td
+++ b/llvm/lib/Target/AMDGPU/SMInstructions.td
@@ -1068,6 +1068,20 @@ def : GCNPat <
}
} // let OtherPredicates = [HasShaderCyclesRegister]
+let OtherPredicates = [HasSMemRealTime] in {
+def : GCNPat <
+ (i64 (readsteadycounter)),
+ (S_MEMREALTIME)
+>;
+} // let OtherPredicates = [HasSMemRealTime]
+
+let SubtargetPredicate = isGFX11Plus in {
+def : GCNPat <
+ (i64 (readsteadycounter)),
+ (S_SENDMSG_RTN_B64 (i32 /*MSG_RTN_GET_REALTIME=*/0x83))
+>;
+} // let SubtargetPredicate = [isGFX11Plus]
+
def i32imm_zero : TImmLeaf <i32, [{
return Imm == 0;
}]>;
diff --git a/llvm/lib/Target/BPF/AsmParser/BPFAsmParser.cpp b/llvm/lib/Target/BPF/AsmParser/BPFAsmParser.cpp
index 90697c6..0d1eef6 100644
--- a/llvm/lib/Target/BPF/AsmParser/BPFAsmParser.cpp
+++ b/llvm/lib/Target/BPF/AsmParser/BPFAsmParser.cpp
@@ -229,6 +229,7 @@ public:
return StringSwitch<bool>(Name.lower())
.Case("if", true)
.Case("call", true)
+ .Case("callx", true)
.Case("goto", true)
.Case("gotol", true)
.Case("*", true)
diff --git a/llvm/lib/Target/BPF/BPFInstrInfo.td b/llvm/lib/Target/BPF/BPFInstrInfo.td
index 7d443a3..690d534 100644
--- a/llvm/lib/Target/BPF/BPFInstrInfo.td
+++ b/llvm/lib/Target/BPF/BPFInstrInfo.td
@@ -622,9 +622,9 @@ class CALLX<string OpcodeStr>
(ins GPR:$BrDst),
!strconcat(OpcodeStr, " $BrDst"),
[]> {
- bits<32> BrDst;
+ bits<4> BrDst;
- let Inst{31-0} = BrDst;
+ let Inst{51-48} = BrDst;
let BPFClass = BPF_JMP;
}
diff --git a/llvm/lib/Target/DirectX/DXIL.td b/llvm/lib/Target/DirectX/DXIL.td
index 3f3ace5..5215813 100644
--- a/llvm/lib/Target/DirectX/DXIL.td
+++ b/llvm/lib/Target/DirectX/DXIL.td
@@ -12,6 +12,7 @@
//===----------------------------------------------------------------------===//
include "llvm/IR/Intrinsics.td"
+include "llvm/IR/Attributes.td"
// Abstract representation of the class a DXIL Operation belongs to.
class DXILOpClass<string name> {
@@ -34,12 +35,29 @@ def BinaryUintCategory : DXILOpCategory<"Binary uint">;
def UnaryFloatCategory : DXILOpCategory<"Unary float">;
def ComputeIDCategory : DXILOpCategory<"Compute/Mesh/Amplification shader">;
+// Following are the scalar types supported by DXIL operations and are synonymous
+// to llvm_*_ty defined for readability and ease of use in the context of this file.
+
+def voidTy : LLVMType<isVoid>;
+
+// Floating point types
+def f16Ty : LLVMType<f16>;
+def f32Ty : LLVMType<f32>;
+def f64Ty : LLVMType<f64>;
+
+// Integer types
+def i1Ty : LLVMType<i1>;
+def i8Ty : LLVMType<i8>;
+def i16Ty : LLVMType<i16>;
+def i32Ty : LLVMType<i32>;
+def i64Ty : LLVMType<i64>;
+
// The parameter description for a DXIL operation
class DXILOpParameter<int pos, string type, string name, string doc,
bit isConstant = 0, string enumName = "",
int maxValue = 0> {
int Pos = pos; // Position in parameter list
- string LLVMType = type; // LLVM type name, $o for overload, $r for resource
+ string Type = type; // LLVM type name, $o for overload, $r for resource
// type, $cb for legacy cbuffer, $u4 for u4 struct
string Name = name; // Short, unique parameter name
string Doc = doc; // Description of this parameter
@@ -56,9 +74,11 @@ class DXILOperationDesc {
DXILOpCategory OpCategory; // Category of the operation
string Doc = ""; // Description of the operation
list<DXILOpParameter> Params = []; // Parameter list of the operation
- string OverloadTypes = ""; // Overload types, if applicable
- string Attributes = ""; // Attribute shorthands: rn=does not access
- // memory,ro=only reads from memory,
+ list<LLVMType> OverloadTypes = []; // Overload types, if applicable
+ EnumAttr Attribute; // Operation Attribute. Leverage attributes defined in Attributes.td
+ // ReadNone - operation does not access memory.
+ // ReadOnly - only reads from memory.
+ // "ReadMemory" - reads memory
bit IsDerivative = 0; // Whether this is some kind of derivative
bit IsGradient = 0; // Whether this requires a gradient calculation
bit IsFeedback = 0; // Whether this is a sampler feedback operation
@@ -71,7 +91,7 @@ class DXILOperationDesc {
}
class DXILOperation<string name, int opCode, DXILOpClass opClass, DXILOpCategory opCategory, string doc,
- string oloadTypes, string attrs, list<DXILOpParameter> params,
+ list<LLVMType> oloadTypes, EnumAttr attrs, list<DXILOpParameter> params,
list<string> statsGroup = []> : DXILOperationDesc {
let OpName = name;
let OpCode = opCode;
@@ -80,7 +100,7 @@ class DXILOperation<string name, int opCode, DXILOpClass opClass, DXILOpCategory
let OpClass = opClass;
let OpCategory = opCategory;
let OverloadTypes = oloadTypes;
- let Attributes = attrs;
+ let Attribute = attrs;
let StatsGroup = statsGroup;
}
@@ -88,7 +108,7 @@ class DXILOperation<string name, int opCode, DXILOpClass opClass, DXILOpCategory
class LLVMIntrinsic<Intrinsic llvm_intrinsic_> { Intrinsic llvm_intrinsic = llvm_intrinsic_; }
def Sin : DXILOperation<"Sin", 13, UnaryClass, UnaryFloatCategory, "returns sine(theta) for theta in radians.",
- "half;float;", "rn",
+ [f16Ty,f32Ty], ReadNone,
[
DXILOpParameter<0, "$o", "", "operation result">,
DXILOpParameter<1, "i32", "opcode", "DXIL opcode">,
@@ -98,7 +118,7 @@ def Sin : DXILOperation<"Sin", 13, UnaryClass, UnaryFloatCategory, "returns sine
LLVMIntrinsic<int_sin>;
def UMax : DXILOperation< "UMax", 39, BinaryClass, BinaryUintCategory, "unsigned integer maximum. UMax(a,b) = a > b ? a : b",
- "i16;i32;i64;", "rn",
+ [i16Ty,i32Ty,i64Ty], ReadNone,
[
DXILOpParameter<0, "$o", "", "operation result">,
DXILOpParameter<1, "i32", "opcode", "DXIL opcode">,
@@ -108,7 +128,7 @@ def UMax : DXILOperation< "UMax", 39, BinaryClass, BinaryUintCategory, "unsign
["uints"]>,
LLVMIntrinsic<int_umax>;
-def ThreadId : DXILOperation< "ThreadId", 93, ThreadIdClass, ComputeIDCategory, "reads the thread ID", "i32;", "rn",
+def ThreadId : DXILOperation< "ThreadId", 93, ThreadIdClass, ComputeIDCategory, "reads the thread ID", [i32Ty], ReadNone,
[
DXILOpParameter<0, "i32", "", "thread ID component">,
DXILOpParameter<1, "i32", "opcode", "DXIL opcode">,
@@ -116,7 +136,7 @@ def ThreadId : DXILOperation< "ThreadId", 93, ThreadIdClass, ComputeIDCategory,
]>,
LLVMIntrinsic<int_dx_thread_id>;
-def GroupId : DXILOperation< "GroupId", 94, GroupIdClass, ComputeIDCategory, "reads the group ID (SV_GroupID)", "i32;", "rn",
+def GroupId : DXILOperation< "GroupId", 94, GroupIdClass, ComputeIDCategory, "reads the group ID (SV_GroupID)", [i32Ty], ReadNone,
[
DXILOpParameter<0, "i32", "", "group ID component">,
DXILOpParameter<1, "i32", "opcode", "DXIL opcode">,
@@ -125,7 +145,7 @@ def GroupId : DXILOperation< "GroupId", 94, GroupIdClass, ComputeIDCategory, "r
LLVMIntrinsic<int_dx_group_id>;
def ThreadIdInGroup : DXILOperation< "ThreadIdInGroup", 95, ThreadIdInGroupClass, ComputeIDCategory,
- "reads the thread ID within the group (SV_GroupThreadID)", "i32;", "rn",
+ "reads the thread ID within the group (SV_GroupThreadID)", [i32Ty], ReadNone,
[
DXILOpParameter<0, "i32", "", "thread ID in group component">,
DXILOpParameter<1, "i32", "opcode", "DXIL opcode">,
@@ -134,7 +154,7 @@ def ThreadIdInGroup : DXILOperation< "ThreadIdInGroup", 95, ThreadIdInGroupClas
LLVMIntrinsic<int_dx_thread_id_in_group>;
def FlattenedThreadIdInGroup : DXILOperation< "FlattenedThreadIdInGroup", 96, FlattenedThreadIdInGroupClass, ComputeIDCategory,
- "provides a flattened index for a given thread within a given group (SV_GroupIndex)", "i32;", "rn",
+ "provides a flattened index for a given thread within a given group (SV_GroupIndex)", [i32Ty], ReadNone,
[
DXILOpParameter<0, "i32", "", "result">,
DXILOpParameter<1, "i32", "opcode", "DXIL opcode">
diff --git a/llvm/lib/Target/Hexagon/HexagonPatterns.td b/llvm/lib/Target/Hexagon/HexagonPatterns.td
index 9de50b4..ea7c4ac 100644
--- a/llvm/lib/Target/Hexagon/HexagonPatterns.td
+++ b/llvm/lib/Target/Hexagon/HexagonPatterns.td
@@ -525,11 +525,11 @@ def Vsplatpi: OutPatFrag<(ops node:$V),
(Combinew (A2_tfrsi $V), (A2_tfrsi $V))>;
def: Pat<(v2i16 (azext V2I1:$Pu)),
- (A2_andir (LoReg (C2_mask V2I1:$Pu)), (i32 0x00010001))>;
+ (A2_andir (S2_vtrunehb (C2_mask V2I1:$Pu)), (i32 0x00010001))>;
def: Pat<(v2i32 (azext V2I1:$Pu)),
(A2_andp (C2_mask V2I1:$Pu), (A2_combineii (i32 1), (i32 1)))>;
def: Pat<(v4i8 (azext V4I1:$Pu)),
- (A2_andir (LoReg (C2_mask V4I1:$Pu)), (i32 0x01010101))>;
+ (A2_andir (S2_vtrunehb (C2_mask V4I1:$Pu)), (i32 0x01010101))>;
def: Pat<(v4i16 (azext V4I1:$Pu)),
(A2_andp (C2_mask V4I1:$Pu), (Vsplatpi (i32 0x00010001)))>;
def: Pat<(v8i8 (azext V8I1:$Pu)),
diff --git a/llvm/lib/Target/Mips/AsmParser/MipsAsmParser.cpp b/llvm/lib/Target/Mips/AsmParser/MipsAsmParser.cpp
index 3c673ae..36aab38 100644
--- a/llvm/lib/Target/Mips/AsmParser/MipsAsmParser.cpp
+++ b/llvm/lib/Target/Mips/AsmParser/MipsAsmParser.cpp
@@ -2920,6 +2920,11 @@ bool MipsAsmParser::loadAndAddSymbolAddress(const MCExpr *SymExpr,
(Res.getSymA()->getSymbol().isELF() &&
cast<MCSymbolELF>(Res.getSymA()->getSymbol()).getBinding() ==
ELF::STB_LOCAL);
+ // For O32, "$"-prefixed symbols are recognized as temporary while
+ // .L-prefixed symbols are not (PrivateGlobalPrefix is "$"). Recognize ".L"
+ // manually.
+ if (ABI.IsO32() && Res.getSymA()->getSymbol().getName().starts_with(".L"))
+ IsLocalSym = true;
bool UseXGOT = STI->hasFeature(Mips::FeatureXGOT) && !IsLocalSym;
// The case where the result register is $25 is somewhat special. If the
@@ -6359,7 +6364,7 @@ bool MipsAsmParser::parseOperand(OperandVector &Operands, StringRef Mnemonic) {
return true;
SMLoc E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
- MCSymbol *Sym = getContext().getOrCreateSymbol("$" + Identifier);
+ MCSymbol *Sym = getContext().getOrCreateSymbol(Identifier);
// Otherwise create a symbol reference.
const MCExpr *SymRef =
MCSymbolRefExpr::create(Sym, MCSymbolRefExpr::VK_None, getContext());
diff --git a/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp b/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp
index 80a67ca..7f58b31 100644
--- a/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp
+++ b/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp
@@ -490,6 +490,8 @@ NVPTXTargetLowering::NVPTXTargetLowering(const NVPTXTargetMachine &TM,
setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2f16, Expand);
setOperationAction(ISD::READCYCLECOUNTER, MVT::i64, Legal);
+ if (STI.getSmVersion() >= 30 && STI.getPTXVersion() > 31)
+ setOperationAction(ISD::READSTEADYCOUNTER, MVT::i64, Legal);
setFP16OperationAction(ISD::SETCC, MVT::f16, Legal, Promote);
setFP16OperationAction(ISD::SETCC, MVT::v2f16, Legal, Expand);
diff --git a/llvm/lib/Target/NVPTX/NVPTXInstrInfo.td b/llvm/lib/Target/NVPTX/NVPTXInstrInfo.td
index 4322eae..631136a 100644
--- a/llvm/lib/Target/NVPTX/NVPTXInstrInfo.td
+++ b/llvm/lib/Target/NVPTX/NVPTXInstrInfo.td
@@ -3805,7 +3805,6 @@ def CALL_PROTOTYPE :
include "NVPTXIntrinsics.td"
-
//-----------------------------------
// Notes
//-----------------------------------
diff --git a/llvm/lib/Target/NVPTX/NVPTXIntrinsics.td b/llvm/lib/Target/NVPTX/NVPTXIntrinsics.td
index 133e282..477789a 100644
--- a/llvm/lib/Target/NVPTX/NVPTXIntrinsics.td
+++ b/llvm/lib/Target/NVPTX/NVPTXIntrinsics.td
@@ -6382,6 +6382,7 @@ def INT_PTX_SREG_GLOBALTIMER :
}
def: Pat <(i64 (readcyclecounter)), (INT_PTX_SREG_CLOCK64)>;
+def: Pat <(i64 (readsteadycounter)), (INT_PTX_SREG_GLOBALTIMER)>;
def INT_PTX_SREG_PM0 : PTX_READ_SREG_R32<"pm0", int_nvvm_read_ptx_sreg_pm0>;
def INT_PTX_SREG_PM1 : PTX_READ_SREG_R32<"pm1", int_nvvm_read_ptx_sreg_pm1>;
diff --git a/llvm/lib/Target/RISCV/RISCVFoldMasks.cpp b/llvm/lib/Target/RISCV/RISCVFoldMasks.cpp
index 271d28f..fddbaa9 100644
--- a/llvm/lib/Target/RISCV/RISCVFoldMasks.cpp
+++ b/llvm/lib/Target/RISCV/RISCVFoldMasks.cpp
@@ -47,10 +47,10 @@ public:
StringRef getPassName() const override { return "RISC-V Fold Masks"; }
private:
- bool convertToUnmasked(MachineInstr &MI, MachineInstr *MaskDef);
- bool convertVMergeToVMv(MachineInstr &MI, MachineInstr *MaskDef);
+ bool convertToUnmasked(MachineInstr &MI, MachineInstr *MaskDef) const;
+ bool convertVMergeToVMv(MachineInstr &MI, MachineInstr *MaskDef) const;
- bool isAllOnesMask(MachineInstr *MaskDef);
+ bool isAllOnesMask(MachineInstr *MaskDef) const;
};
} // namespace
@@ -59,7 +59,7 @@ char RISCVFoldMasks::ID = 0;
INITIALIZE_PASS(RISCVFoldMasks, DEBUG_TYPE, "RISC-V Fold Masks", false, false)
-bool RISCVFoldMasks::isAllOnesMask(MachineInstr *MaskDef) {
+bool RISCVFoldMasks::isAllOnesMask(MachineInstr *MaskDef) const {
if (!MaskDef)
return false;
assert(MaskDef->isCopy() && MaskDef->getOperand(0).getReg() == RISCV::V0);
@@ -89,7 +89,8 @@ bool RISCVFoldMasks::isAllOnesMask(MachineInstr *MaskDef) {
// Transform (VMERGE_VVM_<LMUL> false, false, true, allones, vl, sew) to
// (VMV_V_V_<LMUL> false, true, vl, sew). It may decrease uses of VMSET.
-bool RISCVFoldMasks::convertVMergeToVMv(MachineInstr &MI, MachineInstr *V0Def) {
+bool RISCVFoldMasks::convertVMergeToVMv(MachineInstr &MI,
+ MachineInstr *V0Def) const {
#define CASE_VMERGE_TO_VMV(lmul) \
case RISCV::PseudoVMERGE_VVM_##lmul: \
NewOpc = RISCV::PseudoVMV_V_V_##lmul; \
@@ -133,7 +134,7 @@ bool RISCVFoldMasks::convertVMergeToVMv(MachineInstr &MI, MachineInstr *V0Def) {
}
bool RISCVFoldMasks::convertToUnmasked(MachineInstr &MI,
- MachineInstr *MaskDef) {
+ MachineInstr *MaskDef) const {
const RISCV::RISCVMaskedPseudoInfo *I =
RISCV::getMaskedPseudoInfo(MI.getOpcode());
if (!I)
diff --git a/llvm/lib/Target/RISCV/RISCVFrameLowering.cpp b/llvm/lib/Target/RISCV/RISCVFrameLowering.cpp
index 37672dd..7e3dcb3 100644
--- a/llvm/lib/Target/RISCV/RISCVFrameLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVFrameLowering.cpp
@@ -291,9 +291,7 @@ static Register getMaxPushPopReg(const MachineFunction &MF,
const std::vector<CalleeSavedInfo> &CSI) {
Register MaxPushPopReg = RISCV::NoRegister;
for (auto &CS : CSI) {
- // RISCVRegisterInfo::hasReservedSpillSlot assigns negative frame indices to
- // registers which can be saved by Zcmp Push.
- if (CS.getFrameIdx() < 0)
+ if (llvm::is_contained(AllPopRegs, CS.getReg().id()))
MaxPushPopReg = std::max(MaxPushPopReg.id(), CS.getReg().id());
}
// if rlist is {rs, s0-s10}, then s11 will also be included
@@ -532,8 +530,8 @@ void RISCVFrameLowering::emitPrologue(MachineFunction &MF,
// FIXME (note copied from Lanai): This appears to be overallocating. Needs
// investigation. Get the number of bytes to allocate from the FrameInfo.
- uint64_t StackSize = getStackSizeWithRVVPadding(MF);
- uint64_t RealStackSize = StackSize + RVFI->getReservedSpillsSize();
+ uint64_t RealStackSize = getStackSizeWithRVVPadding(MF);
+ uint64_t StackSize = RealStackSize - RVFI->getReservedSpillsSize();
uint64_t RVVStackSize = RVFI->getRVVStackSize();
// Early exit if there is no need to allocate on the stack
@@ -590,20 +588,7 @@ void RISCVFrameLowering::emitPrologue(MachineFunction &MF,
// directives.
for (const auto &Entry : CSI) {
int FrameIdx = Entry.getFrameIdx();
- int64_t Offset;
- // Offsets for objects with fixed locations (IE: those saved by libcall) are
- // simply calculated from the frame index.
- if (FrameIdx < 0) {
- if (RVFI->isPushable(MF)) {
- // Callee-saved register stored by Zcmp push is in reverse order.
- Offset = -(FrameIdx + RVFI->getRVPushRegs() + 1) *
- (int64_t)STI.getXLen() / 8;
- } else {
- Offset = FrameIdx * (int64_t)STI.getXLen() / 8;
- }
- } else {
- Offset = MFI.getObjectOffset(FrameIdx) - RVFI->getReservedSpillsSize();
- }
+ int64_t Offset = MFI.getObjectOffset(FrameIdx);
Register Reg = Entry.getReg();
unsigned CFIIndex = MF.addFrameInst(MCCFIInstruction::createOffset(
nullptr, RI->getDwarfRegNum(Reg, true), Offset));
@@ -746,8 +731,8 @@ void RISCVFrameLowering::emitEpilogue(MachineFunction &MF,
if (!CSI.empty())
LastFrameDestroy = std::prev(MBBI, CSI.size());
- uint64_t StackSize = getStackSizeWithRVVPadding(MF);
- uint64_t RealStackSize = StackSize + RVFI->getReservedSpillsSize();
+ uint64_t RealStackSize = getStackSizeWithRVVPadding(MF);
+ uint64_t StackSize = RealStackSize - RVFI->getReservedSpillsSize();
uint64_t FPOffset = RealStackSize - RVFI->getVarArgsSaveSize();
uint64_t RVVStackSize = RVFI->getRVVStackSize();
@@ -897,8 +882,6 @@ RISCVFrameLowering::getFrameIndexReference(const MachineFunction &MF, int FI,
if (FrameReg == getFPReg(STI)) {
Offset += StackOffset::getFixed(RVFI->getVarArgsSaveSize());
- if (FI >= 0)
- Offset -= StackOffset::getFixed(RVFI->getReservedSpillsSize());
// When using FP to access scalable vector objects, we need to minus
// the frame size.
//
@@ -965,8 +948,7 @@ RISCVFrameLowering::getFrameIndexReference(const MachineFunction &MF, int FI,
if (MFI.isFixedObjectIndex(FI)) {
assert(!RI->hasStackRealignment(MF) &&
"Can't index across variable sized realign");
- Offset += StackOffset::get(getStackSizeWithRVVPadding(MF) +
- RVFI->getReservedSpillsSize(),
+ Offset += StackOffset::get(getStackSizeWithRVVPadding(MF),
RVFI->getRVVStackSize());
} else {
Offset += StackOffset::getFixed(MFI.getStackSize());
@@ -1243,16 +1225,10 @@ void RISCVFrameLowering::processFunctionBeforeFrameFinalized(
RVFI->setBranchRelaxationScratchFrameIndex(FI);
}
- if (MFI.getCalleeSavedInfo().empty() || RVFI->useSaveRestoreLibCalls(MF) ||
- RVFI->isPushable(MF)) {
- RVFI->setCalleeSavedStackSize(0);
- return;
- }
-
- unsigned Size = 0;
+ unsigned Size = RVFI->getReservedSpillsSize();
for (const auto &Info : MFI.getCalleeSavedInfo()) {
int FrameIdx = Info.getFrameIdx();
- if (MFI.getStackID(FrameIdx) != TargetStackID::Default)
+ if (FrameIdx < 0 || MFI.getStackID(FrameIdx) != TargetStackID::Default)
continue;
Size += MFI.getObjectSize(FrameIdx);
@@ -1260,30 +1236,6 @@ void RISCVFrameLowering::processFunctionBeforeFrameFinalized(
RVFI->setCalleeSavedStackSize(Size);
}
-void RISCVFrameLowering::processFunctionBeforeFrameIndicesReplaced(
- MachineFunction &MF, RegScavenger *RS) const {
- // Remove CalleeSavedInfo for registers saved by Zcmp or save/restore
- // libcalls.
- MachineFrameInfo &MFI = MF.getFrameInfo();
- const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
- const auto *RVFI = MF.getInfo<RISCVMachineFunctionInfo>();
- if (!RVFI->isPushable(MF) && !RVFI->useSaveRestoreLibCalls(MF))
- return;
- const std::vector<CalleeSavedInfo> &CSIs = MFI.getCalleeSavedInfo();
- std::vector<CalleeSavedInfo> NewCSIs;
- for (const auto &CSI : CSIs) {
- // Skip CSRs that have fake a frame index.
- int ReservedFI = 0;
- if (TRI->hasReservedSpillSlot(MF, CSI.getReg(), ReservedFI)) {
- assert(CSI.getFrameIdx() == ReservedFI &&
- "Reserved CSR spill slot frame index mismatch in CSI");
- continue;
- }
- NewCSIs.push_back(CSI);
- }
- MFI.setCalleeSavedInfo(std::move(NewCSIs));
-}
-
// Not preserve stack space within prologue for outgoing variables when the
// function contains variable size objects or there are vector objects accessed
// by the frame pointer.
@@ -1403,6 +1355,93 @@ RISCVFrameLowering::getFirstSPAdjustAmount(const MachineFunction &MF) const {
return 0;
}
+// Offsets which need to be scale by XLen representing locations of CSRs which
+// are given a fixed location by save/restore libcalls or Zcmp Push/Pop.
+static const std::pair<MCPhysReg, int8_t> FixedCSRFIMap[] = {
+ {/*ra*/ RISCV::X1, -1}, {/*s0*/ RISCV::X8, -2},
+ {/*s1*/ RISCV::X9, -3}, {/*s2*/ RISCV::X18, -4},
+ {/*s3*/ RISCV::X19, -5}, {/*s4*/ RISCV::X20, -6},
+ {/*s5*/ RISCV::X21, -7}, {/*s6*/ RISCV::X22, -8},
+ {/*s7*/ RISCV::X23, -9}, {/*s8*/ RISCV::X24, -10},
+ {/*s9*/ RISCV::X25, -11}, {/*s10*/ RISCV::X26, -12},
+ {/*s11*/ RISCV::X27, -13}};
+
+bool RISCVFrameLowering::assignCalleeSavedSpillSlots(
+ MachineFunction &MF, const TargetRegisterInfo *TRI,
+ std::vector<CalleeSavedInfo> &CSI, unsigned &MinCSFrameIndex,
+ unsigned &MaxCSFrameIndex) const {
+ // Early exit if no callee saved registers are modified!
+ if (CSI.empty())
+ return true;
+
+ auto *RVFI = MF.getInfo<RISCVMachineFunctionInfo>();
+
+ if (RVFI->isPushable(MF)) {
+ // Determine how many GPRs we need to push and save it to RVFI.
+ Register MaxReg = getMaxPushPopReg(MF, CSI);
+ if (MaxReg != RISCV::NoRegister) {
+ auto [RegEnc, PushedRegNum] = getPushPopEncodingAndNum(MaxReg);
+ RVFI->setRVPushRegs(PushedRegNum);
+ RVFI->setRVPushStackSize(alignTo((STI.getXLen() / 8) * PushedRegNum, 16));
+
+ // Use encoded number to represent registers to spill.
+ RVFI->setRVPushRlist(RegEnc);
+ }
+ }
+
+ MachineFrameInfo &MFI = MF.getFrameInfo();
+ const TargetRegisterInfo *RegInfo = MF.getSubtarget().getRegisterInfo();
+
+ for (auto &CS : CSI) {
+ unsigned Reg = CS.getReg();
+ const TargetRegisterClass *RC = RegInfo->getMinimalPhysRegClass(Reg);
+ unsigned Size = RegInfo->getSpillSize(*RC);
+
+ // This might need a fixed stack slot.
+ if (RVFI->useSaveRestoreLibCalls(MF) || RVFI->isPushable(MF)) {
+ const auto *FII = llvm::find_if(
+ FixedCSRFIMap, [&](auto P) { return P.first == CS.getReg(); });
+ if (FII != std::end(FixedCSRFIMap)) {
+ int64_t Offset;
+ if (RVFI->isPushable(MF))
+ Offset = -((FII->second + RVFI->getRVPushRegs() + 1) * (int64_t)Size);
+ else
+ Offset = FII->second * (int64_t)Size;
+
+ int FrameIdx = MFI.CreateFixedSpillStackObject(Size, Offset);
+ assert(FrameIdx < 0);
+ CS.setFrameIdx(FrameIdx);
+ continue;
+ }
+ }
+
+ // Not a fixed slot.
+ Align Alignment = RegInfo->getSpillAlign(*RC);
+ // We may not be able to satisfy the desired alignment specification of
+ // the TargetRegisterClass if the stack alignment is smaller. Use the
+ // min.
+ Alignment = std::min(Alignment, getStackAlign());
+ int FrameIdx = MFI.CreateStackObject(Size, Alignment, true);
+ if ((unsigned)FrameIdx < MinCSFrameIndex)
+ MinCSFrameIndex = FrameIdx;
+ if ((unsigned)FrameIdx > MaxCSFrameIndex)
+ MaxCSFrameIndex = FrameIdx;
+ CS.setFrameIdx(FrameIdx);
+ }
+
+ // Allocate a fixed object that covers the full push or libcall size.
+ if (RVFI->isPushable(MF)) {
+ if (int64_t PushSize = RVFI->getRVPushStackSize())
+ MFI.CreateFixedSpillStackObject(PushSize, -PushSize);
+ } else if (int LibCallRegs = getLibCallID(MF, CSI) + 1) {
+ int64_t LibCallFrameSize =
+ alignTo((STI.getXLen() / 8) * LibCallRegs, getStackAlign());
+ MFI.CreateFixedSpillStackObject(LibCallFrameSize, -LibCallFrameSize);
+ }
+
+ return true;
+}
+
bool RISCVFrameLowering::spillCalleeSavedRegisters(
MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
ArrayRef<CalleeSavedInfo> CSI, const TargetRegisterInfo *TRI) const {
@@ -1418,14 +1457,10 @@ bool RISCVFrameLowering::spillCalleeSavedRegisters(
// Emit CM.PUSH with base SPimm & evaluate Push stack
RISCVMachineFunctionInfo *RVFI = MF->getInfo<RISCVMachineFunctionInfo>();
if (RVFI->isPushable(*MF)) {
- Register MaxReg = getMaxPushPopReg(*MF, CSI);
- if (MaxReg != RISCV::NoRegister) {
- auto [RegEnc, PushedRegNum] = getPushPopEncodingAndNum(MaxReg);
- RVFI->setRVPushRegs(PushedRegNum);
- RVFI->setRVPushStackSize(alignTo((STI.getXLen() / 8) * PushedRegNum, 16));
-
+ unsigned PushedRegNum = RVFI->getRVPushRegs();
+ if (PushedRegNum > 0) {
// Use encoded number to represent registers to spill.
- RVFI->setRVPushRlist(RegEnc);
+ int RegEnc = RVFI->getRVPushRlist();
MachineInstrBuilder PushBuilder =
BuildMI(MBB, MI, DL, TII.get(RISCV::CM_PUSH))
.setMIFlag(MachineInstr::FrameSetup);
diff --git a/llvm/lib/Target/RISCV/RISCVFrameLowering.h b/llvm/lib/Target/RISCV/RISCVFrameLowering.h
index a784479..210f8c1 100644
--- a/llvm/lib/Target/RISCV/RISCVFrameLowering.h
+++ b/llvm/lib/Target/RISCV/RISCVFrameLowering.h
@@ -37,10 +37,6 @@ public:
void processFunctionBeforeFrameFinalized(MachineFunction &MF,
RegScavenger *RS) const override;
- void
- processFunctionBeforeFrameIndicesReplaced(MachineFunction &MF,
- RegScavenger *RS) const override;
-
bool hasFP(const MachineFunction &MF) const override;
bool hasBP(const MachineFunction &MF) const;
@@ -49,6 +45,12 @@ public:
MachineBasicBlock::iterator
eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
MachineBasicBlock::iterator MI) const override;
+
+ bool assignCalleeSavedSpillSlots(MachineFunction &MF,
+ const TargetRegisterInfo *TRI,
+ std::vector<CalleeSavedInfo> &CSI,
+ unsigned &MinCSFrameIndex,
+ unsigned &MaxCSFrameIndex) const override;
bool spillCalleeSavedRegisters(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MI,
ArrayRef<CalleeSavedInfo> CSI,
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 12c0cd5..8235b53 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -3877,6 +3877,47 @@ static SDValue lowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG,
return convertFromScalableVector(VT, Vec, DAG, Subtarget);
}
+ // For m1 vectors, if we have non-undef values in both halves of our vector,
+ // split the vector into low and high halves, build them separately, then
+ // use a vselect to combine them. For long vectors, this cuts the critical
+ // path of the vslide1down sequence in half, and gives us an opportunity
+ // to special case each half independently. Note that we don't change the
+ // length of the sub-vectors here, so if both fallback to the generic
+ // vslide1down path, we should be able to fold the vselect into the final
+ // vslidedown (for the undef tail) for the first half w/ masking.
+ unsigned NumElts = VT.getVectorNumElements();
+ unsigned NumUndefElts =
+ count_if(Op->op_values(), [](const SDValue &V) { return V.isUndef(); });
+ unsigned NumDefElts = NumElts - NumUndefElts;
+ if (NumDefElts >= 8 && NumDefElts > NumElts / 2 &&
+ ContainerVT.bitsLE(getLMUL1VT(ContainerVT))) {
+ SmallVector<SDValue> SubVecAOps, SubVecBOps;
+ SmallVector<SDValue> MaskVals;
+ SDValue UndefElem = DAG.getUNDEF(Op->getOperand(0)->getValueType(0));
+ SubVecAOps.reserve(NumElts);
+ SubVecBOps.reserve(NumElts);
+ for (unsigned i = 0; i < NumElts; i++) {
+ SDValue Elem = Op->getOperand(i);
+ if (i < NumElts / 2) {
+ SubVecAOps.push_back(Elem);
+ SubVecBOps.push_back(UndefElem);
+ } else {
+ SubVecAOps.push_back(UndefElem);
+ SubVecBOps.push_back(Elem);
+ }
+ bool SelectMaskVal = (i < NumElts / 2);
+ MaskVals.push_back(DAG.getConstant(SelectMaskVal, DL, XLenVT));
+ }
+ assert(SubVecAOps.size() == NumElts && SubVecBOps.size() == NumElts &&
+ MaskVals.size() == NumElts);
+
+ SDValue SubVecA = DAG.getBuildVector(VT, DL, SubVecAOps);
+ SDValue SubVecB = DAG.getBuildVector(VT, DL, SubVecBOps);
+ MVT MaskVT = MVT::getVectorVT(MVT::i1, NumElts);
+ SDValue SelectMask = DAG.getBuildVector(MaskVT, DL, MaskVals);
+ return DAG.getNode(ISD::VSELECT, DL, VT, SelectMask, SubVecA, SubVecB);
+ }
+
// Cap the cost at a value linear to the number of elements in the vector.
// The default lowering is to use the stack. The vector store + scalar loads
// is linear in VL. However, at high lmuls vslide1down and vslidedown end up
@@ -9727,12 +9768,15 @@ SDValue RISCVTargetLowering::lowerEXTRACT_SUBVECTOR(SDValue Op,
if (OrigIdx == 0)
return Op;
- // If the subvector vector is a fixed-length type, we cannot use subregister
- // manipulation to simplify the codegen; we don't know which register of a
- // LMUL group contains the specific subvector as we only know the minimum
- // register size. Therefore we must slide the vector group down the full
- // amount.
- if (SubVecVT.isFixedLengthVector()) {
+ const unsigned MinVLen = Subtarget.getRealMinVLen();
+ const unsigned MaxVLen = Subtarget.getRealMaxVLen();
+
+ // If the subvector vector is a fixed-length type and we don't know VLEN
+ // exactly, we cannot use subregister manipulation to simplify the codegen; we
+ // don't know which register of a LMUL group contains the specific subvector
+ // as we only know the minimum register size. Therefore we must slide the
+ // vector group down the full amount.
+ if (SubVecVT.isFixedLengthVector() && MinVLen != MaxVLen) {
MVT ContainerVT = VecVT;
if (VecVT.isFixedLengthVector()) {
ContainerVT = getContainerForFixedLengthVector(VecVT);
@@ -9764,19 +9808,48 @@ SDValue RISCVTargetLowering::lowerEXTRACT_SUBVECTOR(SDValue Op,
return DAG.getBitcast(Op.getValueType(), Slidedown);
}
+ if (VecVT.isFixedLengthVector()) {
+ VecVT = getContainerForFixedLengthVector(VecVT);
+ Vec = convertToScalableVector(VecVT, Vec, DAG, Subtarget);
+ }
+
+ MVT ContainerSubVecVT = SubVecVT;
+ if (SubVecVT.isFixedLengthVector())
+ ContainerSubVecVT = getContainerForFixedLengthVector(SubVecVT);
+
unsigned SubRegIdx, RemIdx;
- std::tie(SubRegIdx, RemIdx) =
- RISCVTargetLowering::decomposeSubvectorInsertExtractToSubRegs(
- VecVT, SubVecVT, OrigIdx, TRI);
+ // extract_subvector scales the index by vscale is the subvector is scalable,
+ // and decomposeSubvectorInsertExtractToSubRegs takes this into account. So if
+ // we have a fixed length subvector, we need to adjust the index by 1/vscale.
+ if (SubVecVT.isFixedLengthVector()) {
+ assert(MinVLen == MaxVLen);
+ unsigned Vscale = MinVLen / RISCV::RVVBitsPerBlock;
+ std::tie(SubRegIdx, RemIdx) =
+ RISCVTargetLowering::decomposeSubvectorInsertExtractToSubRegs(
+ VecVT, ContainerSubVecVT, OrigIdx / Vscale, TRI);
+ RemIdx = (RemIdx * Vscale) + (OrigIdx % Vscale);
+ } else {
+ std::tie(SubRegIdx, RemIdx) =
+ RISCVTargetLowering::decomposeSubvectorInsertExtractToSubRegs(
+ VecVT, ContainerSubVecVT, OrigIdx, TRI);
+ }
// If the Idx has been completely eliminated then this is a subvector extract
// which naturally aligns to a vector register. These can easily be handled
// using subregister manipulation.
- if (RemIdx == 0)
+ if (RemIdx == 0) {
+ if (SubVecVT.isFixedLengthVector()) {
+ Vec = DAG.getTargetExtractSubreg(SubRegIdx, DL, ContainerSubVecVT, Vec);
+ return convertFromScalableVector(SubVecVT, Vec, DAG, Subtarget);
+ }
return Op;
+ }
- // Else SubVecVT is a fractional LMUL and may need to be slid down.
- assert(RISCVVType::decodeVLMUL(getLMUL(SubVecVT)).second);
+ // Else SubVecVT is M1 or smaller and may need to be slid down: if SubVecVT
+ // was > M1 then the index would need to be a multiple of VLMAX, and so would
+ // divide exactly.
+ assert(RISCVVType::decodeVLMUL(getLMUL(ContainerSubVecVT)).second ||
+ getLMUL(ContainerSubVecVT) == RISCVII::VLMUL::LMUL_1);
// If the vector type is an LMUL-group type, extract a subvector equal to the
// nearest full vector register type.
@@ -9791,10 +9864,17 @@ SDValue RISCVTargetLowering::lowerEXTRACT_SUBVECTOR(SDValue Op,
// Slide this vector register down by the desired number of elements in order
// to place the desired subvector starting at element 0.
- SDValue SlidedownAmt =
- DAG.getVScale(DL, XLenVT, APInt(XLenVT.getSizeInBits(), RemIdx));
+ SDValue SlidedownAmt;
+ if (SubVecVT.isFixedLengthVector())
+ SlidedownAmt = DAG.getConstant(RemIdx, DL, Subtarget.getXLenVT());
+ else
+ SlidedownAmt =
+ DAG.getVScale(DL, XLenVT, APInt(XLenVT.getSizeInBits(), RemIdx));
auto [Mask, VL] = getDefaultScalableVLOps(InterSubVT, DL, DAG, Subtarget);
+ if (SubVecVT.isFixedLengthVector())
+ VL = getVLOp(SubVecVT.getVectorNumElements(), InterSubVT, DL, DAG,
+ Subtarget);
SDValue Slidedown =
getVSlidedown(DAG, Subtarget, DL, InterSubVT, DAG.getUNDEF(InterSubVT),
Vec, SlidedownAmt, Mask, VL);
@@ -11067,8 +11147,8 @@ RISCVTargetLowering::lowerVPReverseExperimental(SDValue Op,
// Slide off any elements from past EVL that were reversed into the low
// elements.
unsigned MinElts = GatherVT.getVectorMinNumElements();
- SDValue VLMax = DAG.getNode(ISD::VSCALE, DL, XLenVT,
- DAG.getConstant(MinElts, DL, XLenVT));
+ SDValue VLMax =
+ DAG.getVScale(DL, XLenVT, APInt(XLenVT.getSizeInBits(), MinElts));
SDValue Diff = DAG.getNode(ISD::SUB, DL, XLenVT, VLMax, EVL);
Result = getVSlidedown(DAG, Subtarget, DL, GatherVT,
diff --git a/llvm/lib/Target/RISCV/RISCVRegisterInfo.cpp b/llvm/lib/Target/RISCV/RISCVRegisterInfo.cpp
index 30457f5..ca519db 100644
--- a/llvm/lib/Target/RISCV/RISCVRegisterInfo.cpp
+++ b/llvm/lib/Target/RISCV/RISCVRegisterInfo.cpp
@@ -156,40 +156,6 @@ const uint32_t *RISCVRegisterInfo::getNoPreservedMask() const {
return CSR_NoRegs_RegMask;
}
-// Frame indexes representing locations of CSRs which are given a fixed location
-// by save/restore libcalls or Zcmp Push/Pop.
-static const std::pair<unsigned, int> FixedCSRFIMap[] = {
- {/*ra*/ RISCV::X1, -1},
- {/*s0*/ RISCV::X8, -2},
- {/*s1*/ RISCV::X9, -3},
- {/*s2*/ RISCV::X18, -4},
- {/*s3*/ RISCV::X19, -5},
- {/*s4*/ RISCV::X20, -6},
- {/*s5*/ RISCV::X21, -7},
- {/*s6*/ RISCV::X22, -8},
- {/*s7*/ RISCV::X23, -9},
- {/*s8*/ RISCV::X24, -10},
- {/*s9*/ RISCV::X25, -11},
- {/*s10*/ RISCV::X26, -12},
- {/*s11*/ RISCV::X27, -13}
-};
-
-bool RISCVRegisterInfo::hasReservedSpillSlot(const MachineFunction &MF,
- Register Reg,
- int &FrameIdx) const {
- const auto *RVFI = MF.getInfo<RISCVMachineFunctionInfo>();
- if (!RVFI->useSaveRestoreLibCalls(MF) && !RVFI->isPushable(MF))
- return false;
-
- const auto *FII =
- llvm::find_if(FixedCSRFIMap, [&](auto P) { return P.first == Reg; });
- if (FII == std::end(FixedCSRFIMap))
- return false;
-
- FrameIdx = FII->second;
- return true;
-}
-
void RISCVRegisterInfo::adjustReg(MachineBasicBlock &MBB,
MachineBasicBlock::iterator II,
const DebugLoc &DL, Register DestReg,
diff --git a/llvm/lib/Target/RISCV/RISCVRegisterInfo.h b/llvm/lib/Target/RISCV/RISCVRegisterInfo.h
index 8b729ca..431ea23 100644
--- a/llvm/lib/Target/RISCV/RISCVRegisterInfo.h
+++ b/llvm/lib/Target/RISCV/RISCVRegisterInfo.h
@@ -35,9 +35,6 @@ struct RISCVRegisterInfo : public RISCVGenRegisterInfo {
const uint32_t *getNoPreservedMask() const override;
- bool hasReservedSpillSlot(const MachineFunction &MF, Register Reg,
- int &FrameIdx) const override;
-
// Update DestReg to have the value SrcReg plus an offset. This is
// used during frame layout, and we may need to ensure that if we
// split the offset internally that the DestReg is always aligned,
diff --git a/llvm/lib/Target/RISCV/RISCVTargetMachine.cpp b/llvm/lib/Target/RISCV/RISCVTargetMachine.cpp
index 4c3da3a..adef40e 100644
--- a/llvm/lib/Target/RISCV/RISCVTargetMachine.cpp
+++ b/llvm/lib/Target/RISCV/RISCVTargetMachine.cpp
@@ -366,6 +366,7 @@ public:
void addIRPasses() override;
bool addPreISel() override;
+ void addCodeGenPrepare() override;
bool addInstSelector() override;
bool addIRTranslator() override;
void addPreLegalizeMachineIR() override;
@@ -452,6 +453,12 @@ bool RISCVPassConfig::addPreISel() {
return false;
}
+void RISCVPassConfig::addCodeGenPrepare() {
+ if (getOptLevel() != CodeGenOptLevel::None)
+ addPass(createTypePromotionLegacyPass());
+ TargetPassConfig::addCodeGenPrepare();
+}
+
bool RISCVPassConfig::addInstSelector() {
addPass(createRISCVISelDag(getRISCVTargetMachine(), getOptLevel()));
diff --git a/llvm/lib/Target/SPIRV/Analysis/CMakeLists.txt b/llvm/lib/Target/SPIRV/Analysis/CMakeLists.txt
index 132d8ff..4d43511 100644
--- a/llvm/lib/Target/SPIRV/Analysis/CMakeLists.txt
+++ b/llvm/lib/Target/SPIRV/Analysis/CMakeLists.txt
@@ -2,8 +2,10 @@ add_llvm_component_library(LLVMSPIRVAnalysis
SPIRVConvergenceRegionAnalysis.cpp
LINK_COMPONENTS
+ Analysis
Core
Support
+ TransformUtils
ADD_TO_COMPONENT
SPIRV
diff --git a/llvm/lib/Target/Sparc/SparcISelLowering.cpp b/llvm/lib/Target/Sparc/SparcISelLowering.cpp
index bdefb08..13184a1 100644
--- a/llvm/lib/Target/Sparc/SparcISelLowering.cpp
+++ b/llvm/lib/Target/Sparc/SparcISelLowering.cpp
@@ -1764,9 +1764,14 @@ SparcTargetLowering::SparcTargetLowering(const TargetMachine &TM,
// Atomics are supported on SparcV9. 32-bit atomics are also
// supported by some Leon SparcV8 variants. Otherwise, atomics
// are unsupported.
- if (Subtarget->isV9())
- setMaxAtomicSizeInBitsSupported(64);
- else if (Subtarget->hasLeonCasa())
+ if (Subtarget->isV9()) {
+ // TODO: we _ought_ to be able to support 64-bit atomics on 32-bit sparcv9,
+ // but it hasn't been implemented in the backend yet.
+ if (Subtarget->is64Bit())
+ setMaxAtomicSizeInBitsSupported(64);
+ else
+ setMaxAtomicSizeInBitsSupported(32);
+ } else if (Subtarget->hasLeonCasa())
setMaxAtomicSizeInBitsSupported(32);
else
setMaxAtomicSizeInBitsSupported(0);
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index 18f9871..0676767 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -52816,7 +52816,7 @@ static SDValue getInvertedVectorForFMA(SDValue V, SelectionDAG &DAG) {
SmallVector<SDValue, 8> Ops;
EVT VT = V.getValueType();
EVT EltVT = VT.getVectorElementType();
- for (auto Op : V->op_values()) {
+ for (const SDValue &Op : V->op_values()) {
if (auto *Cst = dyn_cast<ConstantFPSDNode>(Op)) {
Ops.push_back(DAG.getConstantFP(-Cst->getValueAPF(), SDLoc(Op), EltVT));
} else {
@@ -52838,8 +52838,8 @@ static SDValue getInvertedVectorForFMA(SDValue V, SelectionDAG &DAG) {
// prefer one of the values. We prefer a constant with a negative value on
// the first place.
// N.B. We need to skip undefs that may precede a value.
- for (auto op : V->op_values()) {
- if (auto *Cst = dyn_cast<ConstantFPSDNode>(op)) {
+ for (const SDValue &Op : V->op_values()) {
+ if (auto *Cst = dyn_cast<ConstantFPSDNode>(Op)) {
if (Cst->isNegative())
return SDValue();
break;
diff --git a/llvm/lib/Target/X86/X86InstrSystem.td b/llvm/lib/Target/X86/X86InstrSystem.td
index a7899a2..d051047 100644
--- a/llvm/lib/Target/X86/X86InstrSystem.td
+++ b/llvm/lib/Target/X86/X86InstrSystem.td
@@ -213,6 +213,7 @@ def MOV16sm : I<0x8E, MRMSrcMem, (outs SEGMENT_REG:$dst), (ins i16mem:$src),
let SchedRW = [WriteSystem] in {
def SWAPGS : I<0x01, MRM_F8, (outs), (ins), "swapgs", []>, TB;
+let Defs = [EFLAGS] in {
let mayLoad = 1 in
def LAR16rm : I<0x02, MRMSrcMem, (outs GR16:$dst), (ins i16mem:$src),
"lar{w}\t{$src, $dst|$dst, $src}", []>, TB,
@@ -253,6 +254,7 @@ def LSL64rm : RI<0x03, MRMSrcMem, (outs GR64:$dst), (ins i16mem:$src),
"lsl{q}\t{$src, $dst|$dst, $src}", []>, TB;
def LSL64rr : RI<0x03, MRMSrcReg, (outs GR64:$dst), (ins GR16orGR32orGR64:$src),
"lsl{q}\t{$src, $dst|$dst, $src}", []>, TB;
+}
def INVLPG : I<0x01, MRM7m, (outs), (ins i8mem:$addr), "invlpg\t$addr", []>, TB;
@@ -364,12 +366,14 @@ def LGS32rm : I<0xb5, MRMSrcMem, (outs GR32:$dst), (ins opaquemem:$src),
def LGS64rm : RI<0xb5, MRMSrcMem, (outs GR64:$dst), (ins opaquemem:$src),
"lgs{q}\t{$src, $dst|$dst, $src}", []>, TB;
+let Defs = [EFLAGS] in {
def VERRr : I<0x00, MRM4r, (outs), (ins GR16:$seg), "verr\t$seg", []>, TB;
def VERWr : I<0x00, MRM5r, (outs), (ins GR16:$seg), "verw\t$seg", []>, TB;
let mayLoad = 1 in {
def VERRm : I<0x00, MRM4m, (outs), (ins i16mem:$seg), "verr\t$seg", []>, TB;
def VERWm : I<0x00, MRM5m, (outs), (ins i16mem:$seg), "verw\t$seg", []>, TB;
}
+} // Defs EFLAGS
} // SchedRW
//===----------------------------------------------------------------------===//
diff --git a/llvm/lib/Target/X86/X86SchedIceLake.td b/llvm/lib/Target/X86/X86SchedIceLake.td
index 2c660fa..3144327 100644
--- a/llvm/lib/Target/X86/X86SchedIceLake.td
+++ b/llvm/lib/Target/X86/X86SchedIceLake.td
@@ -257,11 +257,11 @@ defm : X86WriteRes<WriteEMMS, [ICXPort05,ICXPort0156], 10, [9,1], 10>;
defm : ICXWriteResPair<WriteFAdd, [ICXPort01], 4, [1], 1, 5>; // Floating point add/sub.
defm : ICXWriteResPair<WriteFAddX, [ICXPort01], 4, [1], 1, 6>;
defm : ICXWriteResPair<WriteFAddY, [ICXPort01], 4, [1], 1, 7>;
-defm : ICXWriteResPair<WriteFAddZ, [ICXPort05], 4, [1], 1, 7>;
+defm : ICXWriteResPair<WriteFAddZ, [ICXPort0], 4, [1], 1, 7>;
defm : ICXWriteResPair<WriteFAdd64, [ICXPort01], 4, [1], 1, 5>; // Floating point double add/sub.
defm : ICXWriteResPair<WriteFAdd64X, [ICXPort01], 4, [1], 1, 6>;
defm : ICXWriteResPair<WriteFAdd64Y, [ICXPort01], 4, [1], 1, 7>;
-defm : ICXWriteResPair<WriteFAdd64Z, [ICXPort05], 4, [1], 1, 7>;
+defm : ICXWriteResPair<WriteFAdd64Z, [ICXPort0], 4, [1], 1, 7>;
defm : ICXWriteResPair<WriteFCmp, [ICXPort01], 4, [1], 1, 5>; // Floating point compare.
defm : ICXWriteResPair<WriteFCmpX, [ICXPort01], 4, [1], 1, 6>;
@@ -278,11 +278,11 @@ defm : ICXWriteResPair<WriteFComX, [ICXPort0], 2>; // Floating point compa
defm : ICXWriteResPair<WriteFMul, [ICXPort01], 4, [1], 1, 5>; // Floating point multiplication.
defm : ICXWriteResPair<WriteFMulX, [ICXPort01], 4, [1], 1, 6>;
defm : ICXWriteResPair<WriteFMulY, [ICXPort01], 4, [1], 1, 7>;
-defm : ICXWriteResPair<WriteFMulZ, [ICXPort05], 4, [1], 1, 7>;
+defm : ICXWriteResPair<WriteFMulZ, [ICXPort0], 4, [1], 1, 7>;
defm : ICXWriteResPair<WriteFMul64, [ICXPort01], 4, [1], 1, 5>; // Floating point double multiplication.
defm : ICXWriteResPair<WriteFMul64X, [ICXPort01], 4, [1], 1, 6>;
defm : ICXWriteResPair<WriteFMul64Y, [ICXPort01], 4, [1], 1, 7>;
-defm : ICXWriteResPair<WriteFMul64Z, [ICXPort05], 4, [1], 1, 7>;
+defm : ICXWriteResPair<WriteFMul64Z, [ICXPort0], 4, [1], 1, 7>;
defm : ICXWriteResPair<WriteFDiv, [ICXPort0,ICXFPDivider], 11, [1,3], 1, 5>; // 10-14 cycles. // Floating point division.
defm : ICXWriteResPair<WriteFDivX, [ICXPort0,ICXFPDivider], 11, [1,3], 1, 6>; // 10-14 cycles.
diff --git a/llvm/lib/TargetParser/ARMTargetParser.cpp b/llvm/lib/TargetParser/ARMTargetParser.cpp
index 67f937e..fac7019 100644
--- a/llvm/lib/TargetParser/ARMTargetParser.cpp
+++ b/llvm/lib/TargetParser/ARMTargetParser.cpp
@@ -94,6 +94,64 @@ unsigned ARM::parseArchVersion(StringRef Arch) {
llvm_unreachable("Unhandled architecture");
}
+unsigned ARM::parseArchMinorVersion(StringRef Arch) {
+ Arch = getCanonicalArchName(Arch);
+ switch (parseArch(Arch)) {
+ case ArchKind::ARMV4:
+ case ArchKind::ARMV4T:
+ case ArchKind::ARMV5T:
+ case ArchKind::ARMV5TE:
+ case ArchKind::IWMMXT:
+ case ArchKind::IWMMXT2:
+ case ArchKind::XSCALE:
+ case ArchKind::ARMV5TEJ:
+ case ArchKind::ARMV6:
+ case ArchKind::ARMV6K:
+ case ArchKind::ARMV6T2:
+ case ArchKind::ARMV6KZ:
+ case ArchKind::ARMV6M:
+ case ArchKind::ARMV7A:
+ case ArchKind::ARMV7VE:
+ case ArchKind::ARMV7R:
+ case ArchKind::ARMV7M:
+ case ArchKind::ARMV7S:
+ case ArchKind::ARMV7EM:
+ case ArchKind::ARMV7K:
+ case ArchKind::ARMV8A:
+ case ArchKind::ARMV8R:
+ case ArchKind::ARMV8MBaseline:
+ case ArchKind::ARMV8MMainline:
+ case ArchKind::ARMV9A:
+ case ArchKind::INVALID:
+ return 0;
+ case ArchKind::ARMV8_1A:
+ case ArchKind::ARMV8_1MMainline:
+ case ArchKind::ARMV9_1A:
+ return 1;
+ case ArchKind::ARMV8_2A:
+ case ArchKind::ARMV9_2A:
+ return 2;
+ case ArchKind::ARMV8_3A:
+ case ArchKind::ARMV9_3A:
+ return 3;
+ case ArchKind::ARMV8_4A:
+ case ArchKind::ARMV9_4A:
+ return 4;
+ case ArchKind::ARMV8_5A:
+ case ArchKind::ARMV9_5A:
+ return 5;
+ case ArchKind::ARMV8_6A:
+ return 6;
+ case ArchKind::ARMV8_7A:
+ return 7;
+ case ArchKind::ARMV8_8A:
+ return 8;
+ case ArchKind::ARMV8_9A:
+ return 9;
+ }
+ llvm_unreachable("Unhandled architecture");
+}
+
static ARM::ProfileKind getProfileKind(ARM::ArchKind AK) {
switch (AK) {
case ARM::ArchKind::ARMV6M:
diff --git a/llvm/lib/TargetParser/TargetParser.cpp b/llvm/lib/TargetParser/TargetParser.cpp
index 684d698..a31027c 100644
--- a/llvm/lib/TargetParser/TargetParser.cpp
+++ b/llvm/lib/TargetParser/TargetParser.cpp
@@ -128,8 +128,8 @@ constexpr GPUInfo AMDGCNGPUs[] = {
{{"gfx1201"}, {"gfx1201"}, GK_GFX1201, FEATURE_FAST_FMA_F32|FEATURE_FAST_DENORMAL_F32|FEATURE_WAVE32|FEATURE_WGP},
{{"gfx9-generic"}, {"gfx9-generic"}, GK_GFX9_GENERIC, FEATURE_FAST_FMA_F32|FEATURE_FAST_DENORMAL_F32|FEATURE_XNACK},
- {{"gfx10.1-generic"}, {"gfx10.1-generic"}, GK_GFX10_1_GENERIC, FEATURE_FAST_FMA_F32|FEATURE_FAST_DENORMAL_F32|FEATURE_WAVE32|FEATURE_XNACK|FEATURE_WGP},
- {{"gfx10.3-generic"}, {"gfx10.3-generic"}, GK_GFX10_3_GENERIC, FEATURE_FAST_FMA_F32|FEATURE_FAST_DENORMAL_F32|FEATURE_WAVE32|FEATURE_WGP},
+ {{"gfx10-1-generic"}, {"gfx10-1-generic"}, GK_GFX10_1_GENERIC, FEATURE_FAST_FMA_F32|FEATURE_FAST_DENORMAL_F32|FEATURE_WAVE32|FEATURE_XNACK|FEATURE_WGP},
+ {{"gfx10-3-generic"}, {"gfx10-3-generic"}, GK_GFX10_3_GENERIC, FEATURE_FAST_FMA_F32|FEATURE_FAST_DENORMAL_F32|FEATURE_WAVE32|FEATURE_WGP},
{{"gfx11-generic"}, {"gfx11-generic"}, GK_GFX11_GENERIC, FEATURE_FAST_FMA_F32|FEATURE_FAST_DENORMAL_F32|FEATURE_WAVE32|FEATURE_WGP},
// clang-format on
};
diff --git a/llvm/lib/ToolDrivers/llvm-dlltool/DlltoolDriver.cpp b/llvm/lib/ToolDrivers/llvm-dlltool/DlltoolDriver.cpp
index 0749580..98795c5 100644
--- a/llvm/lib/ToolDrivers/llvm-dlltool/DlltoolDriver.cpp
+++ b/llvm/lib/ToolDrivers/llvm-dlltool/DlltoolDriver.cpp
@@ -110,6 +110,46 @@ std::optional<std::string> getPrefix(StringRef Argv0) {
return ProgName.str();
}
+bool parseModuleDefinition(StringRef DefFileName, MachineTypes Machine,
+ bool AddUnderscores,
+ std::vector<COFFShortExport> &Exports,
+ std::string &OutputFile) {
+ std::unique_ptr<MemoryBuffer> MB = openFile(DefFileName);
+ if (!MB)
+ return false;
+
+ if (!MB->getBufferSize()) {
+ llvm::errs() << "definition file empty\n";
+ return false;
+ }
+
+ Expected<COFFModuleDefinition> Def = parseCOFFModuleDefinition(
+ *MB, Machine, /*MingwDef=*/true, AddUnderscores);
+ if (!Def) {
+ llvm::errs() << "error parsing definition\n"
+ << errorToErrorCode(Def.takeError()).message() << "\n";
+ return false;
+ }
+
+ if (OutputFile.empty())
+ OutputFile = std::move(Def->OutputFile);
+
+ // If ExtName is set (if the "ExtName = Name" syntax was used), overwrite
+ // Name with ExtName and clear ExtName. When only creating an import
+ // library and not linking, the internal name is irrelevant. This avoids
+ // cases where writeImportLibrary tries to transplant decoration from
+ // symbol decoration onto ExtName.
+ for (COFFShortExport &E : Def->Exports) {
+ if (!E.ExtName.empty()) {
+ E.Name = E.ExtName;
+ E.ExtName.clear();
+ }
+ }
+
+ Exports = std::move(Def->Exports);
+ return true;
+}
+
} // namespace
int llvm::dlltoolDriverMain(llvm::ArrayRef<const char *> ArgsArr) {
@@ -141,16 +181,6 @@ int llvm::dlltoolDriverMain(llvm::ArrayRef<const char *> ArgsArr) {
return 1;
}
- std::unique_ptr<MemoryBuffer> MB =
- openFile(Args.getLastArg(OPT_d)->getValue());
- if (!MB)
- return 1;
-
- if (!MB->getBufferSize()) {
- llvm::errs() << "definition file empty\n";
- return 1;
- }
-
COFF::MachineTypes Machine = getDefaultMachine();
if (std::optional<std::string> Prefix = getPrefix(ArgsArr[0])) {
Triple T(*Prefix);
@@ -166,40 +196,23 @@ int llvm::dlltoolDriverMain(llvm::ArrayRef<const char *> ArgsArr) {
}
bool AddUnderscores = !Args.hasArg(OPT_no_leading_underscore);
- Expected<COFFModuleDefinition> Def = parseCOFFModuleDefinition(
- *MB, Machine, /*MingwDef=*/true, AddUnderscores);
- if (!Def) {
- llvm::errs() << "error parsing definition\n"
- << errorToErrorCode(Def.takeError()).message() << "\n";
- return 1;
- }
-
- // Do this after the parser because parseCOFFModuleDefinition sets OutputFile.
+ std::string OutputFile;
if (auto *Arg = Args.getLastArg(OPT_D))
- Def->OutputFile = Arg->getValue();
+ OutputFile = Arg->getValue();
- if (Def->OutputFile.empty()) {
- llvm::errs() << "no DLL name specified\n";
+ std::vector<COFFShortExport> Exports;
+ if (!parseModuleDefinition(Args.getLastArg(OPT_d)->getValue(), Machine,
+ AddUnderscores, Exports, OutputFile))
return 1;
- }
- std::string Path = std::string(Args.getLastArgValue(OPT_l));
-
- // If ExtName is set (if the "ExtName = Name" syntax was used), overwrite
- // Name with ExtName and clear ExtName. When only creating an import
- // library and not linking, the internal name is irrelevant. This avoids
- // cases where writeImportLibrary tries to transplant decoration from
- // symbol decoration onto ExtName.
- for (COFFShortExport& E : Def->Exports) {
- if (!E.ExtName.empty()) {
- E.Name = E.ExtName;
- E.ExtName.clear();
- }
+ if (OutputFile.empty()) {
+ llvm::errs() << "no DLL name specified\n";
+ return 1;
}
if (Machine == IMAGE_FILE_MACHINE_I386 && Args.hasArg(OPT_k)) {
- for (COFFShortExport& E : Def->Exports) {
+ for (COFFShortExport &E : Exports) {
if (!E.AliasTarget.empty() || (!E.Name.empty() && E.Name[0] == '?'))
continue;
E.SymbolName = E.Name;
@@ -215,9 +228,9 @@ int llvm::dlltoolDriverMain(llvm::ArrayRef<const char *> ArgsArr) {
}
}
- if (!Path.empty() &&
- writeImportLibrary(Def->OutputFile, Path, Def->Exports, std::nullopt,
- Machine, /*MinGW=*/true))
+ std::string Path = std::string(Args.getLastArgValue(OPT_l));
+ if (!Path.empty() && writeImportLibrary(OutputFile, Path, Exports, Machine,
+ /*MinGW=*/true))
return 1;
return 0;
}
diff --git a/llvm/lib/ToolDrivers/llvm-lib/LibDriver.cpp b/llvm/lib/ToolDrivers/llvm-lib/LibDriver.cpp
index 3baa0a08c..c3015d8 100644
--- a/llvm/lib/ToolDrivers/llvm-lib/LibDriver.cpp
+++ b/llvm/lib/ToolDrivers/llvm-lib/LibDriver.cpp
@@ -418,9 +418,8 @@ int llvm::libDriverMain(ArrayRef<const char *> ArgsArr) {
OutputFile = std::move(NativeDef->OutputFile);
}
- return writeImportLibrary(OutputFile, OutputPath, Def->Exports,
- NativeExports, LibMachine,
- /*MinGW=*/false)
+ return writeImportLibrary(OutputFile, OutputPath, Def->Exports, LibMachine,
+ /*MinGW=*/false, NativeExports)
? 1
: 0;
}
diff --git a/llvm/lib/Transforms/IPO/ThinLTOBitcodeWriter.cpp b/llvm/lib/Transforms/IPO/ThinLTOBitcodeWriter.cpp
index e5f9fa1..dd6062d 100644
--- a/llvm/lib/Transforms/IPO/ThinLTOBitcodeWriter.cpp
+++ b/llvm/lib/Transforms/IPO/ThinLTOBitcodeWriter.cpp
@@ -580,11 +580,22 @@ PreservedAnalyses
llvm::ThinLTOBitcodeWriterPass::run(Module &M, ModuleAnalysisManager &AM) {
FunctionAnalysisManager &FAM =
AM.getResult<FunctionAnalysisManagerModuleProxy>(M).getManager();
+
+ // RemoveDIs: there's no bitcode representation of the DPValue debug-info,
+ // convert to dbg.values before writing out.
+ bool IsNewDbgInfoFormat = M.IsNewDbgInfoFormat;
+ if (IsNewDbgInfoFormat)
+ M.convertFromNewDbgValues();
+
bool Changed = writeThinLTOBitcode(
OS, ThinLinkOS,
[&FAM](Function &F) -> AAResults & {
return FAM.getResult<AAManager>(F);
},
M, &AM.getResult<ModuleSummaryIndexAnalysis>(M));
+
+ if (IsNewDbgInfoFormat)
+ M.convertToNewDbgValues();
+
return Changed ? PreservedAnalyses::none() : PreservedAnalyses::all();
}
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp b/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
index 4465eb8..0af9a27 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
@@ -4450,7 +4450,7 @@ Instruction *InstCombinerImpl::foldNot(BinaryOperator &I) {
}
// ~(X + C) --> ~C - X
- if (match(NotVal, m_c_Add(m_Value(X), m_ImmConstant(C))))
+ if (match(NotVal, m_Add(m_Value(X), m_ImmConstant(C))))
return BinaryOperator::CreateSub(ConstantExpr::getNot(C), X);
// ~(X - Y) --> ~X + Y
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp b/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
index 56d1259..5266808 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
@@ -902,7 +902,8 @@ Instruction *InstCombinerImpl::foldIntrinsicIsFPClass(IntrinsicInst &II) {
const FPClassTest OrderedMask = Mask & ~fcNan;
const FPClassTest OrderedInvertedMask = ~OrderedMask & ~fcNan;
- const bool IsStrict = II.isStrictFP();
+ const bool IsStrict =
+ II.getFunction()->getAttributes().hasFnAttr(Attribute::StrictFP);
Value *FNegSrc;
if (match(Src0, m_FNeg(m_Value(FNegSrc)))) {
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp b/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
index 280c4d7..1104ea8 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
@@ -6021,7 +6021,7 @@ static APInt getDemandedBitsLHSMask(ICmpInst &I, unsigned BitWidth) {
// If this is a normal comparison, it demands all bits. If it is a sign bit
// comparison, it only demands the sign bit.
bool UnusedBit;
- if (InstCombiner::isSignBitCheck(I.getPredicate(), *RHS, UnusedBit))
+ if (isSignBitCheck(I.getPredicate(), *RHS, UnusedBit))
return APInt::getSignMask(BitWidth);
switch (I.getPredicate()) {
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineNegator.cpp b/llvm/lib/Transforms/InstCombine/InstCombineNegator.cpp
index 62e4946..f73679f 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineNegator.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineNegator.cpp
@@ -258,9 +258,9 @@ std::array<Value *, 2> Negator::getSortedOperandsOfBinOp(Instruction *I) {
case Instruction::And: {
Constant *ShAmt;
// sub(y,and(lshr(x,C),1)) --> add(ashr(shl(x,(BW-1)-C),BW-1),y)
- if (match(I, m_c_And(m_OneUse(m_TruncOrSelf(
- m_LShr(m_Value(X), m_ImmConstant(ShAmt)))),
- m_One()))) {
+ if (match(I, m_And(m_OneUse(m_TruncOrSelf(
+ m_LShr(m_Value(X), m_ImmConstant(ShAmt)))),
+ m_One()))) {
unsigned BW = X->getType()->getScalarSizeInBits();
Constant *BWMinusOne = ConstantInt::get(X->getType(), BW - 1);
Value *R = Builder.CreateShl(X, Builder.CreateSub(BWMinusOne, ShAmt));
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp b/llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp
index 5270378..71fa9b9 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp
@@ -2381,8 +2381,7 @@ static Instruction *foldSelectToCopysign(SelectInst &Sel,
ICmpInst::Predicate Pred;
if (!match(Cond, m_OneUse(m_ICmp(Pred, m_ElementWiseBitCast(m_Value(X)),
m_APInt(C)))) ||
- !InstCombiner::isSignBitCheck(Pred, *C, IsTrueIfSignSet) ||
- X->getType() != SelType)
+ !isSignBitCheck(Pred, *C, IsTrueIfSignSet) || X->getType() != SelType)
return nullptr;
// If needed, negate the value that will be the sign argument of the copysign:
@@ -2581,7 +2580,7 @@ static Instruction *foldSelectWithSRem(SelectInst &SI, InstCombinerImpl &IC,
bool TrueIfSigned = false;
if (!(match(CondVal, m_ICmp(Pred, m_Value(RemRes), m_APInt(C))) &&
- IC.isSignBitCheck(Pred, *C, TrueIfSigned)))
+ isSignBitCheck(Pred, *C, TrueIfSigned)))
return nullptr;
// If the sign bit is not set, we have a SGE/SGT comparison, and the operands
@@ -2781,7 +2780,7 @@ static Instruction *foldSelectWithFCmpToFabs(SelectInst &SI,
bool TrueIfSigned;
if (!match(CondVal,
m_ICmp(Pred, m_ElementWiseBitCast(m_Specific(X)), m_APInt(C))) ||
- !IC.isSignBitCheck(Pred, *C, TrueIfSigned))
+ !isSignBitCheck(Pred, *C, TrueIfSigned))
continue;
if (!match(TrueVal, m_FNeg(m_Specific(X))))
return nullptr;
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineShifts.cpp b/llvm/lib/Transforms/InstCombine/InstCombineShifts.cpp
index 3fbe98f..eafd288 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineShifts.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineShifts.cpp
@@ -1206,6 +1206,12 @@ Instruction *InstCombinerImpl::visitShl(BinaryOperator &I) {
return BinaryOperator::CreateAnd(Mask, X);
}
+ // Transform (-1 >> y) << y to -1 << y
+ if (match(Op0, m_LShr(m_AllOnes(), m_Specific(Op1)))) {
+ Constant *AllOnes = ConstantInt::getAllOnesValue(Ty);
+ return BinaryOperator::CreateShl(AllOnes, Op1);
+ }
+
Constant *C1;
if (match(Op1, m_Constant(C1))) {
Constant *C2;
@@ -1493,6 +1499,12 @@ Instruction *InstCombinerImpl::visitLShr(BinaryOperator &I) {
return BinaryOperator::CreateAnd(Mask, X);
}
+ // Transform (-1 << y) >> y to -1 >> y
+ if (match(Op0, m_Shl(m_AllOnes(), m_Specific(Op1)))) {
+ Constant *AllOnes = ConstantInt::getAllOnesValue(Ty);
+ return BinaryOperator::CreateLShr(AllOnes, Op1);
+ }
+
if (Instruction *Overflow = foldLShrOverflowBit(I))
return Overflow;
diff --git a/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp b/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
index b1e2262..7450f39 100644
--- a/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
@@ -142,12 +142,6 @@ static cl::opt<unsigned>
MaxArraySize("instcombine-maxarray-size", cl::init(1024),
cl::desc("Maximum array size considered when doing a combine"));
-// TODO: Remove this option
-static cl::opt<bool> EnableSimplifyDemandedUseFPClass(
- "instcombine-simplify-demanded-fp-class",
- cl::desc("Enable demanded floating-point class optimizations"),
- cl::init(false));
-
// FIXME: Remove this flag when it is no longer necessary to convert
// llvm.dbg.declare to avoid inaccurate debug info. Setting this to false
// increases variable availability at the cost of accuracy. Variables that
@@ -3111,9 +3105,6 @@ Instruction *InstCombinerImpl::visitFree(CallInst &FI, Value *Op) {
}
Instruction *InstCombinerImpl::visitReturnInst(ReturnInst &RI) {
- if (!EnableSimplifyDemandedUseFPClass)
- return nullptr;
-
Value *RetVal = RI.getReturnValue();
if (!RetVal || !AttributeFuncs::isNoFPClassCompatibleType(RetVal->getType()))
return nullptr;
diff --git a/llvm/lib/Transforms/Instrumentation/HWAddressSanitizer.cpp b/llvm/lib/Transforms/Instrumentation/HWAddressSanitizer.cpp
index c2a4632..393afc9 100644
--- a/llvm/lib/Transforms/Instrumentation/HWAddressSanitizer.cpp
+++ b/llvm/lib/Transforms/Instrumentation/HWAddressSanitizer.cpp
@@ -1371,6 +1371,14 @@ static bool isLifetimeIntrinsic(Value *V) {
return II && II->isLifetimeStartOrEnd();
}
+static DbgAssignIntrinsic *DynCastToDbgAssign(DbgVariableIntrinsic *DVI) {
+ return dyn_cast<DbgAssignIntrinsic>(DVI);
+}
+
+static DPValue *DynCastToDbgAssign(DPValue *DPV) {
+ return DPV->isDbgAssign() ? DPV : nullptr;
+}
+
bool HWAddressSanitizer::instrumentStack(memtag::StackInfo &SInfo,
Value *StackTag, Value *UARTag,
const DominatorTree &DT,
@@ -1437,6 +1445,11 @@ bool HWAddressSanitizer::instrumentStack(memtag::StackInfo &SInfo,
if (DPtr->getVariableLocationOp(LocNo) == AI)
DPtr->setExpression(DIExpression::appendOpsToArg(
DPtr->getExpression(), NewOps, LocNo));
+ if (auto *DAI = DynCastToDbgAssign(DPtr)) {
+ if (DAI->getAddress() == AI)
+ DAI->setAddressExpression(DIExpression::prependOpcodes(
+ DAI->getAddressExpression(), NewOps));
+ }
};
llvm::for_each(Info.DbgVariableIntrinsics, AnnotateDbgRecord);
diff --git a/llvm/lib/Transforms/Scalar/CorrelatedValuePropagation.cpp b/llvm/lib/Transforms/Scalar/CorrelatedValuePropagation.cpp
index 24e1677..9235850 100644
--- a/llvm/lib/Transforms/Scalar/CorrelatedValuePropagation.cpp
+++ b/llvm/lib/Transforms/Scalar/CorrelatedValuePropagation.cpp
@@ -371,7 +371,6 @@ static bool processSwitch(SwitchInst *I, LazyValueInfo *LVI,
{ // Scope for SwitchInstProfUpdateWrapper. It must not live during
// ConstantFoldTerminator() as the underlying SwitchInst can be changed.
SwitchInstProfUpdateWrapper SI(*I);
- unsigned ReachableCaseCount = 0;
for (auto CI = SI->case_begin(), CE = SI->case_end(); CI != CE;) {
ConstantInt *Case = CI->getCaseValue();
@@ -408,33 +407,6 @@ static bool processSwitch(SwitchInst *I, LazyValueInfo *LVI,
// Increment the case iterator since we didn't delete it.
++CI;
- ++ReachableCaseCount;
- }
-
- BasicBlock *DefaultDest = SI->getDefaultDest();
- if (ReachableCaseCount > 1 &&
- !isa<UnreachableInst>(DefaultDest->getFirstNonPHIOrDbg())) {
- ConstantRange CR = LVI->getConstantRangeAtUse(I->getOperandUse(0),
- /*UndefAllowed*/ false);
- // The default dest is unreachable if all cases are covered.
- if (!CR.isSizeLargerThan(ReachableCaseCount)) {
- BasicBlock *NewUnreachableBB =
- BasicBlock::Create(BB->getContext(), "default.unreachable",
- BB->getParent(), DefaultDest);
- new UnreachableInst(BB->getContext(), NewUnreachableBB);
-
- DefaultDest->removePredecessor(BB);
- SI->setDefaultDest(NewUnreachableBB);
-
- if (SuccessorsCount[DefaultDest] == 1)
- DTU.applyUpdatesPermissive(
- {{DominatorTree::Delete, BB, DefaultDest}});
- DTU.applyUpdatesPermissive(
- {{DominatorTree::Insert, BB, NewUnreachableBB}});
-
- ++NumDeadCases;
- Changed = true;
- }
}
}
@@ -1255,12 +1227,6 @@ CorrelatedValuePropagationPass::run(Function &F, FunctionAnalysisManager &AM) {
if (!Changed) {
PA = PreservedAnalyses::all();
} else {
-#if defined(EXPENSIVE_CHECKS)
- assert(DT->verify(DominatorTree::VerificationLevel::Full));
-#else
- assert(DT->verify(DominatorTree::VerificationLevel::Fast));
-#endif // EXPENSIVE_CHECKS
-
PA.preserve<DominatorTreeAnalysis>();
PA.preserve<LazyValueAnalysis>();
}
diff --git a/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp b/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp
index 663c06e..d30c68a 100644
--- a/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp
+++ b/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp
@@ -1904,6 +1904,57 @@ struct DSEState {
return true;
}
+ // Check if there is a dominating condition, that implies that the value
+ // being stored in a ptr is already present in the ptr.
+ bool dominatingConditionImpliesValue(MemoryDef *Def) {
+ auto *StoreI = cast<StoreInst>(Def->getMemoryInst());
+ BasicBlock *StoreBB = StoreI->getParent();
+ Value *StorePtr = StoreI->getPointerOperand();
+ Value *StoreVal = StoreI->getValueOperand();
+
+ DomTreeNode *IDom = DT.getNode(StoreBB)->getIDom();
+ if (!IDom)
+ return false;
+
+ auto *BI = dyn_cast<BranchInst>(IDom->getBlock()->getTerminator());
+ if (!BI || !BI->isConditional())
+ return false;
+
+ // In case both blocks are the same, it is not possible to determine
+ // if optimization is possible. (We would not want to optimize a store
+ // in the FalseBB if condition is true and vice versa.)
+ if (BI->getSuccessor(0) == BI->getSuccessor(1))
+ return false;
+
+ Instruction *ICmpL;
+ ICmpInst::Predicate Pred;
+ if (!match(BI->getCondition(),
+ m_c_ICmp(Pred,
+ m_CombineAnd(m_Load(m_Specific(StorePtr)),
+ m_Instruction(ICmpL)),
+ m_Specific(StoreVal))) ||
+ !ICmpInst::isEquality(Pred))
+ return false;
+
+ // In case the else blocks also branches to the if block or the other way
+ // around it is not possible to determine if the optimization is possible.
+ if (Pred == ICmpInst::ICMP_EQ &&
+ !DT.dominates(BasicBlockEdge(BI->getParent(), BI->getSuccessor(0)),
+ StoreBB))
+ return false;
+
+ if (Pred == ICmpInst::ICMP_NE &&
+ !DT.dominates(BasicBlockEdge(BI->getParent(), BI->getSuccessor(1)),
+ StoreBB))
+ return false;
+
+ MemoryAccess *LoadAcc = MSSA.getMemoryAccess(ICmpL);
+ MemoryAccess *ClobAcc =
+ MSSA.getSkipSelfWalker()->getClobberingMemoryAccess(Def, BatchAA);
+
+ return MSSA.dominates(ClobAcc, LoadAcc);
+ }
+
/// \returns true if \p Def is a no-op store, either because it
/// directly stores back a loaded value or stores zero to a calloced object.
bool storeIsNoop(MemoryDef *Def, const Value *DefUO) {
@@ -1934,6 +1985,9 @@ struct DSEState {
if (!Store)
return false;
+ if (dominatingConditionImpliesValue(Def))
+ return true;
+
if (auto *LoadI = dyn_cast<LoadInst>(Store->getOperand(0))) {
if (LoadI->getPointerOperand() == Store->getOperand(1)) {
// Get the defining access for the load.
diff --git a/llvm/lib/Transforms/Scalar/LICM.cpp b/llvm/lib/Transforms/Scalar/LICM.cpp
index f3e40a5..9ec9c31 100644
--- a/llvm/lib/Transforms/Scalar/LICM.cpp
+++ b/llvm/lib/Transforms/Scalar/LICM.cpp
@@ -110,6 +110,9 @@ STATISTIC(NumAddSubHoisted, "Number of add/subtract expressions reassociated "
"and hoisted out of the loop");
STATISTIC(NumFPAssociationsHoisted, "Number of invariant FP expressions "
"reassociated and hoisted out of the loop");
+STATISTIC(NumIntAssociationsHoisted,
+ "Number of invariant int expressions "
+ "reassociated and hoisted out of the loop");
/// Memory promotion is enabled by default.
static cl::opt<bool>
@@ -135,6 +138,12 @@ static cl::opt<unsigned> FPAssociationUpperLimit(
"Set upper limit for the number of transformations performed "
"during a single round of hoisting the reassociated expressions."));
+cl::opt<unsigned> IntAssociationUpperLimit(
+ "licm-max-num-int-reassociations", cl::init(5U), cl::Hidden,
+ cl::desc(
+ "Set upper limit for the number of transformations performed "
+ "during a single round of hoisting the reassociated expressions."));
+
// Experimental option to allow imprecision in LICM in pathological cases, in
// exchange for faster compile. This is to be removed if MemorySSA starts to
// address the same issue. LICM calls MemorySSAWalker's
@@ -2661,21 +2670,29 @@ static bool hoistAddSub(Instruction &I, Loop &L, ICFLoopSafetyInfo &SafetyInfo,
return false;
}
+static bool isReassociableOp(Instruction *I, unsigned IntOpcode,
+ unsigned FPOpcode) {
+ if (I->getOpcode() == IntOpcode)
+ return true;
+ if (I->getOpcode() == FPOpcode && I->hasAllowReassoc() &&
+ I->hasNoSignedZeros())
+ return true;
+ return false;
+}
+
/// Try to reassociate expressions like ((A1 * B1) + (A2 * B2) + ...) * C where
/// A1, A2, ... and C are loop invariants into expressions like
/// ((A1 * C * B1) + (A2 * C * B2) + ...) and hoist the (A1 * C), (A2 * C), ...
/// invariant expressions. This functions returns true only if any hoisting has
/// actually occured.
-static bool hoistFPAssociation(Instruction &I, Loop &L,
- ICFLoopSafetyInfo &SafetyInfo,
- MemorySSAUpdater &MSSAU, AssumptionCache *AC,
- DominatorTree *DT) {
- using namespace PatternMatch;
- Value *VariantOp = nullptr, *InvariantOp = nullptr;
-
- if (!match(&I, m_FMul(m_Value(VariantOp), m_Value(InvariantOp))) ||
- !I.hasAllowReassoc() || !I.hasNoSignedZeros())
+static bool hoistMulAddAssociation(Instruction &I, Loop &L,
+ ICFLoopSafetyInfo &SafetyInfo,
+ MemorySSAUpdater &MSSAU, AssumptionCache *AC,
+ DominatorTree *DT) {
+ if (!isReassociableOp(&I, Instruction::Mul, Instruction::FMul))
return false;
+ Value *VariantOp = I.getOperand(0);
+ Value *InvariantOp = I.getOperand(1);
if (L.isLoopInvariant(VariantOp))
std::swap(VariantOp, InvariantOp);
if (L.isLoopInvariant(VariantOp) || !L.isLoopInvariant(InvariantOp))
@@ -2689,15 +2706,17 @@ static bool hoistFPAssociation(Instruction &I, Loop &L,
Worklist.push_back(VariantBinOp);
while (!Worklist.empty()) {
BinaryOperator *BO = Worklist.pop_back_val();
- if (!BO->hasOneUse() || !BO->hasAllowReassoc() || !BO->hasNoSignedZeros())
+ if (!BO->hasOneUse())
return false;
- BinaryOperator *Op0, *Op1;
- if (match(BO, m_FAdd(m_BinOp(Op0), m_BinOp(Op1)))) {
- Worklist.push_back(Op0);
- Worklist.push_back(Op1);
+ if (isReassociableOp(BO, Instruction::Add, Instruction::FAdd) &&
+ isa<BinaryOperator>(BO->getOperand(0)) &&
+ isa<BinaryOperator>(BO->getOperand(1))) {
+ Worklist.push_back(cast<BinaryOperator>(BO->getOperand(0)));
+ Worklist.push_back(cast<BinaryOperator>(BO->getOperand(1)));
continue;
}
- if (BO->getOpcode() != Instruction::FMul || L.isLoopInvariant(BO))
+ if (!isReassociableOp(BO, Instruction::Mul, Instruction::FMul) ||
+ L.isLoopInvariant(BO))
return false;
Use &U0 = BO->getOperandUse(0);
Use &U1 = BO->getOperandUse(1);
@@ -2707,7 +2726,10 @@ static bool hoistFPAssociation(Instruction &I, Loop &L,
Changes.push_back(&U1);
else
return false;
- if (Changes.size() > FPAssociationUpperLimit)
+ unsigned Limit = I.getType()->isIntOrIntVectorTy()
+ ? IntAssociationUpperLimit
+ : FPAssociationUpperLimit;
+ if (Changes.size() > Limit)
return false;
}
if (Changes.empty())
@@ -2720,7 +2742,12 @@ static bool hoistFPAssociation(Instruction &I, Loop &L,
for (auto *U : Changes) {
assert(L.isLoopInvariant(U->get()));
Instruction *Ins = cast<Instruction>(U->getUser());
- U->set(Builder.CreateFMulFMF(U->get(), Factor, Ins, "factor.op.fmul"));
+ Value *Mul;
+ if (I.getType()->isIntOrIntVectorTy())
+ Mul = Builder.CreateMul(U->get(), Factor, "factor.op.mul");
+ else
+ Mul = Builder.CreateFMulFMF(U->get(), Factor, Ins, "factor.op.fmul");
+ U->set(Mul);
}
I.replaceAllUsesWith(VariantOp);
eraseInstruction(I, SafetyInfo, MSSAU);
@@ -2754,9 +2781,13 @@ static bool hoistArithmetics(Instruction &I, Loop &L,
return true;
}
- if (hoistFPAssociation(I, L, SafetyInfo, MSSAU, AC, DT)) {
+ bool IsInt = I.getType()->isIntOrIntVectorTy();
+ if (hoistMulAddAssociation(I, L, SafetyInfo, MSSAU, AC, DT)) {
++NumHoisted;
- ++NumFPAssociationsHoisted;
+ if (IsInt)
+ ++NumIntAssociationsHoisted;
+ else
+ ++NumFPAssociationsHoisted;
return true;
}
diff --git a/llvm/lib/Transforms/Scalar/SeparateConstOffsetFromGEP.cpp b/llvm/lib/Transforms/Scalar/SeparateConstOffsetFromGEP.cpp
index 4481375..5124909 100644
--- a/llvm/lib/Transforms/Scalar/SeparateConstOffsetFromGEP.cpp
+++ b/llvm/lib/Transforms/Scalar/SeparateConstOffsetFromGEP.cpp
@@ -391,6 +391,11 @@ private:
/// and returns true if the splitting succeeds.
bool splitGEP(GetElementPtrInst *GEP);
+ /// Tries to reorder the given GEP with the GEP that produces the base if
+ /// doing so results in producing a constant offset as the outermost
+ /// index.
+ bool reorderGEP(GetElementPtrInst *GEP, TargetTransformInfo &TTI);
+
/// Lower a GEP with multiple indices into multiple GEPs with a single index.
/// Function splitGEP already split the original GEP into a variadic part and
/// a constant offset (i.e., AccumulativeByteOffset). This function lowers the
@@ -964,6 +969,66 @@ SeparateConstOffsetFromGEP::lowerToArithmetics(GetElementPtrInst *Variadic,
Variadic->eraseFromParent();
}
+bool SeparateConstOffsetFromGEP::reorderGEP(GetElementPtrInst *GEP,
+ TargetTransformInfo &TTI) {
+ Type *GEPType = GEP->getResultElementType();
+ // TODO: support reordering for non-trivial GEP chains
+ if (GEPType->isAggregateType() || GEP->getNumIndices() != 1)
+ return false;
+
+ auto PtrGEP = dyn_cast<GetElementPtrInst>(GEP->getPointerOperand());
+ if (!PtrGEP)
+ return false;
+ Type *PtrGEPType = PtrGEP->getResultElementType();
+ // TODO: support reordering for non-trivial GEP chains
+ if (PtrGEPType->isAggregateType() || PtrGEP->getNumIndices() != 1)
+ return false;
+
+ // TODO: support reordering for non-trivial GEP chains
+ if (PtrGEPType != GEPType ||
+ PtrGEP->getSourceElementType() != GEP->getSourceElementType())
+ return false;
+
+ bool NestedNeedsExtraction;
+ int64_t NestedByteOffset =
+ accumulateByteOffset(PtrGEP, NestedNeedsExtraction);
+ if (!NestedNeedsExtraction)
+ return false;
+
+ unsigned AddrSpace = PtrGEP->getPointerAddressSpace();
+ if (!TTI.isLegalAddressingMode(GEP->getResultElementType(),
+ /*BaseGV=*/nullptr, NestedByteOffset,
+ /*HasBaseReg=*/true, /*Scale=*/0, AddrSpace))
+ return false;
+
+ IRBuilder<> Builder(GEP);
+ Builder.SetCurrentDebugLocation(GEP->getDebugLoc());
+ bool GEPInBounds = GEP->isInBounds();
+ bool PtrGEPInBounds = PtrGEP->isInBounds();
+ bool IsChainInBounds = GEPInBounds && PtrGEPInBounds;
+ if (IsChainInBounds) {
+ auto GEPIdx = GEP->indices().begin();
+ auto KnownGEPIdx = computeKnownBits(GEPIdx->get(), *DL);
+ IsChainInBounds &= KnownGEPIdx.isNonNegative();
+ if (IsChainInBounds) {
+ auto PtrGEPIdx = GEP->indices().begin();
+ auto KnownPtrGEPIdx = computeKnownBits(PtrGEPIdx->get(), *DL);
+ IsChainInBounds &= KnownPtrGEPIdx.isNonNegative();
+ }
+ }
+
+ // For trivial GEP chains, we can swap the indicies.
+ auto NewSrc = Builder.CreateGEP(PtrGEPType, PtrGEP->getPointerOperand(),
+ SmallVector<Value *, 4>(GEP->indices()));
+ cast<GetElementPtrInst>(NewSrc)->setIsInBounds(IsChainInBounds);
+ auto NewGEP = Builder.CreateGEP(GEPType, NewSrc,
+ SmallVector<Value *, 4>(PtrGEP->indices()));
+ cast<GetElementPtrInst>(NewGEP)->setIsInBounds(IsChainInBounds);
+ GEP->replaceAllUsesWith(NewGEP);
+ RecursivelyDeleteTriviallyDeadInstructions(GEP);
+ return true;
+}
+
bool SeparateConstOffsetFromGEP::splitGEP(GetElementPtrInst *GEP) {
// Skip vector GEPs.
if (GEP->getType()->isVectorTy())
@@ -979,11 +1044,13 @@ bool SeparateConstOffsetFromGEP::splitGEP(GetElementPtrInst *GEP) {
bool NeedsExtraction;
int64_t AccumulativeByteOffset = accumulateByteOffset(GEP, NeedsExtraction);
- if (!NeedsExtraction)
- return Changed;
-
TargetTransformInfo &TTI = GetTTI(*GEP->getFunction());
+ if (!NeedsExtraction) {
+ Changed |= reorderGEP(GEP, TTI);
+ return Changed;
+ }
+
// If LowerGEP is disabled, before really splitting the GEP, check whether the
// backend supports the addressing mode we are about to produce. If no, this
// splitting probably won't be beneficial.
diff --git a/llvm/lib/Transforms/Utils/MemoryTaggingSupport.cpp b/llvm/lib/Transforms/Utils/MemoryTaggingSupport.cpp
index 4336695..1ffa003 100644
--- a/llvm/lib/Transforms/Utils/MemoryTaggingSupport.cpp
+++ b/llvm/lib/Transforms/Utils/MemoryTaggingSupport.cpp
@@ -110,18 +110,22 @@ Instruction *getUntagLocationIfFunctionExit(Instruction &Inst) {
}
void StackInfoBuilder::visit(Instruction &Inst) {
- // Check for non-intrinsic debug-info records.
+ // Visit non-intrinsic debug-info records attached to Inst.
for (auto &DPV : Inst.getDbgValueRange()) {
- for (Value *V : DPV.location_ops()) {
+ auto AddIfInteresting = [&](Value *V) {
if (auto *AI = dyn_cast_or_null<AllocaInst>(V)) {
if (!isInterestingAlloca(*AI))
- continue;
+ return;
AllocaInfo &AInfo = Info.AllocasToInstrument[AI];
auto &DPVVec = AInfo.DbgVariableRecords;
if (DPVVec.empty() || DPVVec.back() != &DPV)
DPVVec.push_back(&DPV);
}
- }
+ };
+
+ for_each(DPV.location_ops(), AddIfInteresting);
+ if (DPV.isDbgAssign())
+ AddIfInteresting(DPV.getAddress());
}
if (CallInst *CI = dyn_cast<CallInst>(&Inst)) {
@@ -152,16 +156,19 @@ void StackInfoBuilder::visit(Instruction &Inst) {
return;
}
if (auto *DVI = dyn_cast<DbgVariableIntrinsic>(&Inst)) {
- for (Value *V : DVI->location_ops()) {
+ auto AddIfInteresting = [&](Value *V) {
if (auto *AI = dyn_cast_or_null<AllocaInst>(V)) {
if (!isInterestingAlloca(*AI))
- continue;
+ return;
AllocaInfo &AInfo = Info.AllocasToInstrument[AI];
auto &DVIVec = AInfo.DbgVariableIntrinsics;
if (DVIVec.empty() || DVIVec.back() != DVI)
DVIVec.push_back(DVI);
}
- }
+ };
+ for_each(DVI->location_ops(), AddIfInteresting);
+ if (auto *DAI = dyn_cast<DbgAssignIntrinsic>(DVI))
+ AddIfInteresting(DAI->getAddress());
}
Instruction *ExitUntag = getUntagLocationIfFunctionExit(Inst);
diff --git a/llvm/lib/Transforms/Utils/ValueMapper.cpp b/llvm/lib/Transforms/Utils/ValueMapper.cpp
index a8ae3ee..93a4c82 100644
--- a/llvm/lib/Transforms/Utils/ValueMapper.cpp
+++ b/llvm/lib/Transforms/Utils/ValueMapper.cpp
@@ -552,6 +552,7 @@ void Mapper::remapDPValue(DPValue &V) {
V.setKillAddress();
else if (NewAddr)
V.setAddress(NewAddr);
+ V.setAssignId(cast<DIAssignID>(mapMetadata(V.getAssignID())));
}
// Find Value operands and remap those.
diff --git a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
index 1a7b301..98b177c 100644
--- a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
+++ b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
@@ -670,17 +670,6 @@ protected:
/// running the verifier. Return the preheader of the completed vector loop.
BasicBlock *completeLoopSkeleton();
- /// Collect poison-generating recipes that may generate a poison value that is
- /// used after vectorization, even when their operands are not poison. Those
- /// recipes meet the following conditions:
- /// * Contribute to the address computation of a recipe generating a widen
- /// memory load/store (VPWidenMemoryInstructionRecipe or
- /// VPInterleaveRecipe).
- /// * Such a widen memory load/store has at least one underlying Instruction
- /// that is in a basic block that needs predication and after vectorization
- /// the generated instruction won't be predicated.
- void collectPoisonGeneratingRecipes(VPTransformState &State);
-
/// Allow subclasses to override and print debug traces before/after vplan
/// execution, when trace information is requested.
virtual void printDebugTracesAtStart(){};
@@ -1069,91 +1058,6 @@ static std::string getDebugLocString(const Loop *L) {
}
#endif
-void InnerLoopVectorizer::collectPoisonGeneratingRecipes(
- VPTransformState &State) {
-
- // Collect recipes in the backward slice of `Root` that may generate a poison
- // value that is used after vectorization.
- SmallPtrSet<VPRecipeBase *, 16> Visited;
- auto collectPoisonGeneratingInstrsInBackwardSlice([&](VPRecipeBase *Root) {
- SmallVector<VPRecipeBase *, 16> Worklist;
- Worklist.push_back(Root);
-
- // Traverse the backward slice of Root through its use-def chain.
- while (!Worklist.empty()) {
- VPRecipeBase *CurRec = Worklist.back();
- Worklist.pop_back();
-
- if (!Visited.insert(CurRec).second)
- continue;
-
- // Prune search if we find another recipe generating a widen memory
- // instruction. Widen memory instructions involved in address computation
- // will lead to gather/scatter instructions, which don't need to be
- // handled.
- if (isa<VPWidenMemoryInstructionRecipe>(CurRec) ||
- isa<VPInterleaveRecipe>(CurRec) ||
- isa<VPScalarIVStepsRecipe>(CurRec) ||
- isa<VPCanonicalIVPHIRecipe>(CurRec) ||
- isa<VPActiveLaneMaskPHIRecipe>(CurRec))
- continue;
-
- // This recipe contributes to the address computation of a widen
- // load/store. If the underlying instruction has poison-generating flags,
- // drop them directly.
- if (auto *RecWithFlags = dyn_cast<VPRecipeWithIRFlags>(CurRec)) {
- RecWithFlags->dropPoisonGeneratingFlags();
- } else {
- Instruction *Instr = dyn_cast_or_null<Instruction>(
- CurRec->getVPSingleValue()->getUnderlyingValue());
- (void)Instr;
- assert((!Instr || !Instr->hasPoisonGeneratingFlags()) &&
- "found instruction with poison generating flags not covered by "
- "VPRecipeWithIRFlags");
- }
-
- // Add new definitions to the worklist.
- for (VPValue *operand : CurRec->operands())
- if (VPRecipeBase *OpDef = operand->getDefiningRecipe())
- Worklist.push_back(OpDef);
- }
- });
-
- // Traverse all the recipes in the VPlan and collect the poison-generating
- // recipes in the backward slice starting at the address of a VPWidenRecipe or
- // VPInterleaveRecipe.
- auto Iter = vp_depth_first_deep(State.Plan->getEntry());
- for (VPBasicBlock *VPBB : VPBlockUtils::blocksOnly<VPBasicBlock>(Iter)) {
- for (VPRecipeBase &Recipe : *VPBB) {
- if (auto *WidenRec = dyn_cast<VPWidenMemoryInstructionRecipe>(&Recipe)) {
- Instruction &UnderlyingInstr = WidenRec->getIngredient();
- VPRecipeBase *AddrDef = WidenRec->getAddr()->getDefiningRecipe();
- if (AddrDef && WidenRec->isConsecutive() &&
- Legal->blockNeedsPredication(UnderlyingInstr.getParent()))
- collectPoisonGeneratingInstrsInBackwardSlice(AddrDef);
- } else if (auto *InterleaveRec = dyn_cast<VPInterleaveRecipe>(&Recipe)) {
- VPRecipeBase *AddrDef = InterleaveRec->getAddr()->getDefiningRecipe();
- if (AddrDef) {
- // Check if any member of the interleave group needs predication.
- const InterleaveGroup<Instruction> *InterGroup =
- InterleaveRec->getInterleaveGroup();
- bool NeedPredication = false;
- for (int I = 0, NumMembers = InterGroup->getNumMembers();
- I < NumMembers; ++I) {
- Instruction *Member = InterGroup->getMember(I);
- if (Member)
- NeedPredication |=
- Legal->blockNeedsPredication(Member->getParent());
- }
-
- if (NeedPredication)
- collectPoisonGeneratingInstrsInBackwardSlice(AddrDef);
- }
- }
- }
- }
-}
-
namespace llvm {
// Loop vectorization cost-model hints how the scalar epilogue loop should be
@@ -2106,16 +2010,18 @@ public:
BestTripCount = *EstimatedTC;
}
+ BestTripCount = std::max(BestTripCount, 1U);
InstructionCost NewMemCheckCost = MemCheckCost / BestTripCount;
// Let's ensure the cost is always at least 1.
NewMemCheckCost = std::max(*NewMemCheckCost.getValue(),
(InstructionCost::CostType)1);
- LLVM_DEBUG(dbgs()
- << "We expect runtime memory checks to be hoisted "
- << "out of the outer loop. Cost reduced from "
- << MemCheckCost << " to " << NewMemCheckCost << '\n');
+ if (BestTripCount > 1)
+ LLVM_DEBUG(dbgs()
+ << "We expect runtime memory checks to be hoisted "
+ << "out of the outer loop. Cost reduced from "
+ << MemCheckCost << " to " << NewMemCheckCost << '\n');
MemCheckCost = NewMemCheckCost;
}
@@ -7591,8 +7497,6 @@ LoopVectorizationPlanner::executePlan(
State.LVer->prepareNoAliasMetadata();
}
- ILV.collectPoisonGeneratingRecipes(State);
-
ILV.printDebugTracesAtStart();
//===------------------------------------------------===//
@@ -8869,6 +8773,10 @@ LoopVectorizationPlanner::tryToBuildVPlanWithVPRecipes(VFRange &Range) {
// in ways that accessing values using original IR values is incorrect.
Plan->disableValue2VPValue();
+ VPlanTransforms::dropPoisonGeneratingRecipes(*Plan, [this](BasicBlock *BB) {
+ return Legal->blockNeedsPredication(BB);
+ });
+
// Sink users of fixed-order recurrence past the recipe defining the previous
// value and introduce FirstOrderRecurrenceSplice VPInstructions.
if (!VPlanTransforms::adjustFixedOrderRecurrences(*Plan, Builder))
diff --git a/llvm/lib/Transforms/Vectorize/VPlan.h b/llvm/lib/Transforms/Vectorize/VPlan.h
index 162a3c4..13e1859 100644
--- a/llvm/lib/Transforms/Vectorize/VPlan.h
+++ b/llvm/lib/Transforms/Vectorize/VPlan.h
@@ -2085,8 +2085,11 @@ public:
~VPReplicateRecipe() override = default;
VPRecipeBase *clone() override {
- return new VPReplicateRecipe(getUnderlyingInstr(), operands(), IsUniform,
- isPredicated() ? getMask() : nullptr);
+ auto *Copy =
+ new VPReplicateRecipe(getUnderlyingInstr(), operands(), IsUniform,
+ isPredicated() ? getMask() : nullptr);
+ Copy->transferFlags(*this);
+ return Copy;
}
VP_CLASSOF_IMPL(VPDef::VPReplicateSC)
diff --git a/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp b/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp
index 71f5285..16855a9 100644
--- a/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp
+++ b/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp
@@ -1201,3 +1201,86 @@ void VPlanTransforms::addActiveLaneMask(
CompareToReplace->eraseFromParent();
}
}
+
+void VPlanTransforms::dropPoisonGeneratingRecipes(
+ VPlan &Plan, function_ref<bool(BasicBlock *)> BlockNeedsPredication) {
+ // Collect recipes in the backward slice of `Root` that may generate a poison
+ // value that is used after vectorization.
+ SmallPtrSet<VPRecipeBase *, 16> Visited;
+ auto collectPoisonGeneratingInstrsInBackwardSlice([&](VPRecipeBase *Root) {
+ SmallVector<VPRecipeBase *, 16> Worklist;
+ Worklist.push_back(Root);
+
+ // Traverse the backward slice of Root through its use-def chain.
+ while (!Worklist.empty()) {
+ VPRecipeBase *CurRec = Worklist.back();
+ Worklist.pop_back();
+
+ if (!Visited.insert(CurRec).second)
+ continue;
+
+ // Prune search if we find another recipe generating a widen memory
+ // instruction. Widen memory instructions involved in address computation
+ // will lead to gather/scatter instructions, which don't need to be
+ // handled.
+ if (isa<VPWidenMemoryInstructionRecipe>(CurRec) ||
+ isa<VPInterleaveRecipe>(CurRec) ||
+ isa<VPScalarIVStepsRecipe>(CurRec) ||
+ isa<VPCanonicalIVPHIRecipe>(CurRec) ||
+ isa<VPActiveLaneMaskPHIRecipe>(CurRec))
+ continue;
+
+ // This recipe contributes to the address computation of a widen
+ // load/store. If the underlying instruction has poison-generating flags,
+ // drop them directly.
+ if (auto *RecWithFlags = dyn_cast<VPRecipeWithIRFlags>(CurRec)) {
+ RecWithFlags->dropPoisonGeneratingFlags();
+ } else {
+ Instruction *Instr = dyn_cast_or_null<Instruction>(
+ CurRec->getVPSingleValue()->getUnderlyingValue());
+ (void)Instr;
+ assert((!Instr || !Instr->hasPoisonGeneratingFlags()) &&
+ "found instruction with poison generating flags not covered by "
+ "VPRecipeWithIRFlags");
+ }
+
+ // Add new definitions to the worklist.
+ for (VPValue *operand : CurRec->operands())
+ if (VPRecipeBase *OpDef = operand->getDefiningRecipe())
+ Worklist.push_back(OpDef);
+ }
+ });
+
+ // Traverse all the recipes in the VPlan and collect the poison-generating
+ // recipes in the backward slice starting at the address of a VPWidenRecipe or
+ // VPInterleaveRecipe.
+ auto Iter = vp_depth_first_deep(Plan.getEntry());
+ for (VPBasicBlock *VPBB : VPBlockUtils::blocksOnly<VPBasicBlock>(Iter)) {
+ for (VPRecipeBase &Recipe : *VPBB) {
+ if (auto *WidenRec = dyn_cast<VPWidenMemoryInstructionRecipe>(&Recipe)) {
+ Instruction &UnderlyingInstr = WidenRec->getIngredient();
+ VPRecipeBase *AddrDef = WidenRec->getAddr()->getDefiningRecipe();
+ if (AddrDef && WidenRec->isConsecutive() &&
+ BlockNeedsPredication(UnderlyingInstr.getParent()))
+ collectPoisonGeneratingInstrsInBackwardSlice(AddrDef);
+ } else if (auto *InterleaveRec = dyn_cast<VPInterleaveRecipe>(&Recipe)) {
+ VPRecipeBase *AddrDef = InterleaveRec->getAddr()->getDefiningRecipe();
+ if (AddrDef) {
+ // Check if any member of the interleave group needs predication.
+ const InterleaveGroup<Instruction> *InterGroup =
+ InterleaveRec->getInterleaveGroup();
+ bool NeedPredication = false;
+ for (int I = 0, NumMembers = InterGroup->getNumMembers();
+ I < NumMembers; ++I) {
+ Instruction *Member = InterGroup->getMember(I);
+ if (Member)
+ NeedPredication |= BlockNeedsPredication(Member->getParent());
+ }
+
+ if (NeedPredication)
+ collectPoisonGeneratingInstrsInBackwardSlice(AddrDef);
+ }
+ }
+ }
+ }
+}
diff --git a/llvm/lib/Transforms/Vectorize/VPlanTransforms.h b/llvm/lib/Transforms/Vectorize/VPlanTransforms.h
index 3bf9111..4a8e9f5 100644
--- a/llvm/lib/Transforms/Vectorize/VPlanTransforms.h
+++ b/llvm/lib/Transforms/Vectorize/VPlanTransforms.h
@@ -84,6 +84,21 @@ struct VPlanTransforms {
const MapVector<Instruction *, uint64_t> &MinBWs,
LLVMContext &Ctx);
+ /// Drop poison flags from recipes that may generate a poison value that is
+ /// used after vectorization, even when their operands are not poison. Those
+ /// recipes meet the following conditions:
+ /// * Contribute to the address computation of a recipe generating a widen
+ /// memory load/store (VPWidenMemoryInstructionRecipe or
+ /// VPInterleaveRecipe).
+ /// * Such a widen memory load/store has at least one underlying Instruction
+ /// that is in a basic block that needs predication and after vectorization
+ /// the generated instruction won't be predicated.
+ /// Uses \p BlockNeedsPredication to check if a block needs predicating.
+ /// TODO: Replace BlockNeedsPredication callback with retrieving info from
+ /// VPlan directly.
+ static void dropPoisonGeneratingRecipes(
+ VPlan &Plan, function_ref<bool(BasicBlock *)> BlockNeedsPredication);
+
private:
/// Remove redundant VPBasicBlocks by merging them into their predecessor if
/// the predecessor has a single successor.
diff --git a/llvm/test/Analysis/CostModel/AArch64/cast.ll b/llvm/test/Analysis/CostModel/AArch64/cast.ll
index 5dd37e8..0cd444f 100644
--- a/llvm/test/Analysis/CostModel/AArch64/cast.ll
+++ b/llvm/test/Analysis/CostModel/AArch64/cast.ll
@@ -1,9 +1,9 @@
; NOTE: Assertions have been autogenerated by utils/update_analyze_test_checks.py
-; RUN: opt -passes="print<cost-model>" 2>&1 -disable-output -mtriple=aarch64-none-linux-gnueabi %s | FileCheck --check-prefixes=CHECK,CHECK-NOFP16 %s
-; RUN: opt -passes="print<cost-model>" 2>&1 -disable-output -mtriple=aarch64-none-linux-gnueabi -mattr=+sve -force-streaming-compatible-sve %s | FileCheck --check-prefixes=SVE,SVE128-NO-NEON %s
-; RUN: opt -passes="print<cost-model>" 2>&1 -disable-output -mtriple=aarch64-none-linux-gnueabi -mattr=+fullfp16 %s | FileCheck --check-prefixes=CHECK,CHECK-FP16 %s
-; RUN: opt -passes="print<cost-model>" 2>&1 -disable-output -mtriple=aarch64-none-linux-gnueabi -mattr=+sve -aarch64-sve-vector-bits-min=256 %s | FileCheck --check-prefixes=SVE,FIXED-MIN-256 %s
-; RUN: opt -passes="print<cost-model>" 2>&1 -disable-output -mtriple=aarch64-none-linux-gnueabi -mattr=+sve -aarch64-sve-vector-bits-min=2048 %s | FileCheck --check-prefixes=SVE,FIXED-MIN-2048 %s
+; RUN: opt -passes="print<cost-model>" 2>&1 -disable-output -mtriple=aarch64 %s | FileCheck --check-prefixes=CHECK,CHECK-NOFP16 %s
+; RUN: opt -passes="print<cost-model>" 2>&1 -disable-output -mtriple=aarch64 -mattr=+sve -force-streaming-compatible-sve %s | FileCheck --check-prefixes=SVE,SVE128-NO-NEON %s
+; RUN: opt -passes="print<cost-model>" 2>&1 -disable-output -mtriple=aarch64 -mattr=+fullfp16 %s | FileCheck --check-prefixes=CHECK,CHECK-FP16 %s
+; RUN: opt -passes="print<cost-model>" 2>&1 -disable-output -mtriple=aarch64 -mattr=+sve -aarch64-sve-vector-bits-min=256 %s | FileCheck --check-prefixes=SVE,FIXED-MIN-256 %s
+; RUN: opt -passes="print<cost-model>" 2>&1 -disable-output -mtriple=aarch64 -mattr=+sve -aarch64-sve-vector-bits-min=2048 %s | FileCheck --check-prefixes=SVE,FIXED-MIN-2048 %s
target datalayout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128"
diff --git a/llvm/test/Analysis/CostModel/AArch64/fptoi_sat.ll b/llvm/test/Analysis/CostModel/AArch64/fptoi_sat.ll
index a352424..e4e2914 100644
--- a/llvm/test/Analysis/CostModel/AArch64/fptoi_sat.ll
+++ b/llvm/test/Analysis/CostModel/AArch64/fptoi_sat.ll
@@ -1,6 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_analyze_test_checks.py
-; RUN: opt -passes="print<cost-model>" 2>&1 -disable-output -mtriple=aarch64-none-linux-gnueabi %s | FileCheck --check-prefixes=CHECK,CHECK-NOFP16 %s
-; RUN: opt -passes="print<cost-model>" 2>&1 -disable-output -mtriple=aarch64-none-linux-gnueabi -mattr=+fullfp16 %s | FileCheck --check-prefixes=CHECK,CHECK-FP16 %s
+; RUN: opt -passes="print<cost-model>" 2>&1 -disable-output -mtriple=aarch64 %s | FileCheck --check-prefixes=CHECK,CHECK-NOFP16 %s
+; RUN: opt -passes="print<cost-model>" 2>&1 -disable-output -mtriple=aarch64 -mattr=+fullfp16 %s | FileCheck --check-prefixes=CHECK,CHECK-FP16 %s
target datalayout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128"
diff --git a/llvm/test/Analysis/CostModel/RISCV/rvv-vectorextract.ll b/llvm/test/Analysis/CostModel/RISCV/rvv-vectorextract.ll
new file mode 100644
index 0000000..1e2d1f4
--- /dev/null
+++ b/llvm/test/Analysis/CostModel/RISCV/rvv-vectorextract.ll
@@ -0,0 +1,169 @@
+; NOTE: Assertions have been autogenerated by utils/update_analyze_test_checks.py UTC_ARGS: --version 4
+; RUN: opt -passes="print<cost-model>" 2>&1 -disable-output -mtriple=riscv64 -mattr=+v < %s | FileCheck %s
+; RUN: opt -passes="print<cost-model>" 2>&1 -disable-output -mtriple=riscv64 -mattr=+v -cost-kind=code-size < %s | FileCheck %s --check-prefix=SIZE
+
+define void @vector_extract_nxv128i8_0(<vscale x 128 x i8> %v) {
+; CHECK-LABEL: 'vector_extract_nxv128i8_0'
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %scalable_mf8 = call <vscale x 1 x i8> @llvm.vector.extract.nxv1i8.nxv128i8(<vscale x 128 x i8> %v, i64 0)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %scalable_mf4 = call <vscale x 2 x i8> @llvm.vector.extract.nxv2i8.nxv128i8(<vscale x 128 x i8> %v, i64 0)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %scalable_mf2 = call <vscale x 4 x i8> @llvm.vector.extract.nxv4i8.nxv128i8(<vscale x 128 x i8> %v, i64 0)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %scalable_m1 = call <vscale x 8 x i8> @llvm.vector.extract.nxv8i8.nxv128i8(<vscale x 128 x i8> %v, i64 0)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %scalable_m2 = call <vscale x 16 x i8> @llvm.vector.extract.nxv16i8.nxv128i8(<vscale x 128 x i8> %v, i64 0)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %scalable_m4 = call <vscale x 32 x i8> @llvm.vector.extract.nxv32i8.nxv128i8(<vscale x 128 x i8> %v, i64 0)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %scalable_m8 = call <vscale x 64 x i8> @llvm.vector.extract.nxv64i8.nxv128i8(<vscale x 128 x i8> %v, i64 0)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %fixed_mf8 = call <2 x i8> @llvm.vector.extract.v2i8.nxv128i8(<vscale x 128 x i8> %v, i64 0)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %fixed_mf4 = call <4 x i8> @llvm.vector.extract.v4i8.nxv128i8(<vscale x 128 x i8> %v, i64 0)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %fixed_mf2 = call <8 x i8> @llvm.vector.extract.v8i8.nxv128i8(<vscale x 128 x i8> %v, i64 0)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %fixed_m1 = call <16 x i8> @llvm.vector.extract.v16i8.nxv128i8(<vscale x 128 x i8> %v, i64 0)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %fixed_m2 = call <32 x i8> @llvm.vector.extract.v32i8.nxv128i8(<vscale x 128 x i8> %v, i64 0)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %fixed_m4 = call <64 x i8> @llvm.vector.extract.v64i8.nxv128i8(<vscale x 128 x i8> %v, i64 0)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %fixed_m8 = call <128 x i8> @llvm.vector.extract.v128i8.nxv128i8(<vscale x 128 x i8> %v, i64 0)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
+;
+; SIZE-LABEL: 'vector_extract_nxv128i8_0'
+; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %scalable_mf8 = call <vscale x 1 x i8> @llvm.vector.extract.nxv1i8.nxv128i8(<vscale x 128 x i8> %v, i64 0)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %scalable_mf4 = call <vscale x 2 x i8> @llvm.vector.extract.nxv2i8.nxv128i8(<vscale x 128 x i8> %v, i64 0)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %scalable_mf2 = call <vscale x 4 x i8> @llvm.vector.extract.nxv4i8.nxv128i8(<vscale x 128 x i8> %v, i64 0)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %scalable_m1 = call <vscale x 8 x i8> @llvm.vector.extract.nxv8i8.nxv128i8(<vscale x 128 x i8> %v, i64 0)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %scalable_m2 = call <vscale x 16 x i8> @llvm.vector.extract.nxv16i8.nxv128i8(<vscale x 128 x i8> %v, i64 0)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %scalable_m4 = call <vscale x 32 x i8> @llvm.vector.extract.nxv32i8.nxv128i8(<vscale x 128 x i8> %v, i64 0)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %scalable_m8 = call <vscale x 64 x i8> @llvm.vector.extract.nxv64i8.nxv128i8(<vscale x 128 x i8> %v, i64 0)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %fixed_mf8 = call <2 x i8> @llvm.vector.extract.v2i8.nxv128i8(<vscale x 128 x i8> %v, i64 0)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %fixed_mf4 = call <4 x i8> @llvm.vector.extract.v4i8.nxv128i8(<vscale x 128 x i8> %v, i64 0)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %fixed_mf2 = call <8 x i8> @llvm.vector.extract.v8i8.nxv128i8(<vscale x 128 x i8> %v, i64 0)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %fixed_m1 = call <16 x i8> @llvm.vector.extract.v16i8.nxv128i8(<vscale x 128 x i8> %v, i64 0)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %fixed_m2 = call <32 x i8> @llvm.vector.extract.v32i8.nxv128i8(<vscale x 128 x i8> %v, i64 0)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %fixed_m4 = call <64 x i8> @llvm.vector.extract.v64i8.nxv128i8(<vscale x 128 x i8> %v, i64 0)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %fixed_m8 = call <128 x i8> @llvm.vector.extract.v128i8.nxv128i8(<vscale x 128 x i8> %v, i64 0)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret void
+;
+ %scalable_mf8 = call <vscale x 1 x i8> @llvm.vector.extract.nxv1i8.nxv128i8(<vscale x 128 x i8> %v, i64 0)
+ %scalable_mf4 = call <vscale x 2 x i8> @llvm.vector.extract.nxv2i8.nxv128i8(<vscale x 128 x i8> %v, i64 0)
+ %scalable_mf2 = call <vscale x 4 x i8> @llvm.vector.extract.nxv4i8.nxv128i8(<vscale x 128 x i8> %v, i64 0)
+ %scalable_m1 = call <vscale x 8 x i8> @llvm.vector.extract.nxv8i8.nxv128i8(<vscale x 128 x i8> %v, i64 0)
+ %scalable_m2 = call <vscale x 16 x i8> @llvm.vector.extract.nxv16i8.nxv128i8(<vscale x 128 x i8> %v, i64 0)
+ %scalable_m4 = call <vscale x 32 x i8> @llvm.vector.extract.nxv32i8.nxv128i8(<vscale x 128 x i8> %v, i64 0)
+ %scalable_m8 = call <vscale x 64 x i8> @llvm.vector.extract.nxv64i8.nxv128i8(<vscale x 128 x i8> %v, i64 0)
+
+ %fixed_mf8 = call <2 x i8> @llvm.vector.extract.v2i8.nxv128i8(<vscale x 128 x i8> %v, i64 0)
+ %fixed_mf4 = call <4 x i8> @llvm.vector.extract.v4i8.nxv128i8(<vscale x 128 x i8> %v, i64 0)
+ %fixed_mf2 = call <8 x i8> @llvm.vector.extract.v8i8.nxv128i8(<vscale x 128 x i8> %v, i64 0)
+ %fixed_m1 = call <16 x i8> @llvm.vector.extract.v16i8.nxv128i8(<vscale x 128 x i8> %v, i64 0)
+ %fixed_m2 = call <32 x i8> @llvm.vector.extract.v32i8.nxv128i8(<vscale x 128 x i8> %v, i64 0)
+ %fixed_m4 = call <64 x i8> @llvm.vector.extract.v64i8.nxv128i8(<vscale x 128 x i8> %v, i64 0)
+ %fixed_m8 = call <128 x i8> @llvm.vector.extract.v128i8.nxv128i8(<vscale x 128 x i8> %v, i64 0)
+ ret void
+}
+
+define void @vector_extract_nxv128i8_1(<vscale x 128 x i8> %v) {
+; CHECK-LABEL: 'vector_extract_nxv128i8_1'
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %scalable_mf8 = call <vscale x 1 x i8> @llvm.vector.extract.nxv1i8.nxv128i8(<vscale x 128 x i8> %v, i64 1)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %scalable_mf4 = call <vscale x 2 x i8> @llvm.vector.extract.nxv2i8.nxv128i8(<vscale x 128 x i8> %v, i64 2)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %scalable_mf2 = call <vscale x 4 x i8> @llvm.vector.extract.nxv4i8.nxv128i8(<vscale x 128 x i8> %v, i64 4)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %scalable_m1 = call <vscale x 8 x i8> @llvm.vector.extract.nxv8i8.nxv128i8(<vscale x 128 x i8> %v, i64 8)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %scalable_m2 = call <vscale x 16 x i8> @llvm.vector.extract.nxv16i8.nxv128i8(<vscale x 128 x i8> %v, i64 16)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %scalable_m4 = call <vscale x 32 x i8> @llvm.vector.extract.nxv32i8.nxv128i8(<vscale x 128 x i8> %v, i64 32)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %scalable_m8 = call <vscale x 64 x i8> @llvm.vector.extract.nxv64i8.nxv128i8(<vscale x 128 x i8> %v, i64 64)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %fixed_mf8 = call <2 x i8> @llvm.vector.extract.v2i8.nxv128i8(<vscale x 128 x i8> %v, i64 2)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %fixed_mf4 = call <4 x i8> @llvm.vector.extract.v4i8.nxv128i8(<vscale x 128 x i8> %v, i64 4)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %fixed_mf2 = call <8 x i8> @llvm.vector.extract.v8i8.nxv128i8(<vscale x 128 x i8> %v, i64 8)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %fixed_m1 = call <16 x i8> @llvm.vector.extract.v16i8.nxv128i8(<vscale x 128 x i8> %v, i64 16)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %fixed_m2 = call <32 x i8> @llvm.vector.extract.v32i8.nxv128i8(<vscale x 128 x i8> %v, i64 32)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %fixed_m4 = call <64 x i8> @llvm.vector.extract.v64i8.nxv128i8(<vscale x 128 x i8> %v, i64 64)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %fixed_m8 = call <128 x i8> @llvm.vector.extract.v128i8.nxv128i8(<vscale x 128 x i8> %v, i64 128)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
+;
+; SIZE-LABEL: 'vector_extract_nxv128i8_1'
+; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %scalable_mf8 = call <vscale x 1 x i8> @llvm.vector.extract.nxv1i8.nxv128i8(<vscale x 128 x i8> %v, i64 1)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %scalable_mf4 = call <vscale x 2 x i8> @llvm.vector.extract.nxv2i8.nxv128i8(<vscale x 128 x i8> %v, i64 2)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %scalable_mf2 = call <vscale x 4 x i8> @llvm.vector.extract.nxv4i8.nxv128i8(<vscale x 128 x i8> %v, i64 4)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %scalable_m1 = call <vscale x 8 x i8> @llvm.vector.extract.nxv8i8.nxv128i8(<vscale x 128 x i8> %v, i64 8)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %scalable_m2 = call <vscale x 16 x i8> @llvm.vector.extract.nxv16i8.nxv128i8(<vscale x 128 x i8> %v, i64 16)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %scalable_m4 = call <vscale x 32 x i8> @llvm.vector.extract.nxv32i8.nxv128i8(<vscale x 128 x i8> %v, i64 32)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %scalable_m8 = call <vscale x 64 x i8> @llvm.vector.extract.nxv64i8.nxv128i8(<vscale x 128 x i8> %v, i64 64)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %fixed_mf8 = call <2 x i8> @llvm.vector.extract.v2i8.nxv128i8(<vscale x 128 x i8> %v, i64 2)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %fixed_mf4 = call <4 x i8> @llvm.vector.extract.v4i8.nxv128i8(<vscale x 128 x i8> %v, i64 4)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %fixed_mf2 = call <8 x i8> @llvm.vector.extract.v8i8.nxv128i8(<vscale x 128 x i8> %v, i64 8)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %fixed_m1 = call <16 x i8> @llvm.vector.extract.v16i8.nxv128i8(<vscale x 128 x i8> %v, i64 16)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %fixed_m2 = call <32 x i8> @llvm.vector.extract.v32i8.nxv128i8(<vscale x 128 x i8> %v, i64 32)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %fixed_m4 = call <64 x i8> @llvm.vector.extract.v64i8.nxv128i8(<vscale x 128 x i8> %v, i64 64)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %fixed_m8 = call <128 x i8> @llvm.vector.extract.v128i8.nxv128i8(<vscale x 128 x i8> %v, i64 128)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret void
+;
+ %scalable_mf8 = call <vscale x 1 x i8> @llvm.vector.extract.nxv1i8.nxv128i8(<vscale x 128 x i8> %v, i64 1)
+ %scalable_mf4 = call <vscale x 2 x i8> @llvm.vector.extract.nxv2i8.nxv128i8(<vscale x 128 x i8> %v, i64 2)
+ %scalable_mf2 = call <vscale x 4 x i8> @llvm.vector.extract.nxv4i8.nxv128i8(<vscale x 128 x i8> %v, i64 4)
+ %scalable_m1 = call <vscale x 8 x i8> @llvm.vector.extract.nxv8i8.nxv128i8(<vscale x 128 x i8> %v, i64 8)
+ %scalable_m2 = call <vscale x 16 x i8> @llvm.vector.extract.nxv16i8.nxv128i8(<vscale x 128 x i8> %v, i64 16)
+ %scalable_m4 = call <vscale x 32 x i8> @llvm.vector.extract.nxv32i8.nxv128i8(<vscale x 128 x i8> %v, i64 32)
+ %scalable_m8 = call <vscale x 64 x i8> @llvm.vector.extract.nxv64i8.nxv128i8(<vscale x 128 x i8> %v, i64 64)
+
+ %fixed_mf8 = call <2 x i8> @llvm.vector.extract.v2i8.nxv128i8(<vscale x 128 x i8> %v, i64 2)
+ %fixed_mf4 = call <4 x i8> @llvm.vector.extract.v4i8.nxv128i8(<vscale x 128 x i8> %v, i64 4)
+ %fixed_mf2 = call <8 x i8> @llvm.vector.extract.v8i8.nxv128i8(<vscale x 128 x i8> %v, i64 8)
+ %fixed_m1 = call <16 x i8> @llvm.vector.extract.v16i8.nxv128i8(<vscale x 128 x i8> %v, i64 16)
+ %fixed_m2 = call <32 x i8> @llvm.vector.extract.v32i8.nxv128i8(<vscale x 128 x i8> %v, i64 32)
+ %fixed_m4 = call <64 x i8> @llvm.vector.extract.v64i8.nxv128i8(<vscale x 128 x i8> %v, i64 64)
+ %fixed_m8 = call <128 x i8> @llvm.vector.extract.v128i8.nxv128i8(<vscale x 128 x i8> %v, i64 128)
+ ret void
+}
+
+define void @vector_extract_v128i8_0(<128 x i8> %v) {
+; CHECK-LABEL: 'vector_extract_v128i8_0'
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %fixed_mf8 = call <2 x i8> @llvm.vector.extract.v2i8.v128i8(<128 x i8> %v, i64 0)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %fixed_mf4 = call <4 x i8> @llvm.vector.extract.v4i8.v128i8(<128 x i8> %v, i64 0)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %fixed_mf2 = call <8 x i8> @llvm.vector.extract.v8i8.v128i8(<128 x i8> %v, i64 0)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %fixed_m1 = call <16 x i8> @llvm.vector.extract.v16i8.v128i8(<128 x i8> %v, i64 0)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %fixed_m2 = call <32 x i8> @llvm.vector.extract.v32i8.v128i8(<128 x i8> %v, i64 0)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %fixed_m4 = call <64 x i8> @llvm.vector.extract.v64i8.v128i8(<128 x i8> %v, i64 0)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %fixed_m8 = call <128 x i8> @llvm.vector.extract.v128i8.v128i8(<128 x i8> %v, i64 0)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
+;
+; SIZE-LABEL: 'vector_extract_v128i8_0'
+; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %fixed_mf8 = call <2 x i8> @llvm.vector.extract.v2i8.v128i8(<128 x i8> %v, i64 0)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %fixed_mf4 = call <4 x i8> @llvm.vector.extract.v4i8.v128i8(<128 x i8> %v, i64 0)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %fixed_mf2 = call <8 x i8> @llvm.vector.extract.v8i8.v128i8(<128 x i8> %v, i64 0)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %fixed_m1 = call <16 x i8> @llvm.vector.extract.v16i8.v128i8(<128 x i8> %v, i64 0)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %fixed_m2 = call <32 x i8> @llvm.vector.extract.v32i8.v128i8(<128 x i8> %v, i64 0)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %fixed_m4 = call <64 x i8> @llvm.vector.extract.v64i8.v128i8(<128 x i8> %v, i64 0)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %fixed_m8 = call <128 x i8> @llvm.vector.extract.v128i8.v128i8(<128 x i8> %v, i64 0)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret void
+;
+ %fixed_mf8 = call <2 x i8> @llvm.vector.extract.v2i8.v128i8(<128 x i8> %v, i64 0)
+ %fixed_mf4 = call <4 x i8> @llvm.vector.extract.v4i8.v128i8(<128 x i8> %v, i64 0)
+ %fixed_mf2 = call <8 x i8> @llvm.vector.extract.v8i8.v128i8(<128 x i8> %v, i64 0)
+ %fixed_m1 = call <16 x i8> @llvm.vector.extract.v16i8.v128i8(<128 x i8> %v, i64 0)
+ %fixed_m2 = call <32 x i8> @llvm.vector.extract.v32i8.v128i8(<128 x i8> %v, i64 0)
+ %fixed_m4 = call <64 x i8> @llvm.vector.extract.v64i8.v128i8(<128 x i8> %v, i64 0)
+ %fixed_m8 = call <128 x i8> @llvm.vector.extract.v128i8.v128i8(<128 x i8> %v, i64 0)
+ ret void
+}
+
+define void @vector_extract_v128i8_1(<128 x i8> %v) {
+; CHECK-LABEL: 'vector_extract_v128i8_1'
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %fixed_mf8 = call <2 x i8> @llvm.vector.extract.v2i8.v128i8(<128 x i8> %v, i64 2)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %fixed_mf4 = call <4 x i8> @llvm.vector.extract.v4i8.v128i8(<128 x i8> %v, i64 4)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %fixed_mf2 = call <8 x i8> @llvm.vector.extract.v8i8.v128i8(<128 x i8> %v, i64 8)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %fixed_m1 = call <16 x i8> @llvm.vector.extract.v16i8.v128i8(<128 x i8> %v, i64 16)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %fixed_m2 = call <32 x i8> @llvm.vector.extract.v32i8.v128i8(<128 x i8> %v, i64 32)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %fixed_m4 = call <64 x i8> @llvm.vector.extract.v64i8.v128i8(<128 x i8> %v, i64 64)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
+;
+; SIZE-LABEL: 'vector_extract_v128i8_1'
+; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %fixed_mf8 = call <2 x i8> @llvm.vector.extract.v2i8.v128i8(<128 x i8> %v, i64 2)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %fixed_mf4 = call <4 x i8> @llvm.vector.extract.v4i8.v128i8(<128 x i8> %v, i64 4)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %fixed_mf2 = call <8 x i8> @llvm.vector.extract.v8i8.v128i8(<128 x i8> %v, i64 8)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %fixed_m1 = call <16 x i8> @llvm.vector.extract.v16i8.v128i8(<128 x i8> %v, i64 16)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %fixed_m2 = call <32 x i8> @llvm.vector.extract.v32i8.v128i8(<128 x i8> %v, i64 32)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %fixed_m4 = call <64 x i8> @llvm.vector.extract.v64i8.v128i8(<128 x i8> %v, i64 64)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret void
+;
+ %fixed_mf8 = call <2 x i8> @llvm.vector.extract.v2i8.v128i8(<128 x i8> %v, i64 2)
+ %fixed_mf4 = call <4 x i8> @llvm.vector.extract.v4i8.v128i8(<128 x i8> %v, i64 4)
+ %fixed_mf2 = call <8 x i8> @llvm.vector.extract.v8i8.v128i8(<128 x i8> %v, i64 8)
+ %fixed_m1 = call <16 x i8> @llvm.vector.extract.v16i8.v128i8(<128 x i8> %v, i64 16)
+ %fixed_m2 = call <32 x i8> @llvm.vector.extract.v32i8.v128i8(<128 x i8> %v, i64 32)
+ %fixed_m4 = call <64 x i8> @llvm.vector.extract.v64i8.v128i8(<128 x i8> %v, i64 64)
+ ; No @llvm.vector.extract.v128i8.v128i8(<128 x i8> %v, i64 128) since it would overrun
+ ret void
+}
diff --git a/llvm/test/Analysis/CostModel/RISCV/rvv-vectorinsert.ll b/llvm/test/Analysis/CostModel/RISCV/rvv-vectorinsert.ll
new file mode 100644
index 0000000..7a9f45c
--- /dev/null
+++ b/llvm/test/Analysis/CostModel/RISCV/rvv-vectorinsert.ll
@@ -0,0 +1,335 @@
+; NOTE: Assertions have been autogenerated by utils/update_analyze_test_checks.py UTC_ARGS: --version 4
+; RUN: opt -passes="print<cost-model>" 2>&1 -disable-output -mtriple=riscv64 -mattr=+v < %s | FileCheck %s
+; RUN: opt -passes="print<cost-model>" 2>&1 -disable-output -mtriple=riscv64 -mattr=+v -cost-kind=code-size < %s | FileCheck %s --check-prefix=SIZE
+
+define void @vector_insert_nxv128i8_0(<vscale x 128 x i8> %v) {
+; CHECK-LABEL: 'vector_insert_nxv128i8_0'
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %scalable_mf8 = call <vscale x 128 x i8> @llvm.vector.insert.nxv128i8.nxv1i8(<vscale x 128 x i8> %v, <vscale x 1 x i8> undef, i64 0)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %scalable_mf4 = call <vscale x 128 x i8> @llvm.vector.insert.nxv128i8.nxv2i8(<vscale x 128 x i8> %v, <vscale x 2 x i8> undef, i64 0)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %scalable_mf2 = call <vscale x 128 x i8> @llvm.vector.insert.nxv128i8.nxv4i8(<vscale x 128 x i8> %v, <vscale x 4 x i8> undef, i64 0)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %scalable_m1 = call <vscale x 128 x i8> @llvm.vector.insert.nxv128i8.nxv8i8(<vscale x 128 x i8> %v, <vscale x 8 x i8> undef, i64 0)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %scalable_m2 = call <vscale x 128 x i8> @llvm.vector.insert.nxv128i8.nxv16i8(<vscale x 128 x i8> %v, <vscale x 16 x i8> undef, i64 0)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %scalable_m4 = call <vscale x 128 x i8> @llvm.vector.insert.nxv128i8.nxv32i8(<vscale x 128 x i8> %v, <vscale x 32 x i8> undef, i64 0)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %scalable_m8 = call <vscale x 128 x i8> @llvm.vector.insert.nxv128i8.nxv64i8(<vscale x 128 x i8> %v, <vscale x 64 x i8> undef, i64 0)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %fixed_mf8 = call <vscale x 128 x i8> @llvm.vector.insert.nxv128i8.v2i8(<vscale x 128 x i8> %v, <2 x i8> undef, i64 0)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %fixed_mf4 = call <vscale x 128 x i8> @llvm.vector.insert.nxv128i8.v4i8(<vscale x 128 x i8> %v, <4 x i8> undef, i64 0)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %fixed_mf2 = call <vscale x 128 x i8> @llvm.vector.insert.nxv128i8.v8i8(<vscale x 128 x i8> %v, <8 x i8> undef, i64 0)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %fixed_m1 = call <vscale x 128 x i8> @llvm.vector.insert.nxv128i8.v16i8(<vscale x 128 x i8> %v, <16 x i8> undef, i64 0)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %fixed_m2 = call <vscale x 128 x i8> @llvm.vector.insert.nxv128i8.v32i8(<vscale x 128 x i8> %v, <32 x i8> undef, i64 0)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %fixed_m4 = call <vscale x 128 x i8> @llvm.vector.insert.nxv128i8.v64i8(<vscale x 128 x i8> %v, <64 x i8> undef, i64 0)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %fixed_m8 = call <vscale x 128 x i8> @llvm.vector.insert.nxv128i8.v128i8(<vscale x 128 x i8> %v, <128 x i8> undef, i64 0)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
+;
+; SIZE-LABEL: 'vector_insert_nxv128i8_0'
+; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %scalable_mf8 = call <vscale x 128 x i8> @llvm.vector.insert.nxv128i8.nxv1i8(<vscale x 128 x i8> %v, <vscale x 1 x i8> undef, i64 0)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %scalable_mf4 = call <vscale x 128 x i8> @llvm.vector.insert.nxv128i8.nxv2i8(<vscale x 128 x i8> %v, <vscale x 2 x i8> undef, i64 0)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %scalable_mf2 = call <vscale x 128 x i8> @llvm.vector.insert.nxv128i8.nxv4i8(<vscale x 128 x i8> %v, <vscale x 4 x i8> undef, i64 0)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %scalable_m1 = call <vscale x 128 x i8> @llvm.vector.insert.nxv128i8.nxv8i8(<vscale x 128 x i8> %v, <vscale x 8 x i8> undef, i64 0)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %scalable_m2 = call <vscale x 128 x i8> @llvm.vector.insert.nxv128i8.nxv16i8(<vscale x 128 x i8> %v, <vscale x 16 x i8> undef, i64 0)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %scalable_m4 = call <vscale x 128 x i8> @llvm.vector.insert.nxv128i8.nxv32i8(<vscale x 128 x i8> %v, <vscale x 32 x i8> undef, i64 0)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %scalable_m8 = call <vscale x 128 x i8> @llvm.vector.insert.nxv128i8.nxv64i8(<vscale x 128 x i8> %v, <vscale x 64 x i8> undef, i64 0)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %fixed_mf8 = call <vscale x 128 x i8> @llvm.vector.insert.nxv128i8.v2i8(<vscale x 128 x i8> %v, <2 x i8> undef, i64 0)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %fixed_mf4 = call <vscale x 128 x i8> @llvm.vector.insert.nxv128i8.v4i8(<vscale x 128 x i8> %v, <4 x i8> undef, i64 0)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %fixed_mf2 = call <vscale x 128 x i8> @llvm.vector.insert.nxv128i8.v8i8(<vscale x 128 x i8> %v, <8 x i8> undef, i64 0)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %fixed_m1 = call <vscale x 128 x i8> @llvm.vector.insert.nxv128i8.v16i8(<vscale x 128 x i8> %v, <16 x i8> undef, i64 0)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %fixed_m2 = call <vscale x 128 x i8> @llvm.vector.insert.nxv128i8.v32i8(<vscale x 128 x i8> %v, <32 x i8> undef, i64 0)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %fixed_m4 = call <vscale x 128 x i8> @llvm.vector.insert.nxv128i8.v64i8(<vscale x 128 x i8> %v, <64 x i8> undef, i64 0)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %fixed_m8 = call <vscale x 128 x i8> @llvm.vector.insert.nxv128i8.v128i8(<vscale x 128 x i8> %v, <128 x i8> undef, i64 0)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret void
+;
+ %scalable_mf8 = call <vscale x 128 x i8> @llvm.vector.insert.nxv128i8.nxv1i8(<vscale x 128 x i8> %v, <vscale x 1 x i8> undef, i64 0)
+ %scalable_mf4 = call <vscale x 128 x i8> @llvm.vector.insert.nxv128i8.nxv2i8(<vscale x 128 x i8> %v, <vscale x 2 x i8> undef, i64 0)
+ %scalable_mf2 = call <vscale x 128 x i8> @llvm.vector.insert.nxv128i8.nxv4i8(<vscale x 128 x i8> %v, <vscale x 4 x i8> undef, i64 0)
+ %scalable_m1 = call <vscale x 128 x i8> @llvm.vector.insert.nxv128i8.nxv8i8(<vscale x 128 x i8> %v, <vscale x 8 x i8> undef, i64 0)
+ %scalable_m2 = call <vscale x 128 x i8> @llvm.vector.insert.nxv128i8.nxv16i8(<vscale x 128 x i8> %v, <vscale x 16 x i8> undef, i64 0)
+ %scalable_m4 = call <vscale x 128 x i8> @llvm.vector.insert.nxv128i8.nxv32i8(<vscale x 128 x i8> %v, <vscale x 32 x i8> undef, i64 0)
+ %scalable_m8 = call <vscale x 128 x i8> @llvm.vector.insert.nxv128i8.nxv64i8(<vscale x 128 x i8> %v, <vscale x 64 x i8> undef, i64 0)
+
+ %fixed_mf8 = call <vscale x 128 x i8> @llvm.vector.insert.nxv128i8.v2i8(<vscale x 128 x i8> %v, <2 x i8> undef, i64 0)
+ %fixed_mf4 = call <vscale x 128 x i8> @llvm.vector.insert.nxv128i8.v4i8(<vscale x 128 x i8> %v, <4 x i8> undef, i64 0)
+ %fixed_mf2 = call <vscale x 128 x i8> @llvm.vector.insert.nxv128i8.v8i8(<vscale x 128 x i8> %v, <8 x i8> undef, i64 0)
+ %fixed_m1 = call <vscale x 128 x i8> @llvm.vector.insert.nxv128i8.v16i8(<vscale x 128 x i8> %v, <16 x i8> undef, i64 0)
+ %fixed_m2 = call <vscale x 128 x i8> @llvm.vector.insert.nxv128i8.v32i8(<vscale x 128 x i8> %v, <32 x i8> undef, i64 0)
+ %fixed_m4 = call <vscale x 128 x i8> @llvm.vector.insert.nxv128i8.v64i8(<vscale x 128 x i8> %v, <64 x i8> undef, i64 0)
+ %fixed_m8 = call <vscale x 128 x i8> @llvm.vector.insert.nxv128i8.v128i8(<vscale x 128 x i8> %v, <128 x i8> undef, i64 0)
+ ret void
+}
+
+define void @vector_insert_nxv128i8_undef_0() {
+; CHECK-LABEL: 'vector_insert_nxv128i8_undef_0'
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %scalable_mf8 = call <vscale x 128 x i8> @llvm.vector.insert.nxv128i8.nxv1i8(<vscale x 128 x i8> undef, <vscale x 1 x i8> undef, i64 0)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %scalable_mf4 = call <vscale x 128 x i8> @llvm.vector.insert.nxv128i8.nxv2i8(<vscale x 128 x i8> undef, <vscale x 2 x i8> undef, i64 0)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %scalable_mf2 = call <vscale x 128 x i8> @llvm.vector.insert.nxv128i8.nxv4i8(<vscale x 128 x i8> undef, <vscale x 4 x i8> undef, i64 0)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %scalable_m1 = call <vscale x 128 x i8> @llvm.vector.insert.nxv128i8.nxv8i8(<vscale x 128 x i8> undef, <vscale x 8 x i8> undef, i64 0)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %scalable_m2 = call <vscale x 128 x i8> @llvm.vector.insert.nxv128i8.nxv16i8(<vscale x 128 x i8> undef, <vscale x 16 x i8> undef, i64 0)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %scalable_m4 = call <vscale x 128 x i8> @llvm.vector.insert.nxv128i8.nxv32i8(<vscale x 128 x i8> undef, <vscale x 32 x i8> undef, i64 0)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %scalable_m8 = call <vscale x 128 x i8> @llvm.vector.insert.nxv128i8.nxv64i8(<vscale x 128 x i8> undef, <vscale x 64 x i8> undef, i64 0)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %fixed_mf8 = call <vscale x 128 x i8> @llvm.vector.insert.nxv128i8.v2i8(<vscale x 128 x i8> undef, <2 x i8> undef, i64 0)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %fixed_mf4 = call <vscale x 128 x i8> @llvm.vector.insert.nxv128i8.v4i8(<vscale x 128 x i8> undef, <4 x i8> undef, i64 0)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %fixed_mf2 = call <vscale x 128 x i8> @llvm.vector.insert.nxv128i8.v8i8(<vscale x 128 x i8> undef, <8 x i8> undef, i64 0)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %fixed_m1 = call <vscale x 128 x i8> @llvm.vector.insert.nxv128i8.v16i8(<vscale x 128 x i8> undef, <16 x i8> undef, i64 0)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %fixed_m2 = call <vscale x 128 x i8> @llvm.vector.insert.nxv128i8.v32i8(<vscale x 128 x i8> undef, <32 x i8> undef, i64 0)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %fixed_m4 = call <vscale x 128 x i8> @llvm.vector.insert.nxv128i8.v64i8(<vscale x 128 x i8> undef, <64 x i8> undef, i64 0)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %fixed_m8 = call <vscale x 128 x i8> @llvm.vector.insert.nxv128i8.v128i8(<vscale x 128 x i8> undef, <128 x i8> undef, i64 0)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
+;
+; SIZE-LABEL: 'vector_insert_nxv128i8_undef_0'
+; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %scalable_mf8 = call <vscale x 128 x i8> @llvm.vector.insert.nxv128i8.nxv1i8(<vscale x 128 x i8> undef, <vscale x 1 x i8> undef, i64 0)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %scalable_mf4 = call <vscale x 128 x i8> @llvm.vector.insert.nxv128i8.nxv2i8(<vscale x 128 x i8> undef, <vscale x 2 x i8> undef, i64 0)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %scalable_mf2 = call <vscale x 128 x i8> @llvm.vector.insert.nxv128i8.nxv4i8(<vscale x 128 x i8> undef, <vscale x 4 x i8> undef, i64 0)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %scalable_m1 = call <vscale x 128 x i8> @llvm.vector.insert.nxv128i8.nxv8i8(<vscale x 128 x i8> undef, <vscale x 8 x i8> undef, i64 0)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %scalable_m2 = call <vscale x 128 x i8> @llvm.vector.insert.nxv128i8.nxv16i8(<vscale x 128 x i8> undef, <vscale x 16 x i8> undef, i64 0)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %scalable_m4 = call <vscale x 128 x i8> @llvm.vector.insert.nxv128i8.nxv32i8(<vscale x 128 x i8> undef, <vscale x 32 x i8> undef, i64 0)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %scalable_m8 = call <vscale x 128 x i8> @llvm.vector.insert.nxv128i8.nxv64i8(<vscale x 128 x i8> undef, <vscale x 64 x i8> undef, i64 0)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %fixed_mf8 = call <vscale x 128 x i8> @llvm.vector.insert.nxv128i8.v2i8(<vscale x 128 x i8> undef, <2 x i8> undef, i64 0)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %fixed_mf4 = call <vscale x 128 x i8> @llvm.vector.insert.nxv128i8.v4i8(<vscale x 128 x i8> undef, <4 x i8> undef, i64 0)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %fixed_mf2 = call <vscale x 128 x i8> @llvm.vector.insert.nxv128i8.v8i8(<vscale x 128 x i8> undef, <8 x i8> undef, i64 0)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %fixed_m1 = call <vscale x 128 x i8> @llvm.vector.insert.nxv128i8.v16i8(<vscale x 128 x i8> undef, <16 x i8> undef, i64 0)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %fixed_m2 = call <vscale x 128 x i8> @llvm.vector.insert.nxv128i8.v32i8(<vscale x 128 x i8> undef, <32 x i8> undef, i64 0)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %fixed_m4 = call <vscale x 128 x i8> @llvm.vector.insert.nxv128i8.v64i8(<vscale x 128 x i8> undef, <64 x i8> undef, i64 0)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %fixed_m8 = call <vscale x 128 x i8> @llvm.vector.insert.nxv128i8.v128i8(<vscale x 128 x i8> undef, <128 x i8> undef, i64 0)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret void
+;
+ %scalable_mf8 = call <vscale x 128 x i8> @llvm.vector.insert.nxv128i8.nxv1i8(<vscale x 128 x i8> undef, <vscale x 1 x i8> undef, i64 0)
+ %scalable_mf4 = call <vscale x 128 x i8> @llvm.vector.insert.nxv128i8.nxv2i8(<vscale x 128 x i8> undef, <vscale x 2 x i8> undef, i64 0)
+ %scalable_mf2 = call <vscale x 128 x i8> @llvm.vector.insert.nxv128i8.nxv4i8(<vscale x 128 x i8> undef, <vscale x 4 x i8> undef, i64 0)
+ %scalable_m1 = call <vscale x 128 x i8> @llvm.vector.insert.nxv128i8.nxv8i8(<vscale x 128 x i8> undef, <vscale x 8 x i8> undef, i64 0)
+ %scalable_m2 = call <vscale x 128 x i8> @llvm.vector.insert.nxv128i8.nxv16i8(<vscale x 128 x i8> undef, <vscale x 16 x i8> undef, i64 0)
+ %scalable_m4 = call <vscale x 128 x i8> @llvm.vector.insert.nxv128i8.nxv32i8(<vscale x 128 x i8> undef, <vscale x 32 x i8> undef, i64 0)
+ %scalable_m8 = call <vscale x 128 x i8> @llvm.vector.insert.nxv128i8.nxv64i8(<vscale x 128 x i8> undef, <vscale x 64 x i8> undef, i64 0)
+
+ %fixed_mf8 = call <vscale x 128 x i8> @llvm.vector.insert.nxv128i8.v2i8(<vscale x 128 x i8> undef, <2 x i8> undef, i64 0)
+ %fixed_mf4 = call <vscale x 128 x i8> @llvm.vector.insert.nxv128i8.v4i8(<vscale x 128 x i8> undef, <4 x i8> undef, i64 0)
+ %fixed_mf2 = call <vscale x 128 x i8> @llvm.vector.insert.nxv128i8.v8i8(<vscale x 128 x i8> undef, <8 x i8> undef, i64 0)
+ %fixed_m1 = call <vscale x 128 x i8> @llvm.vector.insert.nxv128i8.v16i8(<vscale x 128 x i8> undef, <16 x i8> undef, i64 0)
+ %fixed_m2 = call <vscale x 128 x i8> @llvm.vector.insert.nxv128i8.v32i8(<vscale x 128 x i8> undef, <32 x i8> undef, i64 0)
+ %fixed_m4 = call <vscale x 128 x i8> @llvm.vector.insert.nxv128i8.v64i8(<vscale x 128 x i8> undef, <64 x i8> undef, i64 0)
+ %fixed_m8 = call <vscale x 128 x i8> @llvm.vector.insert.nxv128i8.v128i8(<vscale x 128 x i8> undef, <128 x i8> undef, i64 0)
+ ret void
+}
+
+define void @vector_insert_nxv128i8_1(<vscale x 128 x i8> %v) {
+; CHECK-LABEL: 'vector_insert_nxv128i8_1'
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %scalable_mf8 = call <vscale x 128 x i8> @llvm.vector.insert.nxv128i8.nxv1i8(<vscale x 128 x i8> %v, <vscale x 1 x i8> undef, i64 1)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %scalable_mf4 = call <vscale x 128 x i8> @llvm.vector.insert.nxv128i8.nxv2i8(<vscale x 128 x i8> %v, <vscale x 2 x i8> undef, i64 2)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %scalable_mf2 = call <vscale x 128 x i8> @llvm.vector.insert.nxv128i8.nxv4i8(<vscale x 128 x i8> %v, <vscale x 4 x i8> undef, i64 4)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %scalable_m1 = call <vscale x 128 x i8> @llvm.vector.insert.nxv128i8.nxv8i8(<vscale x 128 x i8> %v, <vscale x 8 x i8> undef, i64 8)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %scalable_m2 = call <vscale x 128 x i8> @llvm.vector.insert.nxv128i8.nxv16i8(<vscale x 128 x i8> %v, <vscale x 16 x i8> undef, i64 16)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %scalable_m4 = call <vscale x 128 x i8> @llvm.vector.insert.nxv128i8.nxv32i8(<vscale x 128 x i8> %v, <vscale x 32 x i8> undef, i64 32)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %scalable_m8 = call <vscale x 128 x i8> @llvm.vector.insert.nxv128i8.nxv64i8(<vscale x 128 x i8> %v, <vscale x 64 x i8> undef, i64 64)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %fixed_mf8 = call <vscale x 128 x i8> @llvm.vector.insert.nxv128i8.v2i8(<vscale x 128 x i8> %v, <2 x i8> undef, i64 2)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %fixed_mf4 = call <vscale x 128 x i8> @llvm.vector.insert.nxv128i8.v4i8(<vscale x 128 x i8> %v, <4 x i8> undef, i64 4)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %fixed_mf2 = call <vscale x 128 x i8> @llvm.vector.insert.nxv128i8.v8i8(<vscale x 128 x i8> %v, <8 x i8> undef, i64 8)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %fixed_m1 = call <vscale x 128 x i8> @llvm.vector.insert.nxv128i8.v16i8(<vscale x 128 x i8> %v, <16 x i8> undef, i64 16)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %fixed_m2 = call <vscale x 128 x i8> @llvm.vector.insert.nxv128i8.v32i8(<vscale x 128 x i8> %v, <32 x i8> undef, i64 32)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %fixed_m4 = call <vscale x 128 x i8> @llvm.vector.insert.nxv128i8.v64i8(<vscale x 128 x i8> %v, <64 x i8> undef, i64 64)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %fixed_m8 = call <vscale x 128 x i8> @llvm.vector.insert.nxv128i8.v128i8(<vscale x 128 x i8> %v, <128 x i8> undef, i64 128)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
+;
+; SIZE-LABEL: 'vector_insert_nxv128i8_1'
+; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %scalable_mf8 = call <vscale x 128 x i8> @llvm.vector.insert.nxv128i8.nxv1i8(<vscale x 128 x i8> %v, <vscale x 1 x i8> undef, i64 1)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %scalable_mf4 = call <vscale x 128 x i8> @llvm.vector.insert.nxv128i8.nxv2i8(<vscale x 128 x i8> %v, <vscale x 2 x i8> undef, i64 2)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %scalable_mf2 = call <vscale x 128 x i8> @llvm.vector.insert.nxv128i8.nxv4i8(<vscale x 128 x i8> %v, <vscale x 4 x i8> undef, i64 4)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %scalable_m1 = call <vscale x 128 x i8> @llvm.vector.insert.nxv128i8.nxv8i8(<vscale x 128 x i8> %v, <vscale x 8 x i8> undef, i64 8)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %scalable_m2 = call <vscale x 128 x i8> @llvm.vector.insert.nxv128i8.nxv16i8(<vscale x 128 x i8> %v, <vscale x 16 x i8> undef, i64 16)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %scalable_m4 = call <vscale x 128 x i8> @llvm.vector.insert.nxv128i8.nxv32i8(<vscale x 128 x i8> %v, <vscale x 32 x i8> undef, i64 32)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %scalable_m8 = call <vscale x 128 x i8> @llvm.vector.insert.nxv128i8.nxv64i8(<vscale x 128 x i8> %v, <vscale x 64 x i8> undef, i64 64)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %fixed_mf8 = call <vscale x 128 x i8> @llvm.vector.insert.nxv128i8.v2i8(<vscale x 128 x i8> %v, <2 x i8> undef, i64 2)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %fixed_mf4 = call <vscale x 128 x i8> @llvm.vector.insert.nxv128i8.v4i8(<vscale x 128 x i8> %v, <4 x i8> undef, i64 4)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %fixed_mf2 = call <vscale x 128 x i8> @llvm.vector.insert.nxv128i8.v8i8(<vscale x 128 x i8> %v, <8 x i8> undef, i64 8)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %fixed_m1 = call <vscale x 128 x i8> @llvm.vector.insert.nxv128i8.v16i8(<vscale x 128 x i8> %v, <16 x i8> undef, i64 16)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %fixed_m2 = call <vscale x 128 x i8> @llvm.vector.insert.nxv128i8.v32i8(<vscale x 128 x i8> %v, <32 x i8> undef, i64 32)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %fixed_m4 = call <vscale x 128 x i8> @llvm.vector.insert.nxv128i8.v64i8(<vscale x 128 x i8> %v, <64 x i8> undef, i64 64)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %fixed_m8 = call <vscale x 128 x i8> @llvm.vector.insert.nxv128i8.v128i8(<vscale x 128 x i8> %v, <128 x i8> undef, i64 128)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret void
+;
+ %scalable_mf8 = call <vscale x 128 x i8> @llvm.vector.insert.nxv128i8.nxv1i8(<vscale x 128 x i8> %v, <vscale x 1 x i8> undef, i64 1)
+ %scalable_mf4 = call <vscale x 128 x i8> @llvm.vector.insert.nxv128i8.nxv2i8(<vscale x 128 x i8> %v, <vscale x 2 x i8> undef, i64 2)
+ %scalable_mf2 = call <vscale x 128 x i8> @llvm.vector.insert.nxv128i8.nxv4i8(<vscale x 128 x i8> %v, <vscale x 4 x i8> undef, i64 4)
+ %scalable_m1 = call <vscale x 128 x i8> @llvm.vector.insert.nxv128i8.nxv8i8(<vscale x 128 x i8> %v, <vscale x 8 x i8> undef, i64 8)
+ %scalable_m2 = call <vscale x 128 x i8> @llvm.vector.insert.nxv128i8.nxv16i8(<vscale x 128 x i8> %v, <vscale x 16 x i8> undef, i64 16)
+ %scalable_m4 = call <vscale x 128 x i8> @llvm.vector.insert.nxv128i8.nxv32i8(<vscale x 128 x i8> %v, <vscale x 32 x i8> undef, i64 32)
+ %scalable_m8 = call <vscale x 128 x i8> @llvm.vector.insert.nxv128i8.nxv64i8(<vscale x 128 x i8> %v, <vscale x 64 x i8> undef, i64 64)
+
+ %fixed_mf8 = call <vscale x 128 x i8> @llvm.vector.insert.nxv128i8.v2i8(<vscale x 128 x i8> %v, <2 x i8> undef, i64 2)
+ %fixed_mf4 = call <vscale x 128 x i8> @llvm.vector.insert.nxv128i8.v4i8(<vscale x 128 x i8> %v, <4 x i8> undef, i64 4)
+ %fixed_mf2 = call <vscale x 128 x i8> @llvm.vector.insert.nxv128i8.v8i8(<vscale x 128 x i8> %v, <8 x i8> undef, i64 8)
+ %fixed_m1 = call <vscale x 128 x i8> @llvm.vector.insert.nxv128i8.v16i8(<vscale x 128 x i8> %v, <16 x i8> undef, i64 16)
+ %fixed_m2 = call <vscale x 128 x i8> @llvm.vector.insert.nxv128i8.v32i8(<vscale x 128 x i8> %v, <32 x i8> undef, i64 32)
+ %fixed_m4 = call <vscale x 128 x i8> @llvm.vector.insert.nxv128i8.v64i8(<vscale x 128 x i8> %v, <64 x i8> undef, i64 64)
+ %fixed_m8 = call <vscale x 128 x i8> @llvm.vector.insert.nxv128i8.v128i8(<vscale x 128 x i8> %v, <128 x i8> undef, i64 128)
+ ret void
+}
+
+define void @vector_insert_nxv128i8_undef_1() {
+; CHECK-LABEL: 'vector_insert_nxv128i8_undef_1'
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %scalable_mf8 = call <vscale x 128 x i8> @llvm.vector.insert.nxv128i8.nxv1i8(<vscale x 128 x i8> undef, <vscale x 1 x i8> undef, i64 1)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %scalable_mf4 = call <vscale x 128 x i8> @llvm.vector.insert.nxv128i8.nxv2i8(<vscale x 128 x i8> undef, <vscale x 2 x i8> undef, i64 2)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %scalable_mf2 = call <vscale x 128 x i8> @llvm.vector.insert.nxv128i8.nxv4i8(<vscale x 128 x i8> undef, <vscale x 4 x i8> undef, i64 4)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %scalable_m1 = call <vscale x 128 x i8> @llvm.vector.insert.nxv128i8.nxv8i8(<vscale x 128 x i8> undef, <vscale x 8 x i8> undef, i64 8)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %scalable_m2 = call <vscale x 128 x i8> @llvm.vector.insert.nxv128i8.nxv16i8(<vscale x 128 x i8> undef, <vscale x 16 x i8> undef, i64 16)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %scalable_m4 = call <vscale x 128 x i8> @llvm.vector.insert.nxv128i8.nxv32i8(<vscale x 128 x i8> undef, <vscale x 32 x i8> undef, i64 32)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %scalable_m8 = call <vscale x 128 x i8> @llvm.vector.insert.nxv128i8.nxv64i8(<vscale x 128 x i8> undef, <vscale x 64 x i8> undef, i64 64)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %fixed_mf8 = call <vscale x 128 x i8> @llvm.vector.insert.nxv128i8.v2i8(<vscale x 128 x i8> undef, <2 x i8> undef, i64 2)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %fixed_mf4 = call <vscale x 128 x i8> @llvm.vector.insert.nxv128i8.v4i8(<vscale x 128 x i8> undef, <4 x i8> undef, i64 4)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %fixed_mf2 = call <vscale x 128 x i8> @llvm.vector.insert.nxv128i8.v8i8(<vscale x 128 x i8> undef, <8 x i8> undef, i64 8)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %fixed_m1 = call <vscale x 128 x i8> @llvm.vector.insert.nxv128i8.v16i8(<vscale x 128 x i8> undef, <16 x i8> undef, i64 16)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %fixed_m2 = call <vscale x 128 x i8> @llvm.vector.insert.nxv128i8.v32i8(<vscale x 128 x i8> undef, <32 x i8> undef, i64 32)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %fixed_m4 = call <vscale x 128 x i8> @llvm.vector.insert.nxv128i8.v64i8(<vscale x 128 x i8> undef, <64 x i8> undef, i64 64)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %fixed_m8 = call <vscale x 128 x i8> @llvm.vector.insert.nxv128i8.v128i8(<vscale x 128 x i8> undef, <128 x i8> undef, i64 128)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
+;
+; SIZE-LABEL: 'vector_insert_nxv128i8_undef_1'
+; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %scalable_mf8 = call <vscale x 128 x i8> @llvm.vector.insert.nxv128i8.nxv1i8(<vscale x 128 x i8> undef, <vscale x 1 x i8> undef, i64 1)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %scalable_mf4 = call <vscale x 128 x i8> @llvm.vector.insert.nxv128i8.nxv2i8(<vscale x 128 x i8> undef, <vscale x 2 x i8> undef, i64 2)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %scalable_mf2 = call <vscale x 128 x i8> @llvm.vector.insert.nxv128i8.nxv4i8(<vscale x 128 x i8> undef, <vscale x 4 x i8> undef, i64 4)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %scalable_m1 = call <vscale x 128 x i8> @llvm.vector.insert.nxv128i8.nxv8i8(<vscale x 128 x i8> undef, <vscale x 8 x i8> undef, i64 8)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %scalable_m2 = call <vscale x 128 x i8> @llvm.vector.insert.nxv128i8.nxv16i8(<vscale x 128 x i8> undef, <vscale x 16 x i8> undef, i64 16)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %scalable_m4 = call <vscale x 128 x i8> @llvm.vector.insert.nxv128i8.nxv32i8(<vscale x 128 x i8> undef, <vscale x 32 x i8> undef, i64 32)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %scalable_m8 = call <vscale x 128 x i8> @llvm.vector.insert.nxv128i8.nxv64i8(<vscale x 128 x i8> undef, <vscale x 64 x i8> undef, i64 64)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %fixed_mf8 = call <vscale x 128 x i8> @llvm.vector.insert.nxv128i8.v2i8(<vscale x 128 x i8> undef, <2 x i8> undef, i64 2)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %fixed_mf4 = call <vscale x 128 x i8> @llvm.vector.insert.nxv128i8.v4i8(<vscale x 128 x i8> undef, <4 x i8> undef, i64 4)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %fixed_mf2 = call <vscale x 128 x i8> @llvm.vector.insert.nxv128i8.v8i8(<vscale x 128 x i8> undef, <8 x i8> undef, i64 8)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %fixed_m1 = call <vscale x 128 x i8> @llvm.vector.insert.nxv128i8.v16i8(<vscale x 128 x i8> undef, <16 x i8> undef, i64 16)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %fixed_m2 = call <vscale x 128 x i8> @llvm.vector.insert.nxv128i8.v32i8(<vscale x 128 x i8> undef, <32 x i8> undef, i64 32)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %fixed_m4 = call <vscale x 128 x i8> @llvm.vector.insert.nxv128i8.v64i8(<vscale x 128 x i8> undef, <64 x i8> undef, i64 64)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %fixed_m8 = call <vscale x 128 x i8> @llvm.vector.insert.nxv128i8.v128i8(<vscale x 128 x i8> undef, <128 x i8> undef, i64 128)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret void
+;
+ %scalable_mf8 = call <vscale x 128 x i8> @llvm.vector.insert.nxv128i8.nxv1i8(<vscale x 128 x i8> undef, <vscale x 1 x i8> undef, i64 1)
+ %scalable_mf4 = call <vscale x 128 x i8> @llvm.vector.insert.nxv128i8.nxv2i8(<vscale x 128 x i8> undef, <vscale x 2 x i8> undef, i64 2)
+ %scalable_mf2 = call <vscale x 128 x i8> @llvm.vector.insert.nxv128i8.nxv4i8(<vscale x 128 x i8> undef, <vscale x 4 x i8> undef, i64 4)
+ %scalable_m1 = call <vscale x 128 x i8> @llvm.vector.insert.nxv128i8.nxv8i8(<vscale x 128 x i8> undef, <vscale x 8 x i8> undef, i64 8)
+ %scalable_m2 = call <vscale x 128 x i8> @llvm.vector.insert.nxv128i8.nxv16i8(<vscale x 128 x i8> undef, <vscale x 16 x i8> undef, i64 16)
+ %scalable_m4 = call <vscale x 128 x i8> @llvm.vector.insert.nxv128i8.nxv32i8(<vscale x 128 x i8> undef, <vscale x 32 x i8> undef, i64 32)
+ %scalable_m8 = call <vscale x 128 x i8> @llvm.vector.insert.nxv128i8.nxv64i8(<vscale x 128 x i8> undef, <vscale x 64 x i8> undef, i64 64)
+
+ %fixed_mf8 = call <vscale x 128 x i8> @llvm.vector.insert.nxv128i8.v2i8(<vscale x 128 x i8> undef, <2 x i8> undef, i64 2)
+ %fixed_mf4 = call <vscale x 128 x i8> @llvm.vector.insert.nxv128i8.v4i8(<vscale x 128 x i8> undef, <4 x i8> undef, i64 4)
+ %fixed_mf2 = call <vscale x 128 x i8> @llvm.vector.insert.nxv128i8.v8i8(<vscale x 128 x i8> undef, <8 x i8> undef, i64 8)
+ %fixed_m1 = call <vscale x 128 x i8> @llvm.vector.insert.nxv128i8.v16i8(<vscale x 128 x i8> undef, <16 x i8> undef, i64 16)
+ %fixed_m2 = call <vscale x 128 x i8> @llvm.vector.insert.nxv128i8.v32i8(<vscale x 128 x i8> undef, <32 x i8> undef, i64 32)
+ %fixed_m4 = call <vscale x 128 x i8> @llvm.vector.insert.nxv128i8.v64i8(<vscale x 128 x i8> undef, <64 x i8> undef, i64 64)
+ %fixed_m8 = call <vscale x 128 x i8> @llvm.vector.insert.nxv128i8.v128i8(<vscale x 128 x i8> undef, <128 x i8> undef, i64 128)
+ ret void
+}
+
+define void @vector_insert_v128i8_0(<128 x i8> %v) {
+; CHECK-LABEL: 'vector_insert_v128i8_0'
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %fixed_mf8 = call <128 x i8> @llvm.vector.insert.v128i8.v2i8(<128 x i8> %v, <2 x i8> undef, i64 0)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %fixed_mf4 = call <128 x i8> @llvm.vector.insert.v128i8.v4i8(<128 x i8> %v, <4 x i8> undef, i64 0)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %fixed_mf2 = call <128 x i8> @llvm.vector.insert.v128i8.v8i8(<128 x i8> %v, <8 x i8> undef, i64 0)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %fixed_m1 = call <128 x i8> @llvm.vector.insert.v128i8.v16i8(<128 x i8> %v, <16 x i8> undef, i64 0)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %fixed_m2 = call <128 x i8> @llvm.vector.insert.v128i8.v32i8(<128 x i8> %v, <32 x i8> undef, i64 0)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %fixed_m4 = call <128 x i8> @llvm.vector.insert.v128i8.v64i8(<128 x i8> %v, <64 x i8> undef, i64 0)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %fixed_m8 = call <128 x i8> @llvm.vector.insert.v128i8.v128i8(<128 x i8> %v, <128 x i8> undef, i64 0)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
+;
+; SIZE-LABEL: 'vector_insert_v128i8_0'
+; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %fixed_mf8 = call <128 x i8> @llvm.vector.insert.v128i8.v2i8(<128 x i8> %v, <2 x i8> undef, i64 0)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %fixed_mf4 = call <128 x i8> @llvm.vector.insert.v128i8.v4i8(<128 x i8> %v, <4 x i8> undef, i64 0)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %fixed_mf2 = call <128 x i8> @llvm.vector.insert.v128i8.v8i8(<128 x i8> %v, <8 x i8> undef, i64 0)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %fixed_m1 = call <128 x i8> @llvm.vector.insert.v128i8.v16i8(<128 x i8> %v, <16 x i8> undef, i64 0)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %fixed_m2 = call <128 x i8> @llvm.vector.insert.v128i8.v32i8(<128 x i8> %v, <32 x i8> undef, i64 0)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %fixed_m4 = call <128 x i8> @llvm.vector.insert.v128i8.v64i8(<128 x i8> %v, <64 x i8> undef, i64 0)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %fixed_m8 = call <128 x i8> @llvm.vector.insert.v128i8.v128i8(<128 x i8> %v, <128 x i8> undef, i64 0)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret void
+;
+ %fixed_mf8 = call <128 x i8> @llvm.vector.insert.v128i8.v2i8(<128 x i8> %v, <2 x i8> undef, i64 0)
+ %fixed_mf4 = call <128 x i8> @llvm.vector.insert.v128i8.v4i8(<128 x i8> %v, <4 x i8> undef, i64 0)
+ %fixed_mf2 = call <128 x i8> @llvm.vector.insert.v128i8.v8i8(<128 x i8> %v, <8 x i8> undef, i64 0)
+ %fixed_m1 = call <128 x i8> @llvm.vector.insert.v128i8.v16i8(<128 x i8> %v, <16 x i8> undef, i64 0)
+ %fixed_m2 = call <128 x i8> @llvm.vector.insert.v128i8.v32i8(<128 x i8> %v, <32 x i8> undef, i64 0)
+ %fixed_m4 = call <128 x i8> @llvm.vector.insert.v128i8.v64i8(<128 x i8> %v, <64 x i8> undef, i64 0)
+ %fixed_m8 = call <128 x i8> @llvm.vector.insert.v128i8.v128i8(<128 x i8> %v, <128 x i8> undef, i64 0)
+ ret void
+}
+
+define void @vector_insert_v128i8_undef_0() {
+; CHECK-LABEL: 'vector_insert_v128i8_undef_0'
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %fixed_mf8 = call <128 x i8> @llvm.vector.insert.v128i8.v2i8(<128 x i8> undef, <2 x i8> undef, i64 0)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %fixed_mf4 = call <128 x i8> @llvm.vector.insert.v128i8.v4i8(<128 x i8> undef, <4 x i8> undef, i64 0)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %fixed_mf2 = call <128 x i8> @llvm.vector.insert.v128i8.v8i8(<128 x i8> undef, <8 x i8> undef, i64 0)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %fixed_m1 = call <128 x i8> @llvm.vector.insert.v128i8.v16i8(<128 x i8> undef, <16 x i8> undef, i64 0)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %fixed_m2 = call <128 x i8> @llvm.vector.insert.v128i8.v32i8(<128 x i8> undef, <32 x i8> undef, i64 0)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %fixed_m4 = call <128 x i8> @llvm.vector.insert.v128i8.v64i8(<128 x i8> undef, <64 x i8> undef, i64 0)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %fixed_m8 = call <128 x i8> @llvm.vector.insert.v128i8.v128i8(<128 x i8> undef, <128 x i8> undef, i64 0)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
+;
+; SIZE-LABEL: 'vector_insert_v128i8_undef_0'
+; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %fixed_mf8 = call <128 x i8> @llvm.vector.insert.v128i8.v2i8(<128 x i8> undef, <2 x i8> undef, i64 0)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %fixed_mf4 = call <128 x i8> @llvm.vector.insert.v128i8.v4i8(<128 x i8> undef, <4 x i8> undef, i64 0)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %fixed_mf2 = call <128 x i8> @llvm.vector.insert.v128i8.v8i8(<128 x i8> undef, <8 x i8> undef, i64 0)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %fixed_m1 = call <128 x i8> @llvm.vector.insert.v128i8.v16i8(<128 x i8> undef, <16 x i8> undef, i64 0)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %fixed_m2 = call <128 x i8> @llvm.vector.insert.v128i8.v32i8(<128 x i8> undef, <32 x i8> undef, i64 0)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %fixed_m4 = call <128 x i8> @llvm.vector.insert.v128i8.v64i8(<128 x i8> undef, <64 x i8> undef, i64 0)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %fixed_m8 = call <128 x i8> @llvm.vector.insert.v128i8.v128i8(<128 x i8> undef, <128 x i8> undef, i64 0)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret void
+;
+ %fixed_mf8 = call <128 x i8> @llvm.vector.insert.v128i8.v2i8(<128 x i8> undef, <2 x i8> undef, i64 0)
+ %fixed_mf4 = call <128 x i8> @llvm.vector.insert.v128i8.v4i8(<128 x i8> undef, <4 x i8> undef, i64 0)
+ %fixed_mf2 = call <128 x i8> @llvm.vector.insert.v128i8.v8i8(<128 x i8> undef, <8 x i8> undef, i64 0)
+ %fixed_m1 = call <128 x i8> @llvm.vector.insert.v128i8.v16i8(<128 x i8> undef, <16 x i8> undef, i64 0)
+ %fixed_m2 = call <128 x i8> @llvm.vector.insert.v128i8.v32i8(<128 x i8> undef, <32 x i8> undef, i64 0)
+ %fixed_m4 = call <128 x i8> @llvm.vector.insert.v128i8.v64i8(<128 x i8> undef, <64 x i8> undef, i64 0)
+ %fixed_m8 = call <128 x i8> @llvm.vector.insert.v128i8.v128i8(<128 x i8> undef, <128 x i8> undef, i64 0)
+ ret void
+}
+
+define void @vector_insert_v128i8_1(<128 x i8> %v) {
+; CHECK-LABEL: 'vector_insert_v128i8_1'
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %fixed_mf8 = call <128 x i8> @llvm.vector.insert.v128i8.v2i8(<128 x i8> %v, <2 x i8> undef, i64 2)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %fixed_mf4 = call <128 x i8> @llvm.vector.insert.v128i8.v4i8(<128 x i8> %v, <4 x i8> undef, i64 4)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %fixed_mf2 = call <128 x i8> @llvm.vector.insert.v128i8.v8i8(<128 x i8> %v, <8 x i8> undef, i64 8)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %fixed_m1 = call <128 x i8> @llvm.vector.insert.v128i8.v16i8(<128 x i8> %v, <16 x i8> undef, i64 16)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %fixed_m2 = call <128 x i8> @llvm.vector.insert.v128i8.v32i8(<128 x i8> %v, <32 x i8> undef, i64 32)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %fixed_m4 = call <128 x i8> @llvm.vector.insert.v128i8.v64i8(<128 x i8> %v, <64 x i8> undef, i64 64)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
+;
+; SIZE-LABEL: 'vector_insert_v128i8_1'
+; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %fixed_mf8 = call <128 x i8> @llvm.vector.insert.v128i8.v2i8(<128 x i8> %v, <2 x i8> undef, i64 2)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %fixed_mf4 = call <128 x i8> @llvm.vector.insert.v128i8.v4i8(<128 x i8> %v, <4 x i8> undef, i64 4)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %fixed_mf2 = call <128 x i8> @llvm.vector.insert.v128i8.v8i8(<128 x i8> %v, <8 x i8> undef, i64 8)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %fixed_m1 = call <128 x i8> @llvm.vector.insert.v128i8.v16i8(<128 x i8> %v, <16 x i8> undef, i64 16)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %fixed_m2 = call <128 x i8> @llvm.vector.insert.v128i8.v32i8(<128 x i8> %v, <32 x i8> undef, i64 32)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %fixed_m4 = call <128 x i8> @llvm.vector.insert.v128i8.v64i8(<128 x i8> %v, <64 x i8> undef, i64 64)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret void
+;
+ %fixed_mf8 = call <128 x i8> @llvm.vector.insert.v128i8.v2i8(<128 x i8> %v, <2 x i8> undef, i64 2)
+ %fixed_mf4 = call <128 x i8> @llvm.vector.insert.v128i8.v4i8(<128 x i8> %v, <4 x i8> undef, i64 4)
+ %fixed_mf2 = call <128 x i8> @llvm.vector.insert.v128i8.v8i8(<128 x i8> %v, <8 x i8> undef, i64 8)
+ %fixed_m1 = call <128 x i8> @llvm.vector.insert.v128i8.v16i8(<128 x i8> %v, <16 x i8> undef, i64 16)
+ %fixed_m2 = call <128 x i8> @llvm.vector.insert.v128i8.v32i8(<128 x i8> %v, <32 x i8> undef, i64 32)
+ %fixed_m4 = call <128 x i8> @llvm.vector.insert.v128i8.v64i8(<128 x i8> %v, <64 x i8> undef, i64 64)
+ ; No @llvm.vector.insert.v128i8.v128i8(<128 x i8> %v, <128 x i8> undef, i64 128) since it would overrun
+ ret void
+}
+
+define void @vector_insert_v128i8_undef_1() {
+; CHECK-LABEL: 'vector_insert_v128i8_undef_1'
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %fixed_mf8 = call <128 x i8> @llvm.vector.insert.v128i8.v2i8(<128 x i8> undef, <2 x i8> undef, i64 2)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %fixed_mf4 = call <128 x i8> @llvm.vector.insert.v128i8.v4i8(<128 x i8> undef, <4 x i8> undef, i64 4)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %fixed_mf2 = call <128 x i8> @llvm.vector.insert.v128i8.v8i8(<128 x i8> undef, <8 x i8> undef, i64 8)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %fixed_m1 = call <128 x i8> @llvm.vector.insert.v128i8.v16i8(<128 x i8> undef, <16 x i8> undef, i64 16)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %fixed_m2 = call <128 x i8> @llvm.vector.insert.v128i8.v32i8(<128 x i8> undef, <32 x i8> undef, i64 32)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %fixed_m4 = call <128 x i8> @llvm.vector.insert.v128i8.v64i8(<128 x i8> undef, <64 x i8> undef, i64 64)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
+;
+; SIZE-LABEL: 'vector_insert_v128i8_undef_1'
+; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %fixed_mf8 = call <128 x i8> @llvm.vector.insert.v128i8.v2i8(<128 x i8> undef, <2 x i8> undef, i64 2)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %fixed_mf4 = call <128 x i8> @llvm.vector.insert.v128i8.v4i8(<128 x i8> undef, <4 x i8> undef, i64 4)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %fixed_mf2 = call <128 x i8> @llvm.vector.insert.v128i8.v8i8(<128 x i8> undef, <8 x i8> undef, i64 8)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %fixed_m1 = call <128 x i8> @llvm.vector.insert.v128i8.v16i8(<128 x i8> undef, <16 x i8> undef, i64 16)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %fixed_m2 = call <128 x i8> @llvm.vector.insert.v128i8.v32i8(<128 x i8> undef, <32 x i8> undef, i64 32)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %fixed_m4 = call <128 x i8> @llvm.vector.insert.v128i8.v64i8(<128 x i8> undef, <64 x i8> undef, i64 64)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret void
+;
+ %fixed_mf8 = call <128 x i8> @llvm.vector.insert.v128i8.v2i8(<128 x i8> undef, <2 x i8> undef, i64 2)
+ %fixed_mf4 = call <128 x i8> @llvm.vector.insert.v128i8.v4i8(<128 x i8> undef, <4 x i8> undef, i64 4)
+ %fixed_mf2 = call <128 x i8> @llvm.vector.insert.v128i8.v8i8(<128 x i8> undef, <8 x i8> undef, i64 8)
+ %fixed_m1 = call <128 x i8> @llvm.vector.insert.v128i8.v16i8(<128 x i8> undef, <16 x i8> undef, i64 16)
+ %fixed_m2 = call <128 x i8> @llvm.vector.insert.v128i8.v32i8(<128 x i8> undef, <32 x i8> undef, i64 32)
+ %fixed_m4 = call <128 x i8> @llvm.vector.insert.v128i8.v64i8(<128 x i8> undef, <64 x i8> undef, i64 64)
+ ; No @llvm.vector.insert.v128i8.v128i8(<128 x i8> undef, <128 x i8> undef, i64 128) since it would overrun
+ ret void
+}
diff --git a/llvm/test/Analysis/LoopAccessAnalysis/number-of-memchecks.ll b/llvm/test/Analysis/LoopAccessAnalysis/number-of-memchecks.ll
index c268cc5..d428761 100644
--- a/llvm/test/Analysis/LoopAccessAnalysis/number-of-memchecks.ll
+++ b/llvm/test/Analysis/LoopAccessAnalysis/number-of-memchecks.ll
@@ -1,7 +1,7 @@
; RUN: opt -passes='print<access-info>' -disable-output < %s 2>&1 | FileCheck %s
target datalayout = "e-m:e-i64:64-i128:128-n32:64-S128"
-target triple = "aarch64--linux-gnueabi"
+target triple = "aarch64"
; 3 reads and 3 writes should need 12 memchecks
; CHECK: function 'testf':
diff --git a/llvm/test/Analysis/LoopAccessAnalysis/reverse-memcheck-bounds.ll b/llvm/test/Analysis/LoopAccessAnalysis/reverse-memcheck-bounds.ll
index 86395ee..1496e1b 100644
--- a/llvm/test/Analysis/LoopAccessAnalysis/reverse-memcheck-bounds.ll
+++ b/llvm/test/Analysis/LoopAccessAnalysis/reverse-memcheck-bounds.ll
@@ -12,7 +12,7 @@
; }
target datalayout = "e-m:e-i64:64-i128:128-n32:64-S128"
-target triple = "aarch64--linux-gnueabi"
+target triple = "aarch64"
; CHECK: function 'f':
; CHECK: (Low: (20000 + %a)<nuw> High: (60004 + %a))
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-xtn.mir b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-xtn.mir
index 16b780a..6612651 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-xtn.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-xtn.mir
@@ -529,3 +529,27 @@ body: |
RET_ReallyLR implicit $q0
...
+
+---
+name: pr81244
+tracksRegLiveness: true
+body: |
+ bb.0:
+ liveins: $d0
+ ; CHECK-LABEL: name: pr81244
+ ; CHECK: liveins: $d0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<2 x s32>) = COPY $d0
+ ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(<2 x s8>) = G_TRUNC [[COPY]](<2 x s32>)
+ ; CHECK-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s8>) = G_CONCAT_VECTORS [[TRUNC]](<2 x s8>), [[TRUNC]](<2 x s8>)
+ ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(<4 x s16>) = G_ANYEXT [[CONCAT_VECTORS]](<4 x s8>)
+ ; CHECK-NEXT: $d0 = COPY [[ANYEXT]](<4 x s16>)
+ ; CHECK-NEXT: RET_ReallyLR implicit $d0
+ %0:_(<2 x s32>) = COPY $d0
+ %1:_(<2 x s8>) = G_TRUNC %0(<2 x s32>)
+ %2:_(<4 x s8>) = G_CONCAT_VECTORS %1(<2 x s8>), %1(<2 x s8>)
+ %3:_(<4 x s16>) = G_ANYEXT %2(<4 x s8>)
+ $d0 = COPY %3(<4 x s16>)
+ RET_ReallyLR implicit $d0
+
+...
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/legalizer-info-validation.mir b/llvm/test/CodeGen/AArch64/GlobalISel/legalizer-info-validation.mir
index c90c31a..aaf2fef 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/legalizer-info-validation.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/legalizer-info-validation.mir
@@ -163,6 +163,9 @@
# DEBUG-NEXT: G_READCYCLECOUNTER (opcode {{[0-9]+}}): 1 type index, 0 imm indices
# DEBUG-NEXT: .. type index coverage check SKIPPED: no rules defined
# DEBUG-NEXT: .. imm index coverage check SKIPPED: no rules defined
+# DEBUG-NEXT: G_READSTEADYCOUNTER (opcode {{[0-9]+}}): 1 type index, 0 imm indices
+# DEBUG-NEXT: .. type index coverage check SKIPPED: no rules defined
+# DEBUG-NEXT: .. imm index coverage check SKIPPED: no rules defined
# DEBUG-NEXT: G_LOAD (opcode {{[0-9]+}}): 2 type indices, 0 imm indices
# DEBUG-NEXT: .. type index coverage check SKIPPED: user-defined predicate detected
diff --git a/llvm/test/CodeGen/AArch64/Redundantstore.ll b/llvm/test/CodeGen/AArch64/Redundantstore.ll
index 6fec557..229d644 100644
--- a/llvm/test/CodeGen/AArch64/Redundantstore.ll
+++ b/llvm/test/CodeGen/AArch64/Redundantstore.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -O3 -mtriple=aarch64-eabi | FileCheck %s
+; RUN: llc < %s -O3 -mtriple=aarch64 | FileCheck %s
target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
@end_of_array = common global ptr null, align 8
diff --git a/llvm/test/CodeGen/AArch64/aarch64-a57-fp-load-balancing.ll b/llvm/test/CodeGen/AArch64/aarch64-a57-fp-load-balancing.ll
index 122e187..f2ed57e 100644
--- a/llvm/test/CodeGen/AArch64/aarch64-a57-fp-load-balancing.ll
+++ b/llvm/test/CodeGen/AArch64/aarch64-a57-fp-load-balancing.ll
@@ -6,8 +6,8 @@
; The following tests use the balance-fp-ops feature, and should be independent of
; the target cpu.
-; RUN: llc < %s -mtriple=aarch64-linux-gnueabi -mattr=+balance-fp-ops -aarch64-a57-fp-load-balancing-override=1 -aarch64-a57-fp-load-balancing-force-all -enable-misched=false -enable-post-misched=false | FileCheck %s --check-prefix CHECK --check-prefix CHECK-EVEN
-; RUN: llc < %s -mtriple=aarch64-linux-gnueabi -mattr=+balance-fp-ops -aarch64-a57-fp-load-balancing-override=2 -aarch64-a57-fp-load-balancing-force-all -enable-misched=false -enable-post-misched=false | FileCheck %s --check-prefix CHECK --check-prefix CHECK-ODD
+; RUN: llc < %s -mtriple=aarch64 -mattr=+balance-fp-ops -aarch64-a57-fp-load-balancing-override=1 -aarch64-a57-fp-load-balancing-force-all -enable-misched=false -enable-post-misched=false | FileCheck %s --check-prefix CHECK --check-prefix CHECK-EVEN
+; RUN: llc < %s -mtriple=aarch64 -mattr=+balance-fp-ops -aarch64-a57-fp-load-balancing-override=2 -aarch64-a57-fp-load-balancing-force-all -enable-misched=false -enable-post-misched=false | FileCheck %s --check-prefix CHECK --check-prefix CHECK-ODD
; Test the AArch64A57FPLoadBalancing pass. This pass relies heavily on register allocation, so
; our test strategy is to:
diff --git a/llvm/test/CodeGen/AArch64/aarch64-addv.ll b/llvm/test/CodeGen/AArch64/aarch64-addv.ll
index 5b78b0d..b77d591 100644
--- a/llvm/test/CodeGen/AArch64/aarch64-addv.ll
+++ b/llvm/test/CodeGen/AArch64/aarch64-addv.ll
@@ -1,6 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=aarch64-eabi -aarch64-neon-syntax=generic | FileCheck %s -check-prefixes=CHECK,SDAG
-; RUN: llc < %s -global-isel=1 -global-isel-abort=2 -mtriple=aarch64-eabi -aarch64-neon-syntax=generic 2>&1 | FileCheck %s --check-prefixes=CHECK,GISEL
+; RUN: llc < %s -mtriple=aarch64 -aarch64-neon-syntax=generic | FileCheck %s -check-prefixes=CHECK,SDAG
+; RUN: llc < %s -global-isel=1 -global-isel-abort=2 -mtriple=aarch64 -aarch64-neon-syntax=generic 2>&1 | FileCheck %s --check-prefixes=CHECK,GISEL
; Function Attrs: nounwind readnone
declare i8 @llvm.vector.reduce.add.v2i8(<2 x i8>)
diff --git a/llvm/test/CodeGen/AArch64/aarch64-gep-opt.ll b/llvm/test/CodeGen/AArch64/aarch64-gep-opt.ll
index c2a3acb..578038b 100644
--- a/llvm/test/CodeGen/AArch64/aarch64-gep-opt.ll
+++ b/llvm/test/CodeGen/AArch64/aarch64-gep-opt.ll
@@ -5,7 +5,7 @@
; RUN: llc -O3 -aarch64-enable-gep-opt=true -print-after=codegenprepare -mcpu=cortex-a53 < %s 2>&1 | FileCheck --check-prefix=CHECK-UseAA %s
target datalayout = "e-m:e-i64:64-i128:128-n32:64-S128"
-target triple = "aarch64-linux-gnueabi"
+target triple = "aarch64"
; Following test cases test enabling SeparateConstOffsetFromGEP pass in AArch64
; backend. If useAA() returns true, it will lower a GEP with multiple indices
diff --git a/llvm/test/CodeGen/AArch64/aarch64-tbz.ll b/llvm/test/CodeGen/AArch64/aarch64-tbz.ll
index 28629a8..4cf3633 100644
--- a/llvm/test/CodeGen/AArch64/aarch64-tbz.ll
+++ b/llvm/test/CodeGen/AArch64/aarch64-tbz.ll
@@ -1,5 +1,5 @@
-; RUN: llc -verify-machineinstrs -mtriple=aarch64-linux-gnueabi < %s | FileCheck %s
-; RUN: llc -verify-machineinstrs -mtriple=aarch64-linux-gnueabi -cgp-verify-bfi-updates=true < %s | FileCheck %s
+; RUN: llc -verify-machineinstrs -mtriple=aarch64 < %s | FileCheck %s
+; RUN: llc -verify-machineinstrs -mtriple=aarch64 -cgp-verify-bfi-updates=true < %s | FileCheck %s
; CHECK-LABEL: test1
; CHECK: tbz {{w[0-9]}}, #3, {{.LBB0_3}}
diff --git a/llvm/test/CodeGen/AArch64/aarch64-unroll-and-jam.ll b/llvm/test/CodeGen/AArch64/aarch64-unroll-and-jam.ll
index af5f6a9..285c16a 100644
--- a/llvm/test/CodeGen/AArch64/aarch64-unroll-and-jam.ll
+++ b/llvm/test/CodeGen/AArch64/aarch64-unroll-and-jam.ll
@@ -1,5 +1,5 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt -passes='loop-unroll-and-jam' < %s -mcpu=cortex-a55 -mtriple=aarch64-none-linux-eabi -S | FileCheck %s
+; RUN: opt -passes='loop-unroll-and-jam' < %s -mcpu=cortex-a55 -mtriple=aarch64 -S | FileCheck %s
target datalayout = "e-m:e-p:32:32-i64:64-v128:64:128-a:0:32-n32-S64"
diff --git a/llvm/test/CodeGen/AArch64/aarch64-vcvtfp2fxs-combine.ll b/llvm/test/CodeGen/AArch64/aarch64-vcvtfp2fxs-combine.ll
index 463084e..50f7d6d 100644
--- a/llvm/test/CodeGen/AArch64/aarch64-vcvtfp2fxs-combine.ll
+++ b/llvm/test/CodeGen/AArch64/aarch64-vcvtfp2fxs-combine.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -mtriple=aarch64-linux-eabi -o - | FileCheck %s
+; RUN: llc < %s -mtriple=aarch64 -o - | FileCheck %s
%struct.a= type { i64, i64, i64, i64 }
diff --git a/llvm/test/CodeGen/AArch64/arm64-build-vector.ll b/llvm/test/CodeGen/AArch64/arm64-build-vector.ll
index 68c56d7..82802c7 100644
--- a/llvm/test/CodeGen/AArch64/arm64-build-vector.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-build-vector.ll
@@ -1,5 +1,5 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=aarch64-eabi -mattr=+fullfp16,+bf16 | FileCheck %s
+; RUN: llc < %s -mtriple=aarch64 -mattr=+fullfp16,+bf16 | FileCheck %s
; Check that building a vector from floats doesn't insert an unnecessary
; copy for lane zero.
@@ -120,8 +120,8 @@ define <2 x double> @poszero_v2f64(<2 x double> %a) {
define <2 x double> @negzero_v2f64(<2 x double> %a) {
; CHECK-LABEL: negzero_v2f64:
; CHECK: // %bb.0:
-; CHECK-NEXT: mov x8, #-9223372036854775808 // =0x8000000000000000
-; CHECK-NEXT: dup v1.2d, x8
+; CHECK-NEXT: movi v1.2d, #0000000000000000
+; CHECK-NEXT: fneg v1.2d, v1.2d
; CHECK-NEXT: fmul v0.2d, v0.2d, v1.2d
; CHECK-NEXT: ret
%b = fmul <2 x double> %a, <double -0.0, double -0.0>
diff --git a/llvm/test/CodeGen/AArch64/arm64-movi.ll b/llvm/test/CodeGen/AArch64/arm64-movi.ll
index 8ec98b7..c9074c2 100644
--- a/llvm/test/CodeGen/AArch64/arm64-movi.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-movi.ll
@@ -1,5 +1,5 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=aarch64-eabi | FileCheck %s
+; RUN: llc < %s -mtriple=aarch64 | FileCheck %s
;==--------------------------------------------------------------------------==
; Tests for MOV-immediate implemented with ORR-immediate.
diff --git a/llvm/test/CodeGen/AArch64/arm64-popcnt.ll b/llvm/test/CodeGen/AArch64/arm64-popcnt.ll
index 599fac8..f5ce73a3 100644
--- a/llvm/test/CodeGen/AArch64/arm64-popcnt.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-popcnt.ll
@@ -1,7 +1,7 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=arm64-eabi -aarch64-neon-syntax=apple | FileCheck %s
-; RUN: llc < %s -mtriple=aarch64-eabi -mattr -neon -aarch64-neon-syntax=apple | FileCheck -check-prefix=CHECK-NONEON %s
-; RUN: llc < %s -mtriple=aarch64-eabi -mattr +cssc -aarch64-neon-syntax=apple | FileCheck -check-prefix=CHECK-CSSC %s
+; RUN: llc < %s -mtriple=aarch64 -mattr -neon -aarch64-neon-syntax=apple | FileCheck -check-prefix=CHECK-NONEON %s
+; RUN: llc < %s -mtriple=aarch64 -mattr +cssc -aarch64-neon-syntax=apple | FileCheck -check-prefix=CHECK-CSSC %s
define i32 @cnt32_advsimd(i32 %x) nounwind readnone {
; CHECK-LABEL: cnt32_advsimd:
diff --git a/llvm/test/CodeGen/AArch64/arm64-rev.ll b/llvm/test/CodeGen/AArch64/arm64-rev.ll
index 5f61d90..f548a0e 100644
--- a/llvm/test/CodeGen/AArch64/arm64-rev.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-rev.ll
@@ -1,6 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=aarch64-eabi -aarch64-neon-syntax=apple | FileCheck %s --check-prefixes=CHECK,CHECK-SD
-; RUN: llc < %s -mtriple=aarch64-eabi -aarch64-neon-syntax=apple -global-isel | FileCheck %s --check-prefixes=CHECK,CHECK-GI
+; RUN: llc < %s -mtriple=aarch64 -aarch64-neon-syntax=apple | FileCheck %s --check-prefixes=CHECK,CHECK-SD
+; RUN: llc < %s -mtriple=aarch64 -aarch64-neon-syntax=apple -global-isel | FileCheck %s --check-prefixes=CHECK,CHECK-GI
define i32 @test_rev_w(i32 %a) nounwind {
; CHECK-LABEL: test_rev_w:
diff --git a/llvm/test/CodeGen/AArch64/asm-large-immediate.ll b/llvm/test/CodeGen/AArch64/asm-large-immediate.ll
index 8369071..b45f22c 100644
--- a/llvm/test/CodeGen/AArch64/asm-large-immediate.ll
+++ b/llvm/test/CodeGen/AArch64/asm-large-immediate.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -mtriple=aarch64-eabi -no-integrated-as | FileCheck %s
+; RUN: llc < %s -mtriple=aarch64 -no-integrated-as | FileCheck %s
define void @test() {
entry:
diff --git a/llvm/test/CodeGen/AArch64/basic-block-sections-cold.ll b/llvm/test/CodeGen/AArch64/basic-block-sections-cold.ll
new file mode 100644
index 0000000..6641ef6
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/basic-block-sections-cold.ll
@@ -0,0 +1,51 @@
+;; Check if basic blocks that don't get unique sections are placed in cold sections.
+;; Basic block with id 1 and 2 must be in the cold section.
+;;
+;; Profile for version 0
+; RUN: echo '!_Z3bazb' > %t1
+; RUN: echo '!!0' >> %t1
+;;
+;; Profile for version 1
+; RUN: echo 'v1' > %t2
+; RUN: echo 'f _Z3bazb' >> %t2
+; RUN: echo 'c 0' >> %t2
+;;
+; RUN: llc < %s -mtriple=aarch64 -function-sections -basic-block-sections=%t1 -unique-basic-block-section-names | FileCheck %s -check-prefix=SECTIONS
+; RUN: llc < %s -mtriple=aarch64 -function-sections -basic-block-sections=%t2 -unique-basic-block-section-names | FileCheck %s -check-prefix=SECTIONS
+; RUN: llc < %s -mtriple=aarch64 -function-sections -basic-block-sections=%t1 -unique-basic-block-section-names -bbsections-cold-text-prefix=".text.unlikely." | FileCheck %s -check-prefix=SPLIT
+
+define void @_Z3bazb(i1 zeroext %0) nounwind {
+ br i1 %0, label %2, label %4
+
+2: ; preds = %1
+ %3 = call i32 @_Z3barv()
+ br label %6
+
+4: ; preds = %1
+ %5 = call i32 @_Z3foov()
+ br label %6
+
+6: ; preds = %2, %4
+ ret void
+}
+
+declare i32 @_Z3barv() #1
+
+declare i32 @_Z3foov() #1
+
+; SECTIONS: .section .text.hot._Z3bazb,"ax",@progbits
+; SECTIONS: _Z3bazb:
+; Check that the basic block with id 1 doesn't get a section.
+; SECTIONS-NOT: .section .text{{.*}}._Z3bazb.1,"ax",@progbits,unique
+; Check that a single cold section is started here and id 1 and 2 blocks are placed here.
+; SECTIONS: .section .text.split._Z3bazb,"ax",@progbits
+; SECTIONS: _Z3bazb.cold:
+; SECTIONS-NOT: .section .text.hot._Z3bazb._Z3bazb.2,"ax",@progbits,unique
+; SECTIONS: .LBB0_2:
+; SECTIONS: .size _Z3bazb, .Lfunc_end{{[0-9]}}-_Z3bazb
+
+; SPLIT: .section .text.unlikely._Z3bazb,"ax",@progbits
+; SPLIT-NEXT: _Z3bazb.cold:
+; SPLIT-NEXT: bl _Z3barv
+; SPLIT: .LBB0_2:
+; SPLIT: .LBB_END0_2:
diff --git a/llvm/test/CodeGen/AArch64/basic-block-sections-unsafe.ll b/llvm/test/CodeGen/AArch64/basic-block-sections-unsafe.ll
new file mode 100644
index 0000000..a83a47c
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/basic-block-sections-unsafe.ll
@@ -0,0 +1,121 @@
+;; Check if basic blocks without unique sections are only placed in cold sections if it is safe
+;; to do so.
+;;
+;; Profile for version 0.
+; RUN: echo 'v1' > %t1
+; RUN: echo 'f _Z3asm_goto' >> %t1
+; RUN: echo 'c 0' >> %t1
+; RUN: echo 'f _Z3jump_table' >> %t1
+; RUN: echo 'c 0' >> %t1
+; RUN: echo 'f _Z3red_zone' >> %t1
+; RUN: echo 'c 0' >> %t1
+;;
+; RUN: llc < %s -mtriple=aarch64 -function-sections -basic-block-sections=%t1 -unique-basic-block-section-names -bbsections-cold-text-prefix=".text.unlikely." | FileCheck %s
+; RUN: llc < %s -mtriple=aarch64 -function-sections -aarch64-min-jump-table-entries=4 -basic-block-sections=%t1 -unique-basic-block-section-names -bbsections-cold-text-prefix=".text.unlikely." | FileCheck %s -check-prefix=JUMP-TABLES
+; RUN: llc < %s -mtriple=aarch64 -function-sections -basic-block-sections=%t1 -unique-basic-block-section-names -bbsections-cold-text-prefix=".text.unlikely." | FileCheck %s -check-prefix=RED-ZONE
+
+define void @_Z3asm_goto(i1 zeroext %0, i1 zeroext %1) nounwind {
+ ;; Check that blocks containing or targeted by asm goto aren't split.
+ ; CHECK-LABEL: _Z3asm_goto
+ ; CHECK: .section .text.unlikely._Z3asm_goto,"ax",@progbits
+ ; CHECK-NEXT: _Z3asm_goto.cold:
+ ; CHECK-NEXT: bl bam
+ ; CHECK: .LBB0_4:
+ ; CHECK: ret
+ ; CHECK: .LBB_END0_4:
+
+ br i1 %0, label %3, label %5
+
+3: ; preds = %2
+ %4 = call i32 @bar()
+ callbr void asm sideeffect "nop", "!i"() #3
+ to label %asm.fallthrough [label %5]
+
+
+asm.fallthrough: ; preds = %3
+ br label %5
+
+5: ; preds = %2, %asm.fallthrough
+ %6 = call i32 @bar()
+ br i1 %1, label %7, label %9
+
+7:
+ %8 = call i32 @bam()
+ br label %9
+
+9: ; preds = %7
+ ret void
+}
+
+define i32 @_Z3jump_table(i32 %in) nounwind {
+ ;; Check that a cold block that contains a jump table dispatch or
+ ;; that is targeted by a jump table is not split.
+ ; JUMP-TABLES-LABEL: _Z3jump_table
+ ; JUMP-TABLES: .section .text.unlikely._Z3jump_table,"ax",@progbits
+ ; JUMP-TABLES-NEXT: _Z3jump_table.cold:
+ ; JUMP-TABLES-SAME: %common.ret
+ ; JUMP-TABLES-NOT: b bar
+ ; JUMP-TABLES-NOT: b baz
+ ; JUMP-TABLES-NOT: b qux
+ ; JUMP-TABLES-NOT: b bam
+
+ switch i32 %in, label %common.ret [
+ i32 0, label %cold1
+ i32 1, label %cold2
+ i32 2, label %cold3
+ i32 3, label %cold4
+ ]
+
+ common.ret: ; preds = %0
+ ret i32 0
+
+ cold1: ; preds = %0
+ %1 = tail call i32 @bar()
+ ret i32 %1
+
+ cold2: ; preds = %0
+ %2 = tail call i32 @baz()
+ ret i32 %2
+
+ cold3: ; preds = %0
+ %3 = tail call i32 @bam()
+ ret i32 %3
+
+ cold4: ; preds = %0
+ %4 = tail call i32 @qux()
+ ret i32 %4
+}
+
+define i32 @_Z3red_zone(i1 zeroext %0, i32 %a, i32 %b) nounwind {
+;; Check that cold blocks in functions with red zones aren't split.
+; RED-ZONE-LABEL: _Z3red_zone
+; MFS-REDZONE-AARCH64-NOT: _Z3red_zone.cold:
+ %a.addr = alloca i32, align 4
+ %b.addr = alloca i32, align 4
+ %x = alloca i32, align 4
+
+ br i1 %0, label %2, label %3
+
+2: ; preds = %1
+ store i32 %a, ptr %a.addr, align 4
+ store i32 %b, ptr %b.addr, align 4
+ br label %4
+
+3: ; preds = %1
+ store i32 %a, ptr %b.addr, align 4
+ store i32 %b, ptr %a.addr, align 4
+ br label %4
+
+4: ; preds = %3, %2
+ %tmp = load i32, ptr %a.addr, align 4
+ %tmp1 = load i32, ptr %b.addr, align 4
+ %add = add nsw i32 %tmp, %tmp1
+ store i32 %add, ptr %x, align 4
+ %tmp2 = load i32, ptr %x, align 4
+ ret i32 %tmp2
+}
+
+declare i32 @bar()
+declare i32 @baz()
+declare i32 @bam()
+declare i32 @qux()
diff --git a/llvm/test/CodeGen/AArch64/bf16-shuffle.ll b/llvm/test/CodeGen/AArch64/bf16-shuffle.ll
index cf0b438..d59de3c 100644
--- a/llvm/test/CodeGen/AArch64/bf16-shuffle.ll
+++ b/llvm/test/CodeGen/AArch64/bf16-shuffle.ll
@@ -1,7 +1,7 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=aarch64-eabi -mattr=+v8.6a,+neon < %s | FileCheck %s
-; RUN: llc -mtriple=aarch64-eabi -mattr=+v8.6a,+neon,+bf16 < %s | FileCheck %s
-; RUN: llc -mtriple=aarch64-eabi -mattr=+v8.6a,+neon,+fullfp16,+bf16 < %s | FileCheck %s
+; RUN: llc -mtriple=aarch64 -mattr=+v8.6a,+neon < %s | FileCheck %s
+; RUN: llc -mtriple=aarch64 -mattr=+v8.6a,+neon,+bf16 < %s | FileCheck %s
+; RUN: llc -mtriple=aarch64 -mattr=+v8.6a,+neon,+fullfp16,+bf16 < %s | FileCheck %s
%struct.float16x4x2_t = type { [2 x <4 x bfloat>] }
%struct.float16x8x2_t = type { [2 x <8 x bfloat>] }
diff --git a/llvm/test/CodeGen/AArch64/bf16.ll b/llvm/test/CodeGen/AArch64/bf16.ll
index 7a171c6..d3911ae4 100644
--- a/llvm/test/CodeGen/AArch64/bf16.ll
+++ b/llvm/test/CodeGen/AArch64/bf16.ll
@@ -1,5 +1,5 @@
; RUN: llc < %s -asm-verbose=0 -mtriple=arm64-eabi -mattr=+bf16 | FileCheck %s
-; RUN: llc < %s -asm-verbose=0 -mtriple=aarch64-eabi -mattr=+bf16 | FileCheck %s
+; RUN: llc < %s -asm-verbose=0 -mtriple=aarch64 -mattr=+bf16 | FileCheck %s
; test argument passing and simple load/store
diff --git a/llvm/test/CodeGen/AArch64/bitreverse.ll b/llvm/test/CodeGen/AArch64/bitreverse.ll
index be9f5b8..a6d3683 100644
--- a/llvm/test/CodeGen/AArch64/bitreverse.ll
+++ b/llvm/test/CodeGen/AArch64/bitreverse.ll
@@ -1,5 +1,5 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=aarch64-eabi %s -o - | FileCheck %s
+; RUN: llc -mtriple=aarch64 %s -o - | FileCheck %s
; These tests just check that the plumbing is in place for @llvm.bitreverse.
diff --git a/llvm/test/CodeGen/AArch64/cmpwithshort.ll b/llvm/test/CodeGen/AArch64/cmpwithshort.ll
index 8dbfdae..3dbf64a 100644
--- a/llvm/test/CodeGen/AArch64/cmpwithshort.ll
+++ b/llvm/test/CodeGen/AArch64/cmpwithshort.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -O3 -mtriple=aarch64-eabi | FileCheck %s
+; RUN: llc < %s -O3 -mtriple=aarch64 | FileCheck %s
define i16 @test_1cmp_signed_1(ptr %ptr1) {
; CHECK-LABEL: @test_1cmp_signed_1
diff --git a/llvm/test/CodeGen/AArch64/complex-deinterleaving-i16-add-scalable.ll b/llvm/test/CodeGen/AArch64/complex-deinterleaving-i16-add-scalable.ll
index 6f4f8d3..001046f 100644
--- a/llvm/test/CodeGen/AArch64/complex-deinterleaving-i16-add-scalable.ll
+++ b/llvm/test/CodeGen/AArch64/complex-deinterleaving-i16-add-scalable.ll
@@ -1,7 +1,7 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s --mattr=+sve2 -o - | FileCheck %s
-target triple = "aarch64-arm-none-eabi"
+target triple = "aarch64"
; Expected to not transform as the type's minimum size is less than 128 bits.
define <vscale x 4 x i16> @complex_add_v4i16(<vscale x 4 x i16> %a, <vscale x 4 x i16> %b) {
diff --git a/llvm/test/CodeGen/AArch64/complex-deinterleaving-i16-mul-scalable.ll b/llvm/test/CodeGen/AArch64/complex-deinterleaving-i16-mul-scalable.ll
index b0a3e46..1b8a21b 100644
--- a/llvm/test/CodeGen/AArch64/complex-deinterleaving-i16-mul-scalable.ll
+++ b/llvm/test/CodeGen/AArch64/complex-deinterleaving-i16-mul-scalable.ll
@@ -1,7 +1,7 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s --mattr=+sve2 -o - | FileCheck %s
-target triple = "aarch64-arm-none-eabi"
+target triple = "aarch64"
; Expected to not transform as the type's minimum size is less than 128 bits.
define <vscale x 4 x i16> @complex_mul_v4i16(<vscale x 4 x i16> %a, <vscale x 4 x i16> %b) {
diff --git a/llvm/test/CodeGen/AArch64/complex-deinterleaving-i32-add-scalable.ll b/llvm/test/CodeGen/AArch64/complex-deinterleaving-i32-add-scalable.ll
index 3118d86..1ce480b 100644
--- a/llvm/test/CodeGen/AArch64/complex-deinterleaving-i32-add-scalable.ll
+++ b/llvm/test/CodeGen/AArch64/complex-deinterleaving-i32-add-scalable.ll
@@ -1,7 +1,7 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s --mattr=+sve2 -o - | FileCheck %s
-target triple = "aarch64-arm-none-eabi"
+target triple = "aarch64"
; Expected to transform
define <vscale x 4 x i32> @complex_add_v4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
diff --git a/llvm/test/CodeGen/AArch64/complex-deinterleaving-i32-mul-scalable.ll b/llvm/test/CodeGen/AArch64/complex-deinterleaving-i32-mul-scalable.ll
index 256ed10..d88eef9 100644
--- a/llvm/test/CodeGen/AArch64/complex-deinterleaving-i32-mul-scalable.ll
+++ b/llvm/test/CodeGen/AArch64/complex-deinterleaving-i32-mul-scalable.ll
@@ -1,7 +1,7 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s --mattr=+sve2 -o - | FileCheck %s
-target triple = "aarch64-arm-none-eabi"
+target triple = "aarch64"
; Expected to transform
define <vscale x 4 x i32> @complex_mul_v4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
diff --git a/llvm/test/CodeGen/AArch64/complex-deinterleaving-i64-add-scalable.ll b/llvm/test/CodeGen/AArch64/complex-deinterleaving-i64-add-scalable.ll
index d9ec5fc..0b59be9 100644
--- a/llvm/test/CodeGen/AArch64/complex-deinterleaving-i64-add-scalable.ll
+++ b/llvm/test/CodeGen/AArch64/complex-deinterleaving-i64-add-scalable.ll
@@ -1,7 +1,7 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s --mattr=+sve2 -o - | FileCheck %s
-target triple = "aarch64-arm-none-eabi"
+target triple = "aarch64"
; Expected to transform
define <vscale x 2 x i64> @complex_add_v2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
diff --git a/llvm/test/CodeGen/AArch64/complex-deinterleaving-i64-mul-scalable.ll b/llvm/test/CodeGen/AArch64/complex-deinterleaving-i64-mul-scalable.ll
index 2dec03b..16e1f3e 100644
--- a/llvm/test/CodeGen/AArch64/complex-deinterleaving-i64-mul-scalable.ll
+++ b/llvm/test/CodeGen/AArch64/complex-deinterleaving-i64-mul-scalable.ll
@@ -1,7 +1,7 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s --mattr=+sve2 -o - | FileCheck %s
-target triple = "aarch64-arm-none-eabi"
+target triple = "aarch64"
; Expected to transform
define <vscale x 2 x i64> @complex_mul_v2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
diff --git a/llvm/test/CodeGen/AArch64/complex-deinterleaving-i8-add-scalable.ll b/llvm/test/CodeGen/AArch64/complex-deinterleaving-i8-add-scalable.ll
index e7ebd07..b631486 100644
--- a/llvm/test/CodeGen/AArch64/complex-deinterleaving-i8-add-scalable.ll
+++ b/llvm/test/CodeGen/AArch64/complex-deinterleaving-i8-add-scalable.ll
@@ -1,7 +1,7 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s --mattr=+sve2 -o - | FileCheck %s
-target triple = "aarch64-arm-none-eabi"
+target triple = "aarch64"
; Expected to not transform as the type's minimum size is less than 128 bits.
define <vscale x 8 x i8> @complex_add_v8i8(<vscale x 8 x i8> %a, <vscale x 8 x i8> %b) {
diff --git a/llvm/test/CodeGen/AArch64/complex-deinterleaving-splat-scalable.ll b/llvm/test/CodeGen/AArch64/complex-deinterleaving-splat-scalable.ll
index 0cbe2f4..2627f2a 100644
--- a/llvm/test/CodeGen/AArch64/complex-deinterleaving-splat-scalable.ll
+++ b/llvm/test/CodeGen/AArch64/complex-deinterleaving-splat-scalable.ll
@@ -1,7 +1,7 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s --mattr=+sve -o - | FileCheck %s
-target triple = "aarch64-arm-none-eabi"
+target triple = "aarch64"
; a[i] * b[i] * (11.0 + 3.0.i);
;
diff --git a/llvm/test/CodeGen/AArch64/complex-deinterleaving-splat.ll b/llvm/test/CodeGen/AArch64/complex-deinterleaving-splat.ll
index 8de2ac5..ad9240b 100644
--- a/llvm/test/CodeGen/AArch64/complex-deinterleaving-splat.ll
+++ b/llvm/test/CodeGen/AArch64/complex-deinterleaving-splat.ll
@@ -1,7 +1,7 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s --mattr=+complxnum -o - | FileCheck %s
-target triple = "aarch64-arm-none-eabi"
+target triple = "aarch64"
; a[i] * b[i] * (11.0 + 3.0.i);
diff --git a/llvm/test/CodeGen/AArch64/cond-br-tuning.ll b/llvm/test/CodeGen/AArch64/cond-br-tuning.ll
index dc00c41..87a315a 100644
--- a/llvm/test/CodeGen/AArch64/cond-br-tuning.ll
+++ b/llvm/test/CodeGen/AArch64/cond-br-tuning.ll
@@ -1,8 +1,8 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -debugify-and-strip-all-safe < %s -O3 -mtriple=aarch64-eabi -verify-machineinstrs | FileCheck %s
+; RUN: llc -debugify-and-strip-all-safe < %s -O3 -mtriple=aarch64 -verify-machineinstrs | FileCheck %s
target datalayout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128"
-target triple = "aarch64-linaro-linux-gnueabi"
+target triple = "aarch64"
; CMN is an alias of ADDS.
diff --git a/llvm/test/CodeGen/AArch64/consthoist-gep.ll b/llvm/test/CodeGen/AArch64/consthoist-gep.ll
index d109dad..031ee35 100644
--- a/llvm/test/CodeGen/AArch64/consthoist-gep.ll
+++ b/llvm/test/CodeGen/AArch64/consthoist-gep.ll
@@ -1,5 +1,5 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=aarch64-none-unknown-linuxeabi -consthoist-gep %s -o - | FileCheck %s
+; RUN: llc -mtriple=aarch64 -consthoist-gep %s -o - | FileCheck %s
%struct.blam = type { %struct.bar, %struct.bar.0, %struct.wobble, %struct.wombat, i8, i16, %struct.snork.2, %struct.foo, %struct.snork.3, %struct.wobble.4, %struct.quux, [9 x i16], %struct.spam, %struct.zot }
%struct.bar = type { i8, i8, %struct.snork }
diff --git a/llvm/test/CodeGen/AArch64/dbg-assign-tag-offset-mix-loc.ll b/llvm/test/CodeGen/AArch64/dbg-assign-tag-offset-mix-loc.ll
new file mode 100644
index 0000000..d9d0d98
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/dbg-assign-tag-offset-mix-loc.ll
@@ -0,0 +1,72 @@
+; RUN: llc -filetype=obj -o - %s | llvm-dwarfdump - | FileCheck %s
+; RUN: llc --try-experimental-debuginfo-iterators -filetype=obj -o - %s | llvm-dwarfdump - | FileCheck %s
+
+;; Similar to dbg-assign-tag-offset.ll except the variable 'x' has been removed
+;; and 'y' has an implicit location range as well as stack location range
+;; (according to the hand-modified debug info -- see the dbg.value).
+
+target datalayout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128"
+target triple = "aarch64-unknown-linux-android24"
+
+; CHECK: DW_TAG_variable
+; CHECK-NOT: DW_TAG
+; CHECK: DW_AT_LLVM_tag_offset (0x80)
+; CHECK-NEXT: DW_AT_name ("y")
+
+define dso_local void @f() !dbg !14 {
+ %1 = alloca i32, align 4, !DIAssignID !31
+ %2 = alloca i32, align 4, !DIAssignID !32
+ call void @llvm.dbg.assign(metadata i1 undef, metadata !20, metadata !DIExpression(), metadata !32, metadata ptr %2, metadata !DIExpression(DW_OP_LLVM_tag_offset, 128)), !dbg !22
+ call void @llvm.dbg.value(metadata i32 2, metadata !20, metadata !DIExpression()), !dbg !22
+ call void @use(ptr null), !dbg !28
+ store i32 1, ptr %2, align 4, !dbg !23, !tbaa !24, !DIAssignID !33
+ call void @llvm.dbg.assign(metadata i32 1, metadata !20, metadata !DIExpression(), metadata !33, metadata ptr %2, metadata !DIExpression(DW_OP_LLVM_tag_offset, 128)), !dbg !22
+ call void @use(ptr nonnull %1), !dbg !28
+ call void @use(ptr nonnull %2), !dbg !29
+ ret void, !dbg !30
+}
+
+declare !dbg !5 void @use(ptr)
+
+declare void @llvm.dbg.value(metadata, metadata, metadata)
+declare void @llvm.dbg.assign(metadata, metadata, metadata, metadata, metadata, metadata)
+
+!llvm.dbg.cu = !{!0}
+!llvm.module.flags = !{!8, !9, !10, !11, !12, !34}
+!llvm.ident = !{!13}
+
+!0 = distinct !DICompileUnit(language: DW_LANG_C_plus_plus_14, file: !1, producer: "clang version 10.0.0", isOptimized: true, runtimeVersion: 0, emissionKind: FullDebug, enums: !2, retainedTypes: !3, nameTableKind: None)
+!1 = !DIFile(filename: "dbg.cc", directory: "/tmp")
+!2 = !{}
+!3 = !{!4, !5}
+!4 = !DIDerivedType(tag: DW_TAG_pointer_type, baseType: null, size: 64)
+!5 = !DISubprogram(name: "use", scope: !1, file: !1, line: 2, type: !6, flags: DIFlagPrototyped, spFlags: DISPFlagOptimized, retainedNodes: !2)
+!6 = !DISubroutineType(types: !7)
+!7 = !{null, !4}
+!8 = !{i32 7, !"Dwarf Version", i32 4}
+!9 = !{i32 2, !"Debug Info Version", i32 3}
+!10 = !{i32 1, !"wchar_size", i32 4}
+!11 = !{i32 7, !"PIC Level", i32 2}
+!12 = !{i32 7, !"PIE Level", i32 2}
+!13 = !{!"clang version 10.0.0"}
+!14 = distinct !DISubprogram(name: "f", scope: !1, file: !1, line: 4, type: !15, scopeLine: 4, flags: DIFlagPrototyped | DIFlagAllCallsDescribed, spFlags: DISPFlagDefinition | DISPFlagOptimized, unit: !0, retainedNodes: !17)
+!15 = !DISubroutineType(types: !16)
+!16 = !{null}
+!17 = !{!18, !20}
+!18 = !DILocalVariable(name: "x", scope: !14, file: !1, line: 5, type: !19)
+!19 = !DIBasicType(name: "int", size: 32, encoding: DW_ATE_signed)
+!20 = !DILocalVariable(name: "y", scope: !14, file: !1, line: 5, type: !19)
+!21 = !DILocation(line: 5, column: 3, scope: !14)
+!22 = !DILocation(line: 0, scope: !14)
+!23 = !DILocation(line: 5, column: 10, scope: !14)
+!24 = !{!25, !25, i64 0}
+!25 = !{!"int", !26, i64 0}
+!26 = !{!"omnipotent char", !27, i64 0}
+!27 = !{!"Simple C++ TBAA"}
+!28 = !DILocation(line: 6, column: 3, scope: !14)
+!29 = !DILocation(line: 7, column: 3, scope: !14)
+!30 = !DILocation(line: 8, column: 1, scope: !14)
+!31 = distinct !DIAssignID()
+!32 = distinct !DIAssignID()
+!33 = distinct !DIAssignID()
+!34 = !{i32 7, !"debug-info-assignment-tracking", i1 true}
diff --git a/llvm/test/CodeGen/AArch64/dbg-assign-tag-offset.ll b/llvm/test/CodeGen/AArch64/dbg-assign-tag-offset.ll
new file mode 100644
index 0000000..155e610
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/dbg-assign-tag-offset.ll
@@ -0,0 +1,76 @@
+; RUN: llc -filetype=obj -o - %s | llvm-dwarfdump - | FileCheck %s
+; RUN: llc --try-experimental-debuginfo-iterators -filetype=obj -o - %s | llvm-dwarfdump - | FileCheck %s
+
+;; Copied from dbg-value-tag-offset.ll. Check that variables with locations
+;; tracked with dbg.assigns with DW_OP_LLVM_TAG_offset operators in their
+;; expressions get a DW_AT_LLVM_tag_offset attribute on their DIE.
+
+target datalayout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128"
+target triple = "aarch64-unknown-linux-android24"
+
+; CHECK: DW_TAG_variable
+; CHECK-NOT: DW_TAG
+; CHECK: DW_AT_LLVM_tag_offset (0x00)
+; CHECK-NEXT: DW_AT_name ("x")
+
+; CHECK: DW_TAG_variable
+; CHECK-NOT: DW_TAG
+; CHECK: DW_AT_LLVM_tag_offset (0x80)
+; CHECK-NEXT: DW_AT_name ("y")
+
+define dso_local void @f() !dbg !14 {
+ %1 = alloca i32, align 4, !DIAssignID !31
+ call void @llvm.dbg.assign(metadata i1 undef, metadata !18, metadata !DIExpression(), metadata !31, metadata ptr %1, metadata !DIExpression(DW_OP_LLVM_tag_offset, 0)), !dbg !22
+ %2 = alloca i32, align 4, !DIAssignID !32
+ call void @llvm.dbg.assign(metadata i1 undef, metadata !20, metadata !DIExpression(), metadata !32, metadata ptr %2, metadata !DIExpression(DW_OP_LLVM_tag_offset, 128)), !dbg !22
+ store i32 1, ptr %2, align 4, !dbg !23, !tbaa !24, !DIAssignID !33
+ call void @llvm.dbg.assign(metadata i32 1, metadata !20, metadata !DIExpression(), metadata !33, metadata ptr %2, metadata !DIExpression(DW_OP_LLVM_tag_offset, 128)), !dbg !22
+ call void @use(ptr nonnull %1), !dbg !28
+ call void @use(ptr nonnull %2), !dbg !29
+ ret void, !dbg !30
+}
+
+declare !dbg !5 void @use(ptr)
+
+declare void @llvm.dbg.value(metadata, metadata, metadata)
+declare void @llvm.dbg.assign(metadata, metadata, metadata, metadata, metadata, metadata)
+
+!llvm.dbg.cu = !{!0}
+!llvm.module.flags = !{!8, !9, !10, !11, !12, !34}
+!llvm.ident = !{!13}
+
+!0 = distinct !DICompileUnit(language: DW_LANG_C_plus_plus_14, file: !1, producer: "clang version 10.0.0", isOptimized: true, runtimeVersion: 0, emissionKind: FullDebug, enums: !2, retainedTypes: !3, nameTableKind: None)
+!1 = !DIFile(filename: "dbg.cc", directory: "/tmp")
+!2 = !{}
+!3 = !{!4, !5}
+!4 = !DIDerivedType(tag: DW_TAG_pointer_type, baseType: null, size: 64)
+!5 = !DISubprogram(name: "use", scope: !1, file: !1, line: 2, type: !6, flags: DIFlagPrototyped, spFlags: DISPFlagOptimized, retainedNodes: !2)
+!6 = !DISubroutineType(types: !7)
+!7 = !{null, !4}
+!8 = !{i32 7, !"Dwarf Version", i32 4}
+!9 = !{i32 2, !"Debug Info Version", i32 3}
+!10 = !{i32 1, !"wchar_size", i32 4}
+!11 = !{i32 7, !"PIC Level", i32 2}
+!12 = !{i32 7, !"PIE Level", i32 2}
+!13 = !{!"clang version 10.0.0"}
+!14 = distinct !DISubprogram(name: "f", scope: !1, file: !1, line: 4, type: !15, scopeLine: 4, flags: DIFlagPrototyped | DIFlagAllCallsDescribed, spFlags: DISPFlagDefinition | DISPFlagOptimized, unit: !0, retainedNodes: !17)
+!15 = !DISubroutineType(types: !16)
+!16 = !{null}
+!17 = !{!18, !20}
+!18 = !DILocalVariable(name: "x", scope: !14, file: !1, line: 5, type: !19)
+!19 = !DIBasicType(name: "int", size: 32, encoding: DW_ATE_signed)
+!20 = !DILocalVariable(name: "y", scope: !14, file: !1, line: 5, type: !19)
+!21 = !DILocation(line: 5, column: 3, scope: !14)
+!22 = !DILocation(line: 0, scope: !14)
+!23 = !DILocation(line: 5, column: 10, scope: !14)
+!24 = !{!25, !25, i64 0}
+!25 = !{!"int", !26, i64 0}
+!26 = !{!"omnipotent char", !27, i64 0}
+!27 = !{!"Simple C++ TBAA"}
+!28 = !DILocation(line: 6, column: 3, scope: !14)
+!29 = !DILocation(line: 7, column: 3, scope: !14)
+!30 = !DILocation(line: 8, column: 1, scope: !14)
+!31 = distinct !DIAssignID()
+!32 = distinct !DIAssignID()
+!33 = distinct !DIAssignID()
+!34 = !{i32 7, !"debug-info-assignment-tracking", i1 true}
diff --git a/llvm/test/CodeGen/AArch64/extbinopload.ll b/llvm/test/CodeGen/AArch64/extbinopload.ll
index 849fc7a..1f68c77 100644
--- a/llvm/test/CodeGen/AArch64/extbinopload.ll
+++ b/llvm/test/CodeGen/AArch64/extbinopload.ll
@@ -1,5 +1,5 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2
-; RUN: llc -mtriple=aarch64-none-eabi -verify-machineinstrs %s -o - | FileCheck %s
+; RUN: llc -mtriple=aarch64 -verify-machineinstrs %s -o - | FileCheck %s
define <4 x i16> @normal_load_v4i8(ptr %p) {
; CHECK-LABEL: normal_load_v4i8:
diff --git a/llvm/test/CodeGen/AArch64/extract-sext-zext.ll b/llvm/test/CodeGen/AArch64/extract-sext-zext.ll
index f566ebb..ecb76d9 100644
--- a/llvm/test/CodeGen/AArch64/extract-sext-zext.ll
+++ b/llvm/test/CodeGen/AArch64/extract-sext-zext.ll
@@ -1,6 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=aarch64-eabi %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-ISEL
-; RUN: llc -mtriple=aarch64-eabi -global-isel %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-GLOBAL
+; RUN: llc -mtriple=aarch64 %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-ISEL
+; RUN: llc -mtriple=aarch64 -global-isel %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-GLOBAL
define i64 @extract_v2i64(<2 x i64> %x, i32 %y) {
; CHECK-ISEL-LABEL: extract_v2i64:
diff --git a/llvm/test/CodeGen/AArch64/fabs-combine.ll b/llvm/test/CodeGen/AArch64/fabs-combine.ll
index 23bf7a69..d083f20 100644
--- a/llvm/test/CodeGen/AArch64/fabs-combine.ll
+++ b/llvm/test/CodeGen/AArch64/fabs-combine.ll
@@ -71,8 +71,8 @@ define <4 x float> @nabsv4f32(<4 x float> %a) {
define <2 x double> @nabsv2d64(<2 x double> %a) {
; CHECK-LABEL: nabsv2d64:
; CHECK: // %bb.0:
-; CHECK-NEXT: mov x8, #-9223372036854775808
-; CHECK-NEXT: dup v1.2d, x8
+; CHECK-NEXT: movi v1.2d, #0000000000000000
+; CHECK-NEXT: fneg v1.2d, v1.2d
; CHECK-NEXT: orr v0.16b, v0.16b, v1.16b
; CHECK-NEXT: ret
%conv = bitcast <2 x double> %a to <2 x i64>
diff --git a/llvm/test/CodeGen/AArch64/fabs.ll b/llvm/test/CodeGen/AArch64/fabs.ll
index c56c6a0..5462bc6 100644
--- a/llvm/test/CodeGen/AArch64/fabs.ll
+++ b/llvm/test/CodeGen/AArch64/fabs.ll
@@ -1,8 +1,8 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2
-; RUN: llc -mtriple=aarch64-none-eabi -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-SD,CHECK-SD-NOFP16
-; RUN: llc -mtriple=aarch64-none-eabi -mattr=+fullfp16 -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-SD,CHECK-SD-FP16
-; RUN: llc -mtriple=aarch64-none-eabi -global-isel -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-GI,CHECK-GI-NOFP16
-; RUN: llc -mtriple=aarch64-none-eabi -mattr=+fullfp16 -global-isel -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-GI,CHECK-GI-FP16
+; RUN: llc -mtriple=aarch64 -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-SD,CHECK-SD-NOFP16
+; RUN: llc -mtriple=aarch64 -mattr=+fullfp16 -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-SD,CHECK-SD-FP16
+; RUN: llc -mtriple=aarch64 -global-isel -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-GI,CHECK-GI-NOFP16
+; RUN: llc -mtriple=aarch64 -mattr=+fullfp16 -global-isel -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-GI,CHECK-GI-FP16
define double @fabs_f64(double %a) {
; CHECK-LABEL: fabs_f64:
diff --git a/llvm/test/CodeGen/AArch64/faddsub.ll b/llvm/test/CodeGen/AArch64/faddsub.ll
index c02e02d..31389f5 100644
--- a/llvm/test/CodeGen/AArch64/faddsub.ll
+++ b/llvm/test/CodeGen/AArch64/faddsub.ll
@@ -1,8 +1,8 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2
-; RUN: llc -mtriple=aarch64-none-eabi -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-SD,CHECK-SD-NOFP16
-; RUN: llc -mtriple=aarch64-none-eabi -mattr=+fullfp16 -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-SD,CHECK-SD-FP16
-; RUN: llc -mtriple=aarch64-none-eabi -global-isel -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-GI,CHECK-GI-NOFP16
-; RUN: llc -mtriple=aarch64-none-eabi -mattr=+fullfp16 -global-isel -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-GI,CHECK-GI-FP16
+; RUN: llc -mtriple=aarch64 -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-SD,CHECK-SD-NOFP16
+; RUN: llc -mtriple=aarch64 -mattr=+fullfp16 -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-SD,CHECK-SD-FP16
+; RUN: llc -mtriple=aarch64 -global-isel -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-GI,CHECK-GI-NOFP16
+; RUN: llc -mtriple=aarch64 -mattr=+fullfp16 -global-isel -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-GI,CHECK-GI-FP16
define double @fadd_f64(double %a, double %b) {
; CHECK-LABEL: fadd_f64:
diff --git a/llvm/test/CodeGen/AArch64/fcmp.ll b/llvm/test/CodeGen/AArch64/fcmp.ll
index 29138ba..0f02784 100644
--- a/llvm/test/CodeGen/AArch64/fcmp.ll
+++ b/llvm/test/CodeGen/AArch64/fcmp.ll
@@ -1,8 +1,8 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
-; RUN: llc -mtriple=aarch64-none-eabi -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-SD,CHECK-SD-NOFP16
-; RUN: llc -mtriple=aarch64-none-eabi -mattr=+fullfp16 -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-SD,CHECK-SD-FP16
-; RUN: llc -mtriple=aarch64-none-eabi -global-isel -global-isel-abort=2 -verify-machineinstrs %s -o - 2>&1 | FileCheck %s --check-prefixes=CHECK,CHECK-GI,CHECK-GI-NOFP16
-; RUN: llc -mtriple=aarch64-none-eabi -mattr=+fullfp16 -global-isel -global-isel-abort=2 -verify-machineinstrs %s -o - 2>&1 | FileCheck %s --check-prefixes=CHECK,CHECK-GI,CHECK-GI-FP16
+; RUN: llc -mtriple=aarch64 -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-SD,CHECK-SD-NOFP16
+; RUN: llc -mtriple=aarch64 -mattr=+fullfp16 -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-SD,CHECK-SD-FP16
+; RUN: llc -mtriple=aarch64 -global-isel -global-isel-abort=2 -verify-machineinstrs %s -o - 2>&1 | FileCheck %s --check-prefixes=CHECK,CHECK-GI,CHECK-GI-NOFP16
+; RUN: llc -mtriple=aarch64 -mattr=+fullfp16 -global-isel -global-isel-abort=2 -verify-machineinstrs %s -o - 2>&1 | FileCheck %s --check-prefixes=CHECK,CHECK-GI,CHECK-GI-FP16
define double @f64_double(double %a, double %b, double %d, double %e) {
; CHECK-LABEL: f64_double:
diff --git a/llvm/test/CodeGen/AArch64/fcopysign.ll b/llvm/test/CodeGen/AArch64/fcopysign.ll
index 4abd115..a1c48bd 100644
--- a/llvm/test/CodeGen/AArch64/fcopysign.ll
+++ b/llvm/test/CodeGen/AArch64/fcopysign.ll
@@ -1,6 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 3
-; RUN: llc -mtriple=aarch64-none-eabi -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-SD
-; RUN: llc -mtriple=aarch64-none-eabi -global-isel -global-isel-abort=2 -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-GI
+; RUN: llc -mtriple=aarch64 -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-SD
+; RUN: llc -mtriple=aarch64 -global-isel -global-isel-abort=2 -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-GI
define double @copysign_f64(double %a, double %b) {
; CHECK-SD-LABEL: copysign_f64:
diff --git a/llvm/test/CodeGen/AArch64/fcvt.ll b/llvm/test/CodeGen/AArch64/fcvt.ll
index ce38bebf..584174d 100644
--- a/llvm/test/CodeGen/AArch64/fcvt.ll
+++ b/llvm/test/CodeGen/AArch64/fcvt.ll
@@ -1,8 +1,8 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2
-; RUN: llc -mtriple=aarch64-none-eabi -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-SD,CHECK-SD-NOFP16
-; RUN: llc -mtriple=aarch64-none-eabi -mattr=+fullfp16 -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-SD,CHECK-SD-FP16
-; RUN: llc -mtriple=aarch64-none-eabi -global-isel -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-GI,CHECK-GI-NOFP16
-; RUN: llc -mtriple=aarch64-none-eabi -mattr=+fullfp16 -global-isel -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-GI,CHECK-GI-FP16
+; RUN: llc -mtriple=aarch64 -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-SD,CHECK-SD-NOFP16
+; RUN: llc -mtriple=aarch64 -mattr=+fullfp16 -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-SD,CHECK-SD-FP16
+; RUN: llc -mtriple=aarch64 -global-isel -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-GI,CHECK-GI-NOFP16
+; RUN: llc -mtriple=aarch64 -mattr=+fullfp16 -global-isel -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-GI,CHECK-GI-FP16
define double @ceil_f64(double %a) {
; CHECK-LABEL: ceil_f64:
diff --git a/llvm/test/CodeGen/AArch64/fcvt_combine.ll b/llvm/test/CodeGen/AArch64/fcvt_combine.ll
index b5b9055f..37133cf 100644
--- a/llvm/test/CodeGen/AArch64/fcvt_combine.ll
+++ b/llvm/test/CodeGen/AArch64/fcvt_combine.ll
@@ -110,8 +110,8 @@ define <2 x i32> @test9(<2 x float> %f) {
define <2 x i32> @test10(<2 x float> %f) {
; CHECK-LABEL: test10:
; CHECK: // %bb.0:
-; CHECK-NEXT: mov w8, #2143289344 // =0x7fc00000
-; CHECK-NEXT: dup v0.2s, w8
+; CHECK-NEXT: mvni v0.2s, #63, msl #16
+; CHECK-NEXT: fneg v0.2s, v0.2s
; CHECK-NEXT: fcvtzu v0.2s, v0.2s
; CHECK-NEXT: ret
%mul.i = fmul <2 x float> %f, <float undef, float undef>
@@ -426,8 +426,8 @@ define <2 x i32> @test9_sat(<2 x float> %f) {
define <2 x i32> @test10_sat(<2 x float> %f) {
; CHECK-LABEL: test10_sat:
; CHECK: // %bb.0:
-; CHECK-NEXT: mov w8, #2143289344 // =0x7fc00000
-; CHECK-NEXT: dup v0.2s, w8
+; CHECK-NEXT: mvni v0.2s, #63, msl #16
+; CHECK-NEXT: fneg v0.2s, v0.2s
; CHECK-NEXT: fcvtzu v0.2s, v0.2s
; CHECK-NEXT: ret
%mul.i = fmul <2 x float> %f, <float undef, float undef>
diff --git a/llvm/test/CodeGen/AArch64/fdiv.ll b/llvm/test/CodeGen/AArch64/fdiv.ll
index b7a645b..fa87c4f 100644
--- a/llvm/test/CodeGen/AArch64/fdiv.ll
+++ b/llvm/test/CodeGen/AArch64/fdiv.ll
@@ -1,8 +1,8 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2
-; RUN: llc -mtriple=aarch64-none-eabi -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-SD,CHECK-SD-NOFP16
-; RUN: llc -mtriple=aarch64-none-eabi -mattr=+fullfp16 -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-SD,CHECK-SD-FP16
-; RUN: llc -mtriple=aarch64-none-eabi -global-isel -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-GI,CHECK-GI-NOFP16
-; RUN: llc -mtriple=aarch64-none-eabi -mattr=+fullfp16 -global-isel -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-GI,CHECK-GI-FP16
+; RUN: llc -mtriple=aarch64 -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-SD,CHECK-SD-NOFP16
+; RUN: llc -mtriple=aarch64 -mattr=+fullfp16 -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-SD,CHECK-SD-FP16
+; RUN: llc -mtriple=aarch64 -global-isel -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-GI,CHECK-GI-NOFP16
+; RUN: llc -mtriple=aarch64 -mattr=+fullfp16 -global-isel -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-GI,CHECK-GI-FP16
define double @fdiv_f64(double %a, double %b) {
; CHECK-LABEL: fdiv_f64:
diff --git a/llvm/test/CodeGen/AArch64/fence-singlethread.ll b/llvm/test/CodeGen/AArch64/fence-singlethread.ll
index f36d289..eb77daa 100644
--- a/llvm/test/CodeGen/AArch64/fence-singlethread.ll
+++ b/llvm/test/CodeGen/AArch64/fence-singlethread.ll
@@ -1,7 +1,7 @@
; RUN: llc -mtriple=aarch64-linux-gnu %s -o - | FileCheck %s --check-prefix=LINUX
; RUN: llc -mtriple=aarch64-apple-ios %s -o - | FileCheck %s --check-prefix=IOS
; RUN: llc -mtriple=aarch64-apple-ios %s -o - -global-isel | FileCheck %s --check-prefix=IOS
-; RUN: llc -mtriple=aarch64-linux-gnueabihf %s -filetype=obj -o %t
+; RUN: llc -mtriple=aarch64 %s -filetype=obj -o %t
; RUN: llvm-objdump -d %t | FileCheck %s --check-prefix=OBJ
; OBJ-NOT: dmb
diff --git a/llvm/test/CodeGen/AArch64/fexplog.ll b/llvm/test/CodeGen/AArch64/fexplog.ll
index 2848a6b..e3c0ced 100644
--- a/llvm/test/CodeGen/AArch64/fexplog.ll
+++ b/llvm/test/CodeGen/AArch64/fexplog.ll
@@ -1,6 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 3
-; RUN: llc -mtriple=aarch64-none-eabi -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-SD
-; RUN: llc -mtriple=aarch64-none-eabi -global-isel -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-GI
+; RUN: llc -mtriple=aarch64 -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-SD
+; RUN: llc -mtriple=aarch64 -global-isel -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-GI
define double @exp_f64(double %a) {
; CHECK-LABEL: exp_f64:
diff --git a/llvm/test/CodeGen/AArch64/fminimummaximum.ll b/llvm/test/CodeGen/AArch64/fminimummaximum.ll
index 217e4e4..f0e946c 100644
--- a/llvm/test/CodeGen/AArch64/fminimummaximum.ll
+++ b/llvm/test/CodeGen/AArch64/fminimummaximum.ll
@@ -1,8 +1,8 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=aarch64-none-eabi -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-SD,CHECK-NOFP16-SD
-; RUN: llc -mtriple=aarch64-none-eabi -mattr=+fullfp16 -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-SD,CHECK-FP16-SD
-; RUN: llc -mtriple=aarch64-none-eabi -global-isel -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-GI,CHECK-NOFP16-GI
-; RUN: llc -mtriple=aarch64-none-eabi -mattr=+fullfp16 -global-isel -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-GI,CHECK-FP16-GI
+; RUN: llc -mtriple=aarch64 -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-SD,CHECK-NOFP16-SD
+; RUN: llc -mtriple=aarch64 -mattr=+fullfp16 -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-SD,CHECK-FP16-SD
+; RUN: llc -mtriple=aarch64 -global-isel -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-GI,CHECK-NOFP16-GI
+; RUN: llc -mtriple=aarch64 -mattr=+fullfp16 -global-isel -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-GI,CHECK-FP16-GI
define double @min_f64(double %a, double %b) {
; CHECK-LABEL: min_f64:
diff --git a/llvm/test/CodeGen/AArch64/fminmax.ll b/llvm/test/CodeGen/AArch64/fminmax.ll
index 1b92c46..cdf9973 100644
--- a/llvm/test/CodeGen/AArch64/fminmax.ll
+++ b/llvm/test/CodeGen/AArch64/fminmax.ll
@@ -1,8 +1,8 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 3
-; RUN: llc -mtriple=aarch64-none-eabi -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-SD,CHECK-NOFP16-SD
-; RUN: llc -mtriple=aarch64-none-eabi -mattr=+fullfp16 -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-SD,CHECK-FP16-SD
-; RUN: llc -mtriple=aarch64-none-eabi -global-isel -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-GI,CHECK-NOFP16-GI
-; RUN: llc -mtriple=aarch64-none-eabi -mattr=+fullfp16 -global-isel -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-GI,CHECK-FP16-GI
+; RUN: llc -mtriple=aarch64 -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-SD,CHECK-NOFP16-SD
+; RUN: llc -mtriple=aarch64 -mattr=+fullfp16 -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-SD,CHECK-FP16-SD
+; RUN: llc -mtriple=aarch64 -global-isel -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-GI,CHECK-NOFP16-GI
+; RUN: llc -mtriple=aarch64 -mattr=+fullfp16 -global-isel -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-GI,CHECK-FP16-GI
define double @min_f64(double %a, double %b) {
; CHECK-LABEL: min_f64:
diff --git a/llvm/test/CodeGen/AArch64/fmla.ll b/llvm/test/CodeGen/AArch64/fmla.ll
index 3ae2158..339ade5 100644
--- a/llvm/test/CodeGen/AArch64/fmla.ll
+++ b/llvm/test/CodeGen/AArch64/fmla.ll
@@ -1,8 +1,8 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2
-; RUN: llc -mtriple=aarch64-none-eabi -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-SD,CHECK-SD-NOFP16
-; RUN: llc -mtriple=aarch64-none-eabi -mattr=+fullfp16 -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-SD,CHECK-SD-FP16
-; RUN: llc -mtriple=aarch64-none-eabi -global-isel -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-GI,CHECK-GI-NOFP16
-; RUN: llc -mtriple=aarch64-none-eabi -mattr=+fullfp16 -global-isel -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-GI,CHECK-GI-FP16
+; RUN: llc -mtriple=aarch64 -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-SD,CHECK-SD-NOFP16
+; RUN: llc -mtriple=aarch64 -mattr=+fullfp16 -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-SD,CHECK-SD-FP16
+; RUN: llc -mtriple=aarch64 -global-isel -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-GI,CHECK-GI-NOFP16
+; RUN: llc -mtriple=aarch64 -mattr=+fullfp16 -global-isel -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-GI,CHECK-GI-FP16
define double @fma_f64(double %a, double %b, double %c) {
; CHECK-LABEL: fma_f64:
diff --git a/llvm/test/CodeGen/AArch64/fmul.ll b/llvm/test/CodeGen/AArch64/fmul.ll
index 244c73b..fe84fe1 100644
--- a/llvm/test/CodeGen/AArch64/fmul.ll
+++ b/llvm/test/CodeGen/AArch64/fmul.ll
@@ -1,8 +1,8 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2
-; RUN: llc -mtriple=aarch64-none-eabi -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-SD,CHECK-SD-NOFP16
-; RUN: llc -mtriple=aarch64-none-eabi -mattr=+fullfp16 -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-SD,CHECK-SD-FP16
-; RUN: llc -mtriple=aarch64-none-eabi -global-isel -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-GI,CHECK-GI-NOFP16
-; RUN: llc -mtriple=aarch64-none-eabi -mattr=+fullfp16 -global-isel -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-GI,CHECK-GI-FP16
+; RUN: llc -mtriple=aarch64 -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-SD,CHECK-SD-NOFP16
+; RUN: llc -mtriple=aarch64 -mattr=+fullfp16 -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-SD,CHECK-SD-FP16
+; RUN: llc -mtriple=aarch64 -global-isel -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-GI,CHECK-GI-NOFP16
+; RUN: llc -mtriple=aarch64 -mattr=+fullfp16 -global-isel -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-GI,CHECK-GI-FP16
define double @fmul_f64(double %a, double %b) {
; CHECK-LABEL: fmul_f64:
diff --git a/llvm/test/CodeGen/AArch64/fneg.ll b/llvm/test/CodeGen/AArch64/fneg.ll
index fd42768..7805512 100644
--- a/llvm/test/CodeGen/AArch64/fneg.ll
+++ b/llvm/test/CodeGen/AArch64/fneg.ll
@@ -1,8 +1,8 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2
-; RUN: llc -mtriple=aarch64-none-eabi -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-SD,CHECK-SD-NOFP16
-; RUN: llc -mtriple=aarch64-none-eabi -mattr=+fullfp16 -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-SD,CHECK-SD-FP16
-; RUN: llc -mtriple=aarch64-none-eabi -global-isel -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-GI,CHECK-GI-NOFP16
-; RUN: llc -mtriple=aarch64-none-eabi -mattr=+fullfp16 -global-isel -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-GI,CHECK-GI-FP16
+; RUN: llc -mtriple=aarch64 -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-SD,CHECK-SD-NOFP16
+; RUN: llc -mtriple=aarch64 -mattr=+fullfp16 -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-SD,CHECK-SD-FP16
+; RUN: llc -mtriple=aarch64 -global-isel -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-GI,CHECK-GI-NOFP16
+; RUN: llc -mtriple=aarch64 -mattr=+fullfp16 -global-isel -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-GI,CHECK-GI-FP16
define double @fabs_f64(double %a) {
; CHECK-LABEL: fabs_f64:
diff --git a/llvm/test/CodeGen/AArch64/fp16_intrinsic_lane.ll b/llvm/test/CodeGen/AArch64/fp16_intrinsic_lane.ll
index f68691a..e9fbaf6 100644
--- a/llvm/test/CodeGen/AArch64/fp16_intrinsic_lane.ll
+++ b/llvm/test/CodeGen/AArch64/fp16_intrinsic_lane.ll
@@ -1,5 +1,5 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=aarch64-eabi -mattr=+v8.2a,+fullfp16 | FileCheck %s
+; RUN: llc < %s -mtriple=aarch64 -mattr=+v8.2a,+fullfp16 | FileCheck %s
declare half @llvm.aarch64.neon.fmulx.f16(half, half)
declare <4 x half> @llvm.aarch64.neon.fmulx.v4f16(<4 x half>, <4 x half>)
diff --git a/llvm/test/CodeGen/AArch64/fp16_intrinsic_scalar_1op.ll b/llvm/test/CodeGen/AArch64/fp16_intrinsic_scalar_1op.ll
index 62284ca..40d2d63 100644
--- a/llvm/test/CodeGen/AArch64/fp16_intrinsic_scalar_1op.ll
+++ b/llvm/test/CodeGen/AArch64/fp16_intrinsic_scalar_1op.ll
@@ -1,5 +1,5 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=aarch64-eabi -mattr=+v8.2a,+fullfp16 | FileCheck %s
+; RUN: llc < %s -mtriple=aarch64 -mattr=+v8.2a,+fullfp16 | FileCheck %s
declare i64 @llvm.aarch64.neon.fcvtpu.i64.f16(half)
declare i32 @llvm.aarch64.neon.fcvtpu.i32.f16(half)
diff --git a/llvm/test/CodeGen/AArch64/fp16_intrinsic_scalar_2op.ll b/llvm/test/CodeGen/AArch64/fp16_intrinsic_scalar_2op.ll
index 111ddfe..36795f8 100644
--- a/llvm/test/CodeGen/AArch64/fp16_intrinsic_scalar_2op.ll
+++ b/llvm/test/CodeGen/AArch64/fp16_intrinsic_scalar_2op.ll
@@ -1,5 +1,5 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=aarch64-eabi -mattr=+v8.2a,+fullfp16 | FileCheck %s
+; RUN: llc < %s -mtriple=aarch64 -mattr=+v8.2a,+fullfp16 | FileCheck %s
declare half @llvm.aarch64.sisd.fabd.f16(half, half)
declare half @llvm.aarch64.neon.fmax.f16(half, half)
diff --git a/llvm/test/CodeGen/AArch64/fp16_intrinsic_scalar_3op.ll b/llvm/test/CodeGen/AArch64/fp16_intrinsic_scalar_3op.ll
index ba8e616e..9a52e21 100644
--- a/llvm/test/CodeGen/AArch64/fp16_intrinsic_scalar_3op.ll
+++ b/llvm/test/CodeGen/AArch64/fp16_intrinsic_scalar_3op.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -mtriple=aarch64-eabi -mattr=+v8.2a,+neon,+fullfp16 | FileCheck %s
+; RUN: llc < %s -mtriple=aarch64 -mattr=+v8.2a,+neon,+fullfp16 | FileCheck %s
define dso_local half @t_vfmah_f16(half %a, half %b, half %c) {
; CHECK-LABEL: t_vfmah_f16:
diff --git a/llvm/test/CodeGen/AArch64/fp16_intrinsic_vector_1op.ll b/llvm/test/CodeGen/AArch64/fp16_intrinsic_vector_1op.ll
index becbbdd..58cbc29 100644
--- a/llvm/test/CodeGen/AArch64/fp16_intrinsic_vector_1op.ll
+++ b/llvm/test/CodeGen/AArch64/fp16_intrinsic_vector_1op.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -mtriple=aarch64-eabi -mattr=+v8.2a,+fullfp16 | FileCheck %s
+; RUN: llc < %s -mtriple=aarch64 -mattr=+v8.2a,+fullfp16 | FileCheck %s
declare <4 x half> @llvm.nearbyint.v4f16(<4 x half>)
declare <8 x half> @llvm.nearbyint.v8f16(<8 x half>)
diff --git a/llvm/test/CodeGen/AArch64/fp16_intrinsic_vector_2op.ll b/llvm/test/CodeGen/AArch64/fp16_intrinsic_vector_2op.ll
index 1674d86..e29919a 100644
--- a/llvm/test/CodeGen/AArch64/fp16_intrinsic_vector_2op.ll
+++ b/llvm/test/CodeGen/AArch64/fp16_intrinsic_vector_2op.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -mtriple=aarch64-eabi -mattr=+v8.2a,+fullfp16 | FileCheck %s
+; RUN: llc < %s -mtriple=aarch64 -mattr=+v8.2a,+fullfp16 | FileCheck %s
declare <4 x half> @llvm.aarch64.neon.fmulx.v4f16(<4 x half>, <4 x half>)
declare <8 x half> @llvm.aarch64.neon.fmulx.v8f16(<8 x half>, <8 x half>)
diff --git a/llvm/test/CodeGen/AArch64/fp16_intrinsic_vector_3op.ll b/llvm/test/CodeGen/AArch64/fp16_intrinsic_vector_3op.ll
index c8a33a6..8d52d2a 100644
--- a/llvm/test/CodeGen/AArch64/fp16_intrinsic_vector_3op.ll
+++ b/llvm/test/CodeGen/AArch64/fp16_intrinsic_vector_3op.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -mtriple=aarch64-eabi -mattr=+v8.2a,+fullfp16 | FileCheck %s
+; RUN: llc < %s -mtriple=aarch64 -mattr=+v8.2a,+fullfp16 | FileCheck %s
declare <4 x half> @llvm.fma.v4f16(<4 x half>, <4 x half>, <4 x half>)
declare <8 x half> @llvm.fma.v8f16(<8 x half>, <8 x half>, <8 x half>)
diff --git a/llvm/test/CodeGen/AArch64/fpext.ll b/llvm/test/CodeGen/AArch64/fpext.ll
index 9635b88..db1105d 100644
--- a/llvm/test/CodeGen/AArch64/fpext.ll
+++ b/llvm/test/CodeGen/AArch64/fpext.ll
@@ -1,6 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 3
-; RUN: llc -mtriple=aarch64-none-eabi -global-isel=0 -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-SD
-; RUN: llc -mtriple=aarch64-none-eabi -global-isel=1 -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-GI
+; RUN: llc -mtriple=aarch64 -global-isel=0 -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-SD
+; RUN: llc -mtriple=aarch64 -global-isel=1 -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-GI
define double @fpext_f32_f64(float %a) {
; CHECK-LABEL: fpext_f32_f64:
diff --git a/llvm/test/CodeGen/AArch64/fpow.ll b/llvm/test/CodeGen/AArch64/fpow.ll
index a55c0db..1dd5450 100644
--- a/llvm/test/CodeGen/AArch64/fpow.ll
+++ b/llvm/test/CodeGen/AArch64/fpow.ll
@@ -1,6 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2
-; RUN: llc -mtriple=aarch64-none-eabi -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-SD
-; RUN: llc -mtriple=aarch64-none-eabi -global-isel -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-GI
+; RUN: llc -mtriple=aarch64 -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-SD
+; RUN: llc -mtriple=aarch64 -global-isel -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-GI
define double @pow_f64(double %a, double %b) {
; CHECK-LABEL: pow_f64:
diff --git a/llvm/test/CodeGen/AArch64/fpowi.ll b/llvm/test/CodeGen/AArch64/fpowi.ll
index 677d2e0..b496c7d 100644
--- a/llvm/test/CodeGen/AArch64/fpowi.ll
+++ b/llvm/test/CodeGen/AArch64/fpowi.ll
@@ -1,6 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 3
-; RUN: llc -mtriple=aarch64-none-eabi -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-SD
-; RUN: llc -mtriple=aarch64-none-eabi -global-isel -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-GI
+; RUN: llc -mtriple=aarch64 -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-SD
+; RUN: llc -mtriple=aarch64 -global-isel -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-GI
define double @powi_f64(double %a, i32 %b) {
; CHECK-LABEL: powi_f64:
diff --git a/llvm/test/CodeGen/AArch64/fptoi.ll b/llvm/test/CodeGen/AArch64/fptoi.ll
index 23ba85d..251719c 100644
--- a/llvm/test/CodeGen/AArch64/fptoi.ll
+++ b/llvm/test/CodeGen/AArch64/fptoi.ll
@@ -1,8 +1,8 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2
-; RUN: llc -mtriple=aarch64-none-eabi -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-SD,CHECK-SD-NOFP16
-; RUN: llc -mtriple=aarch64-none-eabi -mattr=+fullfp16 -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-SD,CHECK-SD-FP16
-; RUN: llc -mtriple=aarch64-none-eabi -global-isel -global-isel-abort=2 -verify-machineinstrs %s -o - 2>&1 | FileCheck %s --check-prefixes=CHECK,CHECK-GI,CHECK-GI-NOFP16
-; RUN: llc -mtriple=aarch64-none-eabi -mattr=+fullfp16 -global-isel -global-isel-abort=2 -verify-machineinstrs %s -o - 2>&1 | FileCheck %s --check-prefixes=CHECK,CHECK-GI,CHECK-GI-FP16
+; RUN: llc -mtriple=aarch64 -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-SD,CHECK-SD-NOFP16
+; RUN: llc -mtriple=aarch64 -mattr=+fullfp16 -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-SD,CHECK-SD-FP16
+; RUN: llc -mtriple=aarch64 -global-isel -global-isel-abort=2 -verify-machineinstrs %s -o - 2>&1 | FileCheck %s --check-prefixes=CHECK,CHECK-GI,CHECK-GI-NOFP16
+; RUN: llc -mtriple=aarch64 -mattr=+fullfp16 -global-isel -global-isel-abort=2 -verify-machineinstrs %s -o - 2>&1 | FileCheck %s --check-prefixes=CHECK,CHECK-GI,CHECK-GI-FP16
; CHECK-GI-FP16: warning: Instruction selection used fallback path for fptos_v2f16_v2i16
; CHECK-GI-FP16-NEXT: warning: Instruction selection used fallback path for fptou_v2f16_v2i16
diff --git a/llvm/test/CodeGen/AArch64/fptrunc.ll b/llvm/test/CodeGen/AArch64/fptrunc.ll
index 813fa03..9425988 100644
--- a/llvm/test/CodeGen/AArch64/fptrunc.ll
+++ b/llvm/test/CodeGen/AArch64/fptrunc.ll
@@ -1,6 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 3
-; RUN: llc -mtriple=aarch64-none-eabi -global-isel=0 -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-SD
-; RUN: llc -mtriple=aarch64-none-eabi -global-isel=1 -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-GI
+; RUN: llc -mtriple=aarch64 -global-isel=0 -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-SD
+; RUN: llc -mtriple=aarch64 -global-isel=1 -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-GI
define float @fptrunc_f64_f32(double %a) {
; CHECK-LABEL: fptrunc_f64_f32:
diff --git a/llvm/test/CodeGen/AArch64/frem.ll b/llvm/test/CodeGen/AArch64/frem.ll
index eb26128..03caf0a 100644
--- a/llvm/test/CodeGen/AArch64/frem.ll
+++ b/llvm/test/CodeGen/AArch64/frem.ll
@@ -1,8 +1,8 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2
-; RUN: llc -mtriple=aarch64-none-eabi -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-SD
-; RUN: llc -mtriple=aarch64-none-eabi -mattr=+fullfp16 -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-SD
-; RUN: llc -mtriple=aarch64-none-eabi -global-isel -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-GI
-; RUN: llc -mtriple=aarch64-none-eabi -mattr=+fullfp16 -global-isel -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-GI
+; RUN: llc -mtriple=aarch64 -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-SD
+; RUN: llc -mtriple=aarch64 -mattr=+fullfp16 -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-SD
+; RUN: llc -mtriple=aarch64 -global-isel -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-GI
+; RUN: llc -mtriple=aarch64 -mattr=+fullfp16 -global-isel -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-GI
define double @frem_f64(double %a, double %b) {
; CHECK-LABEL: frem_f64:
diff --git a/llvm/test/CodeGen/AArch64/frintn.ll b/llvm/test/CodeGen/AArch64/frintn.ll
index 2dc03db..782ba1d 100644
--- a/llvm/test/CodeGen/AArch64/frintn.ll
+++ b/llvm/test/CodeGen/AArch64/frintn.ll
@@ -1,4 +1,4 @@
-; RUN: llc -mtriple=aarch64-eabi -mattr=+fullfp16 %s -o - | FileCheck %s
+; RUN: llc -mtriple=aarch64 -mattr=+fullfp16 %s -o - | FileCheck %s
; The llvm.aarch64.neon.frintn intrinsic should be auto-upgraded to the
; target-independent roundeven intrinsic.
diff --git a/llvm/test/CodeGen/AArch64/fsincos.ll b/llvm/test/CodeGen/AArch64/fsincos.ll
index aef0b2e..2c76d96 100644
--- a/llvm/test/CodeGen/AArch64/fsincos.ll
+++ b/llvm/test/CodeGen/AArch64/fsincos.ll
@@ -1,6 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 3
-; RUN: llc -mtriple=aarch64-none-eabi -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-SD
-; RUN: llc -mtriple=aarch64-none-eabi -global-isel -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-GI
+; RUN: llc -mtriple=aarch64 -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-SD
+; RUN: llc -mtriple=aarch64 -global-isel -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-GI
define double @sin_f64(double %a) {
; CHECK-LABEL: sin_f64:
diff --git a/llvm/test/CodeGen/AArch64/fsqrt.ll b/llvm/test/CodeGen/AArch64/fsqrt.ll
index 76930e7..683544a 100644
--- a/llvm/test/CodeGen/AArch64/fsqrt.ll
+++ b/llvm/test/CodeGen/AArch64/fsqrt.ll
@@ -1,8 +1,8 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2
-; RUN: llc -mtriple=aarch64-none-eabi -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-SD,CHECK-SD-NOFP16
-; RUN: llc -mtriple=aarch64-none-eabi -mattr=+fullfp16 -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-SD,CHECK-SD-FP16
-; RUN: llc -mtriple=aarch64-none-eabi -global-isel -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-GI,CHECK-GI-NOFP16
-; RUN: llc -mtriple=aarch64-none-eabi -mattr=+fullfp16 -global-isel -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-GI,CHECK-GI-FP16
+; RUN: llc -mtriple=aarch64 -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-SD,CHECK-SD-NOFP16
+; RUN: llc -mtriple=aarch64 -mattr=+fullfp16 -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-SD,CHECK-SD-FP16
+; RUN: llc -mtriple=aarch64 -global-isel -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-GI,CHECK-GI-NOFP16
+; RUN: llc -mtriple=aarch64 -mattr=+fullfp16 -global-isel -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-GI,CHECK-GI-FP16
define double @sqrt_f64(double %a) {
; CHECK-LABEL: sqrt_f64:
diff --git a/llvm/test/CodeGen/AArch64/hints.ll b/llvm/test/CodeGen/AArch64/hints.ll
index f23c7b0..61a3fa4 100644
--- a/llvm/test/CodeGen/AArch64/hints.ll
+++ b/llvm/test/CodeGen/AArch64/hints.ll
@@ -1,4 +1,4 @@
-; RUN: llc -mtriple aarch64-eabi -o - %s | FileCheck %s
+; RUN: llc -mtriple=aarch64 -o - %s | FileCheck %s
declare void @llvm.aarch64.hint(i32) nounwind
diff --git a/llvm/test/CodeGen/AArch64/icmp.ll b/llvm/test/CodeGen/AArch64/icmp.ll
index 789bc99..2e8c93a 100644
--- a/llvm/test/CodeGen/AArch64/icmp.ll
+++ b/llvm/test/CodeGen/AArch64/icmp.ll
@@ -1,6 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
-; RUN: llc -mtriple=aarch64-none-eabi -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-SD
-; RUN: llc -mtriple=aarch64-none-eabi -global-isel -global-isel-abort=2 -verify-machineinstrs %s -o - 2>&1 | FileCheck %s --check-prefixes=CHECK,CHECK-GI
+; RUN: llc -mtriple=aarch64 -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-SD
+; RUN: llc -mtriple=aarch64 -global-isel -global-isel-abort=2 -verify-machineinstrs %s -o - 2>&1 | FileCheck %s --check-prefixes=CHECK,CHECK-GI
define i64 @i64_i64(i64 %a, i64 %b, i64 %d, i64 %e) {
; CHECK-LABEL: i64_i64:
diff --git a/llvm/test/CodeGen/AArch64/insertextract.ll b/llvm/test/CodeGen/AArch64/insertextract.ll
index 794abca..6074d44 100644
--- a/llvm/test/CodeGen/AArch64/insertextract.ll
+++ b/llvm/test/CodeGen/AArch64/insertextract.ll
@@ -1,6 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
-; RUN: llc -mtriple=aarch64-none-eabi -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-SD
-; RUN: llc -mtriple=aarch64-none-eabi -global-isel -global-isel-abort=2 -verify-machineinstrs %s -o - 2>&1 | FileCheck %s --check-prefixes=CHECK,CHECK-GI
+; RUN: llc -mtriple=aarch64 -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-SD
+; RUN: llc -mtriple=aarch64 -global-isel -global-isel-abort=2 -verify-machineinstrs %s -o - 2>&1 | FileCheck %s --check-prefixes=CHECK,CHECK-GI
; CHECK-GI: warning: Instruction selection used fallback path for insert_v2f64_c
; CHECK-GI-NEXT: warning: Instruction selection used fallback path for insert_v3f64_c
diff --git a/llvm/test/CodeGen/AArch64/intrinsics-memory-barrier.ll b/llvm/test/CodeGen/AArch64/intrinsics-memory-barrier.ll
index 6193997..ea4205f 100644
--- a/llvm/test/CodeGen/AArch64/intrinsics-memory-barrier.ll
+++ b/llvm/test/CodeGen/AArch64/intrinsics-memory-barrier.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -mtriple=aarch64-eabi -O=3 | FileCheck %s
+; RUN: llc < %s -mtriple=aarch64 -O=3 | FileCheck %s
define void @test() {
; CHECK: dmb sy
diff --git a/llvm/test/CodeGen/AArch64/itofp.ll b/llvm/test/CodeGen/AArch64/itofp.ll
index fa1ab61..85689b6 100644
--- a/llvm/test/CodeGen/AArch64/itofp.ll
+++ b/llvm/test/CodeGen/AArch64/itofp.ll
@@ -1,8 +1,8 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 3
-; RUN: llc -mtriple=aarch64-none-eabi -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-SD,CHECK-SD-NOFP16
-; RUN: llc -mtriple=aarch64-none-eabi -mattr=+fullfp16 -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-SD,CHECK-SD-FP16
-; RUN: llc -mtriple=aarch64-none-eabi -global-isel -global-isel-abort=2 -verify-machineinstrs %s -o - 2>&1 | FileCheck %s --check-prefixes=CHECK,CHECK-GI,CHECK-GI-NOFP16
-; RUN: llc -mtriple=aarch64-none-eabi -mattr=+fullfp16 -global-isel -global-isel-abort=2 -verify-machineinstrs %s -o - 2>&1 | FileCheck %s --check-prefixes=CHECK,CHECK-GI,CHECK-GI-FP16
+; RUN: llc -mtriple=aarch64 -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-SD,CHECK-SD-NOFP16
+; RUN: llc -mtriple=aarch64 -mattr=+fullfp16 -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-SD,CHECK-SD-FP16
+; RUN: llc -mtriple=aarch64 -global-isel -global-isel-abort=2 -verify-machineinstrs %s -o - 2>&1 | FileCheck %s --check-prefixes=CHECK,CHECK-GI,CHECK-GI-NOFP16
+; RUN: llc -mtriple=aarch64 -mattr=+fullfp16 -global-isel -global-isel-abort=2 -verify-machineinstrs %s -o - 2>&1 | FileCheck %s --check-prefixes=CHECK,CHECK-GI,CHECK-GI-FP16
; CHECK-GI: warning: Instruction selection used fallback path for stofp_v3i8_v3f64
; CHECK-GI-NEXT: warning: Instruction selection used fallback path for utofp_v3i8_v3f64
diff --git a/llvm/test/CodeGen/AArch64/ldp-stp-scaled-unscaled-pairs.ll b/llvm/test/CodeGen/AArch64/ldp-stp-scaled-unscaled-pairs.ll
index cf63835..91cf605 100644
--- a/llvm/test/CodeGen/AArch64/ldp-stp-scaled-unscaled-pairs.ll
+++ b/llvm/test/CodeGen/AArch64/ldp-stp-scaled-unscaled-pairs.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -mtriple=aarch64-eabi -aarch64-neon-syntax=apple -aarch64-enable-stp-suppress=false -verify-machineinstrs -asm-verbose=false | FileCheck %s
+; RUN: llc < %s -mtriple=aarch64 -aarch64-neon-syntax=apple -aarch64-enable-stp-suppress=false -verify-machineinstrs -asm-verbose=false | FileCheck %s
; CHECK-LABEL: test_strd_sturd:
; CHECK-NEXT: stp d0, d1, [x0, #-8]
diff --git a/llvm/test/CodeGen/AArch64/legalize-bug-bogus-cpu.ll b/llvm/test/CodeGen/AArch64/legalize-bug-bogus-cpu.ll
index a96a3c5..a949abb 100644
--- a/llvm/test/CodeGen/AArch64/legalize-bug-bogus-cpu.ll
+++ b/llvm/test/CodeGen/AArch64/legalize-bug-bogus-cpu.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -mtriple=aarch64-eabi -mcpu=bogus
+; RUN: llc < %s -mtriple=aarch64 -mcpu=bogus
; Fix the bug in PR20557. Set mcpu to a bogus name, llc will crash in type
; legalization.
diff --git a/llvm/test/CodeGen/AArch64/merge-scoped-aa-store.ll b/llvm/test/CodeGen/AArch64/merge-scoped-aa-store.ll
index 871bc3b..23011df 100644
--- a/llvm/test/CodeGen/AArch64/merge-scoped-aa-store.ll
+++ b/llvm/test/CodeGen/AArch64/merge-scoped-aa-store.ll
@@ -1,4 +1,4 @@
-; RUN: llc %s -mtriple=aarch64-eabi -stop-after=finalize-isel -o - | FileCheck --check-prefix=MIR %s
+; RUN: llc %s -mtriple=aarch64 -stop-after=finalize-isel -o - | FileCheck --check-prefix=MIR %s
; Ensure the scoped AA metadata is still retained after store merging.
diff --git a/llvm/test/CodeGen/AArch64/merge-store-dependency.ll b/llvm/test/CodeGen/AArch64/merge-store-dependency.ll
index fc5813b..3c42987 100644
--- a/llvm/test/CodeGen/AArch64/merge-store-dependency.ll
+++ b/llvm/test/CodeGen/AArch64/merge-store-dependency.ll
@@ -1,5 +1,5 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mcpu cortex-a53 -mtriple=aarch64-eabi | FileCheck %s --check-prefix=A53
+; RUN: llc < %s -mcpu cortex-a53 -mtriple=aarch64 | FileCheck %s --check-prefix=A53
; PR26827 - Merge stores causes wrong dependency.
%struct1 = type { ptr, ptr, i32, i32, i16, i16, ptr, ptr }
diff --git a/llvm/test/CodeGen/AArch64/merge-store.ll b/llvm/test/CodeGen/AArch64/merge-store.ll
index f720103..b93d0c3 100644
--- a/llvm/test/CodeGen/AArch64/merge-store.ll
+++ b/llvm/test/CodeGen/AArch64/merge-store.ll
@@ -1,6 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=aarch64-unknown-unknown -mcpu=cyclone -mattr=+slow-misaligned-128store | FileCheck %s --check-prefixes=CHECK,SPLITTING
-; RUN: llc < %s -mtriple=aarch64-eabi -mattr=-slow-misaligned-128store | FileCheck %s --check-prefixes=CHECK,MISALIGNED
+; RUN: llc < %s -mtriple=aarch64 -mattr=-slow-misaligned-128store | FileCheck %s --check-prefixes=CHECK,MISALIGNED
@g0 = external dso_local global <3 x float>, align 16
@g1 = external dso_local global <3 x float>, align 4
diff --git a/llvm/test/CodeGen/AArch64/min-max-combine.ll b/llvm/test/CodeGen/AArch64/min-max-combine.ll
index 535d2ba..5111f83 100644
--- a/llvm/test/CodeGen/AArch64/min-max-combine.ll
+++ b/llvm/test/CodeGen/AArch64/min-max-combine.ll
@@ -1,8 +1,8 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2
-; RUN: llc -mtriple=aarch64-eabi %s -o - | FileCheck %s --check-prefixes=CHECK-ISEL
-; RUN: llc -mtriple=aarch64-eabi %s -o - -mattr=cssc | FileCheck %s --check-prefixes=CHECK-CSSC
-; RUN: llc -mtriple=aarch64-eabi -global-isel %s -o - | FileCheck %s --check-prefixes=CHECK-GLOBAL
-; RUN: llc -mtriple=aarch64-eabi -global-isel %s -o - -mattr=cssc | FileCheck %s --check-prefixes=CHECK-CSSC
+; RUN: llc -mtriple=aarch64 %s -o - | FileCheck %s --check-prefixes=CHECK-ISEL
+; RUN: llc -mtriple=aarch64 %s -o - -mattr=cssc | FileCheck %s --check-prefixes=CHECK-CSSC
+; RUN: llc -mtriple=aarch64 -global-isel %s -o - | FileCheck %s --check-prefixes=CHECK-GLOBAL
+; RUN: llc -mtriple=aarch64 -global-isel %s -o - -mattr=cssc | FileCheck %s --check-prefixes=CHECK-CSSC
; These tests check for @llvm.smax, @llvm.smin combines.
diff --git a/llvm/test/CodeGen/AArch64/min-max.ll b/llvm/test/CodeGen/AArch64/min-max.ll
index 8914406..0d02f1e 100644
--- a/llvm/test/CodeGen/AArch64/min-max.ll
+++ b/llvm/test/CodeGen/AArch64/min-max.ll
@@ -1,8 +1,8 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=aarch64-eabi %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-ISEL
-; RUN: llc -mtriple=aarch64-eabi %s -o - -mattr=+cssc | FileCheck %s --check-prefixes=CHECK,CHECK-ISEL-CSSC
-; RUN: llc -mtriple=aarch64-eabi -global-isel %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-GLOBAL
-; RUN: llc -mtriple=aarch64-eabi -global-isel %s -o - -mattr=+cssc | FileCheck %s --check-prefixes=CHECK,CHECK-GLOBAL-CSSC
+; RUN: llc -mtriple=aarch64 %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-ISEL
+; RUN: llc -mtriple=aarch64 %s -o - -mattr=+cssc | FileCheck %s --check-prefixes=CHECK,CHECK-ISEL-CSSC
+; RUN: llc -mtriple=aarch64 -global-isel %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-GLOBAL
+; RUN: llc -mtriple=aarch64 -global-isel %s -o - -mattr=+cssc | FileCheck %s --check-prefixes=CHECK,CHECK-GLOBAL-CSSC
; These tests just check that the plumbing is in place for @llvm.smax, @llvm.umax,
; @llvm.smin, @llvm.umin.
diff --git a/llvm/test/CodeGen/AArch64/misched-fusion-addadrp.ll b/llvm/test/CodeGen/AArch64/misched-fusion-addadrp.ll
index a75c303..70b6b91 100644
--- a/llvm/test/CodeGen/AArch64/misched-fusion-addadrp.ll
+++ b/llvm/test/CodeGen/AArch64/misched-fusion-addadrp.ll
@@ -15,6 +15,9 @@
; RUN: llc %s -o - -mtriple=aarch64-unknown -mcpu=neoverse-v2 | FileCheck %s
; RUN: llc %s -o - -mtriple=aarch64-unknown -mcpu=apple-a16 -mattr=-fuse-literals | FileCheck %s
; RUN: llc %s -o - -mtriple=aarch64-unknown -mcpu=apple-a17 -mattr=-fuse-literals | FileCheck %s
+; RUN: llc %s -o - -mtriple=aarch64-unknown -mcpu=ampere1 -mattr=-fuse-literals | FileCheck %s
+; RUN: llc %s -o - -mtriple=aarch64-unknown -mcpu=ampere1a -mattr=-fuse-literals | FileCheck %s
+; RUN: llc %s -o - -mtriple=aarch64-unknown -mcpu=ampere1b -mattr=-fuse-literals | FileCheck %s
@g = common local_unnamed_addr global ptr null, align 8
diff --git a/llvm/test/CodeGen/AArch64/misched-fusion-addr.ll b/llvm/test/CodeGen/AArch64/misched-fusion-addr.ll
index 2934995..459b688 100644
--- a/llvm/test/CodeGen/AArch64/misched-fusion-addr.ll
+++ b/llvm/test/CodeGen/AArch64/misched-fusion-addr.ll
@@ -3,6 +3,9 @@
; RUN: llc %s -o - -mtriple=aarch64-unknown -mcpu=exynos-m3 | FileCheck %s
; RUN: llc %s -o - -mtriple=aarch64-unknown -mcpu=exynos-m4 | FileCheck %s
; RUN: llc %s -o - -mtriple=aarch64-unknown -mcpu=exynos-m5 | FileCheck %s
+; RUN: llc %s -o - -mtriple=aarch64-unknown -mcpu=ampere1 | FileCheck %s
+; RUN: llc %s -o - -mtriple=aarch64-unknown -mcpu=ampere1a | FileCheck %s
+; RUN: llc %s -o - -mtriple=aarch64-unknown -mcpu=ampere1b | FileCheck %s
target triple = "aarch64-unknown"
diff --git a/llvm/test/CodeGen/AArch64/misched-fusion-aes.ll b/llvm/test/CodeGen/AArch64/misched-fusion-aes.ll
index ee3e808..bf16695 100644
--- a/llvm/test/CodeGen/AArch64/misched-fusion-aes.ll
+++ b/llvm/test/CodeGen/AArch64/misched-fusion-aes.ll
@@ -17,6 +17,9 @@
; RUN: llc %s -o - -mtriple=aarch64-unknown -mcpu=exynos-m3 | FileCheck %s
; RUN: llc %s -o - -mtriple=aarch64-unknown -mcpu=exynos-m4 | FileCheck %s
; RUN: llc %s -o - -mtriple=aarch64-unknown -mcpu=exynos-m5 | FileCheck %s
+; RUN: llc %s -o - -mtriple=aarch64-unknown -mcpu=ampere1 | FileCheck %s
+; RUN: llc %s -o - -mtriple=aarch64-unknown -mcpu=ampere1a | FileCheck %s
+; RUN: llc %s -o - -mtriple=aarch64-unknown -mcpu=ampere1b | FileCheck %s
declare <16 x i8> @llvm.aarch64.crypto.aese(<16 x i8> %d, <16 x i8> %k)
declare <16 x i8> @llvm.aarch64.crypto.aesmc(<16 x i8> %d)
diff --git a/llvm/test/CodeGen/AArch64/mul_pow2.ll b/llvm/test/CodeGen/AArch64/mul_pow2.ll
index e16ee40..90e560a 100644
--- a/llvm/test/CodeGen/AArch64/mul_pow2.ll
+++ b/llvm/test/CodeGen/AArch64/mul_pow2.ll
@@ -1,6 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=aarch64-eabi | FileCheck %s
-; RUN: llc < %s -mtriple=aarch64-eabi -global-isel -global-isel-abort=1 | FileCheck %s --check-prefix=GISEL
+; RUN: llc < %s -mtriple=aarch64 | FileCheck %s
+; RUN: llc < %s -mtriple=aarch64 -global-isel -global-isel-abort=1 | FileCheck %s --check-prefix=GISEL
; Convert mul x, pow2 to shift.
; Convert mul x, pow2 +/- 1 to shift + add/sub.
diff --git a/llvm/test/CodeGen/AArch64/neon-extadd-extract.ll b/llvm/test/CodeGen/AArch64/neon-extadd-extract.ll
index d79c072..b3692f8 100644
--- a/llvm/test/CodeGen/AArch64/neon-extadd-extract.ll
+++ b/llvm/test/CodeGen/AArch64/neon-extadd-extract.ll
@@ -1,5 +1,5 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2
-; RUN: llc < %s -mtriple aarch64-none-eabi -o - | FileCheck %s
+; RUN: llc < %s -mtriple=aarch64 -o - | FileCheck %s
define <4 x i16> @addls_v8i8_0(<8 x i8> %s0, <8 x i8> %s1) {
; CHECK-LABEL: addls_v8i8_0:
diff --git a/llvm/test/CodeGen/AArch64/neon-mov.ll b/llvm/test/CodeGen/AArch64/neon-mov.ll
index 219c8b53..7effdc9 100644
--- a/llvm/test/CodeGen/AArch64/neon-mov.ll
+++ b/llvm/test/CodeGen/AArch64/neon-mov.ll
@@ -111,16 +111,14 @@ define <4 x i32> @movi4s_lsl16() {
define <4 x i32> @movi4s_fneg() {
; CHECK-NOFP16-SD-LABEL: movi4s_fneg:
; CHECK-NOFP16-SD: // %bb.0:
-; CHECK-NOFP16-SD-NEXT: mov w8, #61440 // =0xf000
-; CHECK-NOFP16-SD-NEXT: movk w8, #32768, lsl #16
-; CHECK-NOFP16-SD-NEXT: dup v0.4s, w8
+; CHECK-NOFP16-SD-NEXT: movi v0.4s, #240, lsl #8
+; CHECK-NOFP16-SD-NEXT: fneg v0.4s, v0.4s
; CHECK-NOFP16-SD-NEXT: ret
;
; CHECK-FP16-SD-LABEL: movi4s_fneg:
; CHECK-FP16-SD: // %bb.0:
-; CHECK-FP16-SD-NEXT: mov w8, #61440 // =0xf000
-; CHECK-FP16-SD-NEXT: movk w8, #32768, lsl #16
-; CHECK-FP16-SD-NEXT: dup v0.4s, w8
+; CHECK-FP16-SD-NEXT: movi v0.4s, #240, lsl #8
+; CHECK-FP16-SD-NEXT: fneg v0.4s, v0.4s
; CHECK-FP16-SD-NEXT: ret
;
; CHECK-NOFP16-GI-LABEL: movi4s_fneg:
@@ -178,11 +176,29 @@ define <8 x i16> @movi8h_lsl8() {
}
define <8 x i16> @movi8h_fneg() {
-; CHECK-LABEL: movi8h_fneg:
-; CHECK: // %bb.0:
-; CHECK-NEXT: adrp x8, .LCPI19_0
-; CHECK-NEXT: ldr q0, [x8, :lo12:.LCPI19_0]
-; CHECK-NEXT: ret
+; CHECK-NOFP16-SD-LABEL: movi8h_fneg:
+; CHECK-NOFP16-SD: // %bb.0:
+; CHECK-NOFP16-SD-NEXT: movi v0.8h, #127, lsl #8
+; CHECK-NOFP16-SD-NEXT: fneg v0.4s, v0.4s
+; CHECK-NOFP16-SD-NEXT: ret
+;
+; CHECK-FP16-SD-LABEL: movi8h_fneg:
+; CHECK-FP16-SD: // %bb.0:
+; CHECK-FP16-SD-NEXT: movi v0.8h, #127, lsl #8
+; CHECK-FP16-SD-NEXT: fneg v0.4s, v0.4s
+; CHECK-FP16-SD-NEXT: ret
+;
+; CHECK-NOFP16-GI-LABEL: movi8h_fneg:
+; CHECK-NOFP16-GI: // %bb.0:
+; CHECK-NOFP16-GI-NEXT: adrp x8, .LCPI19_0
+; CHECK-NOFP16-GI-NEXT: ldr q0, [x8, :lo12:.LCPI19_0]
+; CHECK-NOFP16-GI-NEXT: ret
+;
+; CHECK-FP16-GI-LABEL: movi8h_fneg:
+; CHECK-FP16-GI: // %bb.0:
+; CHECK-FP16-GI-NEXT: adrp x8, .LCPI19_0
+; CHECK-FP16-GI-NEXT: ldr q0, [x8, :lo12:.LCPI19_0]
+; CHECK-FP16-GI-NEXT: ret
ret <8 x i16> <i16 32512, i16 65280, i16 32512, i16 65280, i16 32512, i16 65280, i16 32512, i16 65280>
}
@@ -294,8 +310,8 @@ define <8 x i16> @mvni8h_neg() {
;
; CHECK-FP16-SD-LABEL: mvni8h_neg:
; CHECK-FP16-SD: // %bb.0:
-; CHECK-FP16-SD-NEXT: mov w8, #33008 // =0x80f0
-; CHECK-FP16-SD-NEXT: dup v0.8h, w8
+; CHECK-FP16-SD-NEXT: movi v0.8h, #240
+; CHECK-FP16-SD-NEXT: fneg v0.8h, v0.8h
; CHECK-FP16-SD-NEXT: ret
;
; CHECK-NOFP16-GI-LABEL: mvni8h_neg:
@@ -480,14 +496,14 @@ define <2 x double> @fmov2d() {
define <2 x double> @fmov2d_neg0() {
; CHECK-NOFP16-SD-LABEL: fmov2d_neg0:
; CHECK-NOFP16-SD: // %bb.0:
-; CHECK-NOFP16-SD-NEXT: mov x8, #-9223372036854775808 // =0x8000000000000000
-; CHECK-NOFP16-SD-NEXT: dup v0.2d, x8
+; CHECK-NOFP16-SD-NEXT: movi v0.2d, #0000000000000000
+; CHECK-NOFP16-SD-NEXT: fneg v0.2d, v0.2d
; CHECK-NOFP16-SD-NEXT: ret
;
; CHECK-FP16-SD-LABEL: fmov2d_neg0:
; CHECK-FP16-SD: // %bb.0:
-; CHECK-FP16-SD-NEXT: mov x8, #-9223372036854775808 // =0x8000000000000000
-; CHECK-FP16-SD-NEXT: dup v0.2d, x8
+; CHECK-FP16-SD-NEXT: movi v0.2d, #0000000000000000
+; CHECK-FP16-SD-NEXT: fneg v0.2d, v0.2d
; CHECK-FP16-SD-NEXT: ret
;
; CHECK-NOFP16-GI-LABEL: fmov2d_neg0:
diff --git a/llvm/test/CodeGen/AArch64/neon_rbit.ll b/llvm/test/CodeGen/AArch64/neon_rbit.ll
index 0daaf72..e66aca7 100644
--- a/llvm/test/CodeGen/AArch64/neon_rbit.ll
+++ b/llvm/test/CodeGen/AArch64/neon_rbit.ll
@@ -1,5 +1,5 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=aarch64-eabi -mattr=+fullfp16 %s -o - | FileCheck %s
+; RUN: llc -mtriple=aarch64 -mattr=+fullfp16 %s -o - | FileCheck %s
; The llvm.aarch64_neon_rbit intrinsic should be auto-upgraded to the
; target-independent bitreverse intrinsic.
diff --git a/llvm/test/CodeGen/AArch64/no-quad-ldp-stp.ll b/llvm/test/CodeGen/AArch64/no-quad-ldp-stp.ll
index 4fe16d8..b7dde88 100644
--- a/llvm/test/CodeGen/AArch64/no-quad-ldp-stp.ll
+++ b/llvm/test/CodeGen/AArch64/no-quad-ldp-stp.ll
@@ -1,5 +1,5 @@
-; RUN: llc < %s -mtriple=aarch64-eabi -mattr=+slow-paired-128 -verify-machineinstrs -asm-verbose=false | FileCheck %s --check-prefixes=CHECK,SLOW
-; RUN: llc < %s -mtriple=aarch64-eabi -mcpu=exynos-m3 -verify-machineinstrs -asm-verbose=false | FileCheck %s --check-prefixes=CHECK,FAST
+; RUN: llc < %s -mtriple=aarch64 -mattr=+slow-paired-128 -verify-machineinstrs -asm-verbose=false | FileCheck %s --check-prefixes=CHECK,SLOW
+; RUN: llc < %s -mtriple=aarch64 -mcpu=exynos-m3 -verify-machineinstrs -asm-verbose=false | FileCheck %s --check-prefixes=CHECK,FAST
; CHECK-LABEL: test_nopair_st
; SLOW: str
diff --git a/llvm/test/CodeGen/AArch64/nzcv-save.ll b/llvm/test/CodeGen/AArch64/nzcv-save.ll
index 9bc4ccf..c40e529 100644
--- a/llvm/test/CodeGen/AArch64/nzcv-save.ll
+++ b/llvm/test/CodeGen/AArch64/nzcv-save.ll
@@ -1,5 +1,5 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -verify-machineinstrs -mtriple=aarch64-eabi | FileCheck %s
+; RUN: llc < %s -verify-machineinstrs -mtriple=aarch64 | FileCheck %s
; DAG ends up with two uses for the flags from an ADCS node, which means they
; must be saved for later.
diff --git a/llvm/test/CodeGen/AArch64/pacbti-module-attrs.ll b/llvm/test/CodeGen/AArch64/pacbti-module-attrs.ll
index 1858f3b..ba47721 100644
--- a/llvm/test/CodeGen/AArch64/pacbti-module-attrs.ll
+++ b/llvm/test/CodeGen/AArch64/pacbti-module-attrs.ll
@@ -1,4 +1,4 @@
-;; RUN: llc -mtriple=aarch64-eabi -mattr=+v8.5a %s -o - | FileCheck %s
+;; RUN: llc -mtriple=aarch64 -mattr=+v8.5a %s -o - | FileCheck %s
declare i32 @g(i32) #5
diff --git a/llvm/test/CodeGen/AArch64/postra-mi-sched.ll b/llvm/test/CodeGen/AArch64/postra-mi-sched.ll
index 7688973..5abc06b 100644
--- a/llvm/test/CodeGen/AArch64/postra-mi-sched.ll
+++ b/llvm/test/CodeGen/AArch64/postra-mi-sched.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -O3 -mtriple=aarch64-eabi -mcpu=cortex-a53 | FileCheck %s
+; RUN: llc < %s -O3 -mtriple=aarch64 -mcpu=cortex-a53 | FileCheck %s
; With cortex-a53, each of fmul and fcvt have latency of 6 cycles. After the
; pre-RA MI scheduler, fmul, fcvt and fdiv will be consecutive. The top-down
diff --git a/llvm/test/CodeGen/AArch64/rbit.ll b/llvm/test/CodeGen/AArch64/rbit.ll
index 288a25b..8c457b4 100644
--- a/llvm/test/CodeGen/AArch64/rbit.ll
+++ b/llvm/test/CodeGen/AArch64/rbit.ll
@@ -1,4 +1,4 @@
-; RUN: llc -mtriple=aarch64-eabi %s -o - | FileCheck %s
+; RUN: llc -mtriple=aarch64 %s -o - | FileCheck %s
; The llvm.aarch64.rbit intrinsic should be auto-upgraded to the
; target-independent bitreverse intrinsic.
diff --git a/llvm/test/CodeGen/AArch64/rcpc3-sve.ll b/llvm/test/CodeGen/AArch64/rcpc3-sve.ll
index b9d9394..47e3381 100644
--- a/llvm/test/CodeGen/AArch64/rcpc3-sve.ll
+++ b/llvm/test/CodeGen/AArch64/rcpc3-sve.ll
@@ -1,6 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2
-; RUN: llc -mtriple=aarch64-none-eabi -mattr=+v8.9a -mattr=+sve -mattr=+rcpc3 < %s | FileCheck %s
-; RUN: llc -mtriple=aarch64-none-eabi -mattr=+v8.9a -mattr=+sve < %s | FileCheck %s
+; RUN: llc -mtriple=aarch64 -mattr=+v8.9a -mattr=+sve -mattr=+rcpc3 < %s | FileCheck %s
+; RUN: llc -mtriple=aarch64 -mattr=+v8.9a -mattr=+sve < %s | FileCheck %s
; Show what happens with RCPC3 for extract/insert into SVE vectors.
; Currently there is no RCPC3 codegen expected for this.
diff --git a/llvm/test/CodeGen/AArch64/rcpc3.ll b/llvm/test/CodeGen/AArch64/rcpc3.ll
index d416139..4577033 100644
--- a/llvm/test/CodeGen/AArch64/rcpc3.ll
+++ b/llvm/test/CodeGen/AArch64/rcpc3.ll
@@ -1,6 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2
-; RUN: llc -mtriple=aarch64-none-eabi -mattr=+v8.9a -mattr=+rcpc3 < %s | FileCheck --check-prefixes=BOTH,RCPC3 %s
-; RUN: llc -mtriple=aarch64-none-eabi -mattr=+v8.9a < %s | FileCheck --check-prefixes=BOTH,NO-RCPC3 %s
+; RUN: llc -mtriple=aarch64 -mattr=+v8.9a -mattr=+rcpc3 < %s | FileCheck --check-prefixes=BOTH,RCPC3 %s
+; RUN: llc -mtriple=aarch64 -mattr=+v8.9a < %s | FileCheck --check-prefixes=BOTH,NO-RCPC3 %s
define hidden <2 x i64> @test_ldap1_2xi64_lane0(ptr nocapture noundef readonly %a, <2 x i64> noundef %b) local_unnamed_addr {
;
diff --git a/llvm/test/CodeGen/AArch64/rem_crash.ll b/llvm/test/CodeGen/AArch64/rem_crash.ll
index f9cf6d5..38b46ea 100644
--- a/llvm/test/CodeGen/AArch64/rem_crash.ll
+++ b/llvm/test/CodeGen/AArch64/rem_crash.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -mtriple=aarch64-eabi
+; RUN: llc < %s -mtriple=aarch64
define i8 @test_minsize_uu8(i8 %x) minsize optsize {
entry:
diff --git a/llvm/test/CodeGen/AArch64/rotate.ll b/llvm/test/CodeGen/AArch64/rotate.ll
index 8d52b6dd..7b4808e 100644
--- a/llvm/test/CodeGen/AArch64/rotate.ll
+++ b/llvm/test/CodeGen/AArch64/rotate.ll
@@ -1,5 +1,5 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=aarch64--linux-gnueabihf | FileCheck %s
+; RUN: llc < %s -mtriple=aarch64 | FileCheck %s
;; This used to cause a backend crash about not being able to
;; select ROTL. Make sure if generates the basic ushr/shl.
diff --git a/llvm/test/CodeGen/AArch64/setcc_knownbits.ll b/llvm/test/CodeGen/AArch64/setcc_knownbits.ll
index 9e9c814..46b714d 100644
--- a/llvm/test/CodeGen/AArch64/setcc_knownbits.ll
+++ b/llvm/test/CodeGen/AArch64/setcc_knownbits.ll
@@ -1,5 +1,5 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 3
-; RUN: llc < %s -mtriple=aarch64-none-eabi | FileCheck %s
+; RUN: llc < %s -mtriple=aarch64 | FileCheck %s
define i1 @load_bv_v4i8(i1 zeroext %a) {
; CHECK-LABEL: load_bv_v4i8:
diff --git a/llvm/test/CodeGen/AArch64/sls-stackprotector-outliner.ll b/llvm/test/CodeGen/AArch64/sls-stackprotector-outliner.ll
index 9e5e555..5f3b150 100644
--- a/llvm/test/CodeGen/AArch64/sls-stackprotector-outliner.ll
+++ b/llvm/test/CodeGen/AArch64/sls-stackprotector-outliner.ll
@@ -5,7 +5,7 @@
; inserted at a point where LR is live.
target datalayout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128"
-target triple = "aarch64-arm-none-eabi"
+target triple = "aarch64"
define hidden void @_ZTv0_n24_N2C6D1Ev(ptr %this) minsize sspreq "target-features"="+harden-sls-retbr" {
; CHECK-LABEL: _ZTv0_n24_N2C6D1Ev:
diff --git a/llvm/test/CodeGen/AArch64/srem-seteq-vec-nonsplat.ll b/llvm/test/CodeGen/AArch64/srem-seteq-vec-nonsplat.ll
index f8c6f41..1ebfe30 100644
--- a/llvm/test/CodeGen/AArch64/srem-seteq-vec-nonsplat.ll
+++ b/llvm/test/CodeGen/AArch64/srem-seteq-vec-nonsplat.ll
@@ -35,18 +35,17 @@ define <4 x i32> @test_srem_odd_even(<4 x i32> %X) nounwind {
define <4 x i32> @test_srem_odd_allones_eq(<4 x i32> %X) nounwind {
; CHECK-LABEL: test_srem_odd_allones_eq:
; CHECK: // %bb.0:
+; CHECK-NEXT: movi v1.16b, #153
; CHECK-NEXT: mov w8, #52429 // =0xcccd
-; CHECK-NEXT: mov w9, #39321 // =0x9999
; CHECK-NEXT: movk w8, #52428, lsl #16
-; CHECK-NEXT: movk w9, #6553, lsl #16
-; CHECK-NEXT: dup v1.4s, w8
-; CHECK-NEXT: dup v2.4s, w9
+; CHECK-NEXT: dup v2.4s, w8
; CHECK-NEXT: adrp x8, .LCPI1_0
-; CHECK-NEXT: mla v2.4s, v0.4s, v1.4s
+; CHECK-NEXT: fneg v1.4s, v1.4s
+; CHECK-NEXT: mla v1.4s, v0.4s, v2.4s
; CHECK-NEXT: ldr q0, [x8, :lo12:.LCPI1_0]
-; CHECK-NEXT: movi v1.4s, #1
-; CHECK-NEXT: cmhs v0.4s, v0.4s, v2.4s
-; CHECK-NEXT: and v0.16b, v0.16b, v1.16b
+; CHECK-NEXT: movi v2.4s, #1
+; CHECK-NEXT: cmhs v0.4s, v0.4s, v1.4s
+; CHECK-NEXT: and v0.16b, v0.16b, v2.16b
; CHECK-NEXT: ret
%srem = srem <4 x i32> %X, <i32 5, i32 5, i32 4294967295, i32 5>
%cmp = icmp eq <4 x i32> %srem, <i32 0, i32 0, i32 0, i32 0>
@@ -56,18 +55,17 @@ define <4 x i32> @test_srem_odd_allones_eq(<4 x i32> %X) nounwind {
define <4 x i32> @test_srem_odd_allones_ne(<4 x i32> %X) nounwind {
; CHECK-LABEL: test_srem_odd_allones_ne:
; CHECK: // %bb.0:
+; CHECK-NEXT: movi v1.16b, #153
; CHECK-NEXT: mov w8, #52429 // =0xcccd
-; CHECK-NEXT: mov w9, #39321 // =0x9999
; CHECK-NEXT: movk w8, #52428, lsl #16
-; CHECK-NEXT: movk w9, #6553, lsl #16
-; CHECK-NEXT: dup v1.4s, w8
-; CHECK-NEXT: dup v2.4s, w9
+; CHECK-NEXT: dup v2.4s, w8
; CHECK-NEXT: adrp x8, .LCPI2_0
-; CHECK-NEXT: mla v2.4s, v0.4s, v1.4s
+; CHECK-NEXT: fneg v1.4s, v1.4s
+; CHECK-NEXT: mla v1.4s, v0.4s, v2.4s
; CHECK-NEXT: ldr q0, [x8, :lo12:.LCPI2_0]
-; CHECK-NEXT: movi v1.4s, #1
-; CHECK-NEXT: cmhi v0.4s, v2.4s, v0.4s
-; CHECK-NEXT: and v0.16b, v0.16b, v1.16b
+; CHECK-NEXT: movi v2.4s, #1
+; CHECK-NEXT: cmhi v0.4s, v1.4s, v0.4s
+; CHECK-NEXT: and v0.16b, v0.16b, v2.16b
; CHECK-NEXT: ret
%srem = srem <4 x i32> %X, <i32 5, i32 5, i32 4294967295, i32 5>
%cmp = icmp ne <4 x i32> %srem, <i32 0, i32 0, i32 0, i32 0>
@@ -269,18 +267,17 @@ define <4 x i32> @test_srem_odd_even_poweroftwo(<4 x i32> %X) nounwind {
define <4 x i32> @test_srem_odd_one(<4 x i32> %X) nounwind {
; CHECK-LABEL: test_srem_odd_one:
; CHECK: // %bb.0:
+; CHECK-NEXT: movi v1.16b, #153
; CHECK-NEXT: mov w8, #52429 // =0xcccd
-; CHECK-NEXT: mov w9, #39321 // =0x9999
; CHECK-NEXT: movk w8, #52428, lsl #16
-; CHECK-NEXT: movk w9, #6553, lsl #16
-; CHECK-NEXT: dup v1.4s, w8
-; CHECK-NEXT: dup v2.4s, w9
+; CHECK-NEXT: dup v2.4s, w8
; CHECK-NEXT: adrp x8, .LCPI10_0
-; CHECK-NEXT: mla v2.4s, v0.4s, v1.4s
+; CHECK-NEXT: fneg v1.4s, v1.4s
+; CHECK-NEXT: mla v1.4s, v0.4s, v2.4s
; CHECK-NEXT: ldr q0, [x8, :lo12:.LCPI10_0]
-; CHECK-NEXT: movi v1.4s, #1
-; CHECK-NEXT: cmhs v0.4s, v0.4s, v2.4s
-; CHECK-NEXT: and v0.16b, v0.16b, v1.16b
+; CHECK-NEXT: movi v2.4s, #1
+; CHECK-NEXT: cmhs v0.4s, v0.4s, v1.4s
+; CHECK-NEXT: and v0.16b, v0.16b, v2.16b
; CHECK-NEXT: ret
%srem = srem <4 x i32> %X, <i32 5, i32 5, i32 1, i32 5>
%cmp = icmp eq <4 x i32> %srem, <i32 0, i32 0, i32 0, i32 0>
@@ -522,18 +519,17 @@ define <4 x i32> @test_srem_odd_even_allones_and_poweroftwo(<4 x i32> %X) nounwi
define <4 x i32> @test_srem_odd_allones_and_one(<4 x i32> %X) nounwind {
; CHECK-LABEL: test_srem_odd_allones_and_one:
; CHECK: // %bb.0:
+; CHECK-NEXT: movi v1.16b, #153
; CHECK-NEXT: mov w8, #52429 // =0xcccd
-; CHECK-NEXT: mov w9, #39321 // =0x9999
; CHECK-NEXT: movk w8, #52428, lsl #16
-; CHECK-NEXT: movk w9, #6553, lsl #16
-; CHECK-NEXT: dup v1.4s, w8
-; CHECK-NEXT: dup v2.4s, w9
+; CHECK-NEXT: dup v2.4s, w8
; CHECK-NEXT: adrp x8, .LCPI19_0
-; CHECK-NEXT: mla v2.4s, v0.4s, v1.4s
+; CHECK-NEXT: fneg v1.4s, v1.4s
+; CHECK-NEXT: mla v1.4s, v0.4s, v2.4s
; CHECK-NEXT: ldr q0, [x8, :lo12:.LCPI19_0]
-; CHECK-NEXT: movi v1.4s, #1
-; CHECK-NEXT: cmhs v0.4s, v0.4s, v2.4s
-; CHECK-NEXT: and v0.16b, v0.16b, v1.16b
+; CHECK-NEXT: movi v2.4s, #1
+; CHECK-NEXT: cmhs v0.4s, v0.4s, v1.4s
+; CHECK-NEXT: and v0.16b, v0.16b, v2.16b
; CHECK-NEXT: ret
%srem = srem <4 x i32> %X, <i32 5, i32 4294967295, i32 1, i32 5>
%cmp = icmp eq <4 x i32> %srem, <i32 0, i32 0, i32 0, i32 0>
diff --git a/llvm/test/CodeGen/AArch64/stack-probing-64k.ll b/llvm/test/CodeGen/AArch64/stack-probing-64k.ll
index 0d64e73..5f833e3 100644
--- a/llvm/test/CodeGen/AArch64/stack-probing-64k.ll
+++ b/llvm/test/CodeGen/AArch64/stack-probing-64k.ll
@@ -1,6 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple aarch64-none-eabi < %s -verify-machineinstrs -enable-post-misched=false | FileCheck %s
-; RUN: llc -mtriple aarch64-none-eabi < %s -verify-machineinstrs -enable-post-misched=false -global-isel | FileCheck %s
+; RUN: llc -mtriple=aarch64 < %s -verify-machineinstrs -enable-post-misched=false | FileCheck %s
+; RUN: llc -mtriple=aarch64 < %s -verify-machineinstrs -enable-post-misched=false -global-isel | FileCheck %s
; Tests for prolog sequences for stack probing, when using a 64KiB stack guard.
diff --git a/llvm/test/CodeGen/AArch64/stack-probing-dynamic.ll b/llvm/test/CodeGen/AArch64/stack-probing-dynamic.ll
index a3b8df4..d9ad104 100644
--- a/llvm/test/CodeGen/AArch64/stack-probing-dynamic.ll
+++ b/llvm/test/CodeGen/AArch64/stack-probing-dynamic.ll
@@ -1,6 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple aarch64-none-eabi < %s -verify-machineinstrs | FileCheck %s
-; RUN: llc -mtriple aarch64-none-eabi < %s -verify-machineinstrs -global-isel -global-isel-abort=2 | FileCheck %s
+; RUN: llc -mtriple=aarch64 < %s -verify-machineinstrs | FileCheck %s
+; RUN: llc -mtriple=aarch64 < %s -verify-machineinstrs -global-isel -global-isel-abort=2 | FileCheck %s
; Dynamically-sized allocation, needs a loop which can handle any size at
; runtime. The final iteration of the loop will temporarily put SP below the
diff --git a/llvm/test/CodeGen/AArch64/stack-probing-sve.ll b/llvm/test/CodeGen/AArch64/stack-probing-sve.ll
index 03a9220..1ad7870 100644
--- a/llvm/test/CodeGen/AArch64/stack-probing-sve.ll
+++ b/llvm/test/CodeGen/AArch64/stack-probing-sve.ll
@@ -1,6 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple aarch64-none-eabi < %s -verify-machineinstrs | FileCheck %s
-; RUN: llc -mtriple aarch64-none-eabi < %s -verify-machineinstrs -global-isel -global-isel-abort=2 | FileCheck %s
+; RUN: llc -mtriple=aarch64 < %s -verify-machineinstrs | FileCheck %s
+; RUN: llc -mtriple=aarch64 < %s -verify-machineinstrs -global-isel -global-isel-abort=2 | FileCheck %s
; Test prolog sequences for stack probing when SVE objects are involved.
diff --git a/llvm/test/CodeGen/AArch64/stack-probing.ll b/llvm/test/CodeGen/AArch64/stack-probing.ll
index df5408d..8fc90cf 100644
--- a/llvm/test/CodeGen/AArch64/stack-probing.ll
+++ b/llvm/test/CodeGen/AArch64/stack-probing.ll
@@ -1,6 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple aarch64-none-eabi < %s -verify-machineinstrs -enable-post-misched=false | FileCheck %s
-; RUN: llc -mtriple aarch64-none-eabi < %s -verify-machineinstrs -enable-post-misched=false -global-isel | FileCheck %s
+; RUN: llc -mtriple=aarch64 < %s -verify-machineinstrs -enable-post-misched=false | FileCheck %s
+; RUN: llc -mtriple=aarch64 < %s -verify-machineinstrs -enable-post-misched=false -global-isel | FileCheck %s
; Tests for prolog sequences for stack probing, when using a 4KiB stack guard.
diff --git a/llvm/test/CodeGen/AArch64/sve-fcopysign.ll b/llvm/test/CodeGen/AArch64/sve-fcopysign.ll
index d2315844..f158075 100644
--- a/llvm/test/CodeGen/AArch64/sve-fcopysign.ll
+++ b/llvm/test/CodeGen/AArch64/sve-fcopysign.ll
@@ -1,6 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple aarch64-eabi -mattr=+sve -o - | FileCheck --check-prefixes=CHECK,CHECK-NO-EXTEND-ROUND %s
-; RUN: llc < %s -mtriple aarch64-eabi -mattr=+sve --combiner-vector-fcopysign-extend-round -o - | FileCheck --check-prefixes=CHECK,CHECK-EXTEND-ROUND %s
+; RUN: llc < %s -mtriple=aarch64 -mattr=+sve -o - | FileCheck --check-prefixes=CHECK,CHECK-NO-EXTEND-ROUND %s
+; RUN: llc < %s -mtriple=aarch64 -mattr=+sve --combiner-vector-fcopysign-extend-round -o - | FileCheck --check-prefixes=CHECK,CHECK-EXTEND-ROUND %s
target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128"
;============ v2f32
diff --git a/llvm/test/CodeGen/AArch64/sve2-fcopysign.ll b/llvm/test/CodeGen/AArch64/sve2-fcopysign.ll
index 7f65997..14cc8cd 100644
--- a/llvm/test/CodeGen/AArch64/sve2-fcopysign.ll
+++ b/llvm/test/CodeGen/AArch64/sve2-fcopysign.ll
@@ -1,6 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple aarch64-eabi -mattr=+sve2 -o - | FileCheck --check-prefixes=CHECK,CHECK_NO_EXTEND_ROUND %s
-; RUN: llc < %s -mtriple aarch64-eabi -mattr=+sve2 --combiner-vector-fcopysign-extend-round -o - | FileCheck --check-prefixes=CHECK,CHECK_EXTEND_ROUND %s
+; RUN: llc < %s -mtriple=aarch64 -mattr=+sve2 -o - | FileCheck --check-prefixes=CHECK,CHECK_NO_EXTEND_ROUND %s
+; RUN: llc < %s -mtriple=aarch64 -mattr=+sve2 --combiner-vector-fcopysign-extend-round -o - | FileCheck --check-prefixes=CHECK,CHECK_EXTEND_ROUND %s
target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128"
diff --git a/llvm/test/CodeGen/AArch64/tailmerging_in_mbp.ll b/llvm/test/CodeGen/AArch64/tailmerging_in_mbp.ll
index 54c200e..6753807 100644
--- a/llvm/test/CodeGen/AArch64/tailmerging_in_mbp.ll
+++ b/llvm/test/CodeGen/AArch64/tailmerging_in_mbp.ll
@@ -1,4 +1,4 @@
-; RUN: llc <%s -mtriple=aarch64-eabi -verify-machine-dom-info | FileCheck %s
+; RUN: llc <%s -mtriple=aarch64 -verify-machine-dom-info | FileCheck %s
; CHECK-LABEL: test:
; CHECK-LABEL: %cond.false12.i
diff --git a/llvm/test/CodeGen/AArch64/tbz-tbnz.ll b/llvm/test/CodeGen/AArch64/tbz-tbnz.ll
index 1edea22..d301a38 100644
--- a/llvm/test/CodeGen/AArch64/tbz-tbnz.ll
+++ b/llvm/test/CodeGen/AArch64/tbz-tbnz.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -O1 -mtriple=aarch64-eabi -aarch64-enable-cond-br-tune=false | FileCheck %s
+; RUN: llc < %s -O1 -mtriple=aarch64 -aarch64-enable-cond-br-tune=false | FileCheck %s
declare void @t()
diff --git a/llvm/test/CodeGen/AArch64/urem-seteq-vec-nonzero.ll b/llvm/test/CodeGen/AArch64/urem-seteq-vec-nonzero.ll
index 478a34c..b31ce94 100644
--- a/llvm/test/CodeGen/AArch64/urem-seteq-vec-nonzero.ll
+++ b/llvm/test/CodeGen/AArch64/urem-seteq-vec-nonzero.ll
@@ -51,12 +51,11 @@ define <4 x i1> @t32_6_part0(<4 x i32> %X) nounwind {
; CHECK-NEXT: movk w8, #43690, lsl #16
; CHECK-NEXT: sub v0.4s, v0.4s, v1.4s
; CHECK-NEXT: dup v1.4s, w8
-; CHECK-NEXT: mov w8, #43690 // =0xaaaa
-; CHECK-NEXT: movk w8, #10922, lsl #16
; CHECK-NEXT: mul v0.4s, v0.4s, v1.4s
; CHECK-NEXT: shl v1.4s, v0.4s, #31
; CHECK-NEXT: usra v1.4s, v0.4s, #1
-; CHECK-NEXT: dup v0.4s, w8
+; CHECK-NEXT: movi v0.16b, #170
+; CHECK-NEXT: fneg v0.4s, v0.4s
; CHECK-NEXT: cmhs v0.4s, v0.4s, v1.4s
; CHECK-NEXT: xtn v0.4h, v0.4s
; CHECK-NEXT: ret
diff --git a/llvm/test/CodeGen/AArch64/urem-vector-lkk.ll b/llvm/test/CodeGen/AArch64/urem-vector-lkk.ll
index dc021bc..468a33c 100644
--- a/llvm/test/CodeGen/AArch64/urem-vector-lkk.ll
+++ b/llvm/test/CodeGen/AArch64/urem-vector-lkk.ll
@@ -10,18 +10,18 @@ define <4 x i16> @fold_urem_vec_1(<4 x i16> %x) {
; CHECK-NEXT: ldr d2, [x8, :lo12:.LCPI0_1]
; CHECK-NEXT: adrp x8, .LCPI0_2
; CHECK-NEXT: ushl v1.4h, v0.4h, v1.4h
-; CHECK-NEXT: ldr d3, [x8, :lo12:.LCPI0_2]
-; CHECK-NEXT: adrp x8, .LCPI0_3
; CHECK-NEXT: umull v1.4s, v1.4h, v2.4h
+; CHECK-NEXT: movi d2, #0000000000000000
; CHECK-NEXT: shrn v1.4h, v1.4s, #16
-; CHECK-NEXT: sub v2.4h, v0.4h, v1.4h
-; CHECK-NEXT: umull v2.4s, v2.4h, v3.4h
+; CHECK-NEXT: fneg d2, d2
+; CHECK-NEXT: sub v3.4h, v0.4h, v1.4h
+; CHECK-NEXT: umull v2.4s, v3.4h, v2.4h
; CHECK-NEXT: shrn v2.4h, v2.4s, #16
; CHECK-NEXT: add v1.4h, v2.4h, v1.4h
-; CHECK-NEXT: ldr d2, [x8, :lo12:.LCPI0_3]
-; CHECK-NEXT: adrp x8, .LCPI0_4
+; CHECK-NEXT: ldr d2, [x8, :lo12:.LCPI0_2]
+; CHECK-NEXT: adrp x8, .LCPI0_3
; CHECK-NEXT: ushl v1.4h, v1.4h, v2.4h
-; CHECK-NEXT: ldr d2, [x8, :lo12:.LCPI0_4]
+; CHECK-NEXT: ldr d2, [x8, :lo12:.LCPI0_3]
; CHECK-NEXT: mls v0.4h, v1.4h, v2.4h
; CHECK-NEXT: ret
%1 = urem <4 x i16> %x, <i16 95, i16 124, i16 98, i16 1003>
diff --git a/llvm/test/CodeGen/AArch64/v3f-to-int.ll b/llvm/test/CodeGen/AArch64/v3f-to-int.ll
index a3c9c8f..f6553b6 100644
--- a/llvm/test/CodeGen/AArch64/v3f-to-int.ll
+++ b/llvm/test/CodeGen/AArch64/v3f-to-int.ll
@@ -1,4 +1,4 @@
-; RUN: llc -mtriple=aarch64--linux-eabi %s -o - | FileCheck %s
+; RUN: llc -mtriple=aarch64 %s -o - | FileCheck %s
; CHECK-LABEL: convert_v3f32
; CHECK: strb
diff --git a/llvm/test/CodeGen/AArch64/v8.5a-neon-frint3264-intrinsic.ll b/llvm/test/CodeGen/AArch64/v8.5a-neon-frint3264-intrinsic.ll
index 1979d97..85187f1 100644
--- a/llvm/test/CodeGen/AArch64/v8.5a-neon-frint3264-intrinsic.ll
+++ b/llvm/test/CodeGen/AArch64/v8.5a-neon-frint3264-intrinsic.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -mtriple=aarch64-eabi -mattr=+v8.5a | FileCheck %s
+; RUN: llc < %s -mtriple=aarch64 -mattr=+v8.5a | FileCheck %s
declare <2 x float> @llvm.aarch64.neon.frint32x.v2f32(<2 x float>)
declare <4 x float> @llvm.aarch64.neon.frint32x.v4f32(<4 x float>)
diff --git a/llvm/test/CodeGen/AArch64/v8.5a-scalar-frint3264-intrinsic.ll b/llvm/test/CodeGen/AArch64/v8.5a-scalar-frint3264-intrinsic.ll
index b4a0f3d..728c13d 100644
--- a/llvm/test/CodeGen/AArch64/v8.5a-scalar-frint3264-intrinsic.ll
+++ b/llvm/test/CodeGen/AArch64/v8.5a-scalar-frint3264-intrinsic.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -mtriple=aarch64-eabi -mattr=+v8.5a | FileCheck %s
+; RUN: llc < %s -mtriple=aarch64 -mattr=+v8.5a | FileCheck %s
declare float @llvm.aarch64.frint32z.f32(float)
declare double @llvm.aarch64.frint32z.f64(double)
diff --git a/llvm/test/CodeGen/AArch64/vecreduce-fadd-strict.ll b/llvm/test/CodeGen/AArch64/vecreduce-fadd-strict.ll
index de95943..1164e02 100644
--- a/llvm/test/CodeGen/AArch64/vecreduce-fadd-strict.ll
+++ b/llvm/test/CodeGen/AArch64/vecreduce-fadd-strict.ll
@@ -1,8 +1,8 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=aarch64-none-eabi -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-SD,CHECK-SD-NOFP16
-; RUN: llc -mtriple=aarch64-none-eabi -mattr=+fullfp16 -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-SD,CHECK-SD-FP16
-; RUN: llc -mtriple=aarch64-none-eabi -global-isel -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-GI,CHECK-GI-NOFP16
-; RUN: llc -mtriple=aarch64-none-eabi -mattr=+fullfp16 -global-isel -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-GI,CHECK-GI-FP16
+; RUN: llc -mtriple=aarch64 -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-SD,CHECK-SD-NOFP16
+; RUN: llc -mtriple=aarch64 -mattr=+fullfp16 -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-SD,CHECK-SD-FP16
+; RUN: llc -mtriple=aarch64 -global-isel -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-GI,CHECK-GI-NOFP16
+; RUN: llc -mtriple=aarch64 -mattr=+fullfp16 -global-isel -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-GI,CHECK-GI-FP16
define float @add_HalfS(<2 x float> %bin.rdx) {
; CHECK-SD-LABEL: add_HalfS:
diff --git a/llvm/test/CodeGen/AArch64/vecreduce-fadd.ll b/llvm/test/CodeGen/AArch64/vecreduce-fadd.ll
index aaba379..99c6808 100644
--- a/llvm/test/CodeGen/AArch64/vecreduce-fadd.ll
+++ b/llvm/test/CodeGen/AArch64/vecreduce-fadd.ll
@@ -1,8 +1,8 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=aarch64-none-eabi -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-SD,CHECK-SD-NOFP16
-; RUN: llc -mtriple=aarch64-none-eabi -mattr=+fullfp16 -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-SD,CHECK-SD-FP16
-; RUN: llc -mtriple=aarch64-none-eabi -global-isel -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-GI,CHECK-GI-NOFP16
-; RUN: llc -mtriple=aarch64-none-eabi -mattr=+fullfp16 -global-isel -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-GI,CHECK-GI-FP16
+; RUN: llc -mtriple=aarch64 -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-SD,CHECK-SD-NOFP16
+; RUN: llc -mtriple=aarch64 -mattr=+fullfp16 -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-SD,CHECK-SD-FP16
+; RUN: llc -mtriple=aarch64 -global-isel -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-GI,CHECK-GI-NOFP16
+; RUN: llc -mtriple=aarch64 -mattr=+fullfp16 -global-isel -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-GI,CHECK-GI-FP16
define float @add_HalfS(<2 x float> %bin.rdx) {
; CHECK-LABEL: add_HalfS:
diff --git a/llvm/test/CodeGen/AArch64/vecreduce-fmul-strict.ll b/llvm/test/CodeGen/AArch64/vecreduce-fmul-strict.ll
index 7b93e60..e1b2170 100644
--- a/llvm/test/CodeGen/AArch64/vecreduce-fmul-strict.ll
+++ b/llvm/test/CodeGen/AArch64/vecreduce-fmul-strict.ll
@@ -1,8 +1,8 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=aarch64-none-eabi -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-SD,CHECK-SD-NOFP16
-; RUN: llc -mtriple=aarch64-none-eabi -mattr=+fullfp16 -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-SD,CHECK-SD-FP16
-; RUN: llc -mtriple=aarch64-none-eabi -global-isel -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-GI,CHECK-GI-NOFP16
-; RUN: llc -mtriple=aarch64-none-eabi -mattr=+fullfp16 -global-isel -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-GI,CHECK-GI-FP16
+; RUN: llc -mtriple=aarch64 -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-SD,CHECK-SD-NOFP16
+; RUN: llc -mtriple=aarch64 -mattr=+fullfp16 -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-SD,CHECK-SD-FP16
+; RUN: llc -mtriple=aarch64 -global-isel -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-GI,CHECK-GI-NOFP16
+; RUN: llc -mtriple=aarch64 -mattr=+fullfp16 -global-isel -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-GI,CHECK-GI-FP16
define float @mul_HalfS(<2 x float> %bin.rdx) {
; CHECK-SD-LABEL: mul_HalfS:
diff --git a/llvm/test/CodeGen/AArch64/vecreduce-fmul.ll b/llvm/test/CodeGen/AArch64/vecreduce-fmul.ll
index 67b4ebb..e85384e 100644
--- a/llvm/test/CodeGen/AArch64/vecreduce-fmul.ll
+++ b/llvm/test/CodeGen/AArch64/vecreduce-fmul.ll
@@ -1,8 +1,8 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=aarch64-none-eabi -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-SD,CHECK-SD-NOFP16
-; RUN: llc -mtriple=aarch64-none-eabi -mattr=+fullfp16 -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-SD,CHECK-SD-FP16
-; RUN: llc -mtriple=aarch64-none-eabi -global-isel -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-GI,CHECK-GI-NOFP16
-; RUN: llc -mtriple=aarch64-none-eabi -mattr=+fullfp16 -global-isel -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-GI,CHECK-GI-FP16
+; RUN: llc -mtriple=aarch64 -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-SD,CHECK-SD-NOFP16
+; RUN: llc -mtriple=aarch64 -mattr=+fullfp16 -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-SD,CHECK-SD-FP16
+; RUN: llc -mtriple=aarch64 -global-isel -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-GI,CHECK-GI-NOFP16
+; RUN: llc -mtriple=aarch64 -mattr=+fullfp16 -global-isel -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-GI,CHECK-GI-FP16
define float @mul_HalfS(<2 x float> %bin.rdx) {
; CHECK-SD-LABEL: mul_HalfS:
diff --git a/llvm/test/CodeGen/AArch64/xar.ll b/llvm/test/CodeGen/AArch64/xar.ll
index c602837..d050eaf 100644
--- a/llvm/test/CodeGen/AArch64/xar.ll
+++ b/llvm/test/CodeGen/AArch64/xar.ll
@@ -1,6 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
-; RUN: llc -mtriple=aarch64-none-eabi -mattr=+sha3 < %s | FileCheck --check-prefix=SHA3 %s
-; RUN: llc -mtriple=aarch64-none-eabi -mattr=-sha3 < %s | FileCheck --check-prefix=NOSHA3 %s
+; RUN: llc -mtriple=aarch64 -mattr=+sha3 < %s | FileCheck --check-prefix=SHA3 %s
+; RUN: llc -mtriple=aarch64 -mattr=-sha3 < %s | FileCheck --check-prefix=NOSHA3 %s
define <2 x i64> @xar(<2 x i64> %x, <2 x i64> %y) {
; SHA3-LABEL: xar:
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/atomicrmw_udec_wrap.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/atomicrmw_udec_wrap.ll
index 25cee87..b04bc04 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/atomicrmw_udec_wrap.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/atomicrmw_udec_wrap.ll
@@ -338,8 +338,8 @@ define amdgpu_kernel void @global_atomic_dec_ret_i32(ptr addrspace(1) %out, ptr
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: global_atomic_dec v0, v1, v0, s[2:3] glc
; GFX10-NEXT: s_waitcnt vmcnt(0)
-; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: buffer_gl1_inv
+; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: global_store_dword v1, v0, s[0:1]
; GFX10-NEXT: s_endpgm
;
@@ -350,8 +350,8 @@ define amdgpu_kernel void @global_atomic_dec_ret_i32(ptr addrspace(1) %out, ptr
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-NEXT: global_atomic_dec_u32 v0, v1, v0, s[2:3] glc
; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: global_store_b32 v1, v0, s[0:1]
; GFX11-NEXT: s_nop 0
; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -416,8 +416,8 @@ define amdgpu_kernel void @global_atomic_dec_ret_i32_offset(ptr addrspace(1) %ou
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: global_atomic_dec v0, v1, v0, s[2:3] offset:16 glc
; GFX10-NEXT: s_waitcnt vmcnt(0)
-; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: buffer_gl1_inv
+; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: global_store_dword v1, v0, s[0:1]
; GFX10-NEXT: s_endpgm
;
@@ -428,8 +428,8 @@ define amdgpu_kernel void @global_atomic_dec_ret_i32_offset(ptr addrspace(1) %ou
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-NEXT: global_atomic_dec_u32 v0, v1, v0, s[2:3] offset:16 glc
; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: global_store_b32 v1, v0, s[0:1]
; GFX11-NEXT: s_nop 0
; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -495,8 +495,8 @@ define amdgpu_kernel void @global_atomic_dec_ret_i32_offset_system(ptr addrspace
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: global_atomic_dec v0, v1, v0, s[2:3] offset:16 glc
; GFX10-NEXT: s_waitcnt vmcnt(0)
-; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: buffer_gl1_inv
+; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: global_store_dword v1, v0, s[0:1]
; GFX10-NEXT: s_endpgm
;
@@ -507,8 +507,8 @@ define amdgpu_kernel void @global_atomic_dec_ret_i32_offset_system(ptr addrspace
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-NEXT: global_atomic_dec_u32 v0, v1, v0, s[2:3] offset:16 glc
; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: global_store_b32 v1, v0, s[0:1]
; GFX11-NEXT: s_nop 0
; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -563,8 +563,8 @@ define amdgpu_kernel void @global_atomic_dec_noret_i32(ptr addrspace(1) %ptr) #1
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: global_atomic_dec v1, v0, s[0:1]
; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: buffer_gl1_inv
+; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: s_endpgm
;
; GFX11-LABEL: global_atomic_dec_noret_i32:
@@ -574,8 +574,8 @@ define amdgpu_kernel void @global_atomic_dec_noret_i32(ptr addrspace(1) %ptr) #1
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-NEXT: global_atomic_dec_u32 v1, v0, s[0:1]
; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: s_endpgm
%result = atomicrmw udec_wrap ptr addrspace(1) %ptr, i32 42 syncscope("agent") seq_cst, align 4
ret void
@@ -629,8 +629,8 @@ define amdgpu_kernel void @global_atomic_dec_noret_i32_offset(ptr addrspace(1) %
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: global_atomic_dec v1, v0, s[0:1] offset:16
; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: buffer_gl1_inv
+; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: s_endpgm
;
; GFX11-LABEL: global_atomic_dec_noret_i32_offset:
@@ -640,8 +640,8 @@ define amdgpu_kernel void @global_atomic_dec_noret_i32_offset(ptr addrspace(1) %
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-NEXT: global_atomic_dec_u32 v1, v0, s[0:1] offset:16
; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: s_endpgm
%gep = getelementptr i32, ptr addrspace(1) %ptr, i32 4
%result = atomicrmw udec_wrap ptr addrspace(1) %gep, i32 42 syncscope("agent") seq_cst, align 4
@@ -696,8 +696,8 @@ define amdgpu_kernel void @global_atomic_dec_noret_i32_offset_system(ptr addrspa
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: global_atomic_dec v1, v0, s[0:1] offset:16
; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: buffer_gl1_inv
+; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: s_endpgm
;
; GFX11-LABEL: global_atomic_dec_noret_i32_offset_system:
@@ -707,8 +707,8 @@ define amdgpu_kernel void @global_atomic_dec_noret_i32_offset_system(ptr addrspa
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-NEXT: global_atomic_dec_u32 v1, v0, s[0:1] offset:16
; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: s_endpgm
%gep = getelementptr i32, ptr addrspace(1) %ptr, i32 4
%result = atomicrmw udec_wrap ptr addrspace(1) %gep, i32 42 seq_cst, align 4
@@ -780,8 +780,8 @@ define amdgpu_kernel void @global_atomic_dec_ret_i32_offset_addr64(ptr addrspace
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: global_atomic_dec v1, v0, v1, s[2:3] offset:20 glc
; GFX10-NEXT: s_waitcnt vmcnt(0)
-; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: buffer_gl1_inv
+; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: global_store_dword v0, v1, s[0:1]
; GFX10-NEXT: s_endpgm
;
@@ -792,8 +792,8 @@ define amdgpu_kernel void @global_atomic_dec_ret_i32_offset_addr64(ptr addrspace
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-NEXT: global_atomic_dec_u32 v1, v0, v1, s[2:3] offset:20 glc
; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: global_store_b32 v0, v1, s[0:1]
; GFX11-NEXT: s_nop 0
; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -861,8 +861,8 @@ define amdgpu_kernel void @global_atomic_dec_noret_i32_offset_addr64(ptr addrspa
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: global_atomic_dec v0, v1, s[0:1] offset:20
; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: buffer_gl1_inv
+; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: s_endpgm
;
; GFX11-LABEL: global_atomic_dec_noret_i32_offset_addr64:
@@ -872,8 +872,8 @@ define amdgpu_kernel void @global_atomic_dec_noret_i32_offset_addr64(ptr addrspa
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-NEXT: global_atomic_dec_u32 v0, v1, s[0:1] offset:20
; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: s_endpgm
%id = call i32 @llvm.amdgcn.workitem.id.x()
%gep.tid = getelementptr i32, ptr addrspace(1) %ptr, i32 %id
@@ -937,8 +937,8 @@ define amdgpu_kernel void @flat_atomic_dec_ret_i32(ptr %out, ptr %ptr) #1 {
; GFX10-NEXT: v_mov_b32_e32 v1, s3
; GFX10-NEXT: flat_atomic_dec v2, v[0:1], v2 glc
; GFX10-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: buffer_gl1_inv
+; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: v_mov_b32_e32 v0, s0
; GFX10-NEXT: v_mov_b32_e32 v1, s1
; GFX10-NEXT: flat_store_dword v[0:1], v2
@@ -952,8 +952,8 @@ define amdgpu_kernel void @flat_atomic_dec_ret_i32(ptr %out, ptr %ptr) #1 {
; GFX11-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
; GFX11-NEXT: flat_atomic_dec_u32 v2, v[0:1], v2 glc
; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: flat_store_b32 v[0:1], v2
; GFX11-NEXT: s_endpgm
@@ -1023,8 +1023,8 @@ define amdgpu_kernel void @flat_atomic_dec_ret_i32_offset(ptr %out, ptr %ptr) #1
; GFX10-NEXT: v_mov_b32_e32 v1, s3
; GFX10-NEXT: flat_atomic_dec v2, v[0:1], v2 glc
; GFX10-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: buffer_gl1_inv
+; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: v_mov_b32_e32 v0, s0
; GFX10-NEXT: v_mov_b32_e32 v1, s1
; GFX10-NEXT: flat_store_dword v[0:1], v2
@@ -1038,8 +1038,8 @@ define amdgpu_kernel void @flat_atomic_dec_ret_i32_offset(ptr %out, ptr %ptr) #1
; GFX11-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
; GFX11-NEXT: flat_atomic_dec_u32 v2, v[0:1], v2 offset:16 glc
; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: flat_store_b32 v[0:1], v2
; GFX11-NEXT: s_endpgm
@@ -1110,8 +1110,8 @@ define amdgpu_kernel void @flat_atomic_dec_ret_i32_offset_system(ptr %out, ptr %
; GFX10-NEXT: v_mov_b32_e32 v1, s3
; GFX10-NEXT: flat_atomic_dec v2, v[0:1], v2 glc
; GFX10-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: buffer_gl1_inv
+; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: v_mov_b32_e32 v0, s0
; GFX10-NEXT: v_mov_b32_e32 v1, s1
; GFX10-NEXT: flat_store_dword v[0:1], v2
@@ -1125,8 +1125,8 @@ define amdgpu_kernel void @flat_atomic_dec_ret_i32_offset_system(ptr %out, ptr %
; GFX11-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
; GFX11-NEXT: flat_atomic_dec_u32 v2, v[0:1], v2 offset:16 glc
; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: flat_store_b32 v[0:1], v2
; GFX11-NEXT: s_endpgm
@@ -1183,8 +1183,8 @@ define amdgpu_kernel void @flat_atomic_dec_noret_i32(ptr %ptr) #1 {
; GFX10-NEXT: flat_atomic_dec v[0:1], v2
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: buffer_gl1_inv
+; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: s_endpgm
;
; GFX11-LABEL: flat_atomic_dec_noret_i32:
@@ -1196,8 +1196,8 @@ define amdgpu_kernel void @flat_atomic_dec_noret_i32(ptr %ptr) #1 {
; GFX11-NEXT: flat_atomic_dec_u32 v[0:1], v2
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: s_endpgm
%result = atomicrmw udec_wrap ptr %ptr, i32 42 syncscope("agent") seq_cst, align 4
ret void
@@ -1256,8 +1256,8 @@ define amdgpu_kernel void @flat_atomic_dec_noret_i32_offset(ptr %ptr) #1 {
; GFX10-NEXT: flat_atomic_dec v[0:1], v2
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: buffer_gl1_inv
+; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: s_endpgm
;
; GFX11-LABEL: flat_atomic_dec_noret_i32_offset:
@@ -1269,8 +1269,8 @@ define amdgpu_kernel void @flat_atomic_dec_noret_i32_offset(ptr %ptr) #1 {
; GFX11-NEXT: flat_atomic_dec_u32 v[0:1], v2 offset:16
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: s_endpgm
%gep = getelementptr i32, ptr %ptr, i32 4
%result = atomicrmw udec_wrap ptr %gep, i32 42 syncscope("agent") seq_cst, align 4
@@ -1330,8 +1330,8 @@ define amdgpu_kernel void @flat_atomic_dec_noret_i32_offset_system(ptr %ptr) #1
; GFX10-NEXT: flat_atomic_dec v[0:1], v2
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: buffer_gl1_inv
+; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: s_endpgm
;
; GFX11-LABEL: flat_atomic_dec_noret_i32_offset_system:
@@ -1343,8 +1343,8 @@ define amdgpu_kernel void @flat_atomic_dec_noret_i32_offset_system(ptr %ptr) #1
; GFX11-NEXT: flat_atomic_dec_u32 v[0:1], v2 offset:16
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: s_endpgm
%gep = getelementptr i32, ptr %ptr, i32 4
%result = atomicrmw udec_wrap ptr %gep, i32 42 seq_cst, align 4
@@ -1430,8 +1430,8 @@ define amdgpu_kernel void @flat_atomic_dec_ret_i32_offset_addr64(ptr %out, ptr %
; GFX10-NEXT: v_add_co_ci_u32_e32 v1, vcc_lo, 0, v1, vcc_lo
; GFX10-NEXT: flat_atomic_dec v3, v[0:1], v3 glc
; GFX10-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: buffer_gl1_inv
+; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: v_mov_b32_e32 v0, s0
; GFX10-NEXT: v_mov_b32_e32 v1, s1
; GFX10-NEXT: v_add_co_u32 v0, vcc_lo, v0, v2
@@ -1450,8 +1450,8 @@ define amdgpu_kernel void @flat_atomic_dec_ret_i32_offset_addr64(ptr %out, ptr %
; GFX11-NEXT: v_add_co_ci_u32_e32 v1, vcc_lo, 0, v1, vcc_lo
; GFX11-NEXT: flat_atomic_dec_u32 v3, v[0:1], v3 offset:20 glc
; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
; GFX11-NEXT: v_add_co_u32 v0, vcc_lo, v0, v2
@@ -1532,8 +1532,8 @@ define amdgpu_kernel void @flat_atomic_dec_noret_i32_offset_addr64(ptr %ptr) #1
; GFX10-NEXT: flat_atomic_dec v[0:1], v2
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: buffer_gl1_inv
+; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: s_endpgm
;
; GFX11-LABEL: flat_atomic_dec_noret_i32_offset_addr64:
@@ -1549,8 +1549,8 @@ define amdgpu_kernel void @flat_atomic_dec_noret_i32_offset_addr64(ptr %ptr) #1
; GFX11-NEXT: flat_atomic_dec_u32 v[0:1], v2 offset:20
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: s_endpgm
%id = call i32 @llvm.amdgcn.workitem.id.x()
%gep.tid = getelementptr i32, ptr %ptr, i32 %id
@@ -1628,8 +1628,8 @@ define amdgpu_kernel void @flat_atomic_dec_ret_i64(ptr %out, ptr %ptr) #1 {
; GFX10-NEXT: v_mov_b32_e32 v3, s3
; GFX10-NEXT: flat_atomic_dec_x2 v[0:1], v[2:3], v[0:1] glc
; GFX10-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: buffer_gl1_inv
+; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: v_mov_b32_e32 v3, s1
; GFX10-NEXT: v_mov_b32_e32 v2, s0
; GFX10-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
@@ -1644,8 +1644,8 @@ define amdgpu_kernel void @flat_atomic_dec_ret_i64(ptr %out, ptr %ptr) #1 {
; GFX11-NEXT: v_mov_b32_e32 v3, s3
; GFX11-NEXT: flat_atomic_dec_u64 v[0:1], v[2:3], v[0:1] glc
; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: v_dual_mov_b32 v3, s1 :: v_dual_mov_b32 v2, s0
; GFX11-NEXT: flat_store_b64 v[2:3], v[0:1]
; GFX11-NEXT: s_endpgm
@@ -1729,8 +1729,8 @@ define amdgpu_kernel void @flat_atomic_dec_ret_i64_offset(ptr %out, ptr %ptr) #1
; GFX10-NEXT: v_mov_b32_e32 v3, s3
; GFX10-NEXT: flat_atomic_dec_x2 v[0:1], v[2:3], v[0:1] glc
; GFX10-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: buffer_gl1_inv
+; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: v_mov_b32_e32 v3, s1
; GFX10-NEXT: v_mov_b32_e32 v2, s0
; GFX10-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
@@ -1745,8 +1745,8 @@ define amdgpu_kernel void @flat_atomic_dec_ret_i64_offset(ptr %out, ptr %ptr) #1
; GFX11-NEXT: v_mov_b32_e32 v3, s3
; GFX11-NEXT: flat_atomic_dec_u64 v[0:1], v[2:3], v[0:1] offset:32 glc
; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: v_dual_mov_b32 v3, s1 :: v_dual_mov_b32 v2, s0
; GFX11-NEXT: flat_store_b64 v[2:3], v[0:1]
; GFX11-NEXT: s_endpgm
@@ -1807,8 +1807,8 @@ define amdgpu_kernel void @flat_atomic_dec_noret_i64(ptr %ptr) #1 {
; GFX10-NEXT: flat_atomic_dec_x2 v[2:3], v[0:1]
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: buffer_gl1_inv
+; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: s_endpgm
;
; GFX11-LABEL: flat_atomic_dec_noret_i64:
@@ -1821,8 +1821,8 @@ define amdgpu_kernel void @flat_atomic_dec_noret_i64(ptr %ptr) #1 {
; GFX11-NEXT: flat_atomic_dec_u64 v[2:3], v[0:1]
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: s_endpgm
%result = atomicrmw udec_wrap ptr %ptr, i64 42 syncscope("agent") seq_cst, align 8
ret void
@@ -1885,8 +1885,8 @@ define amdgpu_kernel void @flat_atomic_dec_noret_i64_offset(ptr %ptr) #1 {
; GFX10-NEXT: flat_atomic_dec_x2 v[2:3], v[0:1]
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: buffer_gl1_inv
+; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: s_endpgm
;
; GFX11-LABEL: flat_atomic_dec_noret_i64_offset:
@@ -1899,8 +1899,8 @@ define amdgpu_kernel void @flat_atomic_dec_noret_i64_offset(ptr %ptr) #1 {
; GFX11-NEXT: flat_atomic_dec_u64 v[2:3], v[0:1] offset:32
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: s_endpgm
%gep = getelementptr i64, ptr %ptr, i32 4
%result = atomicrmw udec_wrap ptr %gep, i64 42 syncscope("agent") seq_cst, align 8
@@ -1964,8 +1964,8 @@ define amdgpu_kernel void @flat_atomic_dec_noret_i64_offset_system(ptr %ptr) #1
; GFX10-NEXT: flat_atomic_dec_x2 v[2:3], v[0:1]
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: buffer_gl1_inv
+; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: s_endpgm
;
; GFX11-LABEL: flat_atomic_dec_noret_i64_offset_system:
@@ -1978,8 +1978,8 @@ define amdgpu_kernel void @flat_atomic_dec_noret_i64_offset_system(ptr %ptr) #1
; GFX11-NEXT: flat_atomic_dec_u64 v[2:3], v[0:1] offset:32
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: s_endpgm
%gep = getelementptr i64, ptr %ptr, i32 4
%result = atomicrmw udec_wrap ptr %gep, i64 42 seq_cst, align 8
@@ -2075,8 +2075,8 @@ define amdgpu_kernel void @flat_atomic_dec_ret_i64_offset_addr64(ptr %out, ptr %
; GFX10-NEXT: v_add_co_ci_u32_e32 v3, vcc_lo, 0, v3, vcc_lo
; GFX10-NEXT: flat_atomic_dec_x2 v[0:1], v[2:3], v[0:1] glc
; GFX10-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: buffer_gl1_inv
+; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: v_mov_b32_e32 v3, s1
; GFX10-NEXT: v_mov_b32_e32 v2, s0
; GFX10-NEXT: v_add_co_u32 v2, vcc_lo, v2, v4
@@ -2097,8 +2097,8 @@ define amdgpu_kernel void @flat_atomic_dec_ret_i64_offset_addr64(ptr %out, ptr %
; GFX11-NEXT: v_add_co_ci_u32_e32 v1, vcc_lo, 0, v1, vcc_lo
; GFX11-NEXT: flat_atomic_dec_u64 v[0:1], v[0:1], v[2:3] offset:40 glc
; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: v_dual_mov_b32 v3, s1 :: v_dual_mov_b32 v2, s0
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
; GFX11-NEXT: v_add_co_u32 v2, vcc_lo, v2, v4
@@ -2183,8 +2183,8 @@ define amdgpu_kernel void @flat_atomic_dec_noret_i64_offset_addr64(ptr %ptr) #1
; GFX10-NEXT: flat_atomic_dec_x2 v[2:3], v[0:1]
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: buffer_gl1_inv
+; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: s_endpgm
;
; GFX11-LABEL: flat_atomic_dec_noret_i64_offset_addr64:
@@ -2201,8 +2201,8 @@ define amdgpu_kernel void @flat_atomic_dec_noret_i64_offset_addr64(ptr %ptr) #1
; GFX11-NEXT: flat_atomic_dec_u64 v[0:1], v[2:3] offset:40
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: s_endpgm
%id = call i32 @llvm.amdgcn.workitem.id.x()
%gep.tid = getelementptr i64, ptr %ptr, i32 %id
@@ -2651,8 +2651,8 @@ define amdgpu_kernel void @global_atomic_dec_ret_i64(ptr addrspace(1) %out, ptr
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: global_atomic_dec_x2 v[0:1], v2, v[0:1], s[2:3] glc
; GFX10-NEXT: s_waitcnt vmcnt(0)
-; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: buffer_gl1_inv
+; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
; GFX10-NEXT: s_endpgm
;
@@ -2664,8 +2664,8 @@ define amdgpu_kernel void @global_atomic_dec_ret_i64(ptr addrspace(1) %out, ptr
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-NEXT: global_atomic_dec_u64 v[0:1], v2, v[0:1], s[2:3] glc
; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: global_store_b64 v2, v[0:1], s[0:1]
; GFX11-NEXT: s_nop 0
; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -2734,8 +2734,8 @@ define amdgpu_kernel void @global_atomic_dec_ret_i64_offset(ptr addrspace(1) %ou
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: global_atomic_dec_x2 v[0:1], v2, v[0:1], s[2:3] offset:32 glc
; GFX10-NEXT: s_waitcnt vmcnt(0)
-; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: buffer_gl1_inv
+; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
; GFX10-NEXT: s_endpgm
;
@@ -2747,8 +2747,8 @@ define amdgpu_kernel void @global_atomic_dec_ret_i64_offset(ptr addrspace(1) %ou
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-NEXT: global_atomic_dec_u64 v[0:1], v2, v[0:1], s[2:3] offset:32 glc
; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: global_store_b64 v2, v[0:1], s[0:1]
; GFX11-NEXT: s_nop 0
; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -2818,8 +2818,8 @@ define amdgpu_kernel void @global_atomic_dec_ret_i64_offset_system(ptr addrspace
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: global_atomic_dec_x2 v[0:1], v2, v[0:1], s[2:3] offset:32 glc
; GFX10-NEXT: s_waitcnt vmcnt(0)
-; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: buffer_gl1_inv
+; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
; GFX10-NEXT: s_endpgm
;
@@ -2831,8 +2831,8 @@ define amdgpu_kernel void @global_atomic_dec_ret_i64_offset_system(ptr addrspace
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-NEXT: global_atomic_dec_u64 v[0:1], v2, v[0:1], s[2:3] offset:32 glc
; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: global_store_b64 v2, v[0:1], s[0:1]
; GFX11-NEXT: s_nop 0
; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -2891,8 +2891,8 @@ define amdgpu_kernel void @global_atomic_dec_noret_i64(ptr addrspace(1) %ptr) #1
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: global_atomic_dec_x2 v2, v[0:1], s[0:1]
; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: buffer_gl1_inv
+; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: s_endpgm
;
; GFX11-LABEL: global_atomic_dec_noret_i64:
@@ -2903,8 +2903,8 @@ define amdgpu_kernel void @global_atomic_dec_noret_i64(ptr addrspace(1) %ptr) #1
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-NEXT: global_atomic_dec_u64 v2, v[0:1], s[0:1]
; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: s_endpgm
%result = atomicrmw udec_wrap ptr addrspace(1) %ptr, i64 42 syncscope("agent") seq_cst, align 8
ret void
@@ -2962,8 +2962,8 @@ define amdgpu_kernel void @global_atomic_dec_noret_i64_offset(ptr addrspace(1) %
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: global_atomic_dec_x2 v2, v[0:1], s[0:1] offset:32
; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: buffer_gl1_inv
+; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: s_endpgm
;
; GFX11-LABEL: global_atomic_dec_noret_i64_offset:
@@ -2974,8 +2974,8 @@ define amdgpu_kernel void @global_atomic_dec_noret_i64_offset(ptr addrspace(1) %
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-NEXT: global_atomic_dec_u64 v2, v[0:1], s[0:1] offset:32
; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: s_endpgm
%gep = getelementptr i64, ptr addrspace(1) %ptr, i32 4
%result = atomicrmw udec_wrap ptr addrspace(1) %gep, i64 42 syncscope("agent") seq_cst, align 8
@@ -3034,8 +3034,8 @@ define amdgpu_kernel void @global_atomic_dec_noret_i64_offset_system(ptr addrspa
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: global_atomic_dec_x2 v2, v[0:1], s[0:1] offset:32
; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: buffer_gl1_inv
+; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: s_endpgm
;
; GFX11-LABEL: global_atomic_dec_noret_i64_offset_system:
@@ -3046,8 +3046,8 @@ define amdgpu_kernel void @global_atomic_dec_noret_i64_offset_system(ptr addrspa
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-NEXT: global_atomic_dec_u64 v2, v[0:1], s[0:1] offset:32
; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: s_endpgm
%gep = getelementptr i64, ptr addrspace(1) %ptr, i32 4
%result = atomicrmw udec_wrap ptr addrspace(1) %gep, i64 42 seq_cst, align 8
@@ -3123,8 +3123,8 @@ define amdgpu_kernel void @global_atomic_dec_ret_i64_offset_addr64(ptr addrspace
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: global_atomic_dec_x2 v[0:1], v3, v[1:2], s[2:3] offset:40 glc
; GFX10-NEXT: s_waitcnt vmcnt(0)
-; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: buffer_gl1_inv
+; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: global_store_dwordx2 v3, v[0:1], s[0:1]
; GFX10-NEXT: s_endpgm
;
@@ -3136,8 +3136,8 @@ define amdgpu_kernel void @global_atomic_dec_ret_i64_offset_addr64(ptr addrspace
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-NEXT: global_atomic_dec_u64 v[0:1], v3, v[1:2], s[2:3] offset:40 glc
; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: global_store_b64 v3, v[0:1], s[0:1]
; GFX11-NEXT: s_nop 0
; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -3209,8 +3209,8 @@ define amdgpu_kernel void @global_atomic_dec_noret_i64_offset_addr64(ptr addrspa
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: global_atomic_dec_x2 v0, v[1:2], s[0:1] offset:40
; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: buffer_gl1_inv
+; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: s_endpgm
;
; GFX11-LABEL: global_atomic_dec_noret_i64_offset_addr64:
@@ -3221,8 +3221,8 @@ define amdgpu_kernel void @global_atomic_dec_noret_i64_offset_addr64(ptr addrspa
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-NEXT: global_atomic_dec_u64 v0, v[1:2], s[0:1] offset:40
; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: s_endpgm
%id = call i32 @llvm.amdgcn.workitem.id.x()
%gep.tid = getelementptr i64, ptr addrspace(1) %ptr, i32 %id
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/atomicrmw_uinc_wrap.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/atomicrmw_uinc_wrap.ll
index 5e01337..f6a997f 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/atomicrmw_uinc_wrap.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/atomicrmw_uinc_wrap.ll
@@ -338,8 +338,8 @@ define amdgpu_kernel void @global_atomic_inc_ret_i32(ptr addrspace(1) %out, ptr
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: global_atomic_inc v0, v1, v0, s[2:3] glc
; GFX10-NEXT: s_waitcnt vmcnt(0)
-; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: buffer_gl1_inv
+; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: global_store_dword v1, v0, s[0:1]
; GFX10-NEXT: s_endpgm
;
@@ -350,8 +350,8 @@ define amdgpu_kernel void @global_atomic_inc_ret_i32(ptr addrspace(1) %out, ptr
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-NEXT: global_atomic_inc_u32 v0, v1, v0, s[2:3] glc
; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: global_store_b32 v1, v0, s[0:1]
; GFX11-NEXT: s_nop 0
; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -416,8 +416,8 @@ define amdgpu_kernel void @global_atomic_inc_ret_i32_offset(ptr addrspace(1) %ou
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: global_atomic_inc v0, v1, v0, s[2:3] offset:16 glc
; GFX10-NEXT: s_waitcnt vmcnt(0)
-; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: buffer_gl1_inv
+; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: global_store_dword v1, v0, s[0:1]
; GFX10-NEXT: s_endpgm
;
@@ -428,8 +428,8 @@ define amdgpu_kernel void @global_atomic_inc_ret_i32_offset(ptr addrspace(1) %ou
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-NEXT: global_atomic_inc_u32 v0, v1, v0, s[2:3] offset:16 glc
; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: global_store_b32 v1, v0, s[0:1]
; GFX11-NEXT: s_nop 0
; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -495,8 +495,8 @@ define amdgpu_kernel void @global_atomic_inc_ret_i32_offset_sistem(ptr addrspace
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: global_atomic_inc v0, v1, v0, s[2:3] offset:16 glc
; GFX10-NEXT: s_waitcnt vmcnt(0)
-; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: buffer_gl1_inv
+; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: global_store_dword v1, v0, s[0:1]
; GFX10-NEXT: s_endpgm
;
@@ -507,8 +507,8 @@ define amdgpu_kernel void @global_atomic_inc_ret_i32_offset_sistem(ptr addrspace
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-NEXT: global_atomic_inc_u32 v0, v1, v0, s[2:3] offset:16 glc
; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: global_store_b32 v1, v0, s[0:1]
; GFX11-NEXT: s_nop 0
; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -563,8 +563,8 @@ define amdgpu_kernel void @global_atomic_inc_noret_i32(ptr addrspace(1) %ptr) #1
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: global_atomic_inc v1, v0, s[0:1]
; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: buffer_gl1_inv
+; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: s_endpgm
;
; GFX11-LABEL: global_atomic_inc_noret_i32:
@@ -574,8 +574,8 @@ define amdgpu_kernel void @global_atomic_inc_noret_i32(ptr addrspace(1) %ptr) #1
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-NEXT: global_atomic_inc_u32 v1, v0, s[0:1]
; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: s_endpgm
%result = atomicrmw uinc_wrap ptr addrspace(1) %ptr, i32 42 syncscope("agent") seq_cst, align 4
ret void
@@ -629,8 +629,8 @@ define amdgpu_kernel void @global_atomic_inc_noret_i32_offset(ptr addrspace(1) %
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: global_atomic_inc v1, v0, s[0:1] offset:16
; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: buffer_gl1_inv
+; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: s_endpgm
;
; GFX11-LABEL: global_atomic_inc_noret_i32_offset:
@@ -640,8 +640,8 @@ define amdgpu_kernel void @global_atomic_inc_noret_i32_offset(ptr addrspace(1) %
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-NEXT: global_atomic_inc_u32 v1, v0, s[0:1] offset:16
; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: s_endpgm
%gep = getelementptr i32, ptr addrspace(1) %ptr, i32 4
%result = atomicrmw uinc_wrap ptr addrspace(1) %gep, i32 42 syncscope("agent") seq_cst, align 4
@@ -696,8 +696,8 @@ define amdgpu_kernel void @global_atomic_inc_noret_i32_offset_system(ptr addrspa
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: global_atomic_inc v1, v0, s[0:1] offset:16
; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: buffer_gl1_inv
+; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: s_endpgm
;
; GFX11-LABEL: global_atomic_inc_noret_i32_offset_system:
@@ -707,8 +707,8 @@ define amdgpu_kernel void @global_atomic_inc_noret_i32_offset_system(ptr addrspa
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-NEXT: global_atomic_inc_u32 v1, v0, s[0:1] offset:16
; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: s_endpgm
%gep = getelementptr i32, ptr addrspace(1) %ptr, i32 4
%result = atomicrmw uinc_wrap ptr addrspace(1) %gep, i32 42 seq_cst, align 4
@@ -780,8 +780,8 @@ define amdgpu_kernel void @global_atomic_inc_ret_i32_offset_addr64(ptr addrspace
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: global_atomic_inc v1, v0, v1, s[2:3] offset:20 glc
; GFX10-NEXT: s_waitcnt vmcnt(0)
-; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: buffer_gl1_inv
+; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: global_store_dword v0, v1, s[0:1]
; GFX10-NEXT: s_endpgm
;
@@ -792,8 +792,8 @@ define amdgpu_kernel void @global_atomic_inc_ret_i32_offset_addr64(ptr addrspace
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-NEXT: global_atomic_inc_u32 v1, v0, v1, s[2:3] offset:20 glc
; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: global_store_b32 v0, v1, s[0:1]
; GFX11-NEXT: s_nop 0
; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -861,8 +861,8 @@ define amdgpu_kernel void @global_atomic_inc_noret_i32_offset_addr64(ptr addrspa
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: global_atomic_inc v0, v1, s[0:1] offset:20
; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: buffer_gl1_inv
+; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: s_endpgm
;
; GFX11-LABEL: global_atomic_inc_noret_i32_offset_addr64:
@@ -872,8 +872,8 @@ define amdgpu_kernel void @global_atomic_inc_noret_i32_offset_addr64(ptr addrspa
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-NEXT: global_atomic_inc_u32 v0, v1, s[0:1] offset:20
; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: s_endpgm
%id = call i32 @llvm.amdgcn.workitem.id.x()
%gep.tid = getelementptr i32, ptr addrspace(1) %ptr, i32 %id
@@ -1322,8 +1322,8 @@ define amdgpu_kernel void @global_atomic_inc_ret_i64(ptr addrspace(1) %out, ptr
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: global_atomic_inc_x2 v[0:1], v2, v[0:1], s[2:3] glc
; GFX10-NEXT: s_waitcnt vmcnt(0)
-; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: buffer_gl1_inv
+; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
; GFX10-NEXT: s_endpgm
;
@@ -1335,8 +1335,8 @@ define amdgpu_kernel void @global_atomic_inc_ret_i64(ptr addrspace(1) %out, ptr
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-NEXT: global_atomic_inc_u64 v[0:1], v2, v[0:1], s[2:3] glc
; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: global_store_b64 v2, v[0:1], s[0:1]
; GFX11-NEXT: s_nop 0
; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -1405,8 +1405,8 @@ define amdgpu_kernel void @global_atomic_inc_ret_i64_offset(ptr addrspace(1) %ou
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: global_atomic_inc_x2 v[0:1], v2, v[0:1], s[2:3] offset:32 glc
; GFX10-NEXT: s_waitcnt vmcnt(0)
-; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: buffer_gl1_inv
+; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
; GFX10-NEXT: s_endpgm
;
@@ -1418,8 +1418,8 @@ define amdgpu_kernel void @global_atomic_inc_ret_i64_offset(ptr addrspace(1) %ou
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-NEXT: global_atomic_inc_u64 v[0:1], v2, v[0:1], s[2:3] offset:32 glc
; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: global_store_b64 v2, v[0:1], s[0:1]
; GFX11-NEXT: s_nop 0
; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -1489,8 +1489,8 @@ define amdgpu_kernel void @global_atomic_inc_ret_i64_offset_system(ptr addrspace
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: global_atomic_inc_x2 v[0:1], v2, v[0:1], s[2:3] offset:32 glc
; GFX10-NEXT: s_waitcnt vmcnt(0)
-; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: buffer_gl1_inv
+; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
; GFX10-NEXT: s_endpgm
;
@@ -1502,8 +1502,8 @@ define amdgpu_kernel void @global_atomic_inc_ret_i64_offset_system(ptr addrspace
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-NEXT: global_atomic_inc_u64 v[0:1], v2, v[0:1], s[2:3] offset:32 glc
; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: global_store_b64 v2, v[0:1], s[0:1]
; GFX11-NEXT: s_nop 0
; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -1562,8 +1562,8 @@ define amdgpu_kernel void @global_atomic_inc_noret_i64(ptr addrspace(1) %ptr) #1
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: global_atomic_inc_x2 v2, v[0:1], s[0:1]
; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: buffer_gl1_inv
+; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: s_endpgm
;
; GFX11-LABEL: global_atomic_inc_noret_i64:
@@ -1574,8 +1574,8 @@ define amdgpu_kernel void @global_atomic_inc_noret_i64(ptr addrspace(1) %ptr) #1
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-NEXT: global_atomic_inc_u64 v2, v[0:1], s[0:1]
; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: s_endpgm
%result = atomicrmw uinc_wrap ptr addrspace(1) %ptr, i64 42 syncscope("agent") seq_cst, align 8
ret void
@@ -1633,8 +1633,8 @@ define amdgpu_kernel void @global_atomic_inc_noret_i64_offset(ptr addrspace(1) %
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: global_atomic_inc_x2 v2, v[0:1], s[0:1] offset:32
; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: buffer_gl1_inv
+; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: s_endpgm
;
; GFX11-LABEL: global_atomic_inc_noret_i64_offset:
@@ -1645,8 +1645,8 @@ define amdgpu_kernel void @global_atomic_inc_noret_i64_offset(ptr addrspace(1) %
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-NEXT: global_atomic_inc_u64 v2, v[0:1], s[0:1] offset:32
; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: s_endpgm
%gep = getelementptr i64, ptr addrspace(1) %ptr, i32 4
%result = atomicrmw uinc_wrap ptr addrspace(1) %gep, i64 42 syncscope("agent") seq_cst, align 8
@@ -1705,8 +1705,8 @@ define amdgpu_kernel void @global_atomic_inc_noret_i64_offset_system(ptr addrspa
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: global_atomic_inc_x2 v2, v[0:1], s[0:1] offset:32
; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: buffer_gl1_inv
+; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: s_endpgm
;
; GFX11-LABEL: global_atomic_inc_noret_i64_offset_system:
@@ -1717,8 +1717,8 @@ define amdgpu_kernel void @global_atomic_inc_noret_i64_offset_system(ptr addrspa
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-NEXT: global_atomic_inc_u64 v2, v[0:1], s[0:1] offset:32
; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: s_endpgm
%gep = getelementptr i64, ptr addrspace(1) %ptr, i32 4
%result = atomicrmw uinc_wrap ptr addrspace(1) %gep, i64 42 seq_cst, align 8
@@ -1794,8 +1794,8 @@ define amdgpu_kernel void @global_atomic_inc_ret_i64_offset_addr64(ptr addrspace
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: global_atomic_inc_x2 v[0:1], v3, v[1:2], s[2:3] offset:40 glc
; GFX10-NEXT: s_waitcnt vmcnt(0)
-; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: buffer_gl1_inv
+; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: global_store_dwordx2 v3, v[0:1], s[0:1]
; GFX10-NEXT: s_endpgm
;
@@ -1807,8 +1807,8 @@ define amdgpu_kernel void @global_atomic_inc_ret_i64_offset_addr64(ptr addrspace
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-NEXT: global_atomic_inc_u64 v[0:1], v3, v[1:2], s[2:3] offset:40 glc
; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: global_store_b64 v3, v[0:1], s[0:1]
; GFX11-NEXT: s_nop 0
; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -1880,8 +1880,8 @@ define amdgpu_kernel void @global_atomic_inc_noret_i64_offset_addr64(ptr addrspa
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: global_atomic_inc_x2 v0, v[1:2], s[0:1] offset:40
; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: buffer_gl1_inv
+; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: s_endpgm
;
; GFX11-LABEL: global_atomic_inc_noret_i64_offset_addr64:
@@ -1892,8 +1892,8 @@ define amdgpu_kernel void @global_atomic_inc_noret_i64_offset_addr64(ptr addrspa
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-NEXT: global_atomic_inc_u64 v0, v[1:2], s[0:1] offset:40
; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: s_endpgm
%id = call i32 @llvm.amdgcn.workitem.id.x()
%gep.tid = getelementptr i64, ptr addrspace(1) %ptr, i32 %id
@@ -1957,8 +1957,8 @@ define amdgpu_kernel void @flat_atomic_inc_ret_i32(ptr %out, ptr %ptr) #1 {
; GFX10-NEXT: v_mov_b32_e32 v1, s3
; GFX10-NEXT: flat_atomic_inc v2, v[0:1], v2 glc
; GFX10-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: buffer_gl1_inv
+; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: v_mov_b32_e32 v0, s0
; GFX10-NEXT: v_mov_b32_e32 v1, s1
; GFX10-NEXT: flat_store_dword v[0:1], v2
@@ -1972,8 +1972,8 @@ define amdgpu_kernel void @flat_atomic_inc_ret_i32(ptr %out, ptr %ptr) #1 {
; GFX11-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
; GFX11-NEXT: flat_atomic_inc_u32 v2, v[0:1], v2 glc
; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: flat_store_b32 v[0:1], v2
; GFX11-NEXT: s_endpgm
@@ -2043,8 +2043,8 @@ define amdgpu_kernel void @flat_atomic_inc_ret_i32_offset(ptr %out, ptr %ptr) #1
; GFX10-NEXT: v_mov_b32_e32 v1, s3
; GFX10-NEXT: flat_atomic_inc v2, v[0:1], v2 glc
; GFX10-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: buffer_gl1_inv
+; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: v_mov_b32_e32 v0, s0
; GFX10-NEXT: v_mov_b32_e32 v1, s1
; GFX10-NEXT: flat_store_dword v[0:1], v2
@@ -2058,8 +2058,8 @@ define amdgpu_kernel void @flat_atomic_inc_ret_i32_offset(ptr %out, ptr %ptr) #1
; GFX11-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
; GFX11-NEXT: flat_atomic_inc_u32 v2, v[0:1], v2 offset:16 glc
; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: flat_store_b32 v[0:1], v2
; GFX11-NEXT: s_endpgm
@@ -2130,8 +2130,8 @@ define amdgpu_kernel void @flat_atomic_inc_ret_i32_offset_system(ptr %out, ptr %
; GFX10-NEXT: v_mov_b32_e32 v1, s3
; GFX10-NEXT: flat_atomic_inc v2, v[0:1], v2 glc
; GFX10-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: buffer_gl1_inv
+; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: v_mov_b32_e32 v0, s0
; GFX10-NEXT: v_mov_b32_e32 v1, s1
; GFX10-NEXT: flat_store_dword v[0:1], v2
@@ -2145,8 +2145,8 @@ define amdgpu_kernel void @flat_atomic_inc_ret_i32_offset_system(ptr %out, ptr %
; GFX11-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
; GFX11-NEXT: flat_atomic_inc_u32 v2, v[0:1], v2 offset:16 glc
; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: flat_store_b32 v[0:1], v2
; GFX11-NEXT: s_endpgm
@@ -2203,8 +2203,8 @@ define amdgpu_kernel void @flat_atomic_inc_noret_i32(ptr %ptr) #1 {
; GFX10-NEXT: flat_atomic_inc v[0:1], v2
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: buffer_gl1_inv
+; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: s_endpgm
;
; GFX11-LABEL: flat_atomic_inc_noret_i32:
@@ -2216,8 +2216,8 @@ define amdgpu_kernel void @flat_atomic_inc_noret_i32(ptr %ptr) #1 {
; GFX11-NEXT: flat_atomic_inc_u32 v[0:1], v2
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: s_endpgm
%result = atomicrmw uinc_wrap ptr %ptr, i32 42 syncscope("agent") seq_cst, align 4
ret void
@@ -2276,8 +2276,8 @@ define amdgpu_kernel void @flat_atomic_inc_noret_i32_offset(ptr %ptr) #1 {
; GFX10-NEXT: flat_atomic_inc v[0:1], v2
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: buffer_gl1_inv
+; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: s_endpgm
;
; GFX11-LABEL: flat_atomic_inc_noret_i32_offset:
@@ -2289,8 +2289,8 @@ define amdgpu_kernel void @flat_atomic_inc_noret_i32_offset(ptr %ptr) #1 {
; GFX11-NEXT: flat_atomic_inc_u32 v[0:1], v2 offset:16
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: s_endpgm
%gep = getelementptr i32, ptr %ptr, i32 4
%result = atomicrmw uinc_wrap ptr %gep, i32 42 syncscope("agent") seq_cst, align 4
@@ -2350,8 +2350,8 @@ define amdgpu_kernel void @flat_atomic_inc_noret_i32_offset_system(ptr %ptr) #1
; GFX10-NEXT: flat_atomic_inc v[0:1], v2
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: buffer_gl1_inv
+; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: s_endpgm
;
; GFX11-LABEL: flat_atomic_inc_noret_i32_offset_system:
@@ -2363,8 +2363,8 @@ define amdgpu_kernel void @flat_atomic_inc_noret_i32_offset_system(ptr %ptr) #1
; GFX11-NEXT: flat_atomic_inc_u32 v[0:1], v2 offset:16
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: s_endpgm
%gep = getelementptr i32, ptr %ptr, i32 4
%result = atomicrmw uinc_wrap ptr %gep, i32 42 seq_cst, align 4
@@ -2450,8 +2450,8 @@ define amdgpu_kernel void @flat_atomic_inc_ret_i32_offset_addr64(ptr %out, ptr %
; GFX10-NEXT: v_add_co_ci_u32_e32 v1, vcc_lo, 0, v1, vcc_lo
; GFX10-NEXT: flat_atomic_inc v3, v[0:1], v3 glc
; GFX10-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: buffer_gl1_inv
+; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: v_mov_b32_e32 v0, s0
; GFX10-NEXT: v_mov_b32_e32 v1, s1
; GFX10-NEXT: v_add_co_u32 v0, vcc_lo, v0, v2
@@ -2470,8 +2470,8 @@ define amdgpu_kernel void @flat_atomic_inc_ret_i32_offset_addr64(ptr %out, ptr %
; GFX11-NEXT: v_add_co_ci_u32_e32 v1, vcc_lo, 0, v1, vcc_lo
; GFX11-NEXT: flat_atomic_inc_u32 v3, v[0:1], v3 offset:20 glc
; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
; GFX11-NEXT: v_add_co_u32 v0, vcc_lo, v0, v2
@@ -2552,8 +2552,8 @@ define amdgpu_kernel void @flat_atomic_inc_noret_i32_offset_addr64(ptr %ptr) #1
; GFX10-NEXT: flat_atomic_inc v[0:1], v2
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: buffer_gl1_inv
+; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: s_endpgm
;
; GFX11-LABEL: flat_atomic_inc_noret_i32_offset_addr64:
@@ -2569,8 +2569,8 @@ define amdgpu_kernel void @flat_atomic_inc_noret_i32_offset_addr64(ptr %ptr) #1
; GFX11-NEXT: flat_atomic_inc_u32 v[0:1], v2 offset:20
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: s_endpgm
%id = call i32 @llvm.amdgcn.workitem.id.x()
%gep.tid = getelementptr i32, ptr %ptr, i32 %id
@@ -2744,8 +2744,8 @@ define amdgpu_kernel void @flat_atomic_inc_ret_i64(ptr %out, ptr %ptr) #1 {
; GFX10-NEXT: v_mov_b32_e32 v3, s3
; GFX10-NEXT: flat_atomic_inc_x2 v[0:1], v[2:3], v[0:1] glc
; GFX10-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: buffer_gl1_inv
+; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: v_mov_b32_e32 v3, s1
; GFX10-NEXT: v_mov_b32_e32 v2, s0
; GFX10-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
@@ -2760,8 +2760,8 @@ define amdgpu_kernel void @flat_atomic_inc_ret_i64(ptr %out, ptr %ptr) #1 {
; GFX11-NEXT: v_mov_b32_e32 v3, s3
; GFX11-NEXT: flat_atomic_inc_u64 v[0:1], v[2:3], v[0:1] glc
; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: v_dual_mov_b32 v3, s1 :: v_dual_mov_b32 v2, s0
; GFX11-NEXT: flat_store_b64 v[2:3], v[0:1]
; GFX11-NEXT: s_endpgm
@@ -2845,8 +2845,8 @@ define amdgpu_kernel void @flat_atomic_inc_ret_i64_offset(ptr %out, ptr %ptr) #1
; GFX10-NEXT: v_mov_b32_e32 v3, s3
; GFX10-NEXT: flat_atomic_inc_x2 v[0:1], v[2:3], v[0:1] glc
; GFX10-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: buffer_gl1_inv
+; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: v_mov_b32_e32 v3, s1
; GFX10-NEXT: v_mov_b32_e32 v2, s0
; GFX10-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
@@ -2861,8 +2861,8 @@ define amdgpu_kernel void @flat_atomic_inc_ret_i64_offset(ptr %out, ptr %ptr) #1
; GFX11-NEXT: v_mov_b32_e32 v3, s3
; GFX11-NEXT: flat_atomic_inc_u64 v[0:1], v[2:3], v[0:1] offset:32 glc
; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: v_dual_mov_b32 v3, s1 :: v_dual_mov_b32 v2, s0
; GFX11-NEXT: flat_store_b64 v[2:3], v[0:1]
; GFX11-NEXT: s_endpgm
@@ -2947,8 +2947,8 @@ define amdgpu_kernel void @flat_atomic_inc_ret_i64_offset_system(ptr %out, ptr %
; GFX10-NEXT: v_mov_b32_e32 v3, s3
; GFX10-NEXT: flat_atomic_inc_x2 v[0:1], v[2:3], v[0:1] glc
; GFX10-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: buffer_gl1_inv
+; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: v_mov_b32_e32 v3, s1
; GFX10-NEXT: v_mov_b32_e32 v2, s0
; GFX10-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
@@ -2963,8 +2963,8 @@ define amdgpu_kernel void @flat_atomic_inc_ret_i64_offset_system(ptr %out, ptr %
; GFX11-NEXT: v_mov_b32_e32 v3, s3
; GFX11-NEXT: flat_atomic_inc_u64 v[0:1], v[2:3], v[0:1] offset:32 glc
; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: v_dual_mov_b32 v3, s1 :: v_dual_mov_b32 v2, s0
; GFX11-NEXT: flat_store_b64 v[2:3], v[0:1]
; GFX11-NEXT: s_endpgm
@@ -3025,8 +3025,8 @@ define amdgpu_kernel void @flat_atomic_inc_noret_i64(ptr %ptr) #1 {
; GFX10-NEXT: flat_atomic_inc_x2 v[2:3], v[0:1]
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: buffer_gl1_inv
+; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: s_endpgm
;
; GFX11-LABEL: flat_atomic_inc_noret_i64:
@@ -3039,8 +3039,8 @@ define amdgpu_kernel void @flat_atomic_inc_noret_i64(ptr %ptr) #1 {
; GFX11-NEXT: flat_atomic_inc_u64 v[2:3], v[0:1]
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: s_endpgm
%result = atomicrmw uinc_wrap ptr %ptr, i64 42 syncscope("agent") seq_cst, align 8
ret void
@@ -3103,8 +3103,8 @@ define amdgpu_kernel void @flat_atomic_inc_noret_i64_offset(ptr %ptr) #1 {
; GFX10-NEXT: flat_atomic_inc_x2 v[2:3], v[0:1]
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: buffer_gl1_inv
+; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: s_endpgm
;
; GFX11-LABEL: flat_atomic_inc_noret_i64_offset:
@@ -3117,8 +3117,8 @@ define amdgpu_kernel void @flat_atomic_inc_noret_i64_offset(ptr %ptr) #1 {
; GFX11-NEXT: flat_atomic_inc_u64 v[2:3], v[0:1] offset:32
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: s_endpgm
%gep = getelementptr i64, ptr %ptr, i32 4
%result = atomicrmw uinc_wrap ptr %gep, i64 42 syncscope("agent") seq_cst, align 8
@@ -3182,8 +3182,8 @@ define amdgpu_kernel void @flat_atomic_inc_noret_i64_offset_system(ptr %ptr) #1
; GFX10-NEXT: flat_atomic_inc_x2 v[2:3], v[0:1]
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: buffer_gl1_inv
+; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: s_endpgm
;
; GFX11-LABEL: flat_atomic_inc_noret_i64_offset_system:
@@ -3196,8 +3196,8 @@ define amdgpu_kernel void @flat_atomic_inc_noret_i64_offset_system(ptr %ptr) #1
; GFX11-NEXT: flat_atomic_inc_u64 v[2:3], v[0:1] offset:32
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: s_endpgm
%gep = getelementptr i64, ptr %ptr, i32 4
%result = atomicrmw uinc_wrap ptr %gep, i64 42 seq_cst, align 8
@@ -3293,8 +3293,8 @@ define amdgpu_kernel void @flat_atomic_inc_ret_i64_offset_addr64(ptr %out, ptr %
; GFX10-NEXT: v_add_co_ci_u32_e32 v3, vcc_lo, 0, v3, vcc_lo
; GFX10-NEXT: flat_atomic_inc_x2 v[0:1], v[2:3], v[0:1] glc
; GFX10-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: buffer_gl1_inv
+; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: v_mov_b32_e32 v3, s1
; GFX10-NEXT: v_mov_b32_e32 v2, s0
; GFX10-NEXT: v_add_co_u32 v2, vcc_lo, v2, v4
@@ -3315,8 +3315,8 @@ define amdgpu_kernel void @flat_atomic_inc_ret_i64_offset_addr64(ptr %out, ptr %
; GFX11-NEXT: v_add_co_ci_u32_e32 v1, vcc_lo, 0, v1, vcc_lo
; GFX11-NEXT: flat_atomic_inc_u64 v[0:1], v[0:1], v[2:3] offset:40 glc
; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: v_dual_mov_b32 v3, s1 :: v_dual_mov_b32 v2, s0
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
; GFX11-NEXT: v_add_co_u32 v2, vcc_lo, v2, v4
@@ -3401,8 +3401,8 @@ define amdgpu_kernel void @flat_atomic_inc_noret_i64_offset_addr64(ptr %ptr) #1
; GFX10-NEXT: flat_atomic_inc_x2 v[2:3], v[0:1]
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: buffer_gl1_inv
+; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: s_endpgm
;
; GFX11-LABEL: flat_atomic_inc_noret_i64_offset_addr64:
@@ -3419,8 +3419,8 @@ define amdgpu_kernel void @flat_atomic_inc_noret_i64_offset_addr64(ptr %ptr) #1
; GFX11-NEXT: flat_atomic_inc_u64 v[0:1], v[2:3] offset:40
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: s_endpgm
%id = call i32 @llvm.amdgcn.workitem.id.x()
%gep.tid = getelementptr i64, ptr %ptr, i32 %id
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/bitcast_38_i16.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/bitcast_38_i16.ll
new file mode 100644
index 0000000..5bea13a
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/bitcast_38_i16.ll
@@ -0,0 +1,85 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
+; RUN: llc -global-isel -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx900 -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,GPRIDX %s
+; RUN: llc -global-isel -mtriple=amdgcn-mesa-mesa3d -mcpu=fiji -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,MOVREL %s
+; RUN: llc -global-isel -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1010 -verify-machineinstrs < %s | FileCheck -check-prefixes=GFX10PLUS,GFX10 %s
+; RUN: llc -global-isel -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1100 -amdgpu-enable-delay-alu=0 -verify-machineinstrs < %s | FileCheck -check-prefixes=GFX10PLUS,GFX11 %s
+define void @main(<19 x i32> %arg) {
+; GCN-LABEL: main:
+; GCN: ; %bb.0: ; %bb
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: s_mov_b32 s4, 0
+; GCN-NEXT: s_mov_b32 s12, s4
+; GCN-NEXT: v_cmp_eq_u16_e32 vcc, 0, v0
+; GCN-NEXT: v_mov_b32_e32 v1, 0
+; GCN-NEXT: s_mov_b32 s13, s4
+; GCN-NEXT: v_mov_b32_e32 v4, s12
+; GCN-NEXT: s_mov_b32 s5, s4
+; GCN-NEXT: s_mov_b32 s6, s4
+; GCN-NEXT: s_mov_b32 s7, s4
+; GCN-NEXT: s_mov_b32 s8, s4
+; GCN-NEXT: s_mov_b32 s9, s4
+; GCN-NEXT: s_mov_b32 s10, s4
+; GCN-NEXT: s_mov_b32 s11, s4
+; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
+; GCN-NEXT: v_mov_b32_e32 v2, v1
+; GCN-NEXT: v_mov_b32_e32 v3, v1
+; GCN-NEXT: v_mov_b32_e32 v5, s13
+; GCN-NEXT: image_store v[0:3], v[4:5], s[4:11] unorm
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: main:
+; GFX10: ; %bb.0: ; %bb
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: s_mov_b32 s4, 0
+; GFX10-NEXT: v_mov_b32_e32 v1, 0
+; GFX10-NEXT: v_cmp_eq_u16_e32 vcc_lo, 0, v0
+; GFX10-NEXT: s_mov_b32 s10, s4
+; GFX10-NEXT: s_mov_b32 s11, s4
+; GFX10-NEXT: v_mov_b32_e32 v4, s10
+; GFX10-NEXT: v_mov_b32_e32 v2, v1
+; GFX10-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GFX10-NEXT: v_mov_b32_e32 v3, v1
+; GFX10-NEXT: v_mov_b32_e32 v5, s11
+; GFX10-NEXT: s_mov_b32 s5, s4
+; GFX10-NEXT: s_mov_b32 s6, s4
+; GFX10-NEXT: s_mov_b32 s7, s4
+; GFX10-NEXT: s_mov_b32 s8, s4
+; GFX10-NEXT: s_mov_b32 s9, s4
+; GFX10-NEXT: image_store v[0:3], v[4:5], s[4:11] dim:SQ_RSRC_IMG_2D unorm
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: main:
+; GFX11: ; %bb.0: ; %bb
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: s_mov_b32 s0, 0
+; GFX11-NEXT: v_cmp_eq_u16_e32 vcc_lo, 0, v0
+; GFX11-NEXT: s_mov_b32 s6, s0
+; GFX11-NEXT: s_mov_b32 s7, s0
+; GFX11-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v4, s6
+; GFX11-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GFX11-NEXT: v_mov_b32_e32 v5, s7
+; GFX11-NEXT: s_mov_b32 s1, s0
+; GFX11-NEXT: v_mov_b32_e32 v2, v1
+; GFX11-NEXT: v_mov_b32_e32 v3, v1
+; GFX11-NEXT: s_mov_b32 s2, s0
+; GFX11-NEXT: s_mov_b32 s3, s0
+; GFX11-NEXT: s_mov_b32 s4, s0
+; GFX11-NEXT: s_mov_b32 s5, s0
+; GFX11-NEXT: image_store v[0:3], v[4:5], s[0:7] dim:SQ_RSRC_IMG_2D unorm
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+bb:
+ %i = bitcast <19 x i32> %arg to <38 x i16>
+ %i1 = extractelement <38 x i16> %i, i64 0
+ %i2 = icmp eq i16 %i1, 0
+ %i3 = zext i1 %i2 to i32
+ %i4 = bitcast i32 %i3 to float
+ %i5 = insertelement <4 x float> zeroinitializer, float %i4, i64 0
+ call void @llvm.amdgcn.image.store.2d.v4f32.i32(<4 x float> %i5, i32 0, i32 0, i32 0, <8 x i32> zeroinitializer, i32 0, i32 0)
+ ret void
+}
+declare void @llvm.amdgcn.image.store.2d.v4f32.i32(<4 x float>, i32 immarg, i32, i32, <8 x i32>, i32 immarg, i32 immarg)
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; GFX10PLUS: {{.*}}
+; GPRIDX: {{.*}}
+; MOVREL: {{.*}}
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/extractelement.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/extractelement.ll
index ac15318..1e1c90d 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/extractelement.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/extractelement.ll
@@ -2626,6 +2626,132 @@ entry:
ret double %ext
}
+define amdgpu_ps double @dyn_extract_v7f64_s_v_bitcast(<14 x float> inreg %userData, i32 %sel) {
+; GCN-LABEL: dyn_extract_v7f64_s_v_bitcast:
+; GCN: ; %bb.0: ; %entry
+; GCN-NEXT: v_mov_b32_e32 v1, s2
+; GCN-NEXT: v_mov_b32_e32 v2, s3
+; GCN-NEXT: v_mov_b32_e32 v3, s4
+; GCN-NEXT: v_mov_b32_e32 v4, s5
+; GCN-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GCN-NEXT: v_mov_b32_e32 v5, s6
+; GCN-NEXT: v_mov_b32_e32 v6, s7
+; GCN-NEXT: v_cndmask_b32_e32 v1, v1, v3, vcc
+; GCN-NEXT: v_cndmask_b32_e32 v2, v2, v4, vcc
+; GCN-NEXT: v_cmp_eq_u32_e32 vcc, 2, v0
+; GCN-NEXT: v_mov_b32_e32 v7, s8
+; GCN-NEXT: v_mov_b32_e32 v8, s9
+; GCN-NEXT: v_cndmask_b32_e32 v1, v1, v5, vcc
+; GCN-NEXT: v_cndmask_b32_e32 v2, v2, v6, vcc
+; GCN-NEXT: v_cmp_eq_u32_e32 vcc, 3, v0
+; GCN-NEXT: v_mov_b32_e32 v9, s10
+; GCN-NEXT: v_mov_b32_e32 v10, s11
+; GCN-NEXT: v_cndmask_b32_e32 v1, v1, v7, vcc
+; GCN-NEXT: v_cndmask_b32_e32 v2, v2, v8, vcc
+; GCN-NEXT: v_cmp_eq_u32_e32 vcc, 4, v0
+; GCN-NEXT: v_mov_b32_e32 v11, s12
+; GCN-NEXT: v_mov_b32_e32 v12, s13
+; GCN-NEXT: v_cndmask_b32_e32 v1, v1, v9, vcc
+; GCN-NEXT: v_cndmask_b32_e32 v2, v2, v10, vcc
+; GCN-NEXT: v_cmp_eq_u32_e32 vcc, 5, v0
+; GCN-NEXT: v_mov_b32_e32 v13, s14
+; GCN-NEXT: v_mov_b32_e32 v14, s15
+; GCN-NEXT: v_cndmask_b32_e32 v1, v1, v11, vcc
+; GCN-NEXT: v_cndmask_b32_e32 v2, v2, v12, vcc
+; GCN-NEXT: v_cmp_eq_u32_e32 vcc, 6, v0
+; GCN-NEXT: v_cndmask_b32_e32 v1, v1, v13, vcc
+; GCN-NEXT: v_cndmask_b32_e32 v2, v2, v14, vcc
+; GCN-NEXT: v_cmp_eq_u32_e32 vcc, 7, v0
+; GCN-NEXT: ; kill: def $vgpr15 killed $sgpr2 killed $exec
+; GCN-NEXT: ; kill: def $vgpr16 killed $sgpr3 killed $exec
+; GCN-NEXT: v_cndmask_b32_e32 v0, v1, v15, vcc
+; GCN-NEXT: v_cndmask_b32_e32 v1, v2, v16, vcc
+; GCN-NEXT: v_readfirstlane_b32 s0, v0
+; GCN-NEXT: v_readfirstlane_b32 s1, v1
+; GCN-NEXT: ; return to shader part epilog
+;
+; GFX10-LABEL: dyn_extract_v7f64_s_v_bitcast:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: v_mov_b32_e32 v1, s4
+; GFX10-NEXT: v_mov_b32_e32 v2, s5
+; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
+; GFX10-NEXT: s_mov_b32 s0, s14
+; GFX10-NEXT: v_cndmask_b32_e32 v1, s2, v1, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e32 v2, s3, v2, vcc_lo
+; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, 2, v0
+; GFX10-NEXT: v_cndmask_b32_e64 v1, v1, s6, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e64 v2, v2, s7, vcc_lo
+; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, 3, v0
+; GFX10-NEXT: v_cndmask_b32_e64 v1, v1, s8, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e64 v2, v2, s9, vcc_lo
+; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, 4, v0
+; GFX10-NEXT: v_cndmask_b32_e64 v1, v1, s10, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e64 v2, v2, s11, vcc_lo
+; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, 5, v0
+; GFX10-NEXT: v_cndmask_b32_e64 v1, v1, s12, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e64 v2, v2, s13, vcc_lo
+; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, 6, v0
+; GFX10-NEXT: v_cndmask_b32_e64 v1, v1, s0, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e64 v2, v2, s15, vcc_lo
+; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, 7, v0
+; GFX10-NEXT: v_cndmask_b32_e64 v0, v1, s2, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e64 v1, v2, s3, vcc_lo
+; GFX10-NEXT: v_readfirstlane_b32 s0, v0
+; GFX10-NEXT: v_readfirstlane_b32 s1, v1
+; GFX10-NEXT: ; return to shader part epilog
+;
+; GFX11-LABEL: dyn_extract_v7f64_s_v_bitcast:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: v_dual_mov_b32 v1, s4 :: v_dual_mov_b32 v2, s5
+; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
+; GFX11-NEXT: s_mov_b32 s0, s14
+; GFX11-NEXT: v_cndmask_b32_e32 v1, s2, v1, vcc_lo
+; GFX11-NEXT: v_cndmask_b32_e32 v2, s3, v2, vcc_lo
+; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, 2, v0
+; GFX11-NEXT: v_cndmask_b32_e64 v1, v1, s6, vcc_lo
+; GFX11-NEXT: v_cndmask_b32_e64 v2, v2, s7, vcc_lo
+; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, 3, v0
+; GFX11-NEXT: v_cndmask_b32_e64 v1, v1, s8, vcc_lo
+; GFX11-NEXT: v_cndmask_b32_e64 v2, v2, s9, vcc_lo
+; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, 4, v0
+; GFX11-NEXT: v_cndmask_b32_e64 v1, v1, s10, vcc_lo
+; GFX11-NEXT: v_cndmask_b32_e64 v2, v2, s11, vcc_lo
+; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, 5, v0
+; GFX11-NEXT: v_cndmask_b32_e64 v1, v1, s12, vcc_lo
+; GFX11-NEXT: v_cndmask_b32_e64 v2, v2, s13, vcc_lo
+; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, 6, v0
+; GFX11-NEXT: v_cndmask_b32_e64 v1, v1, s0, vcc_lo
+; GFX11-NEXT: v_cndmask_b32_e64 v2, v2, s15, vcc_lo
+; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, 7, v0
+; GFX11-NEXT: v_cndmask_b32_e64 v0, v1, s2, vcc_lo
+; GFX11-NEXT: v_cndmask_b32_e64 v1, v2, s3, vcc_lo
+; GFX11-NEXT: v_readfirstlane_b32 s0, v0
+; GFX11-NEXT: v_readfirstlane_b32 s1, v1
+; GFX11-NEXT: ; return to shader part epilog
+entry:
+ %bc = bitcast <14 x float> %userData to <7 x double>
+ %ext = extractelement <7 x double> %bc, i32 %sel
+ ret double %ext
+}
+
+define amdgpu_ps i64 @dyn_extract_v7i64_s_v_bitcast(<14 x i32> inreg %userData, i32 %sel) {
+; GCN-LABEL: dyn_extract_v7i64_s_v_bitcast:
+; GCN: ; %bb.0: ; %entry
+; GCN-NEXT: s_mov_b32 s0, s10
+; GCN-NEXT: s_mov_b32 s1, s11
+; GCN-NEXT: ; return to shader part epilog
+;
+; GFX10PLUS-LABEL: dyn_extract_v7i64_s_v_bitcast:
+; GFX10PLUS: ; %bb.0: ; %entry
+; GFX10PLUS-NEXT: s_mov_b32 s0, s10
+; GFX10PLUS-NEXT: s_mov_b32 s1, s11
+; GFX10PLUS-NEXT: ; return to shader part epilog
+entry:
+ %.bc = bitcast <14 x i32> %userData to <7 x i64>
+ %ext = extractelement <7 x i64> %.bc, i32 4
+ ret i64 %ext
+}
+
define amdgpu_ps double @dyn_extract_v7f64_s_v(<7 x double> inreg %vec, i32 %sel) {
; GCN-LABEL: dyn_extract_v7f64_s_v:
; GCN: ; %bb.0: ; %entry
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-build-vector.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-build-vector.mir
index 10766b0..25652b6 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-build-vector.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-build-vector.mir
@@ -299,129 +299,6 @@ body: |
S_NOP 0, implicit %12
...
---
-name: legal_v13s32
-body: |
- bb.0:
- liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6, $vgpr7, $vgpr8, $vgpr9, $vgpr10, $vgpr11, $vgpr12
- ; CHECK-LABEL: name: legal_v13s32
- ; CHECK: liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6, $vgpr7, $vgpr8, $vgpr9, $vgpr10, $vgpr11, $vgpr12
- ; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
- ; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
- ; CHECK-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $vgpr3
- ; CHECK-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $vgpr4
- ; CHECK-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $vgpr5
- ; CHECK-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $vgpr6
- ; CHECK-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $vgpr7
- ; CHECK-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr8
- ; CHECK-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $vgpr9
- ; CHECK-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $vgpr10
- ; CHECK-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $vgpr11
- ; CHECK-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr12
- ; CHECK-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<13 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32), [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32), [[COPY12]](s32)
- ; CHECK-NEXT: S_NOP 0, implicit [[BUILD_VECTOR]](<13 x s32>)
- %0:_(s32) = COPY $vgpr0
- %1:_(s32) = COPY $vgpr1
- %2:_(s32) = COPY $vgpr2
- %3:_(s32) = COPY $vgpr3
- %4:_(s32) = COPY $vgpr4
- %5:_(s32) = COPY $vgpr5
- %6:_(s32) = COPY $vgpr6
- %7:_(s32) = COPY $vgpr7
- %8:_(s32) = COPY $vgpr8
- %9:_(s32) = COPY $vgpr9
- %10:_(s32) = COPY $vgpr10
- %11:_(s32) = COPY $vgpr11
- %12:_(s32) = COPY $vgpr12
- %13:_(<13 x s32>) = G_BUILD_VECTOR %0, %1, %2, %3, %4, %5, %6, %7, %8, %9, %10, %11, %12
- S_NOP 0, implicit %13
-...
----
-name: legal_v14s32
-body: |
- bb.0:
- liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6, $vgpr7, $vgpr8, $vgpr9, $vgpr10, $vgpr11, $vgpr12, $vgpr13
- ; CHECK-LABEL: name: legal_v14s32
- ; CHECK: liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6, $vgpr7, $vgpr8, $vgpr9, $vgpr10, $vgpr11, $vgpr12, $vgpr13
- ; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
- ; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
- ; CHECK-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $vgpr3
- ; CHECK-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $vgpr4
- ; CHECK-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $vgpr5
- ; CHECK-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $vgpr6
- ; CHECK-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $vgpr7
- ; CHECK-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr8
- ; CHECK-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $vgpr9
- ; CHECK-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $vgpr10
- ; CHECK-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $vgpr11
- ; CHECK-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr12
- ; CHECK-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr13
- ; CHECK-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<14 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32), [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32), [[COPY12]](s32), [[COPY13]](s32)
- ; CHECK-NEXT: S_NOP 0, implicit [[BUILD_VECTOR]](<14 x s32>)
- %0:_(s32) = COPY $vgpr0
- %1:_(s32) = COPY $vgpr1
- %2:_(s32) = COPY $vgpr2
- %3:_(s32) = COPY $vgpr3
- %4:_(s32) = COPY $vgpr4
- %5:_(s32) = COPY $vgpr5
- %6:_(s32) = COPY $vgpr6
- %7:_(s32) = COPY $vgpr7
- %8:_(s32) = COPY $vgpr8
- %9:_(s32) = COPY $vgpr9
- %10:_(s32) = COPY $vgpr10
- %11:_(s32) = COPY $vgpr11
- %12:_(s32) = COPY $vgpr12
- %13:_(s32) = COPY $vgpr13
- %14:_(<14 x s32>) = G_BUILD_VECTOR %0, %1, %2, %3, %4, %5, %6, %7, %8, %9, %10, %11, %12, %13
- S_NOP 0, implicit %14
-...
----
-name: legal_v15s32
-body: |
- bb.0:
- liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6, $vgpr7, $vgpr8, $vgpr9, $vgpr10, $vgpr11, $vgpr12, $vgpr13, $vgpr14
- ; CHECK-LABEL: name: legal_v15s32
- ; CHECK: liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6, $vgpr7, $vgpr8, $vgpr9, $vgpr10, $vgpr11, $vgpr12, $vgpr13, $vgpr14
- ; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
- ; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
- ; CHECK-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $vgpr3
- ; CHECK-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $vgpr4
- ; CHECK-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $vgpr5
- ; CHECK-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $vgpr6
- ; CHECK-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $vgpr7
- ; CHECK-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr8
- ; CHECK-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $vgpr9
- ; CHECK-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $vgpr10
- ; CHECK-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $vgpr11
- ; CHECK-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr12
- ; CHECK-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr13
- ; CHECK-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr14
- ; CHECK-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<15 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32), [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32), [[COPY12]](s32), [[COPY13]](s32), [[COPY14]](s32)
- ; CHECK-NEXT: S_NOP 0, implicit [[BUILD_VECTOR]](<15 x s32>)
- %0:_(s32) = COPY $vgpr0
- %1:_(s32) = COPY $vgpr1
- %2:_(s32) = COPY $vgpr2
- %3:_(s32) = COPY $vgpr3
- %4:_(s32) = COPY $vgpr4
- %5:_(s32) = COPY $vgpr5
- %6:_(s32) = COPY $vgpr6
- %7:_(s32) = COPY $vgpr7
- %8:_(s32) = COPY $vgpr8
- %9:_(s32) = COPY $vgpr9
- %10:_(s32) = COPY $vgpr10
- %11:_(s32) = COPY $vgpr11
- %12:_(s32) = COPY $vgpr12
- %13:_(s32) = COPY $vgpr13
- %14:_(s32) = COPY $vgpr14
- %15:_(<15 x s32>) = G_BUILD_VECTOR %0, %1, %2, %3, %4, %5, %6, %7, %8, %9, %10, %11, %12, %13, %14
- S_NOP 0, implicit %15
-...
----
name: legal_v16s32
body: |
bb.0:
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/memory-legalizer-atomic-fence.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/memory-legalizer-atomic-fence.ll
index 03cd833..2727fde 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/memory-legalizer-atomic-fence.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/memory-legalizer-atomic-fence.ll
@@ -27,32 +27,32 @@ define amdgpu_kernel void @system_one_as_acquire() {
; GFX10WGP: bb.0.entry:
; GFX10WGP-NEXT: S_WAITCNT_soft 16240
; GFX10WGP-NEXT: S_WAITCNT_VSCNT_soft undef $sgpr_null, 0
- ; GFX10WGP-NEXT: BUFFER_GL0_INV implicit $exec
; GFX10WGP-NEXT: BUFFER_GL1_INV implicit $exec
+ ; GFX10WGP-NEXT: BUFFER_GL0_INV implicit $exec
; GFX10WGP-NEXT: S_ENDPGM 0
;
; GFX10CU-LABEL: name: system_one_as_acquire
; GFX10CU: bb.0.entry:
; GFX10CU-NEXT: S_WAITCNT_soft 16240
; GFX10CU-NEXT: S_WAITCNT_VSCNT_soft undef $sgpr_null, 0
- ; GFX10CU-NEXT: BUFFER_GL0_INV implicit $exec
; GFX10CU-NEXT: BUFFER_GL1_INV implicit $exec
+ ; GFX10CU-NEXT: BUFFER_GL0_INV implicit $exec
; GFX10CU-NEXT: S_ENDPGM 0
;
; GFX11WGP-LABEL: name: system_one_as_acquire
; GFX11WGP: bb.0.entry:
; GFX11WGP-NEXT: S_WAITCNT_soft 1015
; GFX11WGP-NEXT: S_WAITCNT_VSCNT_soft undef $sgpr_null, 0
- ; GFX11WGP-NEXT: BUFFER_GL0_INV implicit $exec
; GFX11WGP-NEXT: BUFFER_GL1_INV implicit $exec
+ ; GFX11WGP-NEXT: BUFFER_GL0_INV implicit $exec
; GFX11WGP-NEXT: S_ENDPGM 0
;
; GFX11CU-LABEL: name: system_one_as_acquire
; GFX11CU: bb.0.entry:
; GFX11CU-NEXT: S_WAITCNT_soft 1015
; GFX11CU-NEXT: S_WAITCNT_VSCNT_soft undef $sgpr_null, 0
- ; GFX11CU-NEXT: BUFFER_GL0_INV implicit $exec
; GFX11CU-NEXT: BUFFER_GL1_INV implicit $exec
+ ; GFX11CU-NEXT: BUFFER_GL0_INV implicit $exec
; GFX11CU-NEXT: S_ENDPGM 0
entry:
fence syncscope("one-as") acquire
@@ -115,32 +115,32 @@ define amdgpu_kernel void @system_one_as_acq_rel() {
; GFX10WGP: bb.0.entry:
; GFX10WGP-NEXT: S_WAITCNT_soft 16240
; GFX10WGP-NEXT: S_WAITCNT_VSCNT_soft undef $sgpr_null, 0
- ; GFX10WGP-NEXT: BUFFER_GL0_INV implicit $exec
; GFX10WGP-NEXT: BUFFER_GL1_INV implicit $exec
+ ; GFX10WGP-NEXT: BUFFER_GL0_INV implicit $exec
; GFX10WGP-NEXT: S_ENDPGM 0
;
; GFX10CU-LABEL: name: system_one_as_acq_rel
; GFX10CU: bb.0.entry:
; GFX10CU-NEXT: S_WAITCNT_soft 16240
; GFX10CU-NEXT: S_WAITCNT_VSCNT_soft undef $sgpr_null, 0
- ; GFX10CU-NEXT: BUFFER_GL0_INV implicit $exec
; GFX10CU-NEXT: BUFFER_GL1_INV implicit $exec
+ ; GFX10CU-NEXT: BUFFER_GL0_INV implicit $exec
; GFX10CU-NEXT: S_ENDPGM 0
;
; GFX11WGP-LABEL: name: system_one_as_acq_rel
; GFX11WGP: bb.0.entry:
; GFX11WGP-NEXT: S_WAITCNT_soft 1015
; GFX11WGP-NEXT: S_WAITCNT_VSCNT_soft undef $sgpr_null, 0
- ; GFX11WGP-NEXT: BUFFER_GL0_INV implicit $exec
; GFX11WGP-NEXT: BUFFER_GL1_INV implicit $exec
+ ; GFX11WGP-NEXT: BUFFER_GL0_INV implicit $exec
; GFX11WGP-NEXT: S_ENDPGM 0
;
; GFX11CU-LABEL: name: system_one_as_acq_rel
; GFX11CU: bb.0.entry:
; GFX11CU-NEXT: S_WAITCNT_soft 1015
; GFX11CU-NEXT: S_WAITCNT_VSCNT_soft undef $sgpr_null, 0
- ; GFX11CU-NEXT: BUFFER_GL0_INV implicit $exec
; GFX11CU-NEXT: BUFFER_GL1_INV implicit $exec
+ ; GFX11CU-NEXT: BUFFER_GL0_INV implicit $exec
; GFX11CU-NEXT: S_ENDPGM 0
entry:
fence syncscope("one-as") acq_rel
@@ -164,32 +164,32 @@ define amdgpu_kernel void @system_one_as_seq_cst() {
; GFX10WGP: bb.0.entry:
; GFX10WGP-NEXT: S_WAITCNT_soft 16240
; GFX10WGP-NEXT: S_WAITCNT_VSCNT_soft undef $sgpr_null, 0
- ; GFX10WGP-NEXT: BUFFER_GL0_INV implicit $exec
; GFX10WGP-NEXT: BUFFER_GL1_INV implicit $exec
+ ; GFX10WGP-NEXT: BUFFER_GL0_INV implicit $exec
; GFX10WGP-NEXT: S_ENDPGM 0
;
; GFX10CU-LABEL: name: system_one_as_seq_cst
; GFX10CU: bb.0.entry:
; GFX10CU-NEXT: S_WAITCNT_soft 16240
; GFX10CU-NEXT: S_WAITCNT_VSCNT_soft undef $sgpr_null, 0
- ; GFX10CU-NEXT: BUFFER_GL0_INV implicit $exec
; GFX10CU-NEXT: BUFFER_GL1_INV implicit $exec
+ ; GFX10CU-NEXT: BUFFER_GL0_INV implicit $exec
; GFX10CU-NEXT: S_ENDPGM 0
;
; GFX11WGP-LABEL: name: system_one_as_seq_cst
; GFX11WGP: bb.0.entry:
; GFX11WGP-NEXT: S_WAITCNT_soft 1015
; GFX11WGP-NEXT: S_WAITCNT_VSCNT_soft undef $sgpr_null, 0
- ; GFX11WGP-NEXT: BUFFER_GL0_INV implicit $exec
; GFX11WGP-NEXT: BUFFER_GL1_INV implicit $exec
+ ; GFX11WGP-NEXT: BUFFER_GL0_INV implicit $exec
; GFX11WGP-NEXT: S_ENDPGM 0
;
; GFX11CU-LABEL: name: system_one_as_seq_cst
; GFX11CU: bb.0.entry:
; GFX11CU-NEXT: S_WAITCNT_soft 1015
; GFX11CU-NEXT: S_WAITCNT_VSCNT_soft undef $sgpr_null, 0
- ; GFX11CU-NEXT: BUFFER_GL0_INV implicit $exec
; GFX11CU-NEXT: BUFFER_GL1_INV implicit $exec
+ ; GFX11CU-NEXT: BUFFER_GL0_INV implicit $exec
; GFX11CU-NEXT: S_ENDPGM 0
entry:
fence syncscope("one-as") seq_cst
@@ -329,32 +329,32 @@ define amdgpu_kernel void @agent_one_as_acquire() {
; GFX10WGP: bb.0.entry:
; GFX10WGP-NEXT: S_WAITCNT_soft 16240
; GFX10WGP-NEXT: S_WAITCNT_VSCNT_soft undef $sgpr_null, 0
- ; GFX10WGP-NEXT: BUFFER_GL0_INV implicit $exec
; GFX10WGP-NEXT: BUFFER_GL1_INV implicit $exec
+ ; GFX10WGP-NEXT: BUFFER_GL0_INV implicit $exec
; GFX10WGP-NEXT: S_ENDPGM 0
;
; GFX10CU-LABEL: name: agent_one_as_acquire
; GFX10CU: bb.0.entry:
; GFX10CU-NEXT: S_WAITCNT_soft 16240
; GFX10CU-NEXT: S_WAITCNT_VSCNT_soft undef $sgpr_null, 0
- ; GFX10CU-NEXT: BUFFER_GL0_INV implicit $exec
; GFX10CU-NEXT: BUFFER_GL1_INV implicit $exec
+ ; GFX10CU-NEXT: BUFFER_GL0_INV implicit $exec
; GFX10CU-NEXT: S_ENDPGM 0
;
; GFX11WGP-LABEL: name: agent_one_as_acquire
; GFX11WGP: bb.0.entry:
; GFX11WGP-NEXT: S_WAITCNT_soft 1015
; GFX11WGP-NEXT: S_WAITCNT_VSCNT_soft undef $sgpr_null, 0
- ; GFX11WGP-NEXT: BUFFER_GL0_INV implicit $exec
; GFX11WGP-NEXT: BUFFER_GL1_INV implicit $exec
+ ; GFX11WGP-NEXT: BUFFER_GL0_INV implicit $exec
; GFX11WGP-NEXT: S_ENDPGM 0
;
; GFX11CU-LABEL: name: agent_one_as_acquire
; GFX11CU: bb.0.entry:
; GFX11CU-NEXT: S_WAITCNT_soft 1015
; GFX11CU-NEXT: S_WAITCNT_VSCNT_soft undef $sgpr_null, 0
- ; GFX11CU-NEXT: BUFFER_GL0_INV implicit $exec
; GFX11CU-NEXT: BUFFER_GL1_INV implicit $exec
+ ; GFX11CU-NEXT: BUFFER_GL0_INV implicit $exec
; GFX11CU-NEXT: S_ENDPGM 0
entry:
fence syncscope("agent-one-as") acquire
@@ -417,32 +417,32 @@ define amdgpu_kernel void @agent_one_as_acq_rel() {
; GFX10WGP: bb.0.entry:
; GFX10WGP-NEXT: S_WAITCNT_soft 16240
; GFX10WGP-NEXT: S_WAITCNT_VSCNT_soft undef $sgpr_null, 0
- ; GFX10WGP-NEXT: BUFFER_GL0_INV implicit $exec
; GFX10WGP-NEXT: BUFFER_GL1_INV implicit $exec
+ ; GFX10WGP-NEXT: BUFFER_GL0_INV implicit $exec
; GFX10WGP-NEXT: S_ENDPGM 0
;
; GFX10CU-LABEL: name: agent_one_as_acq_rel
; GFX10CU: bb.0.entry:
; GFX10CU-NEXT: S_WAITCNT_soft 16240
; GFX10CU-NEXT: S_WAITCNT_VSCNT_soft undef $sgpr_null, 0
- ; GFX10CU-NEXT: BUFFER_GL0_INV implicit $exec
; GFX10CU-NEXT: BUFFER_GL1_INV implicit $exec
+ ; GFX10CU-NEXT: BUFFER_GL0_INV implicit $exec
; GFX10CU-NEXT: S_ENDPGM 0
;
; GFX11WGP-LABEL: name: agent_one_as_acq_rel
; GFX11WGP: bb.0.entry:
; GFX11WGP-NEXT: S_WAITCNT_soft 1015
; GFX11WGP-NEXT: S_WAITCNT_VSCNT_soft undef $sgpr_null, 0
- ; GFX11WGP-NEXT: BUFFER_GL0_INV implicit $exec
; GFX11WGP-NEXT: BUFFER_GL1_INV implicit $exec
+ ; GFX11WGP-NEXT: BUFFER_GL0_INV implicit $exec
; GFX11WGP-NEXT: S_ENDPGM 0
;
; GFX11CU-LABEL: name: agent_one_as_acq_rel
; GFX11CU: bb.0.entry:
; GFX11CU-NEXT: S_WAITCNT_soft 1015
; GFX11CU-NEXT: S_WAITCNT_VSCNT_soft undef $sgpr_null, 0
- ; GFX11CU-NEXT: BUFFER_GL0_INV implicit $exec
; GFX11CU-NEXT: BUFFER_GL1_INV implicit $exec
+ ; GFX11CU-NEXT: BUFFER_GL0_INV implicit $exec
; GFX11CU-NEXT: S_ENDPGM 0
entry:
fence syncscope("agent-one-as") acq_rel
@@ -466,32 +466,32 @@ define amdgpu_kernel void @agent_one_as_seq_cst() {
; GFX10WGP: bb.0.entry:
; GFX10WGP-NEXT: S_WAITCNT_soft 16240
; GFX10WGP-NEXT: S_WAITCNT_VSCNT_soft undef $sgpr_null, 0
- ; GFX10WGP-NEXT: BUFFER_GL0_INV implicit $exec
; GFX10WGP-NEXT: BUFFER_GL1_INV implicit $exec
+ ; GFX10WGP-NEXT: BUFFER_GL0_INV implicit $exec
; GFX10WGP-NEXT: S_ENDPGM 0
;
; GFX10CU-LABEL: name: agent_one_as_seq_cst
; GFX10CU: bb.0.entry:
; GFX10CU-NEXT: S_WAITCNT_soft 16240
; GFX10CU-NEXT: S_WAITCNT_VSCNT_soft undef $sgpr_null, 0
- ; GFX10CU-NEXT: BUFFER_GL0_INV implicit $exec
; GFX10CU-NEXT: BUFFER_GL1_INV implicit $exec
+ ; GFX10CU-NEXT: BUFFER_GL0_INV implicit $exec
; GFX10CU-NEXT: S_ENDPGM 0
;
; GFX11WGP-LABEL: name: agent_one_as_seq_cst
; GFX11WGP: bb.0.entry:
; GFX11WGP-NEXT: S_WAITCNT_soft 1015
; GFX11WGP-NEXT: S_WAITCNT_VSCNT_soft undef $sgpr_null, 0
- ; GFX11WGP-NEXT: BUFFER_GL0_INV implicit $exec
; GFX11WGP-NEXT: BUFFER_GL1_INV implicit $exec
+ ; GFX11WGP-NEXT: BUFFER_GL0_INV implicit $exec
; GFX11WGP-NEXT: S_ENDPGM 0
;
; GFX11CU-LABEL: name: agent_one_as_seq_cst
; GFX11CU: bb.0.entry:
; GFX11CU-NEXT: S_WAITCNT_soft 1015
; GFX11CU-NEXT: S_WAITCNT_VSCNT_soft undef $sgpr_null, 0
- ; GFX11CU-NEXT: BUFFER_GL0_INV implicit $exec
; GFX11CU-NEXT: BUFFER_GL1_INV implicit $exec
+ ; GFX11CU-NEXT: BUFFER_GL0_INV implicit $exec
; GFX11CU-NEXT: S_ENDPGM 0
entry:
fence syncscope("agent-one-as") seq_cst
@@ -769,32 +769,32 @@ define amdgpu_kernel void @system_acquire() {
; GFX10WGP: bb.0.entry:
; GFX10WGP-NEXT: S_WAITCNT_soft 112
; GFX10WGP-NEXT: S_WAITCNT_VSCNT_soft undef $sgpr_null, 0
- ; GFX10WGP-NEXT: BUFFER_GL0_INV implicit $exec
; GFX10WGP-NEXT: BUFFER_GL1_INV implicit $exec
+ ; GFX10WGP-NEXT: BUFFER_GL0_INV implicit $exec
; GFX10WGP-NEXT: S_ENDPGM 0
;
; GFX10CU-LABEL: name: system_acquire
; GFX10CU: bb.0.entry:
; GFX10CU-NEXT: S_WAITCNT_soft 112
; GFX10CU-NEXT: S_WAITCNT_VSCNT_soft undef $sgpr_null, 0
- ; GFX10CU-NEXT: BUFFER_GL0_INV implicit $exec
; GFX10CU-NEXT: BUFFER_GL1_INV implicit $exec
+ ; GFX10CU-NEXT: BUFFER_GL0_INV implicit $exec
; GFX10CU-NEXT: S_ENDPGM 0
;
; GFX11WGP-LABEL: name: system_acquire
; GFX11WGP: bb.0.entry:
; GFX11WGP-NEXT: S_WAITCNT_soft 7
; GFX11WGP-NEXT: S_WAITCNT_VSCNT_soft undef $sgpr_null, 0
- ; GFX11WGP-NEXT: BUFFER_GL0_INV implicit $exec
; GFX11WGP-NEXT: BUFFER_GL1_INV implicit $exec
+ ; GFX11WGP-NEXT: BUFFER_GL0_INV implicit $exec
; GFX11WGP-NEXT: S_ENDPGM 0
;
; GFX11CU-LABEL: name: system_acquire
; GFX11CU: bb.0.entry:
; GFX11CU-NEXT: S_WAITCNT_soft 7
; GFX11CU-NEXT: S_WAITCNT_VSCNT_soft undef $sgpr_null, 0
- ; GFX11CU-NEXT: BUFFER_GL0_INV implicit $exec
; GFX11CU-NEXT: BUFFER_GL1_INV implicit $exec
+ ; GFX11CU-NEXT: BUFFER_GL0_INV implicit $exec
; GFX11CU-NEXT: S_ENDPGM 0
entry:
fence acquire
@@ -857,32 +857,32 @@ define amdgpu_kernel void @system_acq_rel() {
; GFX10WGP: bb.0.entry:
; GFX10WGP-NEXT: S_WAITCNT_soft 112
; GFX10WGP-NEXT: S_WAITCNT_VSCNT_soft undef $sgpr_null, 0
- ; GFX10WGP-NEXT: BUFFER_GL0_INV implicit $exec
; GFX10WGP-NEXT: BUFFER_GL1_INV implicit $exec
+ ; GFX10WGP-NEXT: BUFFER_GL0_INV implicit $exec
; GFX10WGP-NEXT: S_ENDPGM 0
;
; GFX10CU-LABEL: name: system_acq_rel
; GFX10CU: bb.0.entry:
; GFX10CU-NEXT: S_WAITCNT_soft 112
; GFX10CU-NEXT: S_WAITCNT_VSCNT_soft undef $sgpr_null, 0
- ; GFX10CU-NEXT: BUFFER_GL0_INV implicit $exec
; GFX10CU-NEXT: BUFFER_GL1_INV implicit $exec
+ ; GFX10CU-NEXT: BUFFER_GL0_INV implicit $exec
; GFX10CU-NEXT: S_ENDPGM 0
;
; GFX11WGP-LABEL: name: system_acq_rel
; GFX11WGP: bb.0.entry:
; GFX11WGP-NEXT: S_WAITCNT_soft 7
; GFX11WGP-NEXT: S_WAITCNT_VSCNT_soft undef $sgpr_null, 0
- ; GFX11WGP-NEXT: BUFFER_GL0_INV implicit $exec
; GFX11WGP-NEXT: BUFFER_GL1_INV implicit $exec
+ ; GFX11WGP-NEXT: BUFFER_GL0_INV implicit $exec
; GFX11WGP-NEXT: S_ENDPGM 0
;
; GFX11CU-LABEL: name: system_acq_rel
; GFX11CU: bb.0.entry:
; GFX11CU-NEXT: S_WAITCNT_soft 7
; GFX11CU-NEXT: S_WAITCNT_VSCNT_soft undef $sgpr_null, 0
- ; GFX11CU-NEXT: BUFFER_GL0_INV implicit $exec
; GFX11CU-NEXT: BUFFER_GL1_INV implicit $exec
+ ; GFX11CU-NEXT: BUFFER_GL0_INV implicit $exec
; GFX11CU-NEXT: S_ENDPGM 0
entry:
fence acq_rel
@@ -906,32 +906,32 @@ define amdgpu_kernel void @system_seq_cst() {
; GFX10WGP: bb.0.entry:
; GFX10WGP-NEXT: S_WAITCNT_soft 112
; GFX10WGP-NEXT: S_WAITCNT_VSCNT_soft undef $sgpr_null, 0
- ; GFX10WGP-NEXT: BUFFER_GL0_INV implicit $exec
; GFX10WGP-NEXT: BUFFER_GL1_INV implicit $exec
+ ; GFX10WGP-NEXT: BUFFER_GL0_INV implicit $exec
; GFX10WGP-NEXT: S_ENDPGM 0
;
; GFX10CU-LABEL: name: system_seq_cst
; GFX10CU: bb.0.entry:
; GFX10CU-NEXT: S_WAITCNT_soft 112
; GFX10CU-NEXT: S_WAITCNT_VSCNT_soft undef $sgpr_null, 0
- ; GFX10CU-NEXT: BUFFER_GL0_INV implicit $exec
; GFX10CU-NEXT: BUFFER_GL1_INV implicit $exec
+ ; GFX10CU-NEXT: BUFFER_GL0_INV implicit $exec
; GFX10CU-NEXT: S_ENDPGM 0
;
; GFX11WGP-LABEL: name: system_seq_cst
; GFX11WGP: bb.0.entry:
; GFX11WGP-NEXT: S_WAITCNT_soft 7
; GFX11WGP-NEXT: S_WAITCNT_VSCNT_soft undef $sgpr_null, 0
- ; GFX11WGP-NEXT: BUFFER_GL0_INV implicit $exec
; GFX11WGP-NEXT: BUFFER_GL1_INV implicit $exec
+ ; GFX11WGP-NEXT: BUFFER_GL0_INV implicit $exec
; GFX11WGP-NEXT: S_ENDPGM 0
;
; GFX11CU-LABEL: name: system_seq_cst
; GFX11CU: bb.0.entry:
; GFX11CU-NEXT: S_WAITCNT_soft 7
; GFX11CU-NEXT: S_WAITCNT_VSCNT_soft undef $sgpr_null, 0
- ; GFX11CU-NEXT: BUFFER_GL0_INV implicit $exec
; GFX11CU-NEXT: BUFFER_GL1_INV implicit $exec
+ ; GFX11CU-NEXT: BUFFER_GL0_INV implicit $exec
; GFX11CU-NEXT: S_ENDPGM 0
entry:
fence seq_cst
@@ -1071,32 +1071,32 @@ define amdgpu_kernel void @agent_acquire() {
; GFX10WGP: bb.0.entry:
; GFX10WGP-NEXT: S_WAITCNT_soft 112
; GFX10WGP-NEXT: S_WAITCNT_VSCNT_soft undef $sgpr_null, 0
- ; GFX10WGP-NEXT: BUFFER_GL0_INV implicit $exec
; GFX10WGP-NEXT: BUFFER_GL1_INV implicit $exec
+ ; GFX10WGP-NEXT: BUFFER_GL0_INV implicit $exec
; GFX10WGP-NEXT: S_ENDPGM 0
;
; GFX10CU-LABEL: name: agent_acquire
; GFX10CU: bb.0.entry:
; GFX10CU-NEXT: S_WAITCNT_soft 112
; GFX10CU-NEXT: S_WAITCNT_VSCNT_soft undef $sgpr_null, 0
- ; GFX10CU-NEXT: BUFFER_GL0_INV implicit $exec
; GFX10CU-NEXT: BUFFER_GL1_INV implicit $exec
+ ; GFX10CU-NEXT: BUFFER_GL0_INV implicit $exec
; GFX10CU-NEXT: S_ENDPGM 0
;
; GFX11WGP-LABEL: name: agent_acquire
; GFX11WGP: bb.0.entry:
; GFX11WGP-NEXT: S_WAITCNT_soft 7
; GFX11WGP-NEXT: S_WAITCNT_VSCNT_soft undef $sgpr_null, 0
- ; GFX11WGP-NEXT: BUFFER_GL0_INV implicit $exec
; GFX11WGP-NEXT: BUFFER_GL1_INV implicit $exec
+ ; GFX11WGP-NEXT: BUFFER_GL0_INV implicit $exec
; GFX11WGP-NEXT: S_ENDPGM 0
;
; GFX11CU-LABEL: name: agent_acquire
; GFX11CU: bb.0.entry:
; GFX11CU-NEXT: S_WAITCNT_soft 7
; GFX11CU-NEXT: S_WAITCNT_VSCNT_soft undef $sgpr_null, 0
- ; GFX11CU-NEXT: BUFFER_GL0_INV implicit $exec
; GFX11CU-NEXT: BUFFER_GL1_INV implicit $exec
+ ; GFX11CU-NEXT: BUFFER_GL0_INV implicit $exec
; GFX11CU-NEXT: S_ENDPGM 0
entry:
fence syncscope("agent") acquire
@@ -1159,32 +1159,32 @@ define amdgpu_kernel void @agent_acq_rel() {
; GFX10WGP: bb.0.entry:
; GFX10WGP-NEXT: S_WAITCNT_soft 112
; GFX10WGP-NEXT: S_WAITCNT_VSCNT_soft undef $sgpr_null, 0
- ; GFX10WGP-NEXT: BUFFER_GL0_INV implicit $exec
; GFX10WGP-NEXT: BUFFER_GL1_INV implicit $exec
+ ; GFX10WGP-NEXT: BUFFER_GL0_INV implicit $exec
; GFX10WGP-NEXT: S_ENDPGM 0
;
; GFX10CU-LABEL: name: agent_acq_rel
; GFX10CU: bb.0.entry:
; GFX10CU-NEXT: S_WAITCNT_soft 112
; GFX10CU-NEXT: S_WAITCNT_VSCNT_soft undef $sgpr_null, 0
- ; GFX10CU-NEXT: BUFFER_GL0_INV implicit $exec
; GFX10CU-NEXT: BUFFER_GL1_INV implicit $exec
+ ; GFX10CU-NEXT: BUFFER_GL0_INV implicit $exec
; GFX10CU-NEXT: S_ENDPGM 0
;
; GFX11WGP-LABEL: name: agent_acq_rel
; GFX11WGP: bb.0.entry:
; GFX11WGP-NEXT: S_WAITCNT_soft 7
; GFX11WGP-NEXT: S_WAITCNT_VSCNT_soft undef $sgpr_null, 0
- ; GFX11WGP-NEXT: BUFFER_GL0_INV implicit $exec
; GFX11WGP-NEXT: BUFFER_GL1_INV implicit $exec
+ ; GFX11WGP-NEXT: BUFFER_GL0_INV implicit $exec
; GFX11WGP-NEXT: S_ENDPGM 0
;
; GFX11CU-LABEL: name: agent_acq_rel
; GFX11CU: bb.0.entry:
; GFX11CU-NEXT: S_WAITCNT_soft 7
; GFX11CU-NEXT: S_WAITCNT_VSCNT_soft undef $sgpr_null, 0
- ; GFX11CU-NEXT: BUFFER_GL0_INV implicit $exec
; GFX11CU-NEXT: BUFFER_GL1_INV implicit $exec
+ ; GFX11CU-NEXT: BUFFER_GL0_INV implicit $exec
; GFX11CU-NEXT: S_ENDPGM 0
entry:
fence syncscope("agent") acq_rel
@@ -1208,32 +1208,32 @@ define amdgpu_kernel void @agent_seq_cst() {
; GFX10WGP: bb.0.entry:
; GFX10WGP-NEXT: S_WAITCNT_soft 112
; GFX10WGP-NEXT: S_WAITCNT_VSCNT_soft undef $sgpr_null, 0
- ; GFX10WGP-NEXT: BUFFER_GL0_INV implicit $exec
; GFX10WGP-NEXT: BUFFER_GL1_INV implicit $exec
+ ; GFX10WGP-NEXT: BUFFER_GL0_INV implicit $exec
; GFX10WGP-NEXT: S_ENDPGM 0
;
; GFX10CU-LABEL: name: agent_seq_cst
; GFX10CU: bb.0.entry:
; GFX10CU-NEXT: S_WAITCNT_soft 112
; GFX10CU-NEXT: S_WAITCNT_VSCNT_soft undef $sgpr_null, 0
- ; GFX10CU-NEXT: BUFFER_GL0_INV implicit $exec
; GFX10CU-NEXT: BUFFER_GL1_INV implicit $exec
+ ; GFX10CU-NEXT: BUFFER_GL0_INV implicit $exec
; GFX10CU-NEXT: S_ENDPGM 0
;
; GFX11WGP-LABEL: name: agent_seq_cst
; GFX11WGP: bb.0.entry:
; GFX11WGP-NEXT: S_WAITCNT_soft 7
; GFX11WGP-NEXT: S_WAITCNT_VSCNT_soft undef $sgpr_null, 0
- ; GFX11WGP-NEXT: BUFFER_GL0_INV implicit $exec
; GFX11WGP-NEXT: BUFFER_GL1_INV implicit $exec
+ ; GFX11WGP-NEXT: BUFFER_GL0_INV implicit $exec
; GFX11WGP-NEXT: S_ENDPGM 0
;
; GFX11CU-LABEL: name: agent_seq_cst
; GFX11CU: bb.0.entry:
; GFX11CU-NEXT: S_WAITCNT_soft 7
; GFX11CU-NEXT: S_WAITCNT_VSCNT_soft undef $sgpr_null, 0
- ; GFX11CU-NEXT: BUFFER_GL0_INV implicit $exec
; GFX11CU-NEXT: BUFFER_GL1_INV implicit $exec
+ ; GFX11CU-NEXT: BUFFER_GL0_INV implicit $exec
; GFX11CU-NEXT: S_ENDPGM 0
entry:
fence syncscope("agent") seq_cst
diff --git a/llvm/test/CodeGen/AMDGPU/atomic_optimizations_global_pointer.ll b/llvm/test/CodeGen/AMDGPU/atomic_optimizations_global_pointer.ll
index b5cccb6..8ee0ee3 100644
--- a/llvm/test/CodeGen/AMDGPU/atomic_optimizations_global_pointer.ll
+++ b/llvm/test/CodeGen/AMDGPU/atomic_optimizations_global_pointer.ll
@@ -99,8 +99,8 @@ define amdgpu_kernel void @add_i32_constant(ptr addrspace(1) %out, ptr addrspace
; GFX1064-NEXT: s_mov_b32 s9, s3
; GFX1064-NEXT: buffer_atomic_add v1, off, s[8:11], 0 glc
; GFX1064-NEXT: s_waitcnt vmcnt(0)
-; GFX1064-NEXT: buffer_gl0_inv
; GFX1064-NEXT: buffer_gl1_inv
+; GFX1064-NEXT: buffer_gl0_inv
; GFX1064-NEXT: .LBB0_2:
; GFX1064-NEXT: s_waitcnt_depctr 0xffe3
; GFX1064-NEXT: s_or_b64 exec, exec, s[4:5]
@@ -132,8 +132,8 @@ define amdgpu_kernel void @add_i32_constant(ptr addrspace(1) %out, ptr addrspace
; GFX1032-NEXT: s_mov_b32 s9, s3
; GFX1032-NEXT: buffer_atomic_add v1, off, s[8:11], 0 glc
; GFX1032-NEXT: s_waitcnt vmcnt(0)
-; GFX1032-NEXT: buffer_gl0_inv
; GFX1032-NEXT: buffer_gl1_inv
+; GFX1032-NEXT: buffer_gl0_inv
; GFX1032-NEXT: .LBB0_2:
; GFX1032-NEXT: s_waitcnt_depctr 0xffe3
; GFX1032-NEXT: s_or_b32 exec_lo, exec_lo, s4
@@ -167,8 +167,8 @@ define amdgpu_kernel void @add_i32_constant(ptr addrspace(1) %out, ptr addrspace
; GFX1164-NEXT: s_mov_b32 s9, s3
; GFX1164-NEXT: buffer_atomic_add_u32 v1, off, s[8:11], 0 glc
; GFX1164-NEXT: s_waitcnt vmcnt(0)
-; GFX1164-NEXT: buffer_gl0_inv
; GFX1164-NEXT: buffer_gl1_inv
+; GFX1164-NEXT: buffer_gl0_inv
; GFX1164-NEXT: .LBB0_2:
; GFX1164-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
@@ -203,8 +203,8 @@ define amdgpu_kernel void @add_i32_constant(ptr addrspace(1) %out, ptr addrspace
; GFX1132-NEXT: s_mov_b32 s9, s3
; GFX1132-NEXT: buffer_atomic_add_u32 v1, off, s[8:11], 0 glc
; GFX1132-NEXT: s_waitcnt vmcnt(0)
-; GFX1132-NEXT: buffer_gl0_inv
; GFX1132-NEXT: buffer_gl1_inv
+; GFX1132-NEXT: buffer_gl0_inv
; GFX1132-NEXT: .LBB0_2:
; GFX1132-NEXT: s_or_b32 exec_lo, exec_lo, s4
; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
@@ -420,8 +420,8 @@ define amdgpu_kernel void @add_i32_uniform(ptr addrspace(1) %out, ptr addrspace(
; GFX1064-NEXT: s_mov_b32 s13, s7
; GFX1064-NEXT: buffer_atomic_add v1, off, s[12:15], 0 glc
; GFX1064-NEXT: s_waitcnt vmcnt(0)
-; GFX1064-NEXT: buffer_gl0_inv
; GFX1064-NEXT: buffer_gl1_inv
+; GFX1064-NEXT: buffer_gl0_inv
; GFX1064-NEXT: .LBB1_2:
; GFX1064-NEXT: s_waitcnt_depctr 0xffe3
; GFX1064-NEXT: s_or_b64 exec, exec, s[0:1]
@@ -455,8 +455,8 @@ define amdgpu_kernel void @add_i32_uniform(ptr addrspace(1) %out, ptr addrspace(
; GFX1032-NEXT: s_mov_b32 s9, s7
; GFX1032-NEXT: buffer_atomic_add v1, off, s[8:11], 0 glc
; GFX1032-NEXT: s_waitcnt vmcnt(0)
-; GFX1032-NEXT: buffer_gl0_inv
; GFX1032-NEXT: buffer_gl1_inv
+; GFX1032-NEXT: buffer_gl0_inv
; GFX1032-NEXT: .LBB1_2:
; GFX1032-NEXT: s_waitcnt_depctr 0xffe3
; GFX1032-NEXT: s_or_b32 exec_lo, exec_lo, s0
@@ -492,8 +492,8 @@ define amdgpu_kernel void @add_i32_uniform(ptr addrspace(1) %out, ptr addrspace(
; GFX1164-NEXT: s_mov_b32 s13, s7
; GFX1164-NEXT: buffer_atomic_add_u32 v1, off, s[12:15], 0 glc
; GFX1164-NEXT: s_waitcnt vmcnt(0)
-; GFX1164-NEXT: buffer_gl0_inv
; GFX1164-NEXT: buffer_gl1_inv
+; GFX1164-NEXT: buffer_gl0_inv
; GFX1164-NEXT: .LBB1_2:
; GFX1164-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX1164-NEXT: v_readfirstlane_b32 s0, v1
@@ -530,8 +530,8 @@ define amdgpu_kernel void @add_i32_uniform(ptr addrspace(1) %out, ptr addrspace(
; GFX1132-NEXT: s_mov_b32 s9, s7
; GFX1132-NEXT: buffer_atomic_add_u32 v1, off, s[8:11], 0 glc
; GFX1132-NEXT: s_waitcnt vmcnt(0)
-; GFX1132-NEXT: buffer_gl0_inv
; GFX1132-NEXT: buffer_gl1_inv
+; GFX1132-NEXT: buffer_gl0_inv
; GFX1132-NEXT: .LBB1_2:
; GFX1132-NEXT: s_or_b32 exec_lo, exec_lo, s1
; GFX1132-NEXT: v_readfirstlane_b32 s2, v1
@@ -767,8 +767,8 @@ define amdgpu_kernel void @add_i32_varying(ptr addrspace(1) %out, ptr addrspace(
; GFX1064-NEXT: s_mov_b32 s9, s3
; GFX1064-NEXT: buffer_atomic_add v0, off, s[8:11], 0 glc
; GFX1064-NEXT: s_waitcnt vmcnt(0)
-; GFX1064-NEXT: buffer_gl0_inv
; GFX1064-NEXT: buffer_gl1_inv
+; GFX1064-NEXT: buffer_gl0_inv
; GFX1064-NEXT: .LBB2_4:
; GFX1064-NEXT: s_waitcnt_depctr 0xffe3
; GFX1064-NEXT: s_or_b64 exec, exec, s[4:5]
@@ -812,8 +812,8 @@ define amdgpu_kernel void @add_i32_varying(ptr addrspace(1) %out, ptr addrspace(
; GFX1032-NEXT: s_mov_b32 s9, s3
; GFX1032-NEXT: buffer_atomic_add v0, off, s[8:11], 0 glc
; GFX1032-NEXT: s_waitcnt vmcnt(0)
-; GFX1032-NEXT: buffer_gl0_inv
; GFX1032-NEXT: buffer_gl1_inv
+; GFX1032-NEXT: buffer_gl0_inv
; GFX1032-NEXT: .LBB2_4:
; GFX1032-NEXT: s_waitcnt_depctr 0xffe3
; GFX1032-NEXT: s_or_b32 exec_lo, exec_lo, s5
@@ -861,8 +861,8 @@ define amdgpu_kernel void @add_i32_varying(ptr addrspace(1) %out, ptr addrspace(
; GFX1164-NEXT: s_mov_b32 s9, s3
; GFX1164-NEXT: buffer_atomic_add_u32 v0, off, s[8:11], 0 glc
; GFX1164-NEXT: s_waitcnt vmcnt(0)
-; GFX1164-NEXT: buffer_gl0_inv
; GFX1164-NEXT: buffer_gl1_inv
+; GFX1164-NEXT: buffer_gl0_inv
; GFX1164-NEXT: .LBB2_4:
; GFX1164-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
@@ -910,8 +910,8 @@ define amdgpu_kernel void @add_i32_varying(ptr addrspace(1) %out, ptr addrspace(
; GFX1132-NEXT: s_mov_b32 s9, s3
; GFX1132-NEXT: buffer_atomic_add_u32 v0, off, s[8:11], 0 glc
; GFX1132-NEXT: s_waitcnt vmcnt(0)
-; GFX1132-NEXT: buffer_gl0_inv
; GFX1132-NEXT: buffer_gl1_inv
+; GFX1132-NEXT: buffer_gl0_inv
; GFX1132-NEXT: .LBB2_4:
; GFX1132-NEXT: s_or_b32 exec_lo, exec_lo, s5
; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
@@ -1128,8 +1128,8 @@ define amdgpu_kernel void @add_i64_constant(ptr addrspace(1) %out, ptr addrspace
; GFX1064-NEXT: s_mov_b32 s9, s3
; GFX1064-NEXT: buffer_atomic_add_x2 v[0:1], off, s[8:11], 0 glc
; GFX1064-NEXT: s_waitcnt vmcnt(0)
-; GFX1064-NEXT: buffer_gl0_inv
; GFX1064-NEXT: buffer_gl1_inv
+; GFX1064-NEXT: buffer_gl0_inv
; GFX1064-NEXT: .LBB3_2:
; GFX1064-NEXT: s_waitcnt_depctr 0xffe3
; GFX1064-NEXT: s_or_b64 exec, exec, s[4:5]
@@ -1163,8 +1163,8 @@ define amdgpu_kernel void @add_i64_constant(ptr addrspace(1) %out, ptr addrspace
; GFX1032-NEXT: s_mov_b32 s9, s3
; GFX1032-NEXT: buffer_atomic_add_x2 v[0:1], off, s[8:11], 0 glc
; GFX1032-NEXT: s_waitcnt vmcnt(0)
-; GFX1032-NEXT: buffer_gl0_inv
; GFX1032-NEXT: buffer_gl1_inv
+; GFX1032-NEXT: buffer_gl0_inv
; GFX1032-NEXT: .LBB3_2:
; GFX1032-NEXT: s_waitcnt_depctr 0xffe3
; GFX1032-NEXT: s_or_b32 exec_lo, exec_lo, s4
@@ -1200,8 +1200,8 @@ define amdgpu_kernel void @add_i64_constant(ptr addrspace(1) %out, ptr addrspace
; GFX1164-NEXT: s_mov_b32 s9, s3
; GFX1164-NEXT: buffer_atomic_add_u64 v[0:1], off, s[8:11], 0 glc
; GFX1164-NEXT: s_waitcnt vmcnt(0)
-; GFX1164-NEXT: buffer_gl0_inv
; GFX1164-NEXT: buffer_gl1_inv
+; GFX1164-NEXT: buffer_gl0_inv
; GFX1164-NEXT: .LBB3_2:
; GFX1164-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
@@ -1237,8 +1237,8 @@ define amdgpu_kernel void @add_i64_constant(ptr addrspace(1) %out, ptr addrspace
; GFX1132-NEXT: s_mov_b32 s9, s3
; GFX1132-NEXT: buffer_atomic_add_u64 v[0:1], off, s[8:11], 0 glc
; GFX1132-NEXT: s_waitcnt vmcnt(0)
-; GFX1132-NEXT: buffer_gl0_inv
; GFX1132-NEXT: buffer_gl1_inv
+; GFX1132-NEXT: buffer_gl0_inv
; GFX1132-NEXT: .LBB3_2:
; GFX1132-NEXT: s_or_b32 exec_lo, exec_lo, s4
; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
@@ -1488,8 +1488,8 @@ define amdgpu_kernel void @add_i64_uniform(ptr addrspace(1) %out, ptr addrspace(
; GFX1064-NEXT: s_mov_b32 s9, s7
; GFX1064-NEXT: buffer_atomic_add_x2 v[0:1], off, s[8:11], 0 glc
; GFX1064-NEXT: s_waitcnt vmcnt(0)
-; GFX1064-NEXT: buffer_gl0_inv
; GFX1064-NEXT: buffer_gl1_inv
+; GFX1064-NEXT: buffer_gl0_inv
; GFX1064-NEXT: .LBB4_2:
; GFX1064-NEXT: s_waitcnt_depctr 0xffe3
; GFX1064-NEXT: s_or_b64 exec, exec, s[0:1]
@@ -1529,8 +1529,8 @@ define amdgpu_kernel void @add_i64_uniform(ptr addrspace(1) %out, ptr addrspace(
; GFX1032-NEXT: s_mov_b32 s9, s7
; GFX1032-NEXT: buffer_atomic_add_x2 v[0:1], off, s[8:11], 0 glc
; GFX1032-NEXT: s_waitcnt vmcnt(0)
-; GFX1032-NEXT: buffer_gl0_inv
; GFX1032-NEXT: buffer_gl1_inv
+; GFX1032-NEXT: buffer_gl0_inv
; GFX1032-NEXT: .LBB4_2:
; GFX1032-NEXT: s_waitcnt_depctr 0xffe3
; GFX1032-NEXT: s_or_b32 exec_lo, exec_lo, s0
@@ -1572,8 +1572,8 @@ define amdgpu_kernel void @add_i64_uniform(ptr addrspace(1) %out, ptr addrspace(
; GFX1164-NEXT: s_mov_b32 s9, s7
; GFX1164-NEXT: buffer_atomic_add_u64 v[0:1], off, s[8:11], 0 glc
; GFX1164-NEXT: s_waitcnt vmcnt(0)
-; GFX1164-NEXT: buffer_gl0_inv
; GFX1164-NEXT: buffer_gl1_inv
+; GFX1164-NEXT: buffer_gl0_inv
; GFX1164-NEXT: .LBB4_2:
; GFX1164-NEXT: s_or_b64 exec, exec, s[2:3]
; GFX1164-NEXT: v_readfirstlane_b32 s2, v0
@@ -1618,8 +1618,8 @@ define amdgpu_kernel void @add_i64_uniform(ptr addrspace(1) %out, ptr addrspace(
; GFX1132-NEXT: s_mov_b32 s9, s7
; GFX1132-NEXT: buffer_atomic_add_u64 v[0:1], off, s[8:11], 0 glc
; GFX1132-NEXT: s_waitcnt vmcnt(0)
-; GFX1132-NEXT: buffer_gl0_inv
; GFX1132-NEXT: buffer_gl1_inv
+; GFX1132-NEXT: buffer_gl0_inv
; GFX1132-NEXT: .LBB4_2:
; GFX1132-NEXT: s_or_b32 exec_lo, exec_lo, s2
; GFX1132-NEXT: v_readfirstlane_b32 s2, v0
@@ -1777,8 +1777,8 @@ define amdgpu_kernel void @add_i64_varying(ptr addrspace(1) %out, ptr addrspace(
; GFX10-NEXT: s_mov_b32 s4, s0
; GFX10-NEXT: buffer_atomic_add_x2 v[0:1], off, s[8:11], 0 glc
; GFX10-NEXT: s_waitcnt vmcnt(0)
-; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: buffer_gl1_inv
+; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: s_mov_b32 s5, s1
; GFX10-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0
; GFX10-NEXT: s_endpgm
@@ -1797,8 +1797,8 @@ define amdgpu_kernel void @add_i64_varying(ptr addrspace(1) %out, ptr addrspace(
; GFX11-NEXT: s_mov_b32 s4, s0
; GFX11-NEXT: buffer_atomic_add_u64 v[0:1], off, s[8:11], 0 glc
; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: s_mov_b32 s5, s1
; GFX11-NEXT: buffer_store_b64 v[0:1], off, s[4:7], 0
; GFX11-NEXT: s_nop 0
@@ -1954,8 +1954,8 @@ define amdgpu_kernel void @sub_i32_constant(ptr addrspace(1) %out, ptr addrspace
; GFX1064-NEXT: s_mov_b32 s9, s3
; GFX1064-NEXT: buffer_atomic_sub v1, off, s[8:11], 0 glc
; GFX1064-NEXT: s_waitcnt vmcnt(0)
-; GFX1064-NEXT: buffer_gl0_inv
; GFX1064-NEXT: buffer_gl1_inv
+; GFX1064-NEXT: buffer_gl0_inv
; GFX1064-NEXT: .LBB6_2:
; GFX1064-NEXT: s_waitcnt_depctr 0xffe3
; GFX1064-NEXT: s_or_b64 exec, exec, s[4:5]
@@ -1988,8 +1988,8 @@ define amdgpu_kernel void @sub_i32_constant(ptr addrspace(1) %out, ptr addrspace
; GFX1032-NEXT: s_mov_b32 s9, s3
; GFX1032-NEXT: buffer_atomic_sub v1, off, s[8:11], 0 glc
; GFX1032-NEXT: s_waitcnt vmcnt(0)
-; GFX1032-NEXT: buffer_gl0_inv
; GFX1032-NEXT: buffer_gl1_inv
+; GFX1032-NEXT: buffer_gl0_inv
; GFX1032-NEXT: .LBB6_2:
; GFX1032-NEXT: s_waitcnt_depctr 0xffe3
; GFX1032-NEXT: s_or_b32 exec_lo, exec_lo, s4
@@ -2024,8 +2024,8 @@ define amdgpu_kernel void @sub_i32_constant(ptr addrspace(1) %out, ptr addrspace
; GFX1164-NEXT: s_mov_b32 s9, s3
; GFX1164-NEXT: buffer_atomic_sub_u32 v1, off, s[8:11], 0 glc
; GFX1164-NEXT: s_waitcnt vmcnt(0)
-; GFX1164-NEXT: buffer_gl0_inv
; GFX1164-NEXT: buffer_gl1_inv
+; GFX1164-NEXT: buffer_gl0_inv
; GFX1164-NEXT: .LBB6_2:
; GFX1164-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
@@ -2061,8 +2061,8 @@ define amdgpu_kernel void @sub_i32_constant(ptr addrspace(1) %out, ptr addrspace
; GFX1132-NEXT: s_mov_b32 s9, s3
; GFX1132-NEXT: buffer_atomic_sub_u32 v1, off, s[8:11], 0 glc
; GFX1132-NEXT: s_waitcnt vmcnt(0)
-; GFX1132-NEXT: buffer_gl0_inv
; GFX1132-NEXT: buffer_gl1_inv
+; GFX1132-NEXT: buffer_gl0_inv
; GFX1132-NEXT: .LBB6_2:
; GFX1132-NEXT: s_or_b32 exec_lo, exec_lo, s4
; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
@@ -2281,8 +2281,8 @@ define amdgpu_kernel void @sub_i32_uniform(ptr addrspace(1) %out, ptr addrspace(
; GFX1064-NEXT: s_mov_b32 s13, s7
; GFX1064-NEXT: buffer_atomic_sub v1, off, s[12:15], 0 glc
; GFX1064-NEXT: s_waitcnt vmcnt(0)
-; GFX1064-NEXT: buffer_gl0_inv
; GFX1064-NEXT: buffer_gl1_inv
+; GFX1064-NEXT: buffer_gl0_inv
; GFX1064-NEXT: .LBB7_2:
; GFX1064-NEXT: s_waitcnt_depctr 0xffe3
; GFX1064-NEXT: s_or_b64 exec, exec, s[0:1]
@@ -2317,8 +2317,8 @@ define amdgpu_kernel void @sub_i32_uniform(ptr addrspace(1) %out, ptr addrspace(
; GFX1032-NEXT: s_mov_b32 s9, s7
; GFX1032-NEXT: buffer_atomic_sub v1, off, s[8:11], 0 glc
; GFX1032-NEXT: s_waitcnt vmcnt(0)
-; GFX1032-NEXT: buffer_gl0_inv
; GFX1032-NEXT: buffer_gl1_inv
+; GFX1032-NEXT: buffer_gl0_inv
; GFX1032-NEXT: .LBB7_2:
; GFX1032-NEXT: s_waitcnt_depctr 0xffe3
; GFX1032-NEXT: s_or_b32 exec_lo, exec_lo, s0
@@ -2355,8 +2355,8 @@ define amdgpu_kernel void @sub_i32_uniform(ptr addrspace(1) %out, ptr addrspace(
; GFX1164-NEXT: s_mov_b32 s13, s7
; GFX1164-NEXT: buffer_atomic_sub_u32 v1, off, s[12:15], 0 glc
; GFX1164-NEXT: s_waitcnt vmcnt(0)
-; GFX1164-NEXT: buffer_gl0_inv
; GFX1164-NEXT: buffer_gl1_inv
+; GFX1164-NEXT: buffer_gl0_inv
; GFX1164-NEXT: .LBB7_2:
; GFX1164-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
@@ -2394,8 +2394,8 @@ define amdgpu_kernel void @sub_i32_uniform(ptr addrspace(1) %out, ptr addrspace(
; GFX1132-NEXT: s_mov_b32 s9, s7
; GFX1132-NEXT: buffer_atomic_sub_u32 v1, off, s[8:11], 0 glc
; GFX1132-NEXT: s_waitcnt vmcnt(0)
-; GFX1132-NEXT: buffer_gl0_inv
; GFX1132-NEXT: buffer_gl1_inv
+; GFX1132-NEXT: buffer_gl0_inv
; GFX1132-NEXT: .LBB7_2:
; GFX1132-NEXT: s_or_b32 exec_lo, exec_lo, s1
; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
@@ -2634,8 +2634,8 @@ define amdgpu_kernel void @sub_i32_varying(ptr addrspace(1) %out, ptr addrspace(
; GFX1064-NEXT: s_mov_b32 s9, s3
; GFX1064-NEXT: buffer_atomic_sub v0, off, s[8:11], 0 glc
; GFX1064-NEXT: s_waitcnt vmcnt(0)
-; GFX1064-NEXT: buffer_gl0_inv
; GFX1064-NEXT: buffer_gl1_inv
+; GFX1064-NEXT: buffer_gl0_inv
; GFX1064-NEXT: .LBB8_4:
; GFX1064-NEXT: s_waitcnt_depctr 0xffe3
; GFX1064-NEXT: s_or_b64 exec, exec, s[4:5]
@@ -2679,8 +2679,8 @@ define amdgpu_kernel void @sub_i32_varying(ptr addrspace(1) %out, ptr addrspace(
; GFX1032-NEXT: s_mov_b32 s9, s3
; GFX1032-NEXT: buffer_atomic_sub v0, off, s[8:11], 0 glc
; GFX1032-NEXT: s_waitcnt vmcnt(0)
-; GFX1032-NEXT: buffer_gl0_inv
; GFX1032-NEXT: buffer_gl1_inv
+; GFX1032-NEXT: buffer_gl0_inv
; GFX1032-NEXT: .LBB8_4:
; GFX1032-NEXT: s_waitcnt_depctr 0xffe3
; GFX1032-NEXT: s_or_b32 exec_lo, exec_lo, s5
@@ -2728,8 +2728,8 @@ define amdgpu_kernel void @sub_i32_varying(ptr addrspace(1) %out, ptr addrspace(
; GFX1164-NEXT: s_mov_b32 s9, s3
; GFX1164-NEXT: buffer_atomic_sub_u32 v0, off, s[8:11], 0 glc
; GFX1164-NEXT: s_waitcnt vmcnt(0)
-; GFX1164-NEXT: buffer_gl0_inv
; GFX1164-NEXT: buffer_gl1_inv
+; GFX1164-NEXT: buffer_gl0_inv
; GFX1164-NEXT: .LBB8_4:
; GFX1164-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
@@ -2777,8 +2777,8 @@ define amdgpu_kernel void @sub_i32_varying(ptr addrspace(1) %out, ptr addrspace(
; GFX1132-NEXT: s_mov_b32 s9, s3
; GFX1132-NEXT: buffer_atomic_sub_u32 v0, off, s[8:11], 0 glc
; GFX1132-NEXT: s_waitcnt vmcnt(0)
-; GFX1132-NEXT: buffer_gl0_inv
; GFX1132-NEXT: buffer_gl1_inv
+; GFX1132-NEXT: buffer_gl0_inv
; GFX1132-NEXT: .LBB8_4:
; GFX1132-NEXT: s_or_b32 exec_lo, exec_lo, s5
; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
@@ -3034,8 +3034,8 @@ define amdgpu_kernel void @sub_i64_constant(ptr addrspace(1) %out, ptr addrspace
; GFX1064-NEXT: s_mov_b32 s9, s3
; GFX1064-NEXT: buffer_atomic_sub_x2 v[0:1], off, s[8:11], 0 glc
; GFX1064-NEXT: s_waitcnt vmcnt(0)
-; GFX1064-NEXT: buffer_gl0_inv
; GFX1064-NEXT: buffer_gl1_inv
+; GFX1064-NEXT: buffer_gl0_inv
; GFX1064-NEXT: .LBB9_2:
; GFX1064-NEXT: s_waitcnt_depctr 0xffe3
; GFX1064-NEXT: s_or_b64 exec, exec, s[4:5]
@@ -3072,8 +3072,8 @@ define amdgpu_kernel void @sub_i64_constant(ptr addrspace(1) %out, ptr addrspace
; GFX1032-NEXT: s_mov_b32 s9, s3
; GFX1032-NEXT: buffer_atomic_sub_x2 v[0:1], off, s[8:11], 0 glc
; GFX1032-NEXT: s_waitcnt vmcnt(0)
-; GFX1032-NEXT: buffer_gl0_inv
; GFX1032-NEXT: buffer_gl1_inv
+; GFX1032-NEXT: buffer_gl0_inv
; GFX1032-NEXT: .LBB9_2:
; GFX1032-NEXT: s_waitcnt_depctr 0xffe3
; GFX1032-NEXT: s_or_b32 exec_lo, exec_lo, s4
@@ -3112,8 +3112,8 @@ define amdgpu_kernel void @sub_i64_constant(ptr addrspace(1) %out, ptr addrspace
; GFX1164-NEXT: s_mov_b32 s9, s3
; GFX1164-NEXT: buffer_atomic_sub_u64 v[0:1], off, s[8:11], 0 glc
; GFX1164-NEXT: s_waitcnt vmcnt(0)
-; GFX1164-NEXT: buffer_gl0_inv
; GFX1164-NEXT: buffer_gl1_inv
+; GFX1164-NEXT: buffer_gl0_inv
; GFX1164-NEXT: .LBB9_2:
; GFX1164-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
@@ -3152,8 +3152,8 @@ define amdgpu_kernel void @sub_i64_constant(ptr addrspace(1) %out, ptr addrspace
; GFX1132-NEXT: s_mov_b32 s9, s3
; GFX1132-NEXT: buffer_atomic_sub_u64 v[0:1], off, s[8:11], 0 glc
; GFX1132-NEXT: s_waitcnt vmcnt(0)
-; GFX1132-NEXT: buffer_gl0_inv
; GFX1132-NEXT: buffer_gl1_inv
+; GFX1132-NEXT: buffer_gl0_inv
; GFX1132-NEXT: .LBB9_2:
; GFX1132-NEXT: s_or_b32 exec_lo, exec_lo, s4
; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
@@ -3415,8 +3415,8 @@ define amdgpu_kernel void @sub_i64_uniform(ptr addrspace(1) %out, ptr addrspace(
; GFX1064-NEXT: s_mov_b32 s9, s7
; GFX1064-NEXT: buffer_atomic_sub_x2 v[0:1], off, s[8:11], 0 glc
; GFX1064-NEXT: s_waitcnt vmcnt(0)
-; GFX1064-NEXT: buffer_gl0_inv
; GFX1064-NEXT: buffer_gl1_inv
+; GFX1064-NEXT: buffer_gl0_inv
; GFX1064-NEXT: .LBB10_2:
; GFX1064-NEXT: s_waitcnt_depctr 0xffe3
; GFX1064-NEXT: s_or_b64 exec, exec, s[0:1]
@@ -3459,8 +3459,8 @@ define amdgpu_kernel void @sub_i64_uniform(ptr addrspace(1) %out, ptr addrspace(
; GFX1032-NEXT: s_mov_b32 s9, s7
; GFX1032-NEXT: buffer_atomic_sub_x2 v[0:1], off, s[8:11], 0 glc
; GFX1032-NEXT: s_waitcnt vmcnt(0)
-; GFX1032-NEXT: buffer_gl0_inv
; GFX1032-NEXT: buffer_gl1_inv
+; GFX1032-NEXT: buffer_gl0_inv
; GFX1032-NEXT: .LBB10_2:
; GFX1032-NEXT: s_waitcnt_depctr 0xffe3
; GFX1032-NEXT: s_or_b32 exec_lo, exec_lo, s0
@@ -3505,8 +3505,8 @@ define amdgpu_kernel void @sub_i64_uniform(ptr addrspace(1) %out, ptr addrspace(
; GFX1164-NEXT: s_mov_b32 s9, s7
; GFX1164-NEXT: buffer_atomic_sub_u64 v[0:1], off, s[8:11], 0 glc
; GFX1164-NEXT: s_waitcnt vmcnt(0)
-; GFX1164-NEXT: buffer_gl0_inv
; GFX1164-NEXT: buffer_gl1_inv
+; GFX1164-NEXT: buffer_gl0_inv
; GFX1164-NEXT: .LBB10_2:
; GFX1164-NEXT: s_or_b64 exec, exec, s[2:3]
; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
@@ -3553,8 +3553,8 @@ define amdgpu_kernel void @sub_i64_uniform(ptr addrspace(1) %out, ptr addrspace(
; GFX1132-NEXT: s_mov_b32 s9, s7
; GFX1132-NEXT: buffer_atomic_sub_u64 v[0:1], off, s[8:11], 0 glc
; GFX1132-NEXT: s_waitcnt vmcnt(0)
-; GFX1132-NEXT: buffer_gl0_inv
; GFX1132-NEXT: buffer_gl1_inv
+; GFX1132-NEXT: buffer_gl0_inv
; GFX1132-NEXT: .LBB10_2:
; GFX1132-NEXT: s_or_b32 exec_lo, exec_lo, s2
; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
@@ -3722,8 +3722,8 @@ define amdgpu_kernel void @sub_i64_varying(ptr addrspace(1) %out, ptr addrspace(
; GFX10-NEXT: s_mov_b32 s4, s0
; GFX10-NEXT: buffer_atomic_sub_x2 v[0:1], off, s[8:11], 0 glc
; GFX10-NEXT: s_waitcnt vmcnt(0)
-; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: buffer_gl1_inv
+; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: s_mov_b32 s5, s1
; GFX10-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0
; GFX10-NEXT: s_endpgm
@@ -3742,8 +3742,8 @@ define amdgpu_kernel void @sub_i64_varying(ptr addrspace(1) %out, ptr addrspace(
; GFX11-NEXT: s_mov_b32 s4, s0
; GFX11-NEXT: buffer_atomic_sub_u64 v[0:1], off, s[8:11], 0 glc
; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: s_mov_b32 s5, s1
; GFX11-NEXT: buffer_store_b64 v[0:1], off, s[4:7], 0
; GFX11-NEXT: s_nop 0
diff --git a/llvm/test/CodeGen/AMDGPU/atomicrmw-expand.ll b/llvm/test/CodeGen/AMDGPU/atomicrmw-expand.ll
index e64f8c2..19a1d2d9 100644
--- a/llvm/test/CodeGen/AMDGPU/atomicrmw-expand.ll
+++ b/llvm/test/CodeGen/AMDGPU/atomicrmw-expand.ll
@@ -75,8 +75,8 @@ define float @syncscope_system(ptr %addr, float %val) #0 {
; GFX1100-NEXT: s_waitcnt_vscnt null, 0x0
; GFX1100-NEXT: flat_atomic_cmpswap_b32 v3, v[0:1], v[3:4] glc
; GFX1100-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX1100-NEXT: buffer_gl0_inv
; GFX1100-NEXT: buffer_gl1_inv
+; GFX1100-NEXT: buffer_gl0_inv
; GFX1100-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
; GFX1100-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX1100-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
diff --git a/llvm/test/CodeGen/AMDGPU/directive-amdgcn-target.ll b/llvm/test/CodeGen/AMDGPU/directive-amdgcn-target.ll
index 038219f..2979b52 100644
--- a/llvm/test/CodeGen/AMDGPU/directive-amdgcn-target.ll
+++ b/llvm/test/CodeGen/AMDGPU/directive-amdgcn-target.ll
@@ -110,9 +110,9 @@
; RUN: llc --amdhsa-code-object-version=6 -mtriple=amdgcn-amd-amdhsa -mcpu=gfx9-generic -mattr=-xnack < %s | FileCheck --check-prefixes=GFX9_GENERIC_NOXNACK %s
; RUN: llc --amdhsa-code-object-version=6 -mtriple=amdgcn-amd-amdhsa -mcpu=gfx9-generic -mattr=+xnack < %s | FileCheck --check-prefixes=GFX9_GENERIC_XNACK %s
-; RUN: llc --amdhsa-code-object-version=6 -mtriple=amdgcn-amd-amdhsa -mcpu=gfx10.1-generic -mattr=-xnack < %s | FileCheck --check-prefixes=GFX10_1_GENERIC_NOXNACK %s
-; RUN: llc --amdhsa-code-object-version=6 -mtriple=amdgcn-amd-amdhsa -mcpu=gfx10.1-generic -mattr=+xnack < %s | FileCheck --check-prefixes=GFX10_1_GENERIC_XNACK %s
-; RUN: llc --amdhsa-code-object-version=6 -mtriple=amdgcn-amd-amdhsa -mcpu=gfx10.3-generic < %s | FileCheck --check-prefixes=GFX10_3_GENERIC %s
+; RUN: llc --amdhsa-code-object-version=6 -mtriple=amdgcn-amd-amdhsa -mcpu=gfx10-1-generic -mattr=-xnack < %s | FileCheck --check-prefixes=GFX10_1_GENERIC_NOXNACK %s
+; RUN: llc --amdhsa-code-object-version=6 -mtriple=amdgcn-amd-amdhsa -mcpu=gfx10-1-generic -mattr=+xnack < %s | FileCheck --check-prefixes=GFX10_1_GENERIC_XNACK %s
+; RUN: llc --amdhsa-code-object-version=6 -mtriple=amdgcn-amd-amdhsa -mcpu=gfx10-3-generic < %s | FileCheck --check-prefixes=GFX10_3_GENERIC %s
; RUN: llc --amdhsa-code-object-version=6 -mtriple=amdgcn-amd-amdhsa -mcpu=gfx11-generic < %s | FileCheck --check-prefixes=GFX11_GENERIC %s
; GFX600: .amdgcn_target "amdgcn-amd-amdhsa--gfx600"
@@ -205,9 +205,9 @@
; GFX9_GENERIC_NOXNACK: .amdgcn_target "amdgcn-amd-amdhsa--gfx9-generic:xnack-"
; GFX9_GENERIC_XNACK: .amdgcn_target "amdgcn-amd-amdhsa--gfx9-generic:xnack+"
-; GFX10_1_GENERIC_NOXNACK: .amdgcn_target "amdgcn-amd-amdhsa--gfx10.1-generic:xnack-"
-; GFX10_1_GENERIC_XNACK: .amdgcn_target "amdgcn-amd-amdhsa--gfx10.1-generic:xnack+"
-; GFX10_3_GENERIC: .amdgcn_target "amdgcn-amd-amdhsa--gfx10.3-generic"
+; GFX10_1_GENERIC_NOXNACK: .amdgcn_target "amdgcn-amd-amdhsa--gfx10-1-generic:xnack-"
+; GFX10_1_GENERIC_XNACK: .amdgcn_target "amdgcn-amd-amdhsa--gfx10-1-generic:xnack+"
+; GFX10_3_GENERIC: .amdgcn_target "amdgcn-amd-amdhsa--gfx10-3-generic"
; GFX11_GENERIC: .amdgcn_target "amdgcn-amd-amdhsa--gfx11-generic"
define amdgpu_kernel void @directive_amdgcn_target() {
diff --git a/llvm/test/CodeGen/AMDGPU/elf-header-flags-mach.ll b/llvm/test/CodeGen/AMDGPU/elf-header-flags-mach.ll
index 9ba8176..5f4bfe7 100644
--- a/llvm/test/CodeGen/AMDGPU/elf-header-flags-mach.ll
+++ b/llvm/test/CodeGen/AMDGPU/elf-header-flags-mach.ll
@@ -78,8 +78,8 @@
; RUN: llc -filetype=obj -mtriple=amdgcn -mcpu=gfx1201 < %s | llvm-readobj --file-header - | FileCheck --check-prefixes=ALL,ARCH-GCN,GFX1201 %s
; RUN: llc -filetype=obj --amdhsa-code-object-version=6 -mtriple=amdgcn -mcpu=gfx9-generic < %s | llvm-readobj --file-header - | FileCheck --check-prefixes=ALL,ARCH-GCN,GFX9_GENERIC %s
-; RUN: llc -filetype=obj --amdhsa-code-object-version=6 -mtriple=amdgcn -mcpu=gfx10.1-generic < %s | llvm-readobj --file-header - | FileCheck --check-prefixes=ALL,ARCH-GCN,GFX10_1_GENERIC %s
-; RUN: llc -filetype=obj --amdhsa-code-object-version=6 -mtriple=amdgcn -mcpu=gfx10.3-generic < %s | llvm-readobj --file-header - | FileCheck --check-prefixes=ALL,ARCH-GCN,GFX10_3_GENERIC %s
+; RUN: llc -filetype=obj --amdhsa-code-object-version=6 -mtriple=amdgcn -mcpu=gfx10-1-generic < %s | llvm-readobj --file-header - | FileCheck --check-prefixes=ALL,ARCH-GCN,GFX10_1_GENERIC %s
+; RUN: llc -filetype=obj --amdhsa-code-object-version=6 -mtriple=amdgcn -mcpu=gfx10-3-generic < %s | llvm-readobj --file-header - | FileCheck --check-prefixes=ALL,ARCH-GCN,GFX10_3_GENERIC %s
; RUN: llc -filetype=obj --amdhsa-code-object-version=6 -mtriple=amdgcn -mcpu=gfx11-generic < %s | llvm-readobj --file-header - | FileCheck --check-prefixes=ALL,ARCH-GCN,GFX11_GENERIC %s
; FIXME: With the default attributes the eflags are not accurate for
diff --git a/llvm/test/CodeGen/AMDGPU/generic-targets-require-v6.ll b/llvm/test/CodeGen/AMDGPU/generic-targets-require-v6.ll
index e3f4b14..482f616 100644
--- a/llvm/test/CodeGen/AMDGPU/generic-targets-require-v6.ll
+++ b/llvm/test/CodeGen/AMDGPU/generic-targets-require-v6.ll
@@ -1,16 +1,16 @@
; RUN: not llc -march=amdgcn -mcpu=gfx9-generic --amdhsa-code-object-version=5 -o - %s 2>&1 | FileCheck --check-prefix=GFX9-V5 %s
-; RUN: not llc -march=amdgcn -mcpu=gfx10.1-generic --amdhsa-code-object-version=5 -o - %s 2>&1 | FileCheck --check-prefix=GFX101-V5 %s
-; RUN: not llc -march=amdgcn -mcpu=gfx10.3-generic --amdhsa-code-object-version=5 -o - %s 2>&1 | FileCheck --check-prefix=GFX103-V5 %s
+; RUN: not llc -march=amdgcn -mcpu=gfx10-1-generic --amdhsa-code-object-version=5 -o - %s 2>&1 | FileCheck --check-prefix=GFX101-V5 %s
+; RUN: not llc -march=amdgcn -mcpu=gfx10-3-generic --amdhsa-code-object-version=5 -o - %s 2>&1 | FileCheck --check-prefix=GFX103-V5 %s
; RUN: not llc -march=amdgcn -mcpu=gfx11-generic --amdhsa-code-object-version=5 -o - %s 2>&1 | FileCheck --check-prefix=GFX11-V5 %s
; RUN: llc -march=amdgcn -mcpu=gfx9-generic --amdhsa-code-object-version=6 -o - %s
-; RUN: llc -march=amdgcn -mcpu=gfx10.1-generic --amdhsa-code-object-version=6 -o - %s
-; RUN: llc -march=amdgcn -mcpu=gfx10.3-generic --amdhsa-code-object-version=6 -o - %s
+; RUN: llc -march=amdgcn -mcpu=gfx10-1-generic --amdhsa-code-object-version=6 -o - %s
+; RUN: llc -march=amdgcn -mcpu=gfx10-3-generic --amdhsa-code-object-version=6 -o - %s
; RUN: llc -march=amdgcn -mcpu=gfx11-generic --amdhsa-code-object-version=6 -o - %s
; GFX9-V5: gfx9-generic is only available on code object version 6 or better
-; GFX101-V5: gfx10.1-generic is only available on code object version 6 or better
-; GFX103-V5: gfx10.3-generic is only available on code object version 6 or better
+; GFX101-V5: gfx10-1-generic is only available on code object version 6 or better
+; GFX103-V5: gfx10-3-generic is only available on code object version 6 or better
; GFX11-V5: gfx11-generic is only available on code object version 6 or better
define void @foo() {
diff --git a/llvm/test/CodeGen/AMDGPU/global-atomics-fp.ll b/llvm/test/CodeGen/AMDGPU/global-atomics-fp.ll
index 135285cc..490167e 100644
--- a/llvm/test/CodeGen/AMDGPU/global-atomics-fp.ll
+++ b/llvm/test/CodeGen/AMDGPU/global-atomics-fp.ll
@@ -156,8 +156,8 @@ define amdgpu_kernel void @global_atomic_fadd_ret_f32(ptr addrspace(1) %ptr) #0
; GFX10-NEXT: v_add_f32_e32 v4, v5, v2
; GFX10-NEXT: global_atomic_cmpswap v1, v3, v[4:5], s[0:1] glc
; GFX10-NEXT: s_waitcnt vmcnt(0)
-; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: buffer_gl1_inv
+; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v1, v5
; GFX10-NEXT: s_or_b32 s3, vcc_lo, s3
; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s3
@@ -196,8 +196,8 @@ define amdgpu_kernel void @global_atomic_fadd_ret_f32(ptr addrspace(1) %ptr) #0
; GFX11-NEXT: v_add_f32_e32 v4, v5, v2
; GFX11-NEXT: global_atomic_cmpswap_b32 v1, v3, v[4:5], s[0:1] glc
; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v1, v5
; GFX11-NEXT: s_or_b32 s3, vcc_lo, s3
; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s3
@@ -357,8 +357,8 @@ define amdgpu_kernel void @global_atomic_fadd_ret_f32_ieee(ptr addrspace(1) %ptr
; GFX10-NEXT: v_add_f32_e32 v4, v5, v2
; GFX10-NEXT: global_atomic_cmpswap v1, v3, v[4:5], s[0:1] glc
; GFX10-NEXT: s_waitcnt vmcnt(0)
-; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: buffer_gl1_inv
+; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v1, v5
; GFX10-NEXT: s_or_b32 s3, vcc_lo, s3
; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s3
@@ -390,8 +390,8 @@ define amdgpu_kernel void @global_atomic_fadd_ret_f32_ieee(ptr addrspace(1) %ptr
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-NEXT: global_atomic_add_f32 v1, v2, v1, s[0:1] glc
; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: .LBB1_2:
; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s2
; GFX11-NEXT: v_cvt_f32_ubyte0_e32 v0, v0
@@ -506,8 +506,8 @@ define amdgpu_kernel void @global_atomic_fadd_noret_f32(ptr addrspace(1) %ptr) #
; GFX10-NEXT: v_add_f32_e32 v0, v1, v2
; GFX10-NEXT: global_atomic_cmpswap v0, v3, v[0:1], s[0:1] glc
; GFX10-NEXT: s_waitcnt vmcnt(0)
-; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: buffer_gl1_inv
+; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v0, v1
; GFX10-NEXT: v_mov_b32_e32 v1, v0
; GFX10-NEXT: s_or_b32 s2, vcc_lo, s2
@@ -531,8 +531,8 @@ define amdgpu_kernel void @global_atomic_fadd_noret_f32(ptr addrspace(1) %ptr) #
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-NEXT: global_atomic_add_f32 v1, v0, s[0:1]
; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: .LBB2_2:
; GFX11-NEXT: s_endpgm
%result = atomicrmw fadd ptr addrspace(1) %ptr, float 4.0 syncscope("agent") seq_cst
@@ -638,8 +638,8 @@ define amdgpu_kernel void @global_atomic_fadd_noret_f32_ieee(ptr addrspace(1) %p
; GFX10-NEXT: v_add_f32_e32 v0, v1, v2
; GFX10-NEXT: global_atomic_cmpswap v0, v3, v[0:1], s[0:1] glc
; GFX10-NEXT: s_waitcnt vmcnt(0)
-; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: buffer_gl1_inv
+; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v0, v1
; GFX10-NEXT: v_mov_b32_e32 v1, v0
; GFX10-NEXT: s_or_b32 s2, vcc_lo, s2
@@ -663,8 +663,8 @@ define amdgpu_kernel void @global_atomic_fadd_noret_f32_ieee(ptr addrspace(1) %p
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-NEXT: global_atomic_add_f32 v1, v0, s[0:1]
; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: .LBB3_2:
; GFX11-NEXT: s_endpgm
%result = atomicrmw fadd ptr addrspace(1) %ptr, float 4.0 syncscope("agent") seq_cst
@@ -806,8 +806,8 @@ define amdgpu_kernel void @global_atomic_fadd_ret_f32_agent(ptr addrspace(1) %pt
; GFX10-NEXT: v_add_f32_e32 v4, v5, v2
; GFX10-NEXT: global_atomic_cmpswap v1, v3, v[4:5], s[0:1] glc
; GFX10-NEXT: s_waitcnt vmcnt(0)
-; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: buffer_gl1_inv
+; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v1, v5
; GFX10-NEXT: s_or_b32 s3, vcc_lo, s3
; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s3
@@ -838,8 +838,8 @@ define amdgpu_kernel void @global_atomic_fadd_ret_f32_agent(ptr addrspace(1) %pt
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-NEXT: global_atomic_add_f32 v1, v2, v1, s[0:1] glc
; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: .LBB4_2:
; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s2
; GFX11-NEXT: v_cvt_f32_ubyte0_e32 v0, v0
@@ -1006,8 +1006,8 @@ define amdgpu_kernel void @global_atomic_fadd_ret_f32_system(ptr addrspace(1) %p
; GFX10-NEXT: v_add_f32_e32 v4, v5, v2
; GFX10-NEXT: global_atomic_cmpswap v1, v3, v[4:5], s[0:1] glc
; GFX10-NEXT: s_waitcnt vmcnt(0)
-; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: buffer_gl1_inv
+; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v1, v5
; GFX10-NEXT: s_or_b32 s3, vcc_lo, s3
; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s3
@@ -1046,8 +1046,8 @@ define amdgpu_kernel void @global_atomic_fadd_ret_f32_system(ptr addrspace(1) %p
; GFX11-NEXT: v_add_f32_e32 v4, v5, v2
; GFX11-NEXT: global_atomic_cmpswap_b32 v1, v3, v[4:5], s[0:1] glc
; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v1, v5
; GFX11-NEXT: s_or_b32 s3, vcc_lo, s3
; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s3
@@ -1325,8 +1325,8 @@ define amdgpu_kernel void @global_atomic_fadd_noret_f32_safe(ptr addrspace(1) %p
; GFX10-NEXT: v_add_f32_e32 v0, v1, v2
; GFX10-NEXT: global_atomic_cmpswap v0, v3, v[0:1], s[0:1] glc
; GFX10-NEXT: s_waitcnt vmcnt(0)
-; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: buffer_gl1_inv
+; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v0, v1
; GFX10-NEXT: v_mov_b32_e32 v1, v0
; GFX10-NEXT: s_or_b32 s2, vcc_lo, s2
@@ -1357,8 +1357,8 @@ define amdgpu_kernel void @global_atomic_fadd_noret_f32_safe(ptr addrspace(1) %p
; GFX11-NEXT: v_add_f32_e32 v0, v1, v2
; GFX11-NEXT: global_atomic_cmpswap_b32 v0, v3, v[0:1], s[0:1] glc
; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v0, v1
; GFX11-NEXT: v_mov_b32_e32 v1, v0
; GFX11-NEXT: s_or_b32 s2, vcc_lo, s2
@@ -1631,8 +1631,8 @@ define amdgpu_kernel void @global_atomic_fadd_ret_bf16_agent(ptr addrspace(1) %p
; GFX10-NEXT: v_and_or_b32 v1, v2, s4, v1
; GFX10-NEXT: global_atomic_cmpswap v1, v0, v[1:2], s[0:1] glc
; GFX10-NEXT: s_waitcnt vmcnt(0)
-; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: buffer_gl1_inv
+; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v1, v2
; GFX10-NEXT: s_or_b32 s3, vcc_lo, s3
; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s3
@@ -1669,8 +1669,8 @@ define amdgpu_kernel void @global_atomic_fadd_ret_bf16_agent(ptr addrspace(1) %p
; GFX11-NEXT: v_and_or_b32 v1, v2, s4, v1
; GFX11-NEXT: global_atomic_cmpswap_b32 v1, v0, v[1:2], s[0:1] glc
; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v1, v2
; GFX11-NEXT: s_or_b32 s3, vcc_lo, s3
; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s3
@@ -1819,8 +1819,8 @@ define amdgpu_kernel void @global_atomic_fadd_ret_bf16_system(ptr addrspace(1) %
; GFX10-NEXT: v_and_or_b32 v1, v2, s4, v1
; GFX10-NEXT: global_atomic_cmpswap v1, v0, v[1:2], s[0:1] glc
; GFX10-NEXT: s_waitcnt vmcnt(0)
-; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: buffer_gl1_inv
+; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v1, v2
; GFX10-NEXT: s_or_b32 s3, vcc_lo, s3
; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s3
@@ -1857,8 +1857,8 @@ define amdgpu_kernel void @global_atomic_fadd_ret_bf16_system(ptr addrspace(1) %
; GFX11-NEXT: v_and_or_b32 v1, v2, s4, v1
; GFX11-NEXT: global_atomic_cmpswap_b32 v1, v0, v[1:2], s[0:1] glc
; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v1, v2
; GFX11-NEXT: s_or_b32 s3, vcc_lo, s3
; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s3
diff --git a/llvm/test/CodeGen/AMDGPU/global-saddr-atomics-min-max-system.ll b/llvm/test/CodeGen/AMDGPU/global-saddr-atomics-min-max-system.ll
index a847538..6b4a638 100644
--- a/llvm/test/CodeGen/AMDGPU/global-saddr-atomics-min-max-system.ll
+++ b/llvm/test/CodeGen/AMDGPU/global-saddr-atomics-min-max-system.ll
@@ -48,8 +48,8 @@ define amdgpu_ps float @global_max_saddr_i32_rtn(ptr addrspace(1) inreg %sbase,
; GFX10-NEXT: v_max_i32_e32 v4, v5, v1
; GFX10-NEXT: global_atomic_cmpswap v0, v[2:3], v[4:5], off glc
; GFX10-NEXT: s_waitcnt vmcnt(0)
-; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: buffer_gl1_inv
+; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5
; GFX10-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
; GFX10-NEXT: s_andn2_b64 exec, exec, s[0:1]
@@ -75,8 +75,8 @@ define amdgpu_ps float @global_max_saddr_i32_rtn(ptr addrspace(1) inreg %sbase,
; GFX11-NEXT: v_max_i32_e32 v4, v5, v1
; GFX11-NEXT: global_atomic_cmpswap_b32 v0, v[2:3], v[4:5], off glc
; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5
; GFX11-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
@@ -131,8 +131,8 @@ define amdgpu_ps float @global_max_saddr_i32_rtn_neg128(ptr addrspace(1) inreg %
; GFX10-NEXT: v_max_i32_e32 v4, v5, v1
; GFX10-NEXT: global_atomic_cmpswap v0, v[2:3], v[4:5], off offset:-128 glc
; GFX10-NEXT: s_waitcnt vmcnt(0)
-; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: buffer_gl1_inv
+; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5
; GFX10-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
; GFX10-NEXT: s_andn2_b64 exec, exec, s[0:1]
@@ -158,8 +158,8 @@ define amdgpu_ps float @global_max_saddr_i32_rtn_neg128(ptr addrspace(1) inreg %
; GFX11-NEXT: v_max_i32_e32 v4, v5, v1
; GFX11-NEXT: global_atomic_cmpswap_b32 v0, v[2:3], v[4:5], off offset:-128 glc
; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5
; GFX11-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
@@ -211,8 +211,8 @@ define amdgpu_ps void @global_max_saddr_i32_nortn(ptr addrspace(1) inreg %sbase,
; GFX10-NEXT: v_max_i32_e32 v4, v5, v1
; GFX10-NEXT: global_atomic_cmpswap v0, v[2:3], v[4:5], off glc
; GFX10-NEXT: s_waitcnt vmcnt(0)
-; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: buffer_gl1_inv
+; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5
; GFX10-NEXT: v_mov_b32_e32 v5, v0
; GFX10-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
@@ -235,8 +235,8 @@ define amdgpu_ps void @global_max_saddr_i32_nortn(ptr addrspace(1) inreg %sbase,
; GFX11-NEXT: v_max_i32_e32 v4, v5, v1
; GFX11-NEXT: global_atomic_cmpswap_b32 v0, v[2:3], v[4:5], off glc
; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5
; GFX11-NEXT: v_mov_b32_e32 v5, v0
; GFX11-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
@@ -286,8 +286,8 @@ define amdgpu_ps void @global_max_saddr_i32_nortn_neg128(ptr addrspace(1) inreg
; GFX10-NEXT: v_max_i32_e32 v4, v5, v1
; GFX10-NEXT: global_atomic_cmpswap v0, v[2:3], v[4:5], off offset:-128 glc
; GFX10-NEXT: s_waitcnt vmcnt(0)
-; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: buffer_gl1_inv
+; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5
; GFX10-NEXT: v_mov_b32_e32 v5, v0
; GFX10-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
@@ -310,8 +310,8 @@ define amdgpu_ps void @global_max_saddr_i32_nortn_neg128(ptr addrspace(1) inreg
; GFX11-NEXT: v_max_i32_e32 v4, v5, v1
; GFX11-NEXT: global_atomic_cmpswap_b32 v0, v[2:3], v[4:5], off offset:-128 glc
; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5
; GFX11-NEXT: v_mov_b32_e32 v5, v0
; GFX11-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
@@ -372,8 +372,8 @@ define amdgpu_ps <2 x float> @global_max_saddr_i64_rtn(ptr addrspace(1) inreg %s
; GFX10-NEXT: v_cndmask_b32_e32 v7, v1, v9, vcc
; GFX10-NEXT: global_atomic_cmpswap_x2 v[3:4], v[5:6], v[7:10], off glc
; GFX10-NEXT: s_waitcnt vmcnt(0)
-; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: buffer_gl1_inv
+; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: v_cmp_eq_u64_e32 vcc, v[3:4], v[9:10]
; GFX10-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
; GFX10-NEXT: s_andn2_b64 exec, exec, s[0:1]
@@ -403,8 +403,8 @@ define amdgpu_ps <2 x float> @global_max_saddr_i64_rtn(ptr addrspace(1) inreg %s
; GFX11-NEXT: v_cndmask_b32_e32 v7, v1, v9, vcc
; GFX11-NEXT: global_atomic_cmpswap_b64 v[3:4], v[5:6], v[7:10], off glc
; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: v_cmp_eq_u64_e32 vcc, v[3:4], v[9:10]
; GFX11-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
@@ -467,8 +467,8 @@ define amdgpu_ps <2 x float> @global_max_saddr_i64_rtn_neg128(ptr addrspace(1) i
; GFX10-NEXT: v_cndmask_b32_e32 v7, v1, v9, vcc
; GFX10-NEXT: global_atomic_cmpswap_x2 v[3:4], v[5:6], v[7:10], off offset:-128 glc
; GFX10-NEXT: s_waitcnt vmcnt(0)
-; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: buffer_gl1_inv
+; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: v_cmp_eq_u64_e32 vcc, v[3:4], v[9:10]
; GFX10-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
; GFX10-NEXT: s_andn2_b64 exec, exec, s[0:1]
@@ -498,8 +498,8 @@ define amdgpu_ps <2 x float> @global_max_saddr_i64_rtn_neg128(ptr addrspace(1) i
; GFX11-NEXT: v_cndmask_b32_e32 v7, v1, v9, vcc
; GFX11-NEXT: global_atomic_cmpswap_b64 v[3:4], v[5:6], v[7:10], off offset:-128 glc
; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: v_cmp_eq_u64_e32 vcc, v[3:4], v[9:10]
; GFX11-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
@@ -558,8 +558,8 @@ define amdgpu_ps void @global_max_saddr_i64_nortn(ptr addrspace(1) inreg %sbase,
; GFX10-NEXT: v_cndmask_b32_e32 v3, v1, v5, vcc
; GFX10-NEXT: global_atomic_cmpswap_x2 v[3:4], v[7:8], v[3:6], off glc
; GFX10-NEXT: s_waitcnt vmcnt(0)
-; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: buffer_gl1_inv
+; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: v_cmp_eq_u64_e32 vcc, v[3:4], v[5:6]
; GFX10-NEXT: v_mov_b32_e32 v6, v4
; GFX10-NEXT: v_mov_b32_e32 v5, v3
@@ -585,8 +585,8 @@ define amdgpu_ps void @global_max_saddr_i64_nortn(ptr addrspace(1) inreg %sbase,
; GFX11-NEXT: v_cndmask_b32_e32 v3, v1, v5, vcc
; GFX11-NEXT: global_atomic_cmpswap_b64 v[3:4], v[7:8], v[3:6], off glc
; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: v_cmp_eq_u64_e32 vcc, v[3:4], v[5:6]
; GFX11-NEXT: v_mov_b32_e32 v6, v4
; GFX11-NEXT: v_mov_b32_e32 v5, v3
@@ -642,8 +642,8 @@ define amdgpu_ps void @global_max_saddr_i64_nortn_neg128(ptr addrspace(1) inreg
; GFX10-NEXT: v_cndmask_b32_e32 v3, v1, v5, vcc
; GFX10-NEXT: global_atomic_cmpswap_x2 v[3:4], v[7:8], v[3:6], off offset:-128 glc
; GFX10-NEXT: s_waitcnt vmcnt(0)
-; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: buffer_gl1_inv
+; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: v_cmp_eq_u64_e32 vcc, v[3:4], v[5:6]
; GFX10-NEXT: v_mov_b32_e32 v6, v4
; GFX10-NEXT: v_mov_b32_e32 v5, v3
@@ -669,8 +669,8 @@ define amdgpu_ps void @global_max_saddr_i64_nortn_neg128(ptr addrspace(1) inreg
; GFX11-NEXT: v_cndmask_b32_e32 v3, v1, v5, vcc
; GFX11-NEXT: global_atomic_cmpswap_b64 v[3:4], v[7:8], v[3:6], off offset:-128 glc
; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: v_cmp_eq_u64_e32 vcc, v[3:4], v[5:6]
; GFX11-NEXT: v_mov_b32_e32 v6, v4
; GFX11-NEXT: v_mov_b32_e32 v5, v3
@@ -730,8 +730,8 @@ define amdgpu_ps float @global_min_saddr_i32_rtn(ptr addrspace(1) inreg %sbase,
; GFX10-NEXT: v_min_i32_e32 v4, v5, v1
; GFX10-NEXT: global_atomic_cmpswap v0, v[2:3], v[4:5], off glc
; GFX10-NEXT: s_waitcnt vmcnt(0)
-; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: buffer_gl1_inv
+; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5
; GFX10-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
; GFX10-NEXT: s_andn2_b64 exec, exec, s[0:1]
@@ -757,8 +757,8 @@ define amdgpu_ps float @global_min_saddr_i32_rtn(ptr addrspace(1) inreg %sbase,
; GFX11-NEXT: v_min_i32_e32 v4, v5, v1
; GFX11-NEXT: global_atomic_cmpswap_b32 v0, v[2:3], v[4:5], off glc
; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5
; GFX11-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
@@ -813,8 +813,8 @@ define amdgpu_ps float @global_min_saddr_i32_rtn_neg128(ptr addrspace(1) inreg %
; GFX10-NEXT: v_min_i32_e32 v4, v5, v1
; GFX10-NEXT: global_atomic_cmpswap v0, v[2:3], v[4:5], off offset:-128 glc
; GFX10-NEXT: s_waitcnt vmcnt(0)
-; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: buffer_gl1_inv
+; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5
; GFX10-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
; GFX10-NEXT: s_andn2_b64 exec, exec, s[0:1]
@@ -840,8 +840,8 @@ define amdgpu_ps float @global_min_saddr_i32_rtn_neg128(ptr addrspace(1) inreg %
; GFX11-NEXT: v_min_i32_e32 v4, v5, v1
; GFX11-NEXT: global_atomic_cmpswap_b32 v0, v[2:3], v[4:5], off offset:-128 glc
; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5
; GFX11-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
@@ -893,8 +893,8 @@ define amdgpu_ps void @global_min_saddr_i32_nortn(ptr addrspace(1) inreg %sbase,
; GFX10-NEXT: v_min_i32_e32 v4, v5, v1
; GFX10-NEXT: global_atomic_cmpswap v0, v[2:3], v[4:5], off glc
; GFX10-NEXT: s_waitcnt vmcnt(0)
-; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: buffer_gl1_inv
+; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5
; GFX10-NEXT: v_mov_b32_e32 v5, v0
; GFX10-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
@@ -917,8 +917,8 @@ define amdgpu_ps void @global_min_saddr_i32_nortn(ptr addrspace(1) inreg %sbase,
; GFX11-NEXT: v_min_i32_e32 v4, v5, v1
; GFX11-NEXT: global_atomic_cmpswap_b32 v0, v[2:3], v[4:5], off glc
; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5
; GFX11-NEXT: v_mov_b32_e32 v5, v0
; GFX11-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
@@ -968,8 +968,8 @@ define amdgpu_ps void @global_min_saddr_i32_nortn_neg128(ptr addrspace(1) inreg
; GFX10-NEXT: v_min_i32_e32 v4, v5, v1
; GFX10-NEXT: global_atomic_cmpswap v0, v[2:3], v[4:5], off offset:-128 glc
; GFX10-NEXT: s_waitcnt vmcnt(0)
-; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: buffer_gl1_inv
+; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5
; GFX10-NEXT: v_mov_b32_e32 v5, v0
; GFX10-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
@@ -992,8 +992,8 @@ define amdgpu_ps void @global_min_saddr_i32_nortn_neg128(ptr addrspace(1) inreg
; GFX11-NEXT: v_min_i32_e32 v4, v5, v1
; GFX11-NEXT: global_atomic_cmpswap_b32 v0, v[2:3], v[4:5], off offset:-128 glc
; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5
; GFX11-NEXT: v_mov_b32_e32 v5, v0
; GFX11-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
@@ -1054,8 +1054,8 @@ define amdgpu_ps <2 x float> @global_min_saddr_i64_rtn(ptr addrspace(1) inreg %s
; GFX10-NEXT: v_cndmask_b32_e32 v7, v1, v9, vcc
; GFX10-NEXT: global_atomic_cmpswap_x2 v[3:4], v[5:6], v[7:10], off glc
; GFX10-NEXT: s_waitcnt vmcnt(0)
-; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: buffer_gl1_inv
+; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: v_cmp_eq_u64_e32 vcc, v[3:4], v[9:10]
; GFX10-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
; GFX10-NEXT: s_andn2_b64 exec, exec, s[0:1]
@@ -1085,8 +1085,8 @@ define amdgpu_ps <2 x float> @global_min_saddr_i64_rtn(ptr addrspace(1) inreg %s
; GFX11-NEXT: v_cndmask_b32_e32 v7, v1, v9, vcc
; GFX11-NEXT: global_atomic_cmpswap_b64 v[3:4], v[5:6], v[7:10], off glc
; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: v_cmp_eq_u64_e32 vcc, v[3:4], v[9:10]
; GFX11-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
@@ -1149,8 +1149,8 @@ define amdgpu_ps <2 x float> @global_min_saddr_i64_rtn_neg128(ptr addrspace(1) i
; GFX10-NEXT: v_cndmask_b32_e32 v7, v1, v9, vcc
; GFX10-NEXT: global_atomic_cmpswap_x2 v[3:4], v[5:6], v[7:10], off offset:-128 glc
; GFX10-NEXT: s_waitcnt vmcnt(0)
-; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: buffer_gl1_inv
+; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: v_cmp_eq_u64_e32 vcc, v[3:4], v[9:10]
; GFX10-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
; GFX10-NEXT: s_andn2_b64 exec, exec, s[0:1]
@@ -1180,8 +1180,8 @@ define amdgpu_ps <2 x float> @global_min_saddr_i64_rtn_neg128(ptr addrspace(1) i
; GFX11-NEXT: v_cndmask_b32_e32 v7, v1, v9, vcc
; GFX11-NEXT: global_atomic_cmpswap_b64 v[3:4], v[5:6], v[7:10], off offset:-128 glc
; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: v_cmp_eq_u64_e32 vcc, v[3:4], v[9:10]
; GFX11-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
@@ -1240,8 +1240,8 @@ define amdgpu_ps void @global_min_saddr_i64_nortn(ptr addrspace(1) inreg %sbase,
; GFX10-NEXT: v_cndmask_b32_e32 v3, v1, v5, vcc
; GFX10-NEXT: global_atomic_cmpswap_x2 v[3:4], v[7:8], v[3:6], off glc
; GFX10-NEXT: s_waitcnt vmcnt(0)
-; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: buffer_gl1_inv
+; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: v_cmp_eq_u64_e32 vcc, v[3:4], v[5:6]
; GFX10-NEXT: v_mov_b32_e32 v6, v4
; GFX10-NEXT: v_mov_b32_e32 v5, v3
@@ -1267,8 +1267,8 @@ define amdgpu_ps void @global_min_saddr_i64_nortn(ptr addrspace(1) inreg %sbase,
; GFX11-NEXT: v_cndmask_b32_e32 v3, v1, v5, vcc
; GFX11-NEXT: global_atomic_cmpswap_b64 v[3:4], v[7:8], v[3:6], off glc
; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: v_cmp_eq_u64_e32 vcc, v[3:4], v[5:6]
; GFX11-NEXT: v_mov_b32_e32 v6, v4
; GFX11-NEXT: v_mov_b32_e32 v5, v3
@@ -1324,8 +1324,8 @@ define amdgpu_ps void @global_min_saddr_i64_nortn_neg128(ptr addrspace(1) inreg
; GFX10-NEXT: v_cndmask_b32_e32 v3, v1, v5, vcc
; GFX10-NEXT: global_atomic_cmpswap_x2 v[3:4], v[7:8], v[3:6], off offset:-128 glc
; GFX10-NEXT: s_waitcnt vmcnt(0)
-; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: buffer_gl1_inv
+; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: v_cmp_eq_u64_e32 vcc, v[3:4], v[5:6]
; GFX10-NEXT: v_mov_b32_e32 v6, v4
; GFX10-NEXT: v_mov_b32_e32 v5, v3
@@ -1351,8 +1351,8 @@ define amdgpu_ps void @global_min_saddr_i64_nortn_neg128(ptr addrspace(1) inreg
; GFX11-NEXT: v_cndmask_b32_e32 v3, v1, v5, vcc
; GFX11-NEXT: global_atomic_cmpswap_b64 v[3:4], v[7:8], v[3:6], off offset:-128 glc
; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: v_cmp_eq_u64_e32 vcc, v[3:4], v[5:6]
; GFX11-NEXT: v_mov_b32_e32 v6, v4
; GFX11-NEXT: v_mov_b32_e32 v5, v3
@@ -1412,8 +1412,8 @@ define amdgpu_ps float @global_umax_saddr_i32_rtn(ptr addrspace(1) inreg %sbase,
; GFX10-NEXT: v_max_u32_e32 v4, v5, v1
; GFX10-NEXT: global_atomic_cmpswap v0, v[2:3], v[4:5], off glc
; GFX10-NEXT: s_waitcnt vmcnt(0)
-; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: buffer_gl1_inv
+; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5
; GFX10-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
; GFX10-NEXT: s_andn2_b64 exec, exec, s[0:1]
@@ -1439,8 +1439,8 @@ define amdgpu_ps float @global_umax_saddr_i32_rtn(ptr addrspace(1) inreg %sbase,
; GFX11-NEXT: v_max_u32_e32 v4, v5, v1
; GFX11-NEXT: global_atomic_cmpswap_b32 v0, v[2:3], v[4:5], off glc
; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5
; GFX11-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
@@ -1495,8 +1495,8 @@ define amdgpu_ps float @global_umax_saddr_i32_rtn_neg128(ptr addrspace(1) inreg
; GFX10-NEXT: v_max_u32_e32 v4, v5, v1
; GFX10-NEXT: global_atomic_cmpswap v0, v[2:3], v[4:5], off offset:-128 glc
; GFX10-NEXT: s_waitcnt vmcnt(0)
-; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: buffer_gl1_inv
+; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5
; GFX10-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
; GFX10-NEXT: s_andn2_b64 exec, exec, s[0:1]
@@ -1522,8 +1522,8 @@ define amdgpu_ps float @global_umax_saddr_i32_rtn_neg128(ptr addrspace(1) inreg
; GFX11-NEXT: v_max_u32_e32 v4, v5, v1
; GFX11-NEXT: global_atomic_cmpswap_b32 v0, v[2:3], v[4:5], off offset:-128 glc
; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5
; GFX11-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
@@ -1575,8 +1575,8 @@ define amdgpu_ps void @global_umax_saddr_i32_nortn(ptr addrspace(1) inreg %sbase
; GFX10-NEXT: v_max_u32_e32 v4, v5, v1
; GFX10-NEXT: global_atomic_cmpswap v0, v[2:3], v[4:5], off glc
; GFX10-NEXT: s_waitcnt vmcnt(0)
-; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: buffer_gl1_inv
+; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5
; GFX10-NEXT: v_mov_b32_e32 v5, v0
; GFX10-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
@@ -1599,8 +1599,8 @@ define amdgpu_ps void @global_umax_saddr_i32_nortn(ptr addrspace(1) inreg %sbase
; GFX11-NEXT: v_max_u32_e32 v4, v5, v1
; GFX11-NEXT: global_atomic_cmpswap_b32 v0, v[2:3], v[4:5], off glc
; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5
; GFX11-NEXT: v_mov_b32_e32 v5, v0
; GFX11-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
@@ -1650,8 +1650,8 @@ define amdgpu_ps void @global_umax_saddr_i32_nortn_neg128(ptr addrspace(1) inreg
; GFX10-NEXT: v_max_u32_e32 v4, v5, v1
; GFX10-NEXT: global_atomic_cmpswap v0, v[2:3], v[4:5], off offset:-128 glc
; GFX10-NEXT: s_waitcnt vmcnt(0)
-; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: buffer_gl1_inv
+; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5
; GFX10-NEXT: v_mov_b32_e32 v5, v0
; GFX10-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
@@ -1674,8 +1674,8 @@ define amdgpu_ps void @global_umax_saddr_i32_nortn_neg128(ptr addrspace(1) inreg
; GFX11-NEXT: v_max_u32_e32 v4, v5, v1
; GFX11-NEXT: global_atomic_cmpswap_b32 v0, v[2:3], v[4:5], off offset:-128 glc
; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5
; GFX11-NEXT: v_mov_b32_e32 v5, v0
; GFX11-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
@@ -1736,8 +1736,8 @@ define amdgpu_ps <2 x float> @global_umax_saddr_i64_rtn(ptr addrspace(1) inreg %
; GFX10-NEXT: v_cndmask_b32_e32 v7, v1, v9, vcc
; GFX10-NEXT: global_atomic_cmpswap_x2 v[3:4], v[5:6], v[7:10], off glc
; GFX10-NEXT: s_waitcnt vmcnt(0)
-; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: buffer_gl1_inv
+; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: v_cmp_eq_u64_e32 vcc, v[3:4], v[9:10]
; GFX10-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
; GFX10-NEXT: s_andn2_b64 exec, exec, s[0:1]
@@ -1767,8 +1767,8 @@ define amdgpu_ps <2 x float> @global_umax_saddr_i64_rtn(ptr addrspace(1) inreg %
; GFX11-NEXT: v_cndmask_b32_e32 v7, v1, v9, vcc
; GFX11-NEXT: global_atomic_cmpswap_b64 v[3:4], v[5:6], v[7:10], off glc
; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: v_cmp_eq_u64_e32 vcc, v[3:4], v[9:10]
; GFX11-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
@@ -1831,8 +1831,8 @@ define amdgpu_ps <2 x float> @global_umax_saddr_i64_rtn_neg128(ptr addrspace(1)
; GFX10-NEXT: v_cndmask_b32_e32 v7, v1, v9, vcc
; GFX10-NEXT: global_atomic_cmpswap_x2 v[3:4], v[5:6], v[7:10], off offset:-128 glc
; GFX10-NEXT: s_waitcnt vmcnt(0)
-; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: buffer_gl1_inv
+; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: v_cmp_eq_u64_e32 vcc, v[3:4], v[9:10]
; GFX10-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
; GFX10-NEXT: s_andn2_b64 exec, exec, s[0:1]
@@ -1862,8 +1862,8 @@ define amdgpu_ps <2 x float> @global_umax_saddr_i64_rtn_neg128(ptr addrspace(1)
; GFX11-NEXT: v_cndmask_b32_e32 v7, v1, v9, vcc
; GFX11-NEXT: global_atomic_cmpswap_b64 v[3:4], v[5:6], v[7:10], off offset:-128 glc
; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: v_cmp_eq_u64_e32 vcc, v[3:4], v[9:10]
; GFX11-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
@@ -1922,8 +1922,8 @@ define amdgpu_ps void @global_umax_saddr_i64_nortn(ptr addrspace(1) inreg %sbase
; GFX10-NEXT: v_cndmask_b32_e32 v3, v1, v5, vcc
; GFX10-NEXT: global_atomic_cmpswap_x2 v[3:4], v[7:8], v[3:6], off glc
; GFX10-NEXT: s_waitcnt vmcnt(0)
-; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: buffer_gl1_inv
+; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: v_cmp_eq_u64_e32 vcc, v[3:4], v[5:6]
; GFX10-NEXT: v_mov_b32_e32 v6, v4
; GFX10-NEXT: v_mov_b32_e32 v5, v3
@@ -1949,8 +1949,8 @@ define amdgpu_ps void @global_umax_saddr_i64_nortn(ptr addrspace(1) inreg %sbase
; GFX11-NEXT: v_cndmask_b32_e32 v3, v1, v5, vcc
; GFX11-NEXT: global_atomic_cmpswap_b64 v[3:4], v[7:8], v[3:6], off glc
; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: v_cmp_eq_u64_e32 vcc, v[3:4], v[5:6]
; GFX11-NEXT: v_mov_b32_e32 v6, v4
; GFX11-NEXT: v_mov_b32_e32 v5, v3
@@ -2006,8 +2006,8 @@ define amdgpu_ps void @global_umax_saddr_i64_nortn_neg128(ptr addrspace(1) inreg
; GFX10-NEXT: v_cndmask_b32_e32 v3, v1, v5, vcc
; GFX10-NEXT: global_atomic_cmpswap_x2 v[3:4], v[7:8], v[3:6], off offset:-128 glc
; GFX10-NEXT: s_waitcnt vmcnt(0)
-; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: buffer_gl1_inv
+; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: v_cmp_eq_u64_e32 vcc, v[3:4], v[5:6]
; GFX10-NEXT: v_mov_b32_e32 v6, v4
; GFX10-NEXT: v_mov_b32_e32 v5, v3
@@ -2033,8 +2033,8 @@ define amdgpu_ps void @global_umax_saddr_i64_nortn_neg128(ptr addrspace(1) inreg
; GFX11-NEXT: v_cndmask_b32_e32 v3, v1, v5, vcc
; GFX11-NEXT: global_atomic_cmpswap_b64 v[3:4], v[7:8], v[3:6], off offset:-128 glc
; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: v_cmp_eq_u64_e32 vcc, v[3:4], v[5:6]
; GFX11-NEXT: v_mov_b32_e32 v6, v4
; GFX11-NEXT: v_mov_b32_e32 v5, v3
@@ -2094,8 +2094,8 @@ define amdgpu_ps float @global_umin_saddr_i32_rtn(ptr addrspace(1) inreg %sbase,
; GFX10-NEXT: v_min_u32_e32 v4, v5, v1
; GFX10-NEXT: global_atomic_cmpswap v0, v[2:3], v[4:5], off glc
; GFX10-NEXT: s_waitcnt vmcnt(0)
-; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: buffer_gl1_inv
+; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5
; GFX10-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
; GFX10-NEXT: s_andn2_b64 exec, exec, s[0:1]
@@ -2121,8 +2121,8 @@ define amdgpu_ps float @global_umin_saddr_i32_rtn(ptr addrspace(1) inreg %sbase,
; GFX11-NEXT: v_min_u32_e32 v4, v5, v1
; GFX11-NEXT: global_atomic_cmpswap_b32 v0, v[2:3], v[4:5], off glc
; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5
; GFX11-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
@@ -2177,8 +2177,8 @@ define amdgpu_ps float @global_umin_saddr_i32_rtn_neg128(ptr addrspace(1) inreg
; GFX10-NEXT: v_min_u32_e32 v4, v5, v1
; GFX10-NEXT: global_atomic_cmpswap v0, v[2:3], v[4:5], off offset:-128 glc
; GFX10-NEXT: s_waitcnt vmcnt(0)
-; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: buffer_gl1_inv
+; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5
; GFX10-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
; GFX10-NEXT: s_andn2_b64 exec, exec, s[0:1]
@@ -2204,8 +2204,8 @@ define amdgpu_ps float @global_umin_saddr_i32_rtn_neg128(ptr addrspace(1) inreg
; GFX11-NEXT: v_min_u32_e32 v4, v5, v1
; GFX11-NEXT: global_atomic_cmpswap_b32 v0, v[2:3], v[4:5], off offset:-128 glc
; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5
; GFX11-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
@@ -2257,8 +2257,8 @@ define amdgpu_ps void @global_umin_saddr_i32_nortn(ptr addrspace(1) inreg %sbase
; GFX10-NEXT: v_min_u32_e32 v4, v5, v1
; GFX10-NEXT: global_atomic_cmpswap v0, v[2:3], v[4:5], off glc
; GFX10-NEXT: s_waitcnt vmcnt(0)
-; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: buffer_gl1_inv
+; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5
; GFX10-NEXT: v_mov_b32_e32 v5, v0
; GFX10-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
@@ -2281,8 +2281,8 @@ define amdgpu_ps void @global_umin_saddr_i32_nortn(ptr addrspace(1) inreg %sbase
; GFX11-NEXT: v_min_u32_e32 v4, v5, v1
; GFX11-NEXT: global_atomic_cmpswap_b32 v0, v[2:3], v[4:5], off glc
; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5
; GFX11-NEXT: v_mov_b32_e32 v5, v0
; GFX11-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
@@ -2332,8 +2332,8 @@ define amdgpu_ps void @global_umin_saddr_i32_nortn_neg128(ptr addrspace(1) inreg
; GFX10-NEXT: v_min_u32_e32 v4, v5, v1
; GFX10-NEXT: global_atomic_cmpswap v0, v[2:3], v[4:5], off offset:-128 glc
; GFX10-NEXT: s_waitcnt vmcnt(0)
-; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: buffer_gl1_inv
+; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5
; GFX10-NEXT: v_mov_b32_e32 v5, v0
; GFX10-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
@@ -2356,8 +2356,8 @@ define amdgpu_ps void @global_umin_saddr_i32_nortn_neg128(ptr addrspace(1) inreg
; GFX11-NEXT: v_min_u32_e32 v4, v5, v1
; GFX11-NEXT: global_atomic_cmpswap_b32 v0, v[2:3], v[4:5], off offset:-128 glc
; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5
; GFX11-NEXT: v_mov_b32_e32 v5, v0
; GFX11-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
@@ -2418,8 +2418,8 @@ define amdgpu_ps <2 x float> @global_umin_saddr_i64_rtn(ptr addrspace(1) inreg %
; GFX10-NEXT: v_cndmask_b32_e32 v7, v1, v9, vcc
; GFX10-NEXT: global_atomic_cmpswap_x2 v[3:4], v[5:6], v[7:10], off glc
; GFX10-NEXT: s_waitcnt vmcnt(0)
-; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: buffer_gl1_inv
+; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: v_cmp_eq_u64_e32 vcc, v[3:4], v[9:10]
; GFX10-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
; GFX10-NEXT: s_andn2_b64 exec, exec, s[0:1]
@@ -2449,8 +2449,8 @@ define amdgpu_ps <2 x float> @global_umin_saddr_i64_rtn(ptr addrspace(1) inreg %
; GFX11-NEXT: v_cndmask_b32_e32 v7, v1, v9, vcc
; GFX11-NEXT: global_atomic_cmpswap_b64 v[3:4], v[5:6], v[7:10], off glc
; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: v_cmp_eq_u64_e32 vcc, v[3:4], v[9:10]
; GFX11-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
@@ -2513,8 +2513,8 @@ define amdgpu_ps <2 x float> @global_umin_saddr_i64_rtn_neg128(ptr addrspace(1)
; GFX10-NEXT: v_cndmask_b32_e32 v7, v1, v9, vcc
; GFX10-NEXT: global_atomic_cmpswap_x2 v[3:4], v[5:6], v[7:10], off offset:-128 glc
; GFX10-NEXT: s_waitcnt vmcnt(0)
-; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: buffer_gl1_inv
+; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: v_cmp_eq_u64_e32 vcc, v[3:4], v[9:10]
; GFX10-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
; GFX10-NEXT: s_andn2_b64 exec, exec, s[0:1]
@@ -2544,8 +2544,8 @@ define amdgpu_ps <2 x float> @global_umin_saddr_i64_rtn_neg128(ptr addrspace(1)
; GFX11-NEXT: v_cndmask_b32_e32 v7, v1, v9, vcc
; GFX11-NEXT: global_atomic_cmpswap_b64 v[3:4], v[5:6], v[7:10], off offset:-128 glc
; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: v_cmp_eq_u64_e32 vcc, v[3:4], v[9:10]
; GFX11-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
@@ -2604,8 +2604,8 @@ define amdgpu_ps void @global_umin_saddr_i64_nortn(ptr addrspace(1) inreg %sbase
; GFX10-NEXT: v_cndmask_b32_e32 v3, v1, v5, vcc
; GFX10-NEXT: global_atomic_cmpswap_x2 v[3:4], v[7:8], v[3:6], off glc
; GFX10-NEXT: s_waitcnt vmcnt(0)
-; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: buffer_gl1_inv
+; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: v_cmp_eq_u64_e32 vcc, v[3:4], v[5:6]
; GFX10-NEXT: v_mov_b32_e32 v6, v4
; GFX10-NEXT: v_mov_b32_e32 v5, v3
@@ -2631,8 +2631,8 @@ define amdgpu_ps void @global_umin_saddr_i64_nortn(ptr addrspace(1) inreg %sbase
; GFX11-NEXT: v_cndmask_b32_e32 v3, v1, v5, vcc
; GFX11-NEXT: global_atomic_cmpswap_b64 v[3:4], v[7:8], v[3:6], off glc
; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: v_cmp_eq_u64_e32 vcc, v[3:4], v[5:6]
; GFX11-NEXT: v_mov_b32_e32 v6, v4
; GFX11-NEXT: v_mov_b32_e32 v5, v3
@@ -2688,8 +2688,8 @@ define amdgpu_ps void @global_umin_saddr_i64_nortn_neg128(ptr addrspace(1) inreg
; GFX10-NEXT: v_cndmask_b32_e32 v3, v1, v5, vcc
; GFX10-NEXT: global_atomic_cmpswap_x2 v[3:4], v[7:8], v[3:6], off offset:-128 glc
; GFX10-NEXT: s_waitcnt vmcnt(0)
-; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: buffer_gl1_inv
+; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: v_cmp_eq_u64_e32 vcc, v[3:4], v[5:6]
; GFX10-NEXT: v_mov_b32_e32 v6, v4
; GFX10-NEXT: v_mov_b32_e32 v5, v3
@@ -2715,8 +2715,8 @@ define amdgpu_ps void @global_umin_saddr_i64_nortn_neg128(ptr addrspace(1) inreg
; GFX11-NEXT: v_cndmask_b32_e32 v3, v1, v5, vcc
; GFX11-NEXT: global_atomic_cmpswap_b64 v[3:4], v[7:8], v[3:6], off offset:-128 glc
; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: v_cmp_eq_u64_e32 vcc, v[3:4], v[5:6]
; GFX11-NEXT: v_mov_b32_e32 v6, v4
; GFX11-NEXT: v_mov_b32_e32 v5, v3
diff --git a/llvm/test/CodeGen/AMDGPU/global-saddr-atomics.ll b/llvm/test/CodeGen/AMDGPU/global-saddr-atomics.ll
index 94d21be..a3c8bb1 100644
--- a/llvm/test/CodeGen/AMDGPU/global-saddr-atomics.ll
+++ b/llvm/test/CodeGen/AMDGPU/global-saddr-atomics.ll
@@ -17,16 +17,16 @@ define amdgpu_ps void @global_xchg_saddr_i32_nortn(ptr addrspace(1) inreg %sbase
; GFX10: ; %bb.0:
; GFX10-NEXT: global_atomic_swap v0, v1, s[2:3]
; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: buffer_gl1_inv
+; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: s_endpgm
;
; GFX11-LABEL: global_xchg_saddr_i32_nortn:
; GFX11: ; %bb.0:
; GFX11-NEXT: global_atomic_swap_b32 v0, v1, s[2:3]
; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: s_endpgm
%zext.offset = zext i32 %voffset to i64
%gep0 = getelementptr inbounds i8, ptr addrspace(1) %sbase, i64 %zext.offset
@@ -47,16 +47,16 @@ define amdgpu_ps void @global_xchg_saddr_i32_nortn_offset_2047(ptr addrspace(1)
; GFX10: ; %bb.0:
; GFX10-NEXT: global_atomic_swap v0, v1, s[2:3] offset:2047
; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: buffer_gl1_inv
+; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: s_endpgm
;
; GFX11-LABEL: global_xchg_saddr_i32_nortn_offset_2047:
; GFX11: ; %bb.0:
; GFX11-NEXT: global_atomic_swap_b32 v0, v1, s[2:3] offset:2047
; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: s_endpgm
%zext.offset = zext i32 %voffset to i64
%gep0 = getelementptr inbounds i8, ptr addrspace(1) %sbase, i64 %zext.offset
@@ -78,16 +78,16 @@ define amdgpu_ps void @global_xchg_saddr_i32_nortn_offset_neg2048(ptr addrspace(
; GFX10: ; %bb.0:
; GFX10-NEXT: global_atomic_swap v0, v1, s[2:3] offset:-2048
; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: buffer_gl1_inv
+; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: s_endpgm
;
; GFX11-LABEL: global_xchg_saddr_i32_nortn_offset_neg2048:
; GFX11: ; %bb.0:
; GFX11-NEXT: global_atomic_swap_b32 v0, v1, s[2:3] offset:-2048
; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: s_endpgm
%zext.offset = zext i32 %voffset to i64
%gep0 = getelementptr inbounds i8, ptr addrspace(1) %sbase, i64 %zext.offset
@@ -108,16 +108,16 @@ define amdgpu_ps float @global_xchg_saddr_i32_rtn(ptr addrspace(1) inreg %sbase,
; GFX10: ; %bb.0:
; GFX10-NEXT: global_atomic_swap v0, v0, v1, s[2:3] glc
; GFX10-NEXT: s_waitcnt vmcnt(0)
-; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: buffer_gl1_inv
+; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: ; return to shader part epilog
;
; GFX11-LABEL: global_xchg_saddr_i32_rtn:
; GFX11: ; %bb.0:
; GFX11-NEXT: global_atomic_swap_b32 v0, v0, v1, s[2:3] glc
; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: ; return to shader part epilog
%zext.offset = zext i32 %voffset to i64
%gep0 = getelementptr inbounds i8, ptr addrspace(1) %sbase, i64 %zext.offset
@@ -142,16 +142,16 @@ define amdgpu_ps float @global_xchg_saddr_i32_rtn_2048(ptr addrspace(1) inreg %s
; GFX10-NEXT: v_add_co_ci_u32_e32 v3, vcc, 0, v3, vcc
; GFX10-NEXT: global_atomic_swap v0, v[2:3], v1, off glc
; GFX10-NEXT: s_waitcnt vmcnt(0)
-; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: buffer_gl1_inv
+; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: ; return to shader part epilog
;
; GFX11-LABEL: global_xchg_saddr_i32_rtn_2048:
; GFX11: ; %bb.0:
; GFX11-NEXT: global_atomic_swap_b32 v0, v0, v1, s[2:3] offset:2048 glc
; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: ; return to shader part epilog
%zext.offset = zext i32 %voffset to i64
%gep0 = getelementptr inbounds i8, ptr addrspace(1) %sbase, i64 %zext.offset
@@ -173,16 +173,16 @@ define amdgpu_ps float @global_xchg_saddr_i32_rtn_neg2048(ptr addrspace(1) inreg
; GFX10: ; %bb.0:
; GFX10-NEXT: global_atomic_swap v0, v0, v1, s[2:3] offset:-2048 glc
; GFX10-NEXT: s_waitcnt vmcnt(0)
-; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: buffer_gl1_inv
+; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: ; return to shader part epilog
;
; GFX11-LABEL: global_xchg_saddr_i32_rtn_neg2048:
; GFX11: ; %bb.0:
; GFX11-NEXT: global_atomic_swap_b32 v0, v0, v1, s[2:3] offset:-2048 glc
; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: ; return to shader part epilog
%zext.offset = zext i32 %voffset to i64
%gep0 = getelementptr inbounds i8, ptr addrspace(1) %sbase, i64 %zext.offset
@@ -222,8 +222,8 @@ define amdgpu_ps float @global_xchg_saddr_uniform_ptr_in_vgprs_rtn(i32 %voffset,
; GFX10-NEXT: v_readfirstlane_b32 s1, v3
; GFX10-NEXT: global_atomic_swap v0, v0, v1, s[0:1] glc
; GFX10-NEXT: s_waitcnt vmcnt(0)
-; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: buffer_gl1_inv
+; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: ; return to shader part epilog
;
; GFX11-LABEL: global_xchg_saddr_uniform_ptr_in_vgprs_rtn:
@@ -235,8 +235,8 @@ define amdgpu_ps float @global_xchg_saddr_uniform_ptr_in_vgprs_rtn(i32 %voffset,
; GFX11-NEXT: v_readfirstlane_b32 s1, v3
; GFX11-NEXT: global_atomic_swap_b32 v0, v0, v1, s[0:1] glc
; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: ; return to shader part epilog
%sbase = load ptr addrspace(1), ptr addrspace(3) @ptr.in.lds
%zext.offset = zext i32 %voffset to i64
@@ -270,8 +270,8 @@ define amdgpu_ps float @global_xchg_saddr_uniform_ptr_in_vgprs_rtn_immoffset(i32
; GFX10-NEXT: v_readfirstlane_b32 s1, v3
; GFX10-NEXT: global_atomic_swap v0, v0, v1, s[0:1] offset:42 glc
; GFX10-NEXT: s_waitcnt vmcnt(0)
-; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: buffer_gl1_inv
+; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: ; return to shader part epilog
;
; GFX11-LABEL: global_xchg_saddr_uniform_ptr_in_vgprs_rtn_immoffset:
@@ -283,8 +283,8 @@ define amdgpu_ps float @global_xchg_saddr_uniform_ptr_in_vgprs_rtn_immoffset(i32
; GFX11-NEXT: v_readfirstlane_b32 s1, v3
; GFX11-NEXT: global_atomic_swap_b32 v0, v0, v1, s[0:1] offset:42 glc
; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: ; return to shader part epilog
%sbase = load ptr addrspace(1), ptr addrspace(3) @ptr.in.lds
%zext.offset = zext i32 %voffset to i64
@@ -319,8 +319,8 @@ define amdgpu_ps void @global_xchg_saddr_uniform_ptr_in_vgprs_nortn(i32 %voffset
; GFX10-NEXT: v_readfirstlane_b32 s1, v3
; GFX10-NEXT: global_atomic_swap v0, v1, s[0:1]
; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: buffer_gl1_inv
+; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: s_endpgm
;
; GFX11-LABEL: global_xchg_saddr_uniform_ptr_in_vgprs_nortn:
@@ -332,8 +332,8 @@ define amdgpu_ps void @global_xchg_saddr_uniform_ptr_in_vgprs_nortn(i32 %voffset
; GFX11-NEXT: v_readfirstlane_b32 s1, v3
; GFX11-NEXT: global_atomic_swap_b32 v0, v1, s[0:1]
; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: s_endpgm
%sbase = load ptr addrspace(1), ptr addrspace(3) @ptr.in.lds
%zext.offset = zext i32 %voffset to i64
@@ -366,8 +366,8 @@ define amdgpu_ps void @global_xchg_saddr_uniform_ptr_in_vgprs_nortn_immoffset(i3
; GFX10-NEXT: v_readfirstlane_b32 s1, v3
; GFX10-NEXT: global_atomic_swap v0, v1, s[0:1] offset:42
; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: buffer_gl1_inv
+; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: s_endpgm
;
; GFX11-LABEL: global_xchg_saddr_uniform_ptr_in_vgprs_nortn_immoffset:
@@ -379,8 +379,8 @@ define amdgpu_ps void @global_xchg_saddr_uniform_ptr_in_vgprs_nortn_immoffset(i3
; GFX11-NEXT: v_readfirstlane_b32 s1, v3
; GFX11-NEXT: global_atomic_swap_b32 v0, v1, s[0:1] offset:42
; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: s_endpgm
%sbase = load ptr addrspace(1), ptr addrspace(3) @ptr.in.lds
%zext.offset = zext i32 %voffset to i64
@@ -410,16 +410,16 @@ define amdgpu_ps <2 x float> @global_xchg_saddr_i64_rtn(ptr addrspace(1) inreg %
; GFX10: ; %bb.0:
; GFX10-NEXT: global_atomic_swap_x2 v[0:1], v0, v[1:2], s[2:3] glc
; GFX10-NEXT: s_waitcnt vmcnt(0)
-; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: buffer_gl1_inv
+; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: ; return to shader part epilog
;
; GFX11-LABEL: global_xchg_saddr_i64_rtn:
; GFX11: ; %bb.0:
; GFX11-NEXT: global_atomic_swap_b64 v[0:1], v0, v[1:2], s[2:3] glc
; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: ; return to shader part epilog
%zext.offset = zext i32 %voffset to i64
%gep0 = getelementptr inbounds i8, ptr addrspace(1) %sbase, i64 %zext.offset
@@ -440,16 +440,16 @@ define amdgpu_ps <2 x float> @global_xchg_saddr_i64_rtn_neg128(ptr addrspace(1)
; GFX10: ; %bb.0:
; GFX10-NEXT: global_atomic_swap_x2 v[0:1], v0, v[1:2], s[2:3] offset:-128 glc
; GFX10-NEXT: s_waitcnt vmcnt(0)
-; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: buffer_gl1_inv
+; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: ; return to shader part epilog
;
; GFX11-LABEL: global_xchg_saddr_i64_rtn_neg128:
; GFX11: ; %bb.0:
; GFX11-NEXT: global_atomic_swap_b64 v[0:1], v0, v[1:2], s[2:3] offset:-128 glc
; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: ; return to shader part epilog
%zext.offset = zext i32 %voffset to i64
%gep0 = getelementptr inbounds i8, ptr addrspace(1) %sbase, i64 %zext.offset
@@ -471,16 +471,16 @@ define amdgpu_ps void @global_xchg_saddr_i64_nortn(ptr addrspace(1) inreg %sbase
; GFX10: ; %bb.0:
; GFX10-NEXT: global_atomic_swap_x2 v0, v[1:2], s[2:3]
; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: buffer_gl1_inv
+; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: s_endpgm
;
; GFX11-LABEL: global_xchg_saddr_i64_nortn:
; GFX11: ; %bb.0:
; GFX11-NEXT: global_atomic_swap_b64 v0, v[1:2], s[2:3]
; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: s_endpgm
%zext.offset = zext i32 %voffset to i64
%gep0 = getelementptr inbounds i8, ptr addrspace(1) %sbase, i64 %zext.offset
@@ -500,16 +500,16 @@ define amdgpu_ps void @global_xchg_saddr_i64_nortn_neg128(ptr addrspace(1) inreg
; GFX10: ; %bb.0:
; GFX10-NEXT: global_atomic_swap_x2 v0, v[1:2], s[2:3] offset:-128
; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: buffer_gl1_inv
+; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: s_endpgm
;
; GFX11-LABEL: global_xchg_saddr_i64_nortn_neg128:
; GFX11: ; %bb.0:
; GFX11-NEXT: global_atomic_swap_b64 v0, v[1:2], s[2:3] offset:-128
; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: s_endpgm
%zext.offset = zext i32 %voffset to i64
%gep0 = getelementptr inbounds i8, ptr addrspace(1) %sbase, i64 %zext.offset
@@ -534,16 +534,16 @@ define amdgpu_ps float @global_add_saddr_i32_rtn(ptr addrspace(1) inreg %sbase,
; GFX10: ; %bb.0:
; GFX10-NEXT: global_atomic_add v0, v0, v1, s[2:3] glc
; GFX10-NEXT: s_waitcnt vmcnt(0)
-; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: buffer_gl1_inv
+; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: ; return to shader part epilog
;
; GFX11-LABEL: global_add_saddr_i32_rtn:
; GFX11: ; %bb.0:
; GFX11-NEXT: global_atomic_add_u32 v0, v0, v1, s[2:3] glc
; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: ; return to shader part epilog
%zext.offset = zext i32 %voffset to i64
%gep0 = getelementptr inbounds i8, ptr addrspace(1) %sbase, i64 %zext.offset
@@ -564,16 +564,16 @@ define amdgpu_ps float @global_add_saddr_i32_rtn_neg128(ptr addrspace(1) inreg %
; GFX10: ; %bb.0:
; GFX10-NEXT: global_atomic_add v0, v0, v1, s[2:3] offset:-128 glc
; GFX10-NEXT: s_waitcnt vmcnt(0)
-; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: buffer_gl1_inv
+; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: ; return to shader part epilog
;
; GFX11-LABEL: global_add_saddr_i32_rtn_neg128:
; GFX11: ; %bb.0:
; GFX11-NEXT: global_atomic_add_u32 v0, v0, v1, s[2:3] offset:-128 glc
; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: ; return to shader part epilog
%zext.offset = zext i32 %voffset to i64
%gep0 = getelementptr inbounds i8, ptr addrspace(1) %sbase, i64 %zext.offset
@@ -595,16 +595,16 @@ define amdgpu_ps void @global_add_saddr_i32_nortn(ptr addrspace(1) inreg %sbase,
; GFX10: ; %bb.0:
; GFX10-NEXT: global_atomic_add v0, v1, s[2:3]
; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: buffer_gl1_inv
+; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: s_endpgm
;
; GFX11-LABEL: global_add_saddr_i32_nortn:
; GFX11: ; %bb.0:
; GFX11-NEXT: global_atomic_add_u32 v0, v1, s[2:3]
; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: s_endpgm
%zext.offset = zext i32 %voffset to i64
%gep0 = getelementptr inbounds i8, ptr addrspace(1) %sbase, i64 %zext.offset
@@ -624,16 +624,16 @@ define amdgpu_ps void @global_add_saddr_i32_nortn_neg128(ptr addrspace(1) inreg
; GFX10: ; %bb.0:
; GFX10-NEXT: global_atomic_add v0, v1, s[2:3] offset:-128
; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: buffer_gl1_inv
+; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: s_endpgm
;
; GFX11-LABEL: global_add_saddr_i32_nortn_neg128:
; GFX11: ; %bb.0:
; GFX11-NEXT: global_atomic_add_u32 v0, v1, s[2:3] offset:-128
; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: s_endpgm
%zext.offset = zext i32 %voffset to i64
%gep0 = getelementptr inbounds i8, ptr addrspace(1) %sbase, i64 %zext.offset
@@ -654,16 +654,16 @@ define amdgpu_ps <2 x float> @global_add_saddr_i64_rtn(ptr addrspace(1) inreg %s
; GFX10: ; %bb.0:
; GFX10-NEXT: global_atomic_add_x2 v[0:1], v0, v[1:2], s[2:3] glc
; GFX10-NEXT: s_waitcnt vmcnt(0)
-; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: buffer_gl1_inv
+; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: ; return to shader part epilog
;
; GFX11-LABEL: global_add_saddr_i64_rtn:
; GFX11: ; %bb.0:
; GFX11-NEXT: global_atomic_add_u64 v[0:1], v0, v[1:2], s[2:3] glc
; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: ; return to shader part epilog
%zext.offset = zext i32 %voffset to i64
%gep0 = getelementptr inbounds i8, ptr addrspace(1) %sbase, i64 %zext.offset
@@ -684,16 +684,16 @@ define amdgpu_ps <2 x float> @global_add_saddr_i64_rtn_neg128(ptr addrspace(1) i
; GFX10: ; %bb.0:
; GFX10-NEXT: global_atomic_add_x2 v[0:1], v0, v[1:2], s[2:3] offset:-128 glc
; GFX10-NEXT: s_waitcnt vmcnt(0)
-; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: buffer_gl1_inv
+; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: ; return to shader part epilog
;
; GFX11-LABEL: global_add_saddr_i64_rtn_neg128:
; GFX11: ; %bb.0:
; GFX11-NEXT: global_atomic_add_u64 v[0:1], v0, v[1:2], s[2:3] offset:-128 glc
; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: ; return to shader part epilog
%zext.offset = zext i32 %voffset to i64
%gep0 = getelementptr inbounds i8, ptr addrspace(1) %sbase, i64 %zext.offset
@@ -715,16 +715,16 @@ define amdgpu_ps void @global_add_saddr_i64_nortn(ptr addrspace(1) inreg %sbase,
; GFX10: ; %bb.0:
; GFX10-NEXT: global_atomic_add_x2 v0, v[1:2], s[2:3]
; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: buffer_gl1_inv
+; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: s_endpgm
;
; GFX11-LABEL: global_add_saddr_i64_nortn:
; GFX11: ; %bb.0:
; GFX11-NEXT: global_atomic_add_u64 v0, v[1:2], s[2:3]
; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: s_endpgm
%zext.offset = zext i32 %voffset to i64
%gep0 = getelementptr inbounds i8, ptr addrspace(1) %sbase, i64 %zext.offset
@@ -744,16 +744,16 @@ define amdgpu_ps void @global_add_saddr_i64_nortn_neg128(ptr addrspace(1) inreg
; GFX10: ; %bb.0:
; GFX10-NEXT: global_atomic_add_x2 v0, v[1:2], s[2:3] offset:-128
; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: buffer_gl1_inv
+; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: s_endpgm
;
; GFX11-LABEL: global_add_saddr_i64_nortn_neg128:
; GFX11: ; %bb.0:
; GFX11-NEXT: global_atomic_add_u64 v0, v[1:2], s[2:3] offset:-128
; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: s_endpgm
%zext.offset = zext i32 %voffset to i64
%gep0 = getelementptr inbounds i8, ptr addrspace(1) %sbase, i64 %zext.offset
@@ -778,16 +778,16 @@ define amdgpu_ps float @global_sub_saddr_i32_rtn(ptr addrspace(1) inreg %sbase,
; GFX10: ; %bb.0:
; GFX10-NEXT: global_atomic_sub v0, v0, v1, s[2:3] glc
; GFX10-NEXT: s_waitcnt vmcnt(0)
-; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: buffer_gl1_inv
+; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: ; return to shader part epilog
;
; GFX11-LABEL: global_sub_saddr_i32_rtn:
; GFX11: ; %bb.0:
; GFX11-NEXT: global_atomic_sub_u32 v0, v0, v1, s[2:3] glc
; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: ; return to shader part epilog
%zext.offset = zext i32 %voffset to i64
%gep0 = getelementptr inbounds i8, ptr addrspace(1) %sbase, i64 %zext.offset
@@ -808,16 +808,16 @@ define amdgpu_ps float @global_sub_saddr_i32_rtn_neg128(ptr addrspace(1) inreg %
; GFX10: ; %bb.0:
; GFX10-NEXT: global_atomic_sub v0, v0, v1, s[2:3] offset:-128 glc
; GFX10-NEXT: s_waitcnt vmcnt(0)
-; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: buffer_gl1_inv
+; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: ; return to shader part epilog
;
; GFX11-LABEL: global_sub_saddr_i32_rtn_neg128:
; GFX11: ; %bb.0:
; GFX11-NEXT: global_atomic_sub_u32 v0, v0, v1, s[2:3] offset:-128 glc
; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: ; return to shader part epilog
%zext.offset = zext i32 %voffset to i64
%gep0 = getelementptr inbounds i8, ptr addrspace(1) %sbase, i64 %zext.offset
@@ -839,16 +839,16 @@ define amdgpu_ps void @global_sub_saddr_i32_nortn(ptr addrspace(1) inreg %sbase,
; GFX10: ; %bb.0:
; GFX10-NEXT: global_atomic_sub v0, v1, s[2:3]
; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: buffer_gl1_inv
+; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: s_endpgm
;
; GFX11-LABEL: global_sub_saddr_i32_nortn:
; GFX11: ; %bb.0:
; GFX11-NEXT: global_atomic_sub_u32 v0, v1, s[2:3]
; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: s_endpgm
%zext.offset = zext i32 %voffset to i64
%gep0 = getelementptr inbounds i8, ptr addrspace(1) %sbase, i64 %zext.offset
@@ -868,16 +868,16 @@ define amdgpu_ps void @global_sub_saddr_i32_nortn_neg128(ptr addrspace(1) inreg
; GFX10: ; %bb.0:
; GFX10-NEXT: global_atomic_sub v0, v1, s[2:3] offset:-128
; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: buffer_gl1_inv
+; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: s_endpgm
;
; GFX11-LABEL: global_sub_saddr_i32_nortn_neg128:
; GFX11: ; %bb.0:
; GFX11-NEXT: global_atomic_sub_u32 v0, v1, s[2:3] offset:-128
; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: s_endpgm
%zext.offset = zext i32 %voffset to i64
%gep0 = getelementptr inbounds i8, ptr addrspace(1) %sbase, i64 %zext.offset
@@ -898,16 +898,16 @@ define amdgpu_ps <2 x float> @global_sub_saddr_i64_rtn(ptr addrspace(1) inreg %s
; GFX10: ; %bb.0:
; GFX10-NEXT: global_atomic_sub_x2 v[0:1], v0, v[1:2], s[2:3] glc
; GFX10-NEXT: s_waitcnt vmcnt(0)
-; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: buffer_gl1_inv
+; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: ; return to shader part epilog
;
; GFX11-LABEL: global_sub_saddr_i64_rtn:
; GFX11: ; %bb.0:
; GFX11-NEXT: global_atomic_sub_u64 v[0:1], v0, v[1:2], s[2:3] glc
; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: ; return to shader part epilog
%zext.offset = zext i32 %voffset to i64
%gep0 = getelementptr inbounds i8, ptr addrspace(1) %sbase, i64 %zext.offset
@@ -928,16 +928,16 @@ define amdgpu_ps <2 x float> @global_sub_saddr_i64_rtn_neg128(ptr addrspace(1) i
; GFX10: ; %bb.0:
; GFX10-NEXT: global_atomic_sub_x2 v[0:1], v0, v[1:2], s[2:3] offset:-128 glc
; GFX10-NEXT: s_waitcnt vmcnt(0)
-; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: buffer_gl1_inv
+; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: ; return to shader part epilog
;
; GFX11-LABEL: global_sub_saddr_i64_rtn_neg128:
; GFX11: ; %bb.0:
; GFX11-NEXT: global_atomic_sub_u64 v[0:1], v0, v[1:2], s[2:3] offset:-128 glc
; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: ; return to shader part epilog
%zext.offset = zext i32 %voffset to i64
%gep0 = getelementptr inbounds i8, ptr addrspace(1) %sbase, i64 %zext.offset
@@ -959,16 +959,16 @@ define amdgpu_ps void @global_sub_saddr_i64_nortn(ptr addrspace(1) inreg %sbase,
; GFX10: ; %bb.0:
; GFX10-NEXT: global_atomic_sub_x2 v0, v[1:2], s[2:3]
; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: buffer_gl1_inv
+; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: s_endpgm
;
; GFX11-LABEL: global_sub_saddr_i64_nortn:
; GFX11: ; %bb.0:
; GFX11-NEXT: global_atomic_sub_u64 v0, v[1:2], s[2:3]
; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: s_endpgm
%zext.offset = zext i32 %voffset to i64
%gep0 = getelementptr inbounds i8, ptr addrspace(1) %sbase, i64 %zext.offset
@@ -988,16 +988,16 @@ define amdgpu_ps void @global_sub_saddr_i64_nortn_neg128(ptr addrspace(1) inreg
; GFX10: ; %bb.0:
; GFX10-NEXT: global_atomic_sub_x2 v0, v[1:2], s[2:3] offset:-128
; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: buffer_gl1_inv
+; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: s_endpgm
;
; GFX11-LABEL: global_sub_saddr_i64_nortn_neg128:
; GFX11: ; %bb.0:
; GFX11-NEXT: global_atomic_sub_u64 v0, v[1:2], s[2:3] offset:-128
; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: s_endpgm
%zext.offset = zext i32 %voffset to i64
%gep0 = getelementptr inbounds i8, ptr addrspace(1) %sbase, i64 %zext.offset
@@ -1022,16 +1022,16 @@ define amdgpu_ps float @global_and_saddr_i32_rtn(ptr addrspace(1) inreg %sbase,
; GFX10: ; %bb.0:
; GFX10-NEXT: global_atomic_and v0, v0, v1, s[2:3] glc
; GFX10-NEXT: s_waitcnt vmcnt(0)
-; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: buffer_gl1_inv
+; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: ; return to shader part epilog
;
; GFX11-LABEL: global_and_saddr_i32_rtn:
; GFX11: ; %bb.0:
; GFX11-NEXT: global_atomic_and_b32 v0, v0, v1, s[2:3] glc
; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: ; return to shader part epilog
%zext.offset = zext i32 %voffset to i64
%gep0 = getelementptr inbounds i8, ptr addrspace(1) %sbase, i64 %zext.offset
@@ -1052,16 +1052,16 @@ define amdgpu_ps float @global_and_saddr_i32_rtn_neg128(ptr addrspace(1) inreg %
; GFX10: ; %bb.0:
; GFX10-NEXT: global_atomic_and v0, v0, v1, s[2:3] offset:-128 glc
; GFX10-NEXT: s_waitcnt vmcnt(0)
-; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: buffer_gl1_inv
+; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: ; return to shader part epilog
;
; GFX11-LABEL: global_and_saddr_i32_rtn_neg128:
; GFX11: ; %bb.0:
; GFX11-NEXT: global_atomic_and_b32 v0, v0, v1, s[2:3] offset:-128 glc
; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: ; return to shader part epilog
%zext.offset = zext i32 %voffset to i64
%gep0 = getelementptr inbounds i8, ptr addrspace(1) %sbase, i64 %zext.offset
@@ -1083,16 +1083,16 @@ define amdgpu_ps void @global_and_saddr_i32_nortn(ptr addrspace(1) inreg %sbase,
; GFX10: ; %bb.0:
; GFX10-NEXT: global_atomic_and v0, v1, s[2:3]
; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: buffer_gl1_inv
+; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: s_endpgm
;
; GFX11-LABEL: global_and_saddr_i32_nortn:
; GFX11: ; %bb.0:
; GFX11-NEXT: global_atomic_and_b32 v0, v1, s[2:3]
; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: s_endpgm
%zext.offset = zext i32 %voffset to i64
%gep0 = getelementptr inbounds i8, ptr addrspace(1) %sbase, i64 %zext.offset
@@ -1112,16 +1112,16 @@ define amdgpu_ps void @global_and_saddr_i32_nortn_neg128(ptr addrspace(1) inreg
; GFX10: ; %bb.0:
; GFX10-NEXT: global_atomic_and v0, v1, s[2:3] offset:-128
; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: buffer_gl1_inv
+; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: s_endpgm
;
; GFX11-LABEL: global_and_saddr_i32_nortn_neg128:
; GFX11: ; %bb.0:
; GFX11-NEXT: global_atomic_and_b32 v0, v1, s[2:3] offset:-128
; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: s_endpgm
%zext.offset = zext i32 %voffset to i64
%gep0 = getelementptr inbounds i8, ptr addrspace(1) %sbase, i64 %zext.offset
@@ -1142,16 +1142,16 @@ define amdgpu_ps <2 x float> @global_and_saddr_i64_rtn(ptr addrspace(1) inreg %s
; GFX10: ; %bb.0:
; GFX10-NEXT: global_atomic_and_x2 v[0:1], v0, v[1:2], s[2:3] glc
; GFX10-NEXT: s_waitcnt vmcnt(0)
-; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: buffer_gl1_inv
+; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: ; return to shader part epilog
;
; GFX11-LABEL: global_and_saddr_i64_rtn:
; GFX11: ; %bb.0:
; GFX11-NEXT: global_atomic_and_b64 v[0:1], v0, v[1:2], s[2:3] glc
; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: ; return to shader part epilog
%zext.offset = zext i32 %voffset to i64
%gep0 = getelementptr inbounds i8, ptr addrspace(1) %sbase, i64 %zext.offset
@@ -1172,16 +1172,16 @@ define amdgpu_ps <2 x float> @global_and_saddr_i64_rtn_neg128(ptr addrspace(1) i
; GFX10: ; %bb.0:
; GFX10-NEXT: global_atomic_and_x2 v[0:1], v0, v[1:2], s[2:3] offset:-128 glc
; GFX10-NEXT: s_waitcnt vmcnt(0)
-; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: buffer_gl1_inv
+; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: ; return to shader part epilog
;
; GFX11-LABEL: global_and_saddr_i64_rtn_neg128:
; GFX11: ; %bb.0:
; GFX11-NEXT: global_atomic_and_b64 v[0:1], v0, v[1:2], s[2:3] offset:-128 glc
; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: ; return to shader part epilog
%zext.offset = zext i32 %voffset to i64
%gep0 = getelementptr inbounds i8, ptr addrspace(1) %sbase, i64 %zext.offset
@@ -1203,16 +1203,16 @@ define amdgpu_ps void @global_and_saddr_i64_nortn(ptr addrspace(1) inreg %sbase,
; GFX10: ; %bb.0:
; GFX10-NEXT: global_atomic_and_x2 v0, v[1:2], s[2:3]
; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: buffer_gl1_inv
+; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: s_endpgm
;
; GFX11-LABEL: global_and_saddr_i64_nortn:
; GFX11: ; %bb.0:
; GFX11-NEXT: global_atomic_and_b64 v0, v[1:2], s[2:3]
; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: s_endpgm
%zext.offset = zext i32 %voffset to i64
%gep0 = getelementptr inbounds i8, ptr addrspace(1) %sbase, i64 %zext.offset
@@ -1232,16 +1232,16 @@ define amdgpu_ps void @global_and_saddr_i64_nortn_neg128(ptr addrspace(1) inreg
; GFX10: ; %bb.0:
; GFX10-NEXT: global_atomic_and_x2 v0, v[1:2], s[2:3] offset:-128
; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: buffer_gl1_inv
+; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: s_endpgm
;
; GFX11-LABEL: global_and_saddr_i64_nortn_neg128:
; GFX11: ; %bb.0:
; GFX11-NEXT: global_atomic_and_b64 v0, v[1:2], s[2:3] offset:-128
; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: s_endpgm
%zext.offset = zext i32 %voffset to i64
%gep0 = getelementptr inbounds i8, ptr addrspace(1) %sbase, i64 %zext.offset
@@ -1266,16 +1266,16 @@ define amdgpu_ps float @global_or_saddr_i32_rtn(ptr addrspace(1) inreg %sbase, i
; GFX10: ; %bb.0:
; GFX10-NEXT: global_atomic_or v0, v0, v1, s[2:3] glc
; GFX10-NEXT: s_waitcnt vmcnt(0)
-; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: buffer_gl1_inv
+; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: ; return to shader part epilog
;
; GFX11-LABEL: global_or_saddr_i32_rtn:
; GFX11: ; %bb.0:
; GFX11-NEXT: global_atomic_or_b32 v0, v0, v1, s[2:3] glc
; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: ; return to shader part epilog
%zext.offset = zext i32 %voffset to i64
%gep0 = getelementptr inbounds i8, ptr addrspace(1) %sbase, i64 %zext.offset
@@ -1296,16 +1296,16 @@ define amdgpu_ps float @global_or_saddr_i32_rtn_neg128(ptr addrspace(1) inreg %s
; GFX10: ; %bb.0:
; GFX10-NEXT: global_atomic_or v0, v0, v1, s[2:3] offset:-128 glc
; GFX10-NEXT: s_waitcnt vmcnt(0)
-; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: buffer_gl1_inv
+; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: ; return to shader part epilog
;
; GFX11-LABEL: global_or_saddr_i32_rtn_neg128:
; GFX11: ; %bb.0:
; GFX11-NEXT: global_atomic_or_b32 v0, v0, v1, s[2:3] offset:-128 glc
; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: ; return to shader part epilog
%zext.offset = zext i32 %voffset to i64
%gep0 = getelementptr inbounds i8, ptr addrspace(1) %sbase, i64 %zext.offset
@@ -1327,16 +1327,16 @@ define amdgpu_ps void @global_or_saddr_i32_nortn(ptr addrspace(1) inreg %sbase,
; GFX10: ; %bb.0:
; GFX10-NEXT: global_atomic_or v0, v1, s[2:3]
; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: buffer_gl1_inv
+; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: s_endpgm
;
; GFX11-LABEL: global_or_saddr_i32_nortn:
; GFX11: ; %bb.0:
; GFX11-NEXT: global_atomic_or_b32 v0, v1, s[2:3]
; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: s_endpgm
%zext.offset = zext i32 %voffset to i64
%gep0 = getelementptr inbounds i8, ptr addrspace(1) %sbase, i64 %zext.offset
@@ -1356,16 +1356,16 @@ define amdgpu_ps void @global_or_saddr_i32_nortn_neg128(ptr addrspace(1) inreg %
; GFX10: ; %bb.0:
; GFX10-NEXT: global_atomic_or v0, v1, s[2:3] offset:-128
; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: buffer_gl1_inv
+; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: s_endpgm
;
; GFX11-LABEL: global_or_saddr_i32_nortn_neg128:
; GFX11: ; %bb.0:
; GFX11-NEXT: global_atomic_or_b32 v0, v1, s[2:3] offset:-128
; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: s_endpgm
%zext.offset = zext i32 %voffset to i64
%gep0 = getelementptr inbounds i8, ptr addrspace(1) %sbase, i64 %zext.offset
@@ -1386,16 +1386,16 @@ define amdgpu_ps <2 x float> @global_or_saddr_i64_rtn(ptr addrspace(1) inreg %sb
; GFX10: ; %bb.0:
; GFX10-NEXT: global_atomic_or_x2 v[0:1], v0, v[1:2], s[2:3] glc
; GFX10-NEXT: s_waitcnt vmcnt(0)
-; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: buffer_gl1_inv
+; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: ; return to shader part epilog
;
; GFX11-LABEL: global_or_saddr_i64_rtn:
; GFX11: ; %bb.0:
; GFX11-NEXT: global_atomic_or_b64 v[0:1], v0, v[1:2], s[2:3] glc
; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: ; return to shader part epilog
%zext.offset = zext i32 %voffset to i64
%gep0 = getelementptr inbounds i8, ptr addrspace(1) %sbase, i64 %zext.offset
@@ -1416,16 +1416,16 @@ define amdgpu_ps <2 x float> @global_or_saddr_i64_rtn_neg128(ptr addrspace(1) in
; GFX10: ; %bb.0:
; GFX10-NEXT: global_atomic_or_x2 v[0:1], v0, v[1:2], s[2:3] offset:-128 glc
; GFX10-NEXT: s_waitcnt vmcnt(0)
-; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: buffer_gl1_inv
+; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: ; return to shader part epilog
;
; GFX11-LABEL: global_or_saddr_i64_rtn_neg128:
; GFX11: ; %bb.0:
; GFX11-NEXT: global_atomic_or_b64 v[0:1], v0, v[1:2], s[2:3] offset:-128 glc
; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: ; return to shader part epilog
%zext.offset = zext i32 %voffset to i64
%gep0 = getelementptr inbounds i8, ptr addrspace(1) %sbase, i64 %zext.offset
@@ -1447,16 +1447,16 @@ define amdgpu_ps void @global_or_saddr_i64_nortn(ptr addrspace(1) inreg %sbase,
; GFX10: ; %bb.0:
; GFX10-NEXT: global_atomic_or_x2 v0, v[1:2], s[2:3]
; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: buffer_gl1_inv
+; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: s_endpgm
;
; GFX11-LABEL: global_or_saddr_i64_nortn:
; GFX11: ; %bb.0:
; GFX11-NEXT: global_atomic_or_b64 v0, v[1:2], s[2:3]
; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: s_endpgm
%zext.offset = zext i32 %voffset to i64
%gep0 = getelementptr inbounds i8, ptr addrspace(1) %sbase, i64 %zext.offset
@@ -1476,16 +1476,16 @@ define amdgpu_ps void @global_or_saddr_i64_nortn_neg128(ptr addrspace(1) inreg %
; GFX10: ; %bb.0:
; GFX10-NEXT: global_atomic_or_x2 v0, v[1:2], s[2:3] offset:-128
; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: buffer_gl1_inv
+; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: s_endpgm
;
; GFX11-LABEL: global_or_saddr_i64_nortn_neg128:
; GFX11: ; %bb.0:
; GFX11-NEXT: global_atomic_or_b64 v0, v[1:2], s[2:3] offset:-128
; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: s_endpgm
%zext.offset = zext i32 %voffset to i64
%gep0 = getelementptr inbounds i8, ptr addrspace(1) %sbase, i64 %zext.offset
@@ -1510,16 +1510,16 @@ define amdgpu_ps float @global_xor_saddr_i32_rtn(ptr addrspace(1) inreg %sbase,
; GFX10: ; %bb.0:
; GFX10-NEXT: global_atomic_xor v0, v0, v1, s[2:3] glc
; GFX10-NEXT: s_waitcnt vmcnt(0)
-; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: buffer_gl1_inv
+; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: ; return to shader part epilog
;
; GFX11-LABEL: global_xor_saddr_i32_rtn:
; GFX11: ; %bb.0:
; GFX11-NEXT: global_atomic_xor_b32 v0, v0, v1, s[2:3] glc
; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: ; return to shader part epilog
%zext.offset = zext i32 %voffset to i64
%gep0 = getelementptr inbounds i8, ptr addrspace(1) %sbase, i64 %zext.offset
@@ -1540,16 +1540,16 @@ define amdgpu_ps float @global_xor_saddr_i32_rtn_neg128(ptr addrspace(1) inreg %
; GFX10: ; %bb.0:
; GFX10-NEXT: global_atomic_xor v0, v0, v1, s[2:3] offset:-128 glc
; GFX10-NEXT: s_waitcnt vmcnt(0)
-; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: buffer_gl1_inv
+; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: ; return to shader part epilog
;
; GFX11-LABEL: global_xor_saddr_i32_rtn_neg128:
; GFX11: ; %bb.0:
; GFX11-NEXT: global_atomic_xor_b32 v0, v0, v1, s[2:3] offset:-128 glc
; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: ; return to shader part epilog
%zext.offset = zext i32 %voffset to i64
%gep0 = getelementptr inbounds i8, ptr addrspace(1) %sbase, i64 %zext.offset
@@ -1571,16 +1571,16 @@ define amdgpu_ps void @global_xor_saddr_i32_nortn(ptr addrspace(1) inreg %sbase,
; GFX10: ; %bb.0:
; GFX10-NEXT: global_atomic_xor v0, v1, s[2:3]
; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: buffer_gl1_inv
+; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: s_endpgm
;
; GFX11-LABEL: global_xor_saddr_i32_nortn:
; GFX11: ; %bb.0:
; GFX11-NEXT: global_atomic_xor_b32 v0, v1, s[2:3]
; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: s_endpgm
%zext.offset = zext i32 %voffset to i64
%gep0 = getelementptr inbounds i8, ptr addrspace(1) %sbase, i64 %zext.offset
@@ -1600,16 +1600,16 @@ define amdgpu_ps void @global_xor_saddr_i32_nortn_neg128(ptr addrspace(1) inreg
; GFX10: ; %bb.0:
; GFX10-NEXT: global_atomic_xor v0, v1, s[2:3] offset:-128
; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: buffer_gl1_inv
+; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: s_endpgm
;
; GFX11-LABEL: global_xor_saddr_i32_nortn_neg128:
; GFX11: ; %bb.0:
; GFX11-NEXT: global_atomic_xor_b32 v0, v1, s[2:3] offset:-128
; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: s_endpgm
%zext.offset = zext i32 %voffset to i64
%gep0 = getelementptr inbounds i8, ptr addrspace(1) %sbase, i64 %zext.offset
@@ -1630,16 +1630,16 @@ define amdgpu_ps <2 x float> @global_xor_saddr_i64_rtn(ptr addrspace(1) inreg %s
; GFX10: ; %bb.0:
; GFX10-NEXT: global_atomic_xor_x2 v[0:1], v0, v[1:2], s[2:3] glc
; GFX10-NEXT: s_waitcnt vmcnt(0)
-; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: buffer_gl1_inv
+; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: ; return to shader part epilog
;
; GFX11-LABEL: global_xor_saddr_i64_rtn:
; GFX11: ; %bb.0:
; GFX11-NEXT: global_atomic_xor_b64 v[0:1], v0, v[1:2], s[2:3] glc
; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: ; return to shader part epilog
%zext.offset = zext i32 %voffset to i64
%gep0 = getelementptr inbounds i8, ptr addrspace(1) %sbase, i64 %zext.offset
@@ -1660,16 +1660,16 @@ define amdgpu_ps <2 x float> @global_xor_saddr_i64_rtn_neg128(ptr addrspace(1) i
; GFX10: ; %bb.0:
; GFX10-NEXT: global_atomic_xor_x2 v[0:1], v0, v[1:2], s[2:3] offset:-128 glc
; GFX10-NEXT: s_waitcnt vmcnt(0)
-; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: buffer_gl1_inv
+; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: ; return to shader part epilog
;
; GFX11-LABEL: global_xor_saddr_i64_rtn_neg128:
; GFX11: ; %bb.0:
; GFX11-NEXT: global_atomic_xor_b64 v[0:1], v0, v[1:2], s[2:3] offset:-128 glc
; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: ; return to shader part epilog
%zext.offset = zext i32 %voffset to i64
%gep0 = getelementptr inbounds i8, ptr addrspace(1) %sbase, i64 %zext.offset
@@ -1691,16 +1691,16 @@ define amdgpu_ps void @global_xor_saddr_i64_nortn(ptr addrspace(1) inreg %sbase,
; GFX10: ; %bb.0:
; GFX10-NEXT: global_atomic_xor_x2 v0, v[1:2], s[2:3]
; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: buffer_gl1_inv
+; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: s_endpgm
;
; GFX11-LABEL: global_xor_saddr_i64_nortn:
; GFX11: ; %bb.0:
; GFX11-NEXT: global_atomic_xor_b64 v0, v[1:2], s[2:3]
; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: s_endpgm
%zext.offset = zext i32 %voffset to i64
%gep0 = getelementptr inbounds i8, ptr addrspace(1) %sbase, i64 %zext.offset
@@ -1720,16 +1720,16 @@ define amdgpu_ps void @global_xor_saddr_i64_nortn_neg128(ptr addrspace(1) inreg
; GFX10: ; %bb.0:
; GFX10-NEXT: global_atomic_xor_x2 v0, v[1:2], s[2:3] offset:-128
; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: buffer_gl1_inv
+; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: s_endpgm
;
; GFX11-LABEL: global_xor_saddr_i64_nortn_neg128:
; GFX11: ; %bb.0:
; GFX11-NEXT: global_atomic_xor_b64 v0, v[1:2], s[2:3] offset:-128
; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: s_endpgm
%zext.offset = zext i32 %voffset to i64
%gep0 = getelementptr inbounds i8, ptr addrspace(1) %sbase, i64 %zext.offset
@@ -2620,8 +2620,8 @@ define amdgpu_ps float @global_cmpxchg_saddr_i32_rtn(ptr addrspace(1) inreg %sba
; GFX10-NEXT: v_mov_b32_e32 v3, v1
; GFX10-NEXT: global_atomic_cmpswap v0, v0, v[2:3], s[2:3] glc
; GFX10-NEXT: s_waitcnt vmcnt(0)
-; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: buffer_gl1_inv
+; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: ; return to shader part epilog
;
; GFX11-LABEL: global_cmpxchg_saddr_i32_rtn:
@@ -2629,8 +2629,8 @@ define amdgpu_ps float @global_cmpxchg_saddr_i32_rtn(ptr addrspace(1) inreg %sba
; GFX11-NEXT: v_mov_b32_e32 v3, v1
; GFX11-NEXT: global_atomic_cmpswap_b32 v0, v0, v[2:3], s[2:3] glc
; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: ; return to shader part epilog
%zext.offset = zext i32 %voffset to i64
%gep0 = getelementptr inbounds i8, ptr addrspace(1) %sbase, i64 %zext.offset
@@ -2654,8 +2654,8 @@ define amdgpu_ps float @global_cmpxchg_saddr_i32_rtn_neg128(ptr addrspace(1) inr
; GFX10-NEXT: v_mov_b32_e32 v3, v1
; GFX10-NEXT: global_atomic_cmpswap v0, v0, v[2:3], s[2:3] offset:-128 glc
; GFX10-NEXT: s_waitcnt vmcnt(0)
-; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: buffer_gl1_inv
+; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: ; return to shader part epilog
;
; GFX11-LABEL: global_cmpxchg_saddr_i32_rtn_neg128:
@@ -2663,8 +2663,8 @@ define amdgpu_ps float @global_cmpxchg_saddr_i32_rtn_neg128(ptr addrspace(1) inr
; GFX11-NEXT: v_mov_b32_e32 v3, v1
; GFX11-NEXT: global_atomic_cmpswap_b32 v0, v0, v[2:3], s[2:3] offset:-128 glc
; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: ; return to shader part epilog
%zext.offset = zext i32 %voffset to i64
%gep0 = getelementptr inbounds i8, ptr addrspace(1) %sbase, i64 %zext.offset
@@ -2689,8 +2689,8 @@ define amdgpu_ps void @global_cmpxchg_saddr_i32_nortn(ptr addrspace(1) inreg %sb
; GFX10-NEXT: v_mov_b32_e32 v3, v1
; GFX10-NEXT: global_atomic_cmpswap v0, v[2:3], s[2:3]
; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: buffer_gl1_inv
+; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: s_endpgm
;
; GFX11-LABEL: global_cmpxchg_saddr_i32_nortn:
@@ -2698,8 +2698,8 @@ define amdgpu_ps void @global_cmpxchg_saddr_i32_nortn(ptr addrspace(1) inreg %sb
; GFX11-NEXT: v_mov_b32_e32 v3, v1
; GFX11-NEXT: global_atomic_cmpswap_b32 v0, v[2:3], s[2:3]
; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: s_endpgm
%zext.offset = zext i32 %voffset to i64
%gep0 = getelementptr inbounds i8, ptr addrspace(1) %sbase, i64 %zext.offset
@@ -2721,8 +2721,8 @@ define amdgpu_ps void @global_cmpxchg_saddr_i32_nortn_neg128(ptr addrspace(1) in
; GFX10-NEXT: v_mov_b32_e32 v3, v1
; GFX10-NEXT: global_atomic_cmpswap v0, v[2:3], s[2:3] offset:-128
; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: buffer_gl1_inv
+; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: s_endpgm
;
; GFX11-LABEL: global_cmpxchg_saddr_i32_nortn_neg128:
@@ -2730,8 +2730,8 @@ define amdgpu_ps void @global_cmpxchg_saddr_i32_nortn_neg128(ptr addrspace(1) in
; GFX11-NEXT: v_mov_b32_e32 v3, v1
; GFX11-NEXT: global_atomic_cmpswap_b32 v0, v[2:3], s[2:3] offset:-128
; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: s_endpgm
%zext.offset = zext i32 %voffset to i64
%gep0 = getelementptr inbounds i8, ptr addrspace(1) %sbase, i64 %zext.offset
@@ -2756,8 +2756,8 @@ define amdgpu_ps <2 x float> @global_cmpxchg_saddr_i64_rtn(ptr addrspace(1) inre
; GFX10-NEXT: v_mov_b32_e32 v5, v1
; GFX10-NEXT: global_atomic_cmpswap_x2 v[0:1], v0, v[3:6], s[2:3] glc
; GFX10-NEXT: s_waitcnt vmcnt(0)
-; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: buffer_gl1_inv
+; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: ; return to shader part epilog
;
; GFX11-LABEL: global_cmpxchg_saddr_i64_rtn:
@@ -2766,8 +2766,8 @@ define amdgpu_ps <2 x float> @global_cmpxchg_saddr_i64_rtn(ptr addrspace(1) inre
; GFX11-NEXT: v_mov_b32_e32 v5, v1
; GFX11-NEXT: global_atomic_cmpswap_b64 v[0:1], v0, v[3:6], s[2:3] glc
; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: ; return to shader part epilog
%zext.offset = zext i32 %voffset to i64
%gep0 = getelementptr inbounds i8, ptr addrspace(1) %sbase, i64 %zext.offset
@@ -2793,8 +2793,8 @@ define amdgpu_ps <2 x float> @global_cmpxchg_saddr_i64_rtn_neg128(ptr addrspace(
; GFX10-NEXT: v_mov_b32_e32 v5, v1
; GFX10-NEXT: global_atomic_cmpswap_x2 v[0:1], v0, v[3:6], s[2:3] offset:-128 glc
; GFX10-NEXT: s_waitcnt vmcnt(0)
-; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: buffer_gl1_inv
+; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: ; return to shader part epilog
;
; GFX11-LABEL: global_cmpxchg_saddr_i64_rtn_neg128:
@@ -2803,8 +2803,8 @@ define amdgpu_ps <2 x float> @global_cmpxchg_saddr_i64_rtn_neg128(ptr addrspace(
; GFX11-NEXT: v_mov_b32_e32 v5, v1
; GFX11-NEXT: global_atomic_cmpswap_b64 v[0:1], v0, v[3:6], s[2:3] offset:-128 glc
; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: ; return to shader part epilog
%zext.offset = zext i32 %voffset to i64
%gep0 = getelementptr inbounds i8, ptr addrspace(1) %sbase, i64 %zext.offset
@@ -2831,8 +2831,8 @@ define amdgpu_ps void @global_cmpxchg_saddr_i64_nortn(ptr addrspace(1) inreg %sb
; GFX10-NEXT: v_mov_b32_e32 v5, v1
; GFX10-NEXT: global_atomic_cmpswap_x2 v0, v[3:6], s[2:3]
; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: buffer_gl1_inv
+; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: s_endpgm
;
; GFX11-LABEL: global_cmpxchg_saddr_i64_nortn:
@@ -2841,8 +2841,8 @@ define amdgpu_ps void @global_cmpxchg_saddr_i64_nortn(ptr addrspace(1) inreg %sb
; GFX11-NEXT: v_mov_b32_e32 v5, v1
; GFX11-NEXT: global_atomic_cmpswap_b64 v0, v[3:6], s[2:3]
; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: s_endpgm
%zext.offset = zext i32 %voffset to i64
%gep0 = getelementptr inbounds i8, ptr addrspace(1) %sbase, i64 %zext.offset
@@ -2866,8 +2866,8 @@ define amdgpu_ps void @global_cmpxchg_saddr_i64_nortn_neg128(ptr addrspace(1) in
; GFX10-NEXT: v_mov_b32_e32 v5, v1
; GFX10-NEXT: global_atomic_cmpswap_x2 v0, v[3:6], s[2:3] offset:-128
; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: buffer_gl1_inv
+; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: s_endpgm
;
; GFX11-LABEL: global_cmpxchg_saddr_i64_nortn_neg128:
@@ -2876,8 +2876,8 @@ define amdgpu_ps void @global_cmpxchg_saddr_i64_nortn_neg128(ptr addrspace(1) in
; GFX11-NEXT: v_mov_b32_e32 v5, v1
; GFX11-NEXT: global_atomic_cmpswap_b64 v0, v[3:6], s[2:3] offset:-128
; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: s_endpgm
%zext.offset = zext i32 %voffset to i64
%gep0 = getelementptr inbounds i8, ptr addrspace(1) %sbase, i64 %zext.offset
diff --git a/llvm/test/CodeGen/AMDGPU/global-saddr-load.ll b/llvm/test/CodeGen/AMDGPU/global-saddr-load.ll
index 6d99485..d9cbbc1 100644
--- a/llvm/test/CodeGen/AMDGPU/global-saddr-load.ll
+++ b/llvm/test/CodeGen/AMDGPU/global-saddr-load.ll
@@ -3575,16 +3575,16 @@ define amdgpu_ps float @atomic_global_load_saddr_i32(ptr addrspace(1) inreg %sba
; GFX10: ; %bb.0:
; GFX10-NEXT: global_load_dword v0, v0, s[2:3] glc dlc
; GFX10-NEXT: s_waitcnt vmcnt(0)
-; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: buffer_gl1_inv
+; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: ; return to shader part epilog
;
; GFX11-LABEL: atomic_global_load_saddr_i32:
; GFX11: ; %bb.0:
; GFX11-NEXT: global_load_b32 v0, v0, s[2:3] glc
; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: ; return to shader part epilog
;
; GFX12-LABEL: atomic_global_load_saddr_i32:
@@ -3612,16 +3612,16 @@ define amdgpu_ps float @atomic_global_load_saddr_i32_immneg128(ptr addrspace(1)
; GFX10: ; %bb.0:
; GFX10-NEXT: global_load_dword v0, v0, s[2:3] offset:-128 glc dlc
; GFX10-NEXT: s_waitcnt vmcnt(0)
-; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: buffer_gl1_inv
+; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: ; return to shader part epilog
;
; GFX11-LABEL: atomic_global_load_saddr_i32_immneg128:
; GFX11: ; %bb.0:
; GFX11-NEXT: global_load_b32 v0, v0, s[2:3] offset:-128 glc
; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: ; return to shader part epilog
;
; GFX12-LABEL: atomic_global_load_saddr_i32_immneg128:
@@ -3650,16 +3650,16 @@ define amdgpu_ps <2 x float> @atomic_global_load_saddr_i64(ptr addrspace(1) inre
; GFX10: ; %bb.0:
; GFX10-NEXT: global_load_dwordx2 v[0:1], v0, s[2:3] glc dlc
; GFX10-NEXT: s_waitcnt vmcnt(0)
-; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: buffer_gl1_inv
+; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: ; return to shader part epilog
;
; GFX11-LABEL: atomic_global_load_saddr_i64:
; GFX11: ; %bb.0:
; GFX11-NEXT: global_load_b64 v[0:1], v0, s[2:3] glc
; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: ; return to shader part epilog
;
; GFX12-LABEL: atomic_global_load_saddr_i64:
@@ -3687,16 +3687,16 @@ define amdgpu_ps <2 x float> @atomic_global_load_saddr_i64_immneg128(ptr addrspa
; GFX10: ; %bb.0:
; GFX10-NEXT: global_load_dwordx2 v[0:1], v0, s[2:3] offset:-128 glc dlc
; GFX10-NEXT: s_waitcnt vmcnt(0)
-; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: buffer_gl1_inv
+; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: ; return to shader part epilog
;
; GFX11-LABEL: atomic_global_load_saddr_i64_immneg128:
; GFX11: ; %bb.0:
; GFX11-NEXT: global_load_b64 v[0:1], v0, s[2:3] offset:-128 glc
; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: ; return to shader part epilog
;
; GFX12-LABEL: atomic_global_load_saddr_i64_immneg128:
diff --git a/llvm/test/CodeGen/AMDGPU/hsa-generic-target-features.ll b/llvm/test/CodeGen/AMDGPU/hsa-generic-target-features.ll
index 4fee563..a2d9bbf 100644
--- a/llvm/test/CodeGen/AMDGPU/hsa-generic-target-features.ll
+++ b/llvm/test/CodeGen/AMDGPU/hsa-generic-target-features.ll
@@ -1,14 +1,14 @@
-; RUN: llc --amdhsa-code-object-version=6 -mtriple=amdgcn-amd-amdhsa -mcpu=gfx10.1-generic -mattr=+cumode < %s | FileCheck -check-prefix=NOCU %s
-; RUN: llc --amdhsa-code-object-version=6 -mtriple=amdgcn-amd-amdhsa -mcpu=gfx10.1-generic < %s | FileCheck -check-prefix=CU %s
-; RUN: llc --amdhsa-code-object-version=6 -mtriple=amdgcn-amd-amdhsa -mcpu=gfx10.3-generic -mattr=+cumode < %s | FileCheck -check-prefix=NOCU %s
-; RUN: llc --amdhsa-code-object-version=6 -mtriple=amdgcn-amd-amdhsa -mcpu=gfx10.3-generic < %s | FileCheck -check-prefix=CU %s
+; RUN: llc --amdhsa-code-object-version=6 -mtriple=amdgcn-amd-amdhsa -mcpu=gfx10-1-generic -mattr=+cumode < %s | FileCheck -check-prefix=NOCU %s
+; RUN: llc --amdhsa-code-object-version=6 -mtriple=amdgcn-amd-amdhsa -mcpu=gfx10-1-generic < %s | FileCheck -check-prefix=CU %s
+; RUN: llc --amdhsa-code-object-version=6 -mtriple=amdgcn-amd-amdhsa -mcpu=gfx10-3-generic -mattr=+cumode < %s | FileCheck -check-prefix=NOCU %s
+; RUN: llc --amdhsa-code-object-version=6 -mtriple=amdgcn-amd-amdhsa -mcpu=gfx10-3-generic < %s | FileCheck -check-prefix=CU %s
; RUN: llc --amdhsa-code-object-version=6 -mtriple=amdgcn-amd-amdhsa -mcpu=gfx11-generic -mattr=+cumode < %s | FileCheck -check-prefix=NOCU %s
; RUN: llc --amdhsa-code-object-version=6 -mtriple=amdgcn-amd-amdhsa -mcpu=gfx11-generic < %s | FileCheck -check-prefix=CU %s
-; RUN: llc --amdhsa-code-object-version=6 -mtriple=amdgcn-amd-amdhsa -mcpu=gfx10.1-generic -mattr=+wavefrontsize32,-wavefrontsize64 < %s | FileCheck -check-prefix=W32 %s
-; RUN: llc --amdhsa-code-object-version=6 -mtriple=amdgcn-amd-amdhsa -mcpu=gfx10.1-generic -mattr=-wavefrontsize32,+wavefrontsize64 < %s | FileCheck -check-prefix=W64 %s
-; RUN: llc --amdhsa-code-object-version=6 -mtriple=amdgcn-amd-amdhsa -mcpu=gfx10.3-generic -mattr=+wavefrontsize32,-wavefrontsize64 < %s | FileCheck -check-prefix=W32 %s
-; RUN: llc --amdhsa-code-object-version=6 -mtriple=amdgcn-amd-amdhsa -mcpu=gfx10.3-generic -mattr=-wavefrontsize32,+wavefrontsize64 < %s | FileCheck -check-prefix=W64 %s
+; RUN: llc --amdhsa-code-object-version=6 -mtriple=amdgcn-amd-amdhsa -mcpu=gfx10-1-generic -mattr=+wavefrontsize32,-wavefrontsize64 < %s | FileCheck -check-prefix=W32 %s
+; RUN: llc --amdhsa-code-object-version=6 -mtriple=amdgcn-amd-amdhsa -mcpu=gfx10-1-generic -mattr=-wavefrontsize32,+wavefrontsize64 < %s | FileCheck -check-prefix=W64 %s
+; RUN: llc --amdhsa-code-object-version=6 -mtriple=amdgcn-amd-amdhsa -mcpu=gfx10-3-generic -mattr=+wavefrontsize32,-wavefrontsize64 < %s | FileCheck -check-prefix=W32 %s
+; RUN: llc --amdhsa-code-object-version=6 -mtriple=amdgcn-amd-amdhsa -mcpu=gfx10-3-generic -mattr=-wavefrontsize32,+wavefrontsize64 < %s | FileCheck -check-prefix=W64 %s
; RUN: llc --amdhsa-code-object-version=6 -mtriple=amdgcn-amd-amdhsa -mcpu=gfx11-generic -mattr=+wavefrontsize32,-wavefrontsize64 < %s | FileCheck -check-prefix=W32 %s
; RUN: llc --amdhsa-code-object-version=6 -mtriple=amdgcn-amd-amdhsa -mcpu=gfx11-generic -mattr=-wavefrontsize32,+wavefrontsize64 < %s | FileCheck -check-prefix=W64 %s
diff --git a/llvm/test/CodeGen/AMDGPU/insert-waitcnts-crash.ll b/llvm/test/CodeGen/AMDGPU/insert-waitcnts-crash.ll
index 833aba9..9e336a7 100644
--- a/llvm/test/CodeGen/AMDGPU/insert-waitcnts-crash.ll
+++ b/llvm/test/CodeGen/AMDGPU/insert-waitcnts-crash.ll
@@ -23,8 +23,8 @@ define fastcc i32 @foo() {
; CHECK-NEXT: $sgpr17 = S_ADDC_U32 internal $sgpr17, target-flags(amdgpu-gotprel32-hi) @bar + 12, implicit-def $scc, implicit internal $scc
; CHECK-NEXT: }
; CHECK-NEXT: S_WAITCNT_VSCNT undef $sgpr_null, 0
- ; CHECK-NEXT: BUFFER_GL0_INV implicit $exec
; CHECK-NEXT: BUFFER_GL1_INV implicit $exec
+ ; CHECK-NEXT: BUFFER_GL0_INV implicit $exec
; CHECK-NEXT: renamable $sgpr16_sgpr17 = S_LOAD_DWORDX2_IMM killed renamable $sgpr16_sgpr17, 0, 0 :: (dereferenceable invariant load (s64) from got, addrspace 4)
; CHECK-NEXT: $vgpr40 = V_WRITELANE_B32 killed $sgpr30, 0, killed $vgpr40
; CHECK-NEXT: $vgpr40 = V_WRITELANE_B32 killed $sgpr31, 1, killed $vgpr40
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.image.gather4.d16.dim.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.image.gather4.d16.dim.ll
index cf324d6..3a5a608 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.image.gather4.d16.dim.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.image.gather4.d16.dim.ll
@@ -3,7 +3,7 @@
; RUN: llc < %s -mtriple=amdgcn -mcpu=gfx900 -verify-machineinstrs | FileCheck -check-prefixes=GCN,GFX9 %s
; RUN: llc < %s -mtriple=amdgcn -mcpu=gfx9-generic --amdhsa-code-object-version=6 -verify-machineinstrs | FileCheck -check-prefixes=GCN,GFX9 %s
; RUN: llc < %s -mtriple=amdgcn -mcpu=gfx1010 -verify-machineinstrs | FileCheck -check-prefixes=GCN,GFX10 %s
-; RUN: llc < %s -mtriple=amdgcn -mcpu=gfx10.1-generic --amdhsa-code-object-version=6 -verify-machineinstrs | FileCheck -check-prefixes=GCN,GFX10 %s
+; RUN: llc < %s -mtriple=amdgcn -mcpu=gfx10-1-generic --amdhsa-code-object-version=6 -verify-machineinstrs | FileCheck -check-prefixes=GCN,GFX10 %s
; RUN: llc < %s -mtriple=amdgcn -mcpu=gfx1100 -verify-machineinstrs | FileCheck -check-prefixes=GCN,GFX10 %s
; RUN: llc < %s -mtriple=amdgcn -mcpu=gfx11-generic --amdhsa-code-object-version=6 -verify-machineinstrs | FileCheck -check-prefixes=GCN,GFX10 %s
; RUN: llc < %s -mtriple=amdgcn -mcpu=gfx1200 -verify-machineinstrs | FileCheck -check-prefixes=GCN,GFX12 %s
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.image.sample.dim.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.image.sample.dim.ll
index 10e1ae3..c8421c6 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.image.sample.dim.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.image.sample.dim.ll
@@ -3,7 +3,7 @@
; RUN: llc -mtriple=amdgcn -mcpu=gfx900 -verify-machineinstrs < %s | FileCheck -check-prefixes=GFX6789 %s
; RUN: llc -mtriple=amdgcn -mcpu=gfx9-generic --amdhsa-code-object-version=6 -verify-machineinstrs < %s | FileCheck -check-prefixes=GFX6789 %s
; RUN: llc -mtriple=amdgcn -mcpu=gfx1010 -verify-machineinstrs < %s | FileCheck -check-prefixes=GFX10PLUS,GFX10 %s
-; RUN: llc -mtriple=amdgcn -mcpu=gfx10.1-generic --amdhsa-code-object-version=6 -verify-machineinstrs < %s | FileCheck -check-prefixes=GFX10PLUS,GFX10 %s
+; RUN: llc -mtriple=amdgcn -mcpu=gfx10-1-generic --amdhsa-code-object-version=6 -verify-machineinstrs < %s | FileCheck -check-prefixes=GFX10PLUS,GFX10 %s
; RUN: llc -mtriple=amdgcn -mcpu=gfx1100 -amdgpu-enable-delay-alu=0 -verify-machineinstrs < %s | FileCheck -check-prefixes=GFX10PLUS,GFX11 %s
; RUN: llc -mtriple=amdgcn -mcpu=gfx11-generic --amdhsa-code-object-version=6 -amdgpu-enable-delay-alu=0 -verify-machineinstrs < %s | FileCheck -check-prefixes=GFX10PLUS,GFX11 %s
; RUN: llc -mtriple=amdgcn -mcpu=gfx1200 -amdgpu-enable-delay-alu=0 -verify-machineinstrs < %s | FileCheck -check-prefixes=GFX12 %s
diff --git a/llvm/test/CodeGen/AMDGPU/machine-sink-temporal-divergence-swdev407790.ll b/llvm/test/CodeGen/AMDGPU/machine-sink-temporal-divergence-swdev407790.ll
index 138a6a8..0bb5288 100644
--- a/llvm/test/CodeGen/AMDGPU/machine-sink-temporal-divergence-swdev407790.ll
+++ b/llvm/test/CodeGen/AMDGPU/machine-sink-temporal-divergence-swdev407790.ll
@@ -273,11 +273,11 @@ define protected amdgpu_kernel void @kernel_round1(ptr addrspace(1) nocapture no
; CHECK-NEXT: v_lshlrev_b32_e32 v0, 2, v0
; CHECK-NEXT: ds_write_b32 v0, v58
; CHECK-NEXT: s_branch .LBB0_7
-; CHECK-NEXT: .LBB0_16: ; %Flow43
+; CHECK-NEXT: .LBB0_16: ; %Flow45
; CHECK-NEXT: ; in Loop: Header=BB0_5 Depth=1
; CHECK-NEXT: s_or_b32 exec_lo, exec_lo, s53
; CHECK-NEXT: v_mov_b32_e32 v57, v0
-; CHECK-NEXT: .LBB0_17: ; %Flow44
+; CHECK-NEXT: .LBB0_17: ; %Flow46
; CHECK-NEXT: ; in Loop: Header=BB0_5 Depth=1
; CHECK-NEXT: s_or_b32 exec_lo, exec_lo, s52
; CHECK-NEXT: s_mov_b32 s49, exec_lo
@@ -323,11 +323,11 @@ define protected amdgpu_kernel void @kernel_round1(ptr addrspace(1) nocapture no
; CHECK-NEXT: v_lshlrev_b32_e32 v0, 2, v0
; CHECK-NEXT: ds_write_b32 v0, v57
; CHECK-NEXT: s_branch .LBB0_19
-; CHECK-NEXT: .LBB0_22: ; %Flow41
+; CHECK-NEXT: .LBB0_22: ; %Flow43
; CHECK-NEXT: ; in Loop: Header=BB0_5 Depth=1
; CHECK-NEXT: s_inst_prefetch 0x2
; CHECK-NEXT: s_or_b32 exec_lo, exec_lo, s52
-; CHECK-NEXT: .LBB0_23: ; %Flow42
+; CHECK-NEXT: .LBB0_23: ; %Flow44
; CHECK-NEXT: ; in Loop: Header=BB0_5 Depth=1
; CHECK-NEXT: s_or_b32 exec_lo, exec_lo, s49
; CHECK-NEXT: ; %bb.24: ; in Loop: Header=BB0_5 Depth=1
@@ -340,7 +340,7 @@ define protected amdgpu_kernel void @kernel_round1(ptr addrspace(1) nocapture no
; CHECK-NEXT: s_or_b32 s43, s4, s43
; CHECK-NEXT: s_andn2_b32 exec_lo, exec_lo, s43
; CHECK-NEXT: s_cbranch_execnz .LBB0_5
-; CHECK-NEXT: .LBB0_25: ; %Flow49
+; CHECK-NEXT: .LBB0_25: ; %Flow51
; CHECK-NEXT: s_or_b32 exec_lo, exec_lo, s42
; CHECK-NEXT: v_mov_b32_e32 v31, v40
; CHECK-NEXT: v_mov_b32_e32 v0, 1
@@ -362,12 +362,10 @@ define protected amdgpu_kernel void @kernel_round1(ptr addrspace(1) nocapture no
; CHECK-NEXT: v_cmpx_gt_u32_e64 v47, v41
; CHECK-NEXT: s_cbranch_execz .LBB0_33
; CHECK-NEXT: ; %bb.26:
-; CHECK-NEXT: s_add_u32 s42, s44, 8
-; CHECK-NEXT: s_addc_u32 s43, s45, 0
-; CHECK-NEXT: s_mov_b32 s44, 0
+; CHECK-NEXT: s_mov_b32 s42, 0
; CHECK-NEXT: s_branch .LBB0_28
; CHECK-NEXT: .LBB0_27: ; in Loop: Header=BB0_28 Depth=1
-; CHECK-NEXT: s_or_b32 exec_lo, exec_lo, s45
+; CHECK-NEXT: s_or_b32 exec_lo, exec_lo, s43
; CHECK-NEXT: v_mov_b32_e32 v31, v40
; CHECK-NEXT: v_mov_b32_e32 v0, 0
; CHECK-NEXT: s_add_u32 s8, s34, 40
@@ -383,12 +381,12 @@ define protected amdgpu_kernel void @kernel_round1(ptr addrspace(1) nocapture no
; CHECK-NEXT: s_swappc_b64 s[30:31], s[6:7]
; CHECK-NEXT: v_add_co_u32 v41, vcc_lo, v0, v41
; CHECK-NEXT: v_cmp_le_u32_e32 vcc_lo, v47, v41
-; CHECK-NEXT: s_or_b32 s44, vcc_lo, s44
-; CHECK-NEXT: s_andn2_b32 exec_lo, exec_lo, s44
+; CHECK-NEXT: s_or_b32 s42, vcc_lo, s42
+; CHECK-NEXT: s_andn2_b32 exec_lo, exec_lo, s42
; CHECK-NEXT: s_cbranch_execz .LBB0_33
; CHECK-NEXT: .LBB0_28: ; =>This Inner Loop Header: Depth=1
; CHECK-NEXT: v_lshlrev_b32_e32 v0, 2, v41
-; CHECK-NEXT: s_mov_b32 s45, exec_lo
+; CHECK-NEXT: s_mov_b32 s43, exec_lo
; CHECK-NEXT: ds_read_b32 v0, v0
; CHECK-NEXT: s_waitcnt lgkmcnt(0)
; CHECK-NEXT: v_lshrrev_b32_e32 v63, 10, v0
@@ -397,15 +395,15 @@ define protected amdgpu_kernel void @kernel_round1(ptr addrspace(1) nocapture no
; CHECK-NEXT: v_mul_u32_u24_e32 v1, 0x180, v63
; CHECK-NEXT: v_lshlrev_b32_e32 v0, 5, v62
; CHECK-NEXT: v_lshlrev_b32_e32 v4, 5, v72
-; CHECK-NEXT: v_add_co_u32 v2, s4, s42, v1
-; CHECK-NEXT: v_add_co_ci_u32_e64 v3, null, s43, 0, s4
+; CHECK-NEXT: v_add_co_u32 v2, s4, s44, v1
+; CHECK-NEXT: v_add_co_ci_u32_e64 v3, null, s45, 0, s4
; CHECK-NEXT: v_add_co_u32 v0, vcc_lo, v2, v0
; CHECK-NEXT: v_add_co_ci_u32_e32 v1, vcc_lo, 0, v3, vcc_lo
; CHECK-NEXT: v_add_co_u32 v2, vcc_lo, v2, v4
; CHECK-NEXT: v_add_co_ci_u32_e32 v3, vcc_lo, 0, v3, vcc_lo
; CHECK-NEXT: s_clause 0x1
-; CHECK-NEXT: global_load_dwordx4 v[4:7], v[0:1], off
-; CHECK-NEXT: global_load_dwordx4 v[8:11], v[2:3], off
+; CHECK-NEXT: global_load_dwordx4 v[4:7], v[0:1], off offset:8
+; CHECK-NEXT: global_load_dwordx4 v[8:11], v[2:3], off offset:8
; CHECK-NEXT: s_waitcnt vmcnt(0)
; CHECK-NEXT: v_xor_b32_e32 v46, v9, v5
; CHECK-NEXT: v_xor_b32_e32 v45, v8, v4
@@ -417,8 +415,8 @@ define protected amdgpu_kernel void @kernel_round1(ptr addrspace(1) nocapture no
; CHECK-NEXT: s_cbranch_execz .LBB0_27
; CHECK-NEXT: ; %bb.29: ; in Loop: Header=BB0_28 Depth=1
; CHECK-NEXT: s_clause 0x1
-; CHECK-NEXT: global_load_dwordx2 v[58:59], v[2:3], off offset:16
-; CHECK-NEXT: global_load_dwordx2 v[60:61], v[0:1], off offset:16
+; CHECK-NEXT: global_load_dwordx2 v[58:59], v[2:3], off offset:24
+; CHECK-NEXT: global_load_dwordx2 v[60:61], v[0:1], off offset:24
; CHECK-NEXT: v_lshlrev_b32_e32 v0, 4, v45
; CHECK-NEXT: v_alignbit_b32 v1, v46, v45, 12
; CHECK-NEXT: v_and_b32_e32 v2, 0xf0000, v45
diff --git a/llvm/test/CodeGen/AMDGPU/memory-legalizer-fence.ll b/llvm/test/CodeGen/AMDGPU/memory-legalizer-fence.ll
index 89ccf97..77962fa 100644
--- a/llvm/test/CodeGen/AMDGPU/memory-legalizer-fence.ll
+++ b/llvm/test/CodeGen/AMDGPU/memory-legalizer-fence.ll
@@ -1224,14 +1224,14 @@ define amdgpu_kernel void @agent_acquire_fence() {
;
; GFX10-WGP-LABEL: agent_acquire_fence:
; GFX10-WGP: ; %bb.0: ; %entry
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: s_endpgm
;
; GFX10-CU-LABEL: agent_acquire_fence:
; GFX10-CU: ; %bb.0: ; %entry
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: s_endpgm
;
; SKIP-CACHE-INV-LABEL: agent_acquire_fence:
@@ -1260,14 +1260,14 @@ define amdgpu_kernel void @agent_acquire_fence() {
;
; GFX11-WGP-LABEL: agent_acquire_fence:
; GFX11-WGP: ; %bb.0: ; %entry
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: s_endpgm
;
; GFX11-CU-LABEL: agent_acquire_fence:
; GFX11-CU: ; %bb.0: ; %entry
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: s_endpgm
entry:
fence syncscope("agent") acquire
@@ -1338,14 +1338,14 @@ define amdgpu_kernel void @agent_acq_rel_fence() {
;
; GFX10-WGP-LABEL: agent_acq_rel_fence:
; GFX10-WGP: ; %bb.0: ; %entry
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: s_endpgm
;
; GFX10-CU-LABEL: agent_acq_rel_fence:
; GFX10-CU: ; %bb.0: ; %entry
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: s_endpgm
;
; SKIP-CACHE-INV-LABEL: agent_acq_rel_fence:
@@ -1376,14 +1376,14 @@ define amdgpu_kernel void @agent_acq_rel_fence() {
;
; GFX11-WGP-LABEL: agent_acq_rel_fence:
; GFX11-WGP: ; %bb.0: ; %entry
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: s_endpgm
;
; GFX11-CU-LABEL: agent_acq_rel_fence:
; GFX11-CU: ; %bb.0: ; %entry
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: s_endpgm
entry:
fence syncscope("agent") acq_rel
@@ -1403,14 +1403,14 @@ define amdgpu_kernel void @agent_seq_cst_fence() {
;
; GFX10-WGP-LABEL: agent_seq_cst_fence:
; GFX10-WGP: ; %bb.0: ; %entry
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: s_endpgm
;
; GFX10-CU-LABEL: agent_seq_cst_fence:
; GFX10-CU: ; %bb.0: ; %entry
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: s_endpgm
;
; SKIP-CACHE-INV-LABEL: agent_seq_cst_fence:
@@ -1441,14 +1441,14 @@ define amdgpu_kernel void @agent_seq_cst_fence() {
;
; GFX11-WGP-LABEL: agent_seq_cst_fence:
; GFX11-WGP: ; %bb.0: ; %entry
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: s_endpgm
;
; GFX11-CU-LABEL: agent_seq_cst_fence:
; GFX11-CU: ; %bb.0: ; %entry
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: s_endpgm
entry:
fence syncscope("agent") seq_cst
@@ -1468,14 +1468,14 @@ define amdgpu_kernel void @agent_one_as_acquire_fence() {
;
; GFX10-WGP-LABEL: agent_one_as_acquire_fence:
; GFX10-WGP: ; %bb.0: ; %entry
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: s_endpgm
;
; GFX10-CU-LABEL: agent_one_as_acquire_fence:
; GFX10-CU: ; %bb.0: ; %entry
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: s_endpgm
;
; SKIP-CACHE-INV-LABEL: agent_one_as_acquire_fence:
@@ -1504,14 +1504,14 @@ define amdgpu_kernel void @agent_one_as_acquire_fence() {
;
; GFX11-WGP-LABEL: agent_one_as_acquire_fence:
; GFX11-WGP: ; %bb.0: ; %entry
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: s_endpgm
;
; GFX11-CU-LABEL: agent_one_as_acquire_fence:
; GFX11-CU: ; %bb.0: ; %entry
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: s_endpgm
entry:
fence syncscope("agent-one-as") acquire
@@ -1582,14 +1582,14 @@ define amdgpu_kernel void @agent_one_as_acq_rel_fence() {
;
; GFX10-WGP-LABEL: agent_one_as_acq_rel_fence:
; GFX10-WGP: ; %bb.0: ; %entry
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: s_endpgm
;
; GFX10-CU-LABEL: agent_one_as_acq_rel_fence:
; GFX10-CU: ; %bb.0: ; %entry
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: s_endpgm
;
; SKIP-CACHE-INV-LABEL: agent_one_as_acq_rel_fence:
@@ -1620,14 +1620,14 @@ define amdgpu_kernel void @agent_one_as_acq_rel_fence() {
;
; GFX11-WGP-LABEL: agent_one_as_acq_rel_fence:
; GFX11-WGP: ; %bb.0: ; %entry
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: s_endpgm
;
; GFX11-CU-LABEL: agent_one_as_acq_rel_fence:
; GFX11-CU: ; %bb.0: ; %entry
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: s_endpgm
entry:
fence syncscope("agent-one-as") acq_rel
@@ -1647,14 +1647,14 @@ define amdgpu_kernel void @agent_one_as_seq_cst_fence() {
;
; GFX10-WGP-LABEL: agent_one_as_seq_cst_fence:
; GFX10-WGP: ; %bb.0: ; %entry
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: s_endpgm
;
; GFX10-CU-LABEL: agent_one_as_seq_cst_fence:
; GFX10-CU: ; %bb.0: ; %entry
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: s_endpgm
;
; SKIP-CACHE-INV-LABEL: agent_one_as_seq_cst_fence:
@@ -1685,14 +1685,14 @@ define amdgpu_kernel void @agent_one_as_seq_cst_fence() {
;
; GFX11-WGP-LABEL: agent_one_as_seq_cst_fence:
; GFX11-WGP: ; %bb.0: ; %entry
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: s_endpgm
;
; GFX11-CU-LABEL: agent_one_as_seq_cst_fence:
; GFX11-CU: ; %bb.0: ; %entry
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: s_endpgm
entry:
fence syncscope("agent-one-as") seq_cst
@@ -1712,14 +1712,14 @@ define amdgpu_kernel void @system_acquire_fence() {
;
; GFX10-WGP-LABEL: system_acquire_fence:
; GFX10-WGP: ; %bb.0: ; %entry
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: s_endpgm
;
; GFX10-CU-LABEL: system_acquire_fence:
; GFX10-CU: ; %bb.0: ; %entry
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: s_endpgm
;
; SKIP-CACHE-INV-LABEL: system_acquire_fence:
@@ -1750,14 +1750,14 @@ define amdgpu_kernel void @system_acquire_fence() {
;
; GFX11-WGP-LABEL: system_acquire_fence:
; GFX11-WGP: ; %bb.0: ; %entry
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: s_endpgm
;
; GFX11-CU-LABEL: system_acquire_fence:
; GFX11-CU: ; %bb.0: ; %entry
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: s_endpgm
entry:
fence acquire
@@ -1830,14 +1830,14 @@ define amdgpu_kernel void @system_acq_rel_fence() {
;
; GFX10-WGP-LABEL: system_acq_rel_fence:
; GFX10-WGP: ; %bb.0: ; %entry
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: s_endpgm
;
; GFX10-CU-LABEL: system_acq_rel_fence:
; GFX10-CU: ; %bb.0: ; %entry
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: s_endpgm
;
; SKIP-CACHE-INV-LABEL: system_acq_rel_fence:
@@ -1872,14 +1872,14 @@ define amdgpu_kernel void @system_acq_rel_fence() {
;
; GFX11-WGP-LABEL: system_acq_rel_fence:
; GFX11-WGP: ; %bb.0: ; %entry
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: s_endpgm
;
; GFX11-CU-LABEL: system_acq_rel_fence:
; GFX11-CU: ; %bb.0: ; %entry
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: s_endpgm
entry:
fence acq_rel
@@ -1899,14 +1899,14 @@ define amdgpu_kernel void @system_seq_cst_fence() {
;
; GFX10-WGP-LABEL: system_seq_cst_fence:
; GFX10-WGP: ; %bb.0: ; %entry
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: s_endpgm
;
; GFX10-CU-LABEL: system_seq_cst_fence:
; GFX10-CU: ; %bb.0: ; %entry
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: s_endpgm
;
; SKIP-CACHE-INV-LABEL: system_seq_cst_fence:
@@ -1941,14 +1941,14 @@ define amdgpu_kernel void @system_seq_cst_fence() {
;
; GFX11-WGP-LABEL: system_seq_cst_fence:
; GFX11-WGP: ; %bb.0: ; %entry
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: s_endpgm
;
; GFX11-CU-LABEL: system_seq_cst_fence:
; GFX11-CU: ; %bb.0: ; %entry
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: s_endpgm
entry:
fence seq_cst
@@ -1968,14 +1968,14 @@ define amdgpu_kernel void @system_one_as_acquire_fence() {
;
; GFX10-WGP-LABEL: system_one_as_acquire_fence:
; GFX10-WGP: ; %bb.0: ; %entry
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: s_endpgm
;
; GFX10-CU-LABEL: system_one_as_acquire_fence:
; GFX10-CU: ; %bb.0: ; %entry
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: s_endpgm
;
; SKIP-CACHE-INV-LABEL: system_one_as_acquire_fence:
@@ -2006,14 +2006,14 @@ define amdgpu_kernel void @system_one_as_acquire_fence() {
;
; GFX11-WGP-LABEL: system_one_as_acquire_fence:
; GFX11-WGP: ; %bb.0: ; %entry
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: s_endpgm
;
; GFX11-CU-LABEL: system_one_as_acquire_fence:
; GFX11-CU: ; %bb.0: ; %entry
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: s_endpgm
entry:
fence syncscope("one-as") acquire
@@ -2086,14 +2086,14 @@ define amdgpu_kernel void @system_one_as_acq_rel_fence() {
;
; GFX10-WGP-LABEL: system_one_as_acq_rel_fence:
; GFX10-WGP: ; %bb.0: ; %entry
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: s_endpgm
;
; GFX10-CU-LABEL: system_one_as_acq_rel_fence:
; GFX10-CU: ; %bb.0: ; %entry
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: s_endpgm
;
; SKIP-CACHE-INV-LABEL: system_one_as_acq_rel_fence:
@@ -2128,14 +2128,14 @@ define amdgpu_kernel void @system_one_as_acq_rel_fence() {
;
; GFX11-WGP-LABEL: system_one_as_acq_rel_fence:
; GFX11-WGP: ; %bb.0: ; %entry
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: s_endpgm
;
; GFX11-CU-LABEL: system_one_as_acq_rel_fence:
; GFX11-CU: ; %bb.0: ; %entry
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: s_endpgm
entry:
fence syncscope("one-as") acq_rel
@@ -2155,14 +2155,14 @@ define amdgpu_kernel void @system_one_as_seq_cst_fence() {
;
; GFX10-WGP-LABEL: system_one_as_seq_cst_fence:
; GFX10-WGP: ; %bb.0: ; %entry
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: s_endpgm
;
; GFX10-CU-LABEL: system_one_as_seq_cst_fence:
; GFX10-CU: ; %bb.0: ; %entry
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: s_endpgm
;
; SKIP-CACHE-INV-LABEL: system_one_as_seq_cst_fence:
@@ -2197,14 +2197,14 @@ define amdgpu_kernel void @system_one_as_seq_cst_fence() {
;
; GFX11-WGP-LABEL: system_one_as_seq_cst_fence:
; GFX11-WGP: ; %bb.0: ; %entry
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: s_endpgm
;
; GFX11-CU-LABEL: system_one_as_seq_cst_fence:
; GFX11-CU: ; %bb.0: ; %entry
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: s_endpgm
entry:
fence syncscope("one-as") seq_cst
diff --git a/llvm/test/CodeGen/AMDGPU/memory-legalizer-flat-agent.ll b/llvm/test/CodeGen/AMDGPU/memory-legalizer-flat-agent.ll
index d6c759f..d9f5e64 100644
--- a/llvm/test/CodeGen/AMDGPU/memory-legalizer-flat-agent.ll
+++ b/llvm/test/CodeGen/AMDGPU/memory-legalizer-flat-agent.ll
@@ -299,8 +299,8 @@ define amdgpu_kernel void @flat_agent_acquire_load(
; GFX10-WGP-NEXT: v_mov_b32_e32 v1, s1
; GFX10-WGP-NEXT: flat_load_dword v2, v[0:1] glc dlc
; GFX10-WGP-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: v_mov_b32_e32 v0, s2
; GFX10-WGP-NEXT: v_mov_b32_e32 v1, s3
; GFX10-WGP-NEXT: flat_store_dword v[0:1], v2
@@ -314,8 +314,8 @@ define amdgpu_kernel void @flat_agent_acquire_load(
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s1
; GFX10-CU-NEXT: flat_load_dword v2, v[0:1] glc dlc
; GFX10-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s2
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s3
; GFX10-CU-NEXT: flat_store_dword v[0:1], v2
@@ -397,8 +397,8 @@ define amdgpu_kernel void @flat_agent_acquire_load(
; GFX11-WGP-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-WGP-NEXT: flat_load_b32 v2, v[0:1] glc
; GFX11-WGP-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
; GFX11-WGP-NEXT: flat_store_b32 v[0:1], v2
; GFX11-WGP-NEXT: s_endpgm
@@ -410,8 +410,8 @@ define amdgpu_kernel void @flat_agent_acquire_load(
; GFX11-CU-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-CU-NEXT: flat_load_b32 v2, v[0:1] glc
; GFX11-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
; GFX11-CU-NEXT: flat_store_b32 v[0:1], v2
; GFX11-CU-NEXT: s_endpgm
@@ -445,8 +445,8 @@ define amdgpu_kernel void @flat_agent_seq_cst_load(
; GFX10-WGP-NEXT: v_mov_b32_e32 v1, s1
; GFX10-WGP-NEXT: flat_load_dword v2, v[0:1] glc dlc
; GFX10-WGP-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: v_mov_b32_e32 v0, s2
; GFX10-WGP-NEXT: v_mov_b32_e32 v1, s3
; GFX10-WGP-NEXT: flat_store_dword v[0:1], v2
@@ -460,8 +460,8 @@ define amdgpu_kernel void @flat_agent_seq_cst_load(
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s1
; GFX10-CU-NEXT: flat_load_dword v2, v[0:1] glc dlc
; GFX10-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s2
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s3
; GFX10-CU-NEXT: flat_store_dword v[0:1], v2
@@ -543,8 +543,8 @@ define amdgpu_kernel void @flat_agent_seq_cst_load(
; GFX11-WGP-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-WGP-NEXT: flat_load_b32 v2, v[0:1] glc
; GFX11-WGP-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
; GFX11-WGP-NEXT: flat_store_b32 v[0:1], v2
; GFX11-WGP-NEXT: s_endpgm
@@ -556,8 +556,8 @@ define amdgpu_kernel void @flat_agent_seq_cst_load(
; GFX11-CU-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-CU-NEXT: flat_load_b32 v2, v[0:1] glc
; GFX11-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
; GFX11-CU-NEXT: flat_store_b32 v[0:1], v2
; GFX11-CU-NEXT: s_endpgm
@@ -1168,8 +1168,8 @@ define amdgpu_kernel void @flat_agent_acquire_atomicrmw(
; GFX10-WGP-NEXT: flat_atomic_swap v[0:1], v2
; GFX10-WGP-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: s_endpgm
;
; GFX10-CU-LABEL: flat_agent_acquire_atomicrmw:
@@ -1184,8 +1184,8 @@ define amdgpu_kernel void @flat_agent_acquire_atomicrmw(
; GFX10-CU-NEXT: flat_atomic_swap v[0:1], v2
; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: s_endpgm
;
; SKIP-CACHE-INV-LABEL: flat_agent_acquire_atomicrmw:
@@ -1259,8 +1259,8 @@ define amdgpu_kernel void @flat_agent_acquire_atomicrmw(
; GFX11-WGP-NEXT: flat_atomic_swap_b32 v[0:1], v2
; GFX11-WGP-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: s_endpgm
;
; GFX11-CU-LABEL: flat_agent_acquire_atomicrmw:
@@ -1274,8 +1274,8 @@ define amdgpu_kernel void @flat_agent_acquire_atomicrmw(
; GFX11-CU-NEXT: flat_atomic_swap_b32 v[0:1], v2
; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: s_endpgm
ptr %out, i32 %in) {
entry:
@@ -1425,8 +1425,8 @@ define amdgpu_kernel void @flat_agent_acq_rel_atomicrmw(
; GFX10-WGP-NEXT: flat_atomic_swap v[0:1], v2
; GFX10-WGP-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: s_endpgm
;
; GFX10-CU-LABEL: flat_agent_acq_rel_atomicrmw:
@@ -1441,8 +1441,8 @@ define amdgpu_kernel void @flat_agent_acq_rel_atomicrmw(
; GFX10-CU-NEXT: flat_atomic_swap v[0:1], v2
; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: s_endpgm
;
; SKIP-CACHE-INV-LABEL: flat_agent_acq_rel_atomicrmw:
@@ -1518,8 +1518,8 @@ define amdgpu_kernel void @flat_agent_acq_rel_atomicrmw(
; GFX11-WGP-NEXT: flat_atomic_swap_b32 v[0:1], v2
; GFX11-WGP-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: s_endpgm
;
; GFX11-CU-LABEL: flat_agent_acq_rel_atomicrmw:
@@ -1533,8 +1533,8 @@ define amdgpu_kernel void @flat_agent_acq_rel_atomicrmw(
; GFX11-CU-NEXT: flat_atomic_swap_b32 v[0:1], v2
; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: s_endpgm
ptr %out, i32 %in) {
entry:
@@ -1568,8 +1568,8 @@ define amdgpu_kernel void @flat_agent_seq_cst_atomicrmw(
; GFX10-WGP-NEXT: flat_atomic_swap v[0:1], v2
; GFX10-WGP-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: s_endpgm
;
; GFX10-CU-LABEL: flat_agent_seq_cst_atomicrmw:
@@ -1584,8 +1584,8 @@ define amdgpu_kernel void @flat_agent_seq_cst_atomicrmw(
; GFX10-CU-NEXT: flat_atomic_swap v[0:1], v2
; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: s_endpgm
;
; SKIP-CACHE-INV-LABEL: flat_agent_seq_cst_atomicrmw:
@@ -1661,8 +1661,8 @@ define amdgpu_kernel void @flat_agent_seq_cst_atomicrmw(
; GFX11-WGP-NEXT: flat_atomic_swap_b32 v[0:1], v2
; GFX11-WGP-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: s_endpgm
;
; GFX11-CU-LABEL: flat_agent_seq_cst_atomicrmw:
@@ -1676,8 +1676,8 @@ define amdgpu_kernel void @flat_agent_seq_cst_atomicrmw(
; GFX11-CU-NEXT: flat_atomic_swap_b32 v[0:1], v2
; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: s_endpgm
ptr %out, i32 %in) {
entry:
@@ -1711,8 +1711,8 @@ define amdgpu_kernel void @flat_agent_acquire_ret_atomicrmw(
; GFX10-WGP-NEXT: v_mov_b32_e32 v2, s2
; GFX10-WGP-NEXT: flat_atomic_swap v2, v[0:1], v2 glc
; GFX10-WGP-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: flat_store_dword v[0:1], v2
; GFX10-WGP-NEXT: s_endpgm
;
@@ -1727,8 +1727,8 @@ define amdgpu_kernel void @flat_agent_acquire_ret_atomicrmw(
; GFX10-CU-NEXT: v_mov_b32_e32 v2, s2
; GFX10-CU-NEXT: flat_atomic_swap v2, v[0:1], v2 glc
; GFX10-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: flat_store_dword v[0:1], v2
; GFX10-CU-NEXT: s_endpgm
;
@@ -1807,8 +1807,8 @@ define amdgpu_kernel void @flat_agent_acquire_ret_atomicrmw(
; GFX11-WGP-NEXT: v_mov_b32_e32 v2, s0
; GFX11-WGP-NEXT: flat_atomic_swap_b32 v2, v[0:1], v2 glc
; GFX11-WGP-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: flat_store_b32 v[0:1], v2
; GFX11-WGP-NEXT: s_endpgm
;
@@ -1822,8 +1822,8 @@ define amdgpu_kernel void @flat_agent_acquire_ret_atomicrmw(
; GFX11-CU-NEXT: v_mov_b32_e32 v2, s0
; GFX11-CU-NEXT: flat_atomic_swap_b32 v2, v[0:1], v2 glc
; GFX11-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: flat_store_b32 v[0:1], v2
; GFX11-CU-NEXT: s_endpgm
ptr %out, i32 %in) {
@@ -1859,8 +1859,8 @@ define amdgpu_kernel void @flat_agent_acq_rel_ret_atomicrmw(
; GFX10-WGP-NEXT: v_mov_b32_e32 v2, s2
; GFX10-WGP-NEXT: flat_atomic_swap v2, v[0:1], v2 glc
; GFX10-WGP-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: flat_store_dword v[0:1], v2
; GFX10-WGP-NEXT: s_endpgm
;
@@ -1875,8 +1875,8 @@ define amdgpu_kernel void @flat_agent_acq_rel_ret_atomicrmw(
; GFX10-CU-NEXT: v_mov_b32_e32 v2, s2
; GFX10-CU-NEXT: flat_atomic_swap v2, v[0:1], v2 glc
; GFX10-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: flat_store_dword v[0:1], v2
; GFX10-CU-NEXT: s_endpgm
;
@@ -1957,8 +1957,8 @@ define amdgpu_kernel void @flat_agent_acq_rel_ret_atomicrmw(
; GFX11-WGP-NEXT: v_mov_b32_e32 v2, s0
; GFX11-WGP-NEXT: flat_atomic_swap_b32 v2, v[0:1], v2 glc
; GFX11-WGP-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: flat_store_b32 v[0:1], v2
; GFX11-WGP-NEXT: s_endpgm
;
@@ -1972,8 +1972,8 @@ define amdgpu_kernel void @flat_agent_acq_rel_ret_atomicrmw(
; GFX11-CU-NEXT: v_mov_b32_e32 v2, s0
; GFX11-CU-NEXT: flat_atomic_swap_b32 v2, v[0:1], v2 glc
; GFX11-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: flat_store_b32 v[0:1], v2
; GFX11-CU-NEXT: s_endpgm
ptr %out, i32 %in) {
@@ -2009,8 +2009,8 @@ define amdgpu_kernel void @flat_agent_seq_cst_ret_atomicrmw(
; GFX10-WGP-NEXT: v_mov_b32_e32 v2, s2
; GFX10-WGP-NEXT: flat_atomic_swap v2, v[0:1], v2 glc
; GFX10-WGP-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: flat_store_dword v[0:1], v2
; GFX10-WGP-NEXT: s_endpgm
;
@@ -2025,8 +2025,8 @@ define amdgpu_kernel void @flat_agent_seq_cst_ret_atomicrmw(
; GFX10-CU-NEXT: v_mov_b32_e32 v2, s2
; GFX10-CU-NEXT: flat_atomic_swap v2, v[0:1], v2 glc
; GFX10-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: flat_store_dword v[0:1], v2
; GFX10-CU-NEXT: s_endpgm
;
@@ -2107,8 +2107,8 @@ define amdgpu_kernel void @flat_agent_seq_cst_ret_atomicrmw(
; GFX11-WGP-NEXT: v_mov_b32_e32 v2, s0
; GFX11-WGP-NEXT: flat_atomic_swap_b32 v2, v[0:1], v2 glc
; GFX11-WGP-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: flat_store_b32 v[0:1], v2
; GFX11-WGP-NEXT: s_endpgm
;
@@ -2122,8 +2122,8 @@ define amdgpu_kernel void @flat_agent_seq_cst_ret_atomicrmw(
; GFX11-CU-NEXT: v_mov_b32_e32 v2, s0
; GFX11-CU-NEXT: flat_atomic_swap_b32 v2, v[0:1], v2 glc
; GFX11-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: flat_store_b32 v[0:1], v2
; GFX11-CU-NEXT: s_endpgm
ptr %out, i32 %in) {
@@ -2275,8 +2275,8 @@ define amdgpu_kernel void @flat_agent_acquire_monotonic_cmpxchg(
; GFX10-WGP-NEXT: flat_atomic_cmpswap v[0:1], v[2:3]
; GFX10-WGP-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: s_endpgm
;
; GFX10-CU-LABEL: flat_agent_acquire_monotonic_cmpxchg:
@@ -2292,8 +2292,8 @@ define amdgpu_kernel void @flat_agent_acquire_monotonic_cmpxchg(
; GFX10-CU-NEXT: flat_atomic_cmpswap v[0:1], v[2:3]
; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: s_endpgm
;
; SKIP-CACHE-INV-LABEL: flat_agent_acquire_monotonic_cmpxchg:
@@ -2363,8 +2363,8 @@ define amdgpu_kernel void @flat_agent_acquire_monotonic_cmpxchg(
; GFX11-WGP-NEXT: flat_atomic_cmpswap_b32 v[0:1], v[2:3] offset:16
; GFX11-WGP-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: s_endpgm
;
; GFX11-CU-LABEL: flat_agent_acquire_monotonic_cmpxchg:
@@ -2376,8 +2376,8 @@ define amdgpu_kernel void @flat_agent_acquire_monotonic_cmpxchg(
; GFX11-CU-NEXT: flat_atomic_cmpswap_b32 v[0:1], v[2:3] offset:16
; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: s_endpgm
ptr %out, i32 %in, i32 %old) {
entry:
@@ -2530,8 +2530,8 @@ define amdgpu_kernel void @flat_agent_acq_rel_monotonic_cmpxchg(
; GFX10-WGP-NEXT: flat_atomic_cmpswap v[0:1], v[2:3]
; GFX10-WGP-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: s_endpgm
;
; GFX10-CU-LABEL: flat_agent_acq_rel_monotonic_cmpxchg:
@@ -2547,8 +2547,8 @@ define amdgpu_kernel void @flat_agent_acq_rel_monotonic_cmpxchg(
; GFX10-CU-NEXT: flat_atomic_cmpswap v[0:1], v[2:3]
; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: s_endpgm
;
; SKIP-CACHE-INV-LABEL: flat_agent_acq_rel_monotonic_cmpxchg:
@@ -2620,8 +2620,8 @@ define amdgpu_kernel void @flat_agent_acq_rel_monotonic_cmpxchg(
; GFX11-WGP-NEXT: flat_atomic_cmpswap_b32 v[0:1], v[2:3] offset:16
; GFX11-WGP-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: s_endpgm
;
; GFX11-CU-LABEL: flat_agent_acq_rel_monotonic_cmpxchg:
@@ -2633,8 +2633,8 @@ define amdgpu_kernel void @flat_agent_acq_rel_monotonic_cmpxchg(
; GFX11-CU-NEXT: flat_atomic_cmpswap_b32 v[0:1], v[2:3] offset:16
; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: s_endpgm
ptr %out, i32 %in, i32 %old) {
entry:
@@ -2672,8 +2672,8 @@ define amdgpu_kernel void @flat_agent_seq_cst_monotonic_cmpxchg(
; GFX10-WGP-NEXT: flat_atomic_cmpswap v[0:1], v[2:3]
; GFX10-WGP-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: s_endpgm
;
; GFX10-CU-LABEL: flat_agent_seq_cst_monotonic_cmpxchg:
@@ -2689,8 +2689,8 @@ define amdgpu_kernel void @flat_agent_seq_cst_monotonic_cmpxchg(
; GFX10-CU-NEXT: flat_atomic_cmpswap v[0:1], v[2:3]
; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: s_endpgm
;
; SKIP-CACHE-INV-LABEL: flat_agent_seq_cst_monotonic_cmpxchg:
@@ -2762,8 +2762,8 @@ define amdgpu_kernel void @flat_agent_seq_cst_monotonic_cmpxchg(
; GFX11-WGP-NEXT: flat_atomic_cmpswap_b32 v[0:1], v[2:3] offset:16
; GFX11-WGP-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: s_endpgm
;
; GFX11-CU-LABEL: flat_agent_seq_cst_monotonic_cmpxchg:
@@ -2775,8 +2775,8 @@ define amdgpu_kernel void @flat_agent_seq_cst_monotonic_cmpxchg(
; GFX11-CU-NEXT: flat_atomic_cmpswap_b32 v[0:1], v[2:3] offset:16
; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: s_endpgm
ptr %out, i32 %in, i32 %old) {
entry:
@@ -2814,8 +2814,8 @@ define amdgpu_kernel void @flat_agent_monotonic_acquire_cmpxchg(
; GFX10-WGP-NEXT: flat_atomic_cmpswap v[0:1], v[2:3]
; GFX10-WGP-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: s_endpgm
;
; GFX10-CU-LABEL: flat_agent_monotonic_acquire_cmpxchg:
@@ -2831,8 +2831,8 @@ define amdgpu_kernel void @flat_agent_monotonic_acquire_cmpxchg(
; GFX10-CU-NEXT: flat_atomic_cmpswap v[0:1], v[2:3]
; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: s_endpgm
;
; SKIP-CACHE-INV-LABEL: flat_agent_monotonic_acquire_cmpxchg:
@@ -2902,8 +2902,8 @@ define amdgpu_kernel void @flat_agent_monotonic_acquire_cmpxchg(
; GFX11-WGP-NEXT: flat_atomic_cmpswap_b32 v[0:1], v[2:3] offset:16
; GFX11-WGP-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: s_endpgm
;
; GFX11-CU-LABEL: flat_agent_monotonic_acquire_cmpxchg:
@@ -2915,8 +2915,8 @@ define amdgpu_kernel void @flat_agent_monotonic_acquire_cmpxchg(
; GFX11-CU-NEXT: flat_atomic_cmpswap_b32 v[0:1], v[2:3] offset:16
; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: s_endpgm
ptr %out, i32 %in, i32 %old) {
entry:
@@ -2954,8 +2954,8 @@ define amdgpu_kernel void @flat_agent_acquire_acquire_cmpxchg(
; GFX10-WGP-NEXT: flat_atomic_cmpswap v[0:1], v[2:3]
; GFX10-WGP-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: s_endpgm
;
; GFX10-CU-LABEL: flat_agent_acquire_acquire_cmpxchg:
@@ -2971,8 +2971,8 @@ define amdgpu_kernel void @flat_agent_acquire_acquire_cmpxchg(
; GFX10-CU-NEXT: flat_atomic_cmpswap v[0:1], v[2:3]
; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: s_endpgm
;
; SKIP-CACHE-INV-LABEL: flat_agent_acquire_acquire_cmpxchg:
@@ -3042,8 +3042,8 @@ define amdgpu_kernel void @flat_agent_acquire_acquire_cmpxchg(
; GFX11-WGP-NEXT: flat_atomic_cmpswap_b32 v[0:1], v[2:3] offset:16
; GFX11-WGP-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: s_endpgm
;
; GFX11-CU-LABEL: flat_agent_acquire_acquire_cmpxchg:
@@ -3055,8 +3055,8 @@ define amdgpu_kernel void @flat_agent_acquire_acquire_cmpxchg(
; GFX11-CU-NEXT: flat_atomic_cmpswap_b32 v[0:1], v[2:3] offset:16
; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: s_endpgm
ptr %out, i32 %in, i32 %old) {
entry:
@@ -3094,8 +3094,8 @@ define amdgpu_kernel void @flat_agent_release_acquire_cmpxchg(
; GFX10-WGP-NEXT: flat_atomic_cmpswap v[0:1], v[2:3]
; GFX10-WGP-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: s_endpgm
;
; GFX10-CU-LABEL: flat_agent_release_acquire_cmpxchg:
@@ -3111,8 +3111,8 @@ define amdgpu_kernel void @flat_agent_release_acquire_cmpxchg(
; GFX10-CU-NEXT: flat_atomic_cmpswap v[0:1], v[2:3]
; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: s_endpgm
;
; SKIP-CACHE-INV-LABEL: flat_agent_release_acquire_cmpxchg:
@@ -3184,8 +3184,8 @@ define amdgpu_kernel void @flat_agent_release_acquire_cmpxchg(
; GFX11-WGP-NEXT: flat_atomic_cmpswap_b32 v[0:1], v[2:3] offset:16
; GFX11-WGP-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: s_endpgm
;
; GFX11-CU-LABEL: flat_agent_release_acquire_cmpxchg:
@@ -3197,8 +3197,8 @@ define amdgpu_kernel void @flat_agent_release_acquire_cmpxchg(
; GFX11-CU-NEXT: flat_atomic_cmpswap_b32 v[0:1], v[2:3] offset:16
; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: s_endpgm
ptr %out, i32 %in, i32 %old) {
entry:
@@ -3236,8 +3236,8 @@ define amdgpu_kernel void @flat_agent_acq_rel_acquire_cmpxchg(
; GFX10-WGP-NEXT: flat_atomic_cmpswap v[0:1], v[2:3]
; GFX10-WGP-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: s_endpgm
;
; GFX10-CU-LABEL: flat_agent_acq_rel_acquire_cmpxchg:
@@ -3253,8 +3253,8 @@ define amdgpu_kernel void @flat_agent_acq_rel_acquire_cmpxchg(
; GFX10-CU-NEXT: flat_atomic_cmpswap v[0:1], v[2:3]
; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: s_endpgm
;
; SKIP-CACHE-INV-LABEL: flat_agent_acq_rel_acquire_cmpxchg:
@@ -3326,8 +3326,8 @@ define amdgpu_kernel void @flat_agent_acq_rel_acquire_cmpxchg(
; GFX11-WGP-NEXT: flat_atomic_cmpswap_b32 v[0:1], v[2:3] offset:16
; GFX11-WGP-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: s_endpgm
;
; GFX11-CU-LABEL: flat_agent_acq_rel_acquire_cmpxchg:
@@ -3339,8 +3339,8 @@ define amdgpu_kernel void @flat_agent_acq_rel_acquire_cmpxchg(
; GFX11-CU-NEXT: flat_atomic_cmpswap_b32 v[0:1], v[2:3] offset:16
; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: s_endpgm
ptr %out, i32 %in, i32 %old) {
entry:
@@ -3378,8 +3378,8 @@ define amdgpu_kernel void @flat_agent_seq_cst_acquire_cmpxchg(
; GFX10-WGP-NEXT: flat_atomic_cmpswap v[0:1], v[2:3]
; GFX10-WGP-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: s_endpgm
;
; GFX10-CU-LABEL: flat_agent_seq_cst_acquire_cmpxchg:
@@ -3395,8 +3395,8 @@ define amdgpu_kernel void @flat_agent_seq_cst_acquire_cmpxchg(
; GFX10-CU-NEXT: flat_atomic_cmpswap v[0:1], v[2:3]
; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: s_endpgm
;
; SKIP-CACHE-INV-LABEL: flat_agent_seq_cst_acquire_cmpxchg:
@@ -3468,8 +3468,8 @@ define amdgpu_kernel void @flat_agent_seq_cst_acquire_cmpxchg(
; GFX11-WGP-NEXT: flat_atomic_cmpswap_b32 v[0:1], v[2:3] offset:16
; GFX11-WGP-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: s_endpgm
;
; GFX11-CU-LABEL: flat_agent_seq_cst_acquire_cmpxchg:
@@ -3481,8 +3481,8 @@ define amdgpu_kernel void @flat_agent_seq_cst_acquire_cmpxchg(
; GFX11-CU-NEXT: flat_atomic_cmpswap_b32 v[0:1], v[2:3] offset:16
; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: s_endpgm
ptr %out, i32 %in, i32 %old) {
entry:
@@ -3520,8 +3520,8 @@ define amdgpu_kernel void @flat_agent_monotonic_seq_cst_cmpxchg(
; GFX10-WGP-NEXT: flat_atomic_cmpswap v[0:1], v[2:3]
; GFX10-WGP-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: s_endpgm
;
; GFX10-CU-LABEL: flat_agent_monotonic_seq_cst_cmpxchg:
@@ -3537,8 +3537,8 @@ define amdgpu_kernel void @flat_agent_monotonic_seq_cst_cmpxchg(
; GFX10-CU-NEXT: flat_atomic_cmpswap v[0:1], v[2:3]
; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: s_endpgm
;
; SKIP-CACHE-INV-LABEL: flat_agent_monotonic_seq_cst_cmpxchg:
@@ -3610,8 +3610,8 @@ define amdgpu_kernel void @flat_agent_monotonic_seq_cst_cmpxchg(
; GFX11-WGP-NEXT: flat_atomic_cmpswap_b32 v[0:1], v[2:3] offset:16
; GFX11-WGP-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: s_endpgm
;
; GFX11-CU-LABEL: flat_agent_monotonic_seq_cst_cmpxchg:
@@ -3623,8 +3623,8 @@ define amdgpu_kernel void @flat_agent_monotonic_seq_cst_cmpxchg(
; GFX11-CU-NEXT: flat_atomic_cmpswap_b32 v[0:1], v[2:3] offset:16
; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: s_endpgm
ptr %out, i32 %in, i32 %old) {
entry:
@@ -3662,8 +3662,8 @@ define amdgpu_kernel void @flat_agent_acquire_seq_cst_cmpxchg(
; GFX10-WGP-NEXT: flat_atomic_cmpswap v[0:1], v[2:3]
; GFX10-WGP-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: s_endpgm
;
; GFX10-CU-LABEL: flat_agent_acquire_seq_cst_cmpxchg:
@@ -3679,8 +3679,8 @@ define amdgpu_kernel void @flat_agent_acquire_seq_cst_cmpxchg(
; GFX10-CU-NEXT: flat_atomic_cmpswap v[0:1], v[2:3]
; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: s_endpgm
;
; SKIP-CACHE-INV-LABEL: flat_agent_acquire_seq_cst_cmpxchg:
@@ -3752,8 +3752,8 @@ define amdgpu_kernel void @flat_agent_acquire_seq_cst_cmpxchg(
; GFX11-WGP-NEXT: flat_atomic_cmpswap_b32 v[0:1], v[2:3] offset:16
; GFX11-WGP-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: s_endpgm
;
; GFX11-CU-LABEL: flat_agent_acquire_seq_cst_cmpxchg:
@@ -3765,8 +3765,8 @@ define amdgpu_kernel void @flat_agent_acquire_seq_cst_cmpxchg(
; GFX11-CU-NEXT: flat_atomic_cmpswap_b32 v[0:1], v[2:3] offset:16
; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: s_endpgm
ptr %out, i32 %in, i32 %old) {
entry:
@@ -3804,8 +3804,8 @@ define amdgpu_kernel void @flat_agent_release_seq_cst_cmpxchg(
; GFX10-WGP-NEXT: flat_atomic_cmpswap v[0:1], v[2:3]
; GFX10-WGP-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: s_endpgm
;
; GFX10-CU-LABEL: flat_agent_release_seq_cst_cmpxchg:
@@ -3821,8 +3821,8 @@ define amdgpu_kernel void @flat_agent_release_seq_cst_cmpxchg(
; GFX10-CU-NEXT: flat_atomic_cmpswap v[0:1], v[2:3]
; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: s_endpgm
;
; SKIP-CACHE-INV-LABEL: flat_agent_release_seq_cst_cmpxchg:
@@ -3894,8 +3894,8 @@ define amdgpu_kernel void @flat_agent_release_seq_cst_cmpxchg(
; GFX11-WGP-NEXT: flat_atomic_cmpswap_b32 v[0:1], v[2:3] offset:16
; GFX11-WGP-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: s_endpgm
;
; GFX11-CU-LABEL: flat_agent_release_seq_cst_cmpxchg:
@@ -3907,8 +3907,8 @@ define amdgpu_kernel void @flat_agent_release_seq_cst_cmpxchg(
; GFX11-CU-NEXT: flat_atomic_cmpswap_b32 v[0:1], v[2:3] offset:16
; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: s_endpgm
ptr %out, i32 %in, i32 %old) {
entry:
@@ -3946,8 +3946,8 @@ define amdgpu_kernel void @flat_agent_acq_rel_seq_cst_cmpxchg(
; GFX10-WGP-NEXT: flat_atomic_cmpswap v[0:1], v[2:3]
; GFX10-WGP-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: s_endpgm
;
; GFX10-CU-LABEL: flat_agent_acq_rel_seq_cst_cmpxchg:
@@ -3963,8 +3963,8 @@ define amdgpu_kernel void @flat_agent_acq_rel_seq_cst_cmpxchg(
; GFX10-CU-NEXT: flat_atomic_cmpswap v[0:1], v[2:3]
; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: s_endpgm
;
; SKIP-CACHE-INV-LABEL: flat_agent_acq_rel_seq_cst_cmpxchg:
@@ -4036,8 +4036,8 @@ define amdgpu_kernel void @flat_agent_acq_rel_seq_cst_cmpxchg(
; GFX11-WGP-NEXT: flat_atomic_cmpswap_b32 v[0:1], v[2:3] offset:16
; GFX11-WGP-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: s_endpgm
;
; GFX11-CU-LABEL: flat_agent_acq_rel_seq_cst_cmpxchg:
@@ -4049,8 +4049,8 @@ define amdgpu_kernel void @flat_agent_acq_rel_seq_cst_cmpxchg(
; GFX11-CU-NEXT: flat_atomic_cmpswap_b32 v[0:1], v[2:3] offset:16
; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: s_endpgm
ptr %out, i32 %in, i32 %old) {
entry:
@@ -4088,8 +4088,8 @@ define amdgpu_kernel void @flat_agent_seq_cst_seq_cst_cmpxchg(
; GFX10-WGP-NEXT: flat_atomic_cmpswap v[0:1], v[2:3]
; GFX10-WGP-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: s_endpgm
;
; GFX10-CU-LABEL: flat_agent_seq_cst_seq_cst_cmpxchg:
@@ -4105,8 +4105,8 @@ define amdgpu_kernel void @flat_agent_seq_cst_seq_cst_cmpxchg(
; GFX10-CU-NEXT: flat_atomic_cmpswap v[0:1], v[2:3]
; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: s_endpgm
;
; SKIP-CACHE-INV-LABEL: flat_agent_seq_cst_seq_cst_cmpxchg:
@@ -4178,8 +4178,8 @@ define amdgpu_kernel void @flat_agent_seq_cst_seq_cst_cmpxchg(
; GFX11-WGP-NEXT: flat_atomic_cmpswap_b32 v[0:1], v[2:3] offset:16
; GFX11-WGP-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: s_endpgm
;
; GFX11-CU-LABEL: flat_agent_seq_cst_seq_cst_cmpxchg:
@@ -4191,8 +4191,8 @@ define amdgpu_kernel void @flat_agent_seq_cst_seq_cst_cmpxchg(
; GFX11-CU-NEXT: flat_atomic_cmpswap_b32 v[0:1], v[2:3] offset:16
; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: s_endpgm
ptr %out, i32 %in, i32 %old) {
entry:
@@ -4375,8 +4375,8 @@ define amdgpu_kernel void @flat_agent_acquire_monotonic_ret_cmpxchg(
; GFX10-WGP-NEXT: v_mov_b32_e32 v3, s3
; GFX10-WGP-NEXT: flat_atomic_cmpswap v2, v[0:1], v[2:3] glc
; GFX10-WGP-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: v_mov_b32_e32 v0, s0
; GFX10-WGP-NEXT: v_mov_b32_e32 v1, s1
; GFX10-WGP-NEXT: flat_store_dword v[0:1], v2
@@ -4394,8 +4394,8 @@ define amdgpu_kernel void @flat_agent_acquire_monotonic_ret_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v3, s3
; GFX10-CU-NEXT: flat_atomic_cmpswap v2, v[0:1], v[2:3] glc
; GFX10-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s1
; GFX10-CU-NEXT: flat_store_dword v[0:1], v2
@@ -4474,8 +4474,8 @@ define amdgpu_kernel void @flat_agent_acquire_monotonic_ret_cmpxchg(
; GFX11-WGP-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-WGP-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] offset:16 glc
; GFX11-WGP-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: flat_store_b32 v[0:1], v2
; GFX11-WGP-NEXT: s_endpgm
;
@@ -4487,8 +4487,8 @@ define amdgpu_kernel void @flat_agent_acquire_monotonic_ret_cmpxchg(
; GFX11-CU-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-CU-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] offset:16 glc
; GFX11-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: flat_store_b32 v[0:1], v2
; GFX11-CU-NEXT: s_endpgm
ptr %out, i32 %in, i32 %old) {
@@ -4676,8 +4676,8 @@ define amdgpu_kernel void @flat_agent_acq_rel_monotonic_ret_cmpxchg(
; GFX10-WGP-NEXT: v_mov_b32_e32 v3, s3
; GFX10-WGP-NEXT: flat_atomic_cmpswap v2, v[0:1], v[2:3] glc
; GFX10-WGP-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: v_mov_b32_e32 v0, s0
; GFX10-WGP-NEXT: v_mov_b32_e32 v1, s1
; GFX10-WGP-NEXT: flat_store_dword v[0:1], v2
@@ -4695,8 +4695,8 @@ define amdgpu_kernel void @flat_agent_acq_rel_monotonic_ret_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v3, s3
; GFX10-CU-NEXT: flat_atomic_cmpswap v2, v[0:1], v[2:3] glc
; GFX10-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s1
; GFX10-CU-NEXT: flat_store_dword v[0:1], v2
@@ -4777,8 +4777,8 @@ define amdgpu_kernel void @flat_agent_acq_rel_monotonic_ret_cmpxchg(
; GFX11-WGP-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-WGP-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] offset:16 glc
; GFX11-WGP-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: flat_store_b32 v[0:1], v2
; GFX11-WGP-NEXT: s_endpgm
;
@@ -4790,8 +4790,8 @@ define amdgpu_kernel void @flat_agent_acq_rel_monotonic_ret_cmpxchg(
; GFX11-CU-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-CU-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] offset:16 glc
; GFX11-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: flat_store_b32 v[0:1], v2
; GFX11-CU-NEXT: s_endpgm
ptr %out, i32 %in, i32 %old) {
@@ -4834,8 +4834,8 @@ define amdgpu_kernel void @flat_agent_seq_cst_monotonic_ret_cmpxchg(
; GFX10-WGP-NEXT: v_mov_b32_e32 v3, s3
; GFX10-WGP-NEXT: flat_atomic_cmpswap v2, v[0:1], v[2:3] glc
; GFX10-WGP-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: v_mov_b32_e32 v0, s0
; GFX10-WGP-NEXT: v_mov_b32_e32 v1, s1
; GFX10-WGP-NEXT: flat_store_dword v[0:1], v2
@@ -4853,8 +4853,8 @@ define amdgpu_kernel void @flat_agent_seq_cst_monotonic_ret_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v3, s3
; GFX10-CU-NEXT: flat_atomic_cmpswap v2, v[0:1], v[2:3] glc
; GFX10-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s1
; GFX10-CU-NEXT: flat_store_dword v[0:1], v2
@@ -4935,8 +4935,8 @@ define amdgpu_kernel void @flat_agent_seq_cst_monotonic_ret_cmpxchg(
; GFX11-WGP-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-WGP-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] offset:16 glc
; GFX11-WGP-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: flat_store_b32 v[0:1], v2
; GFX11-WGP-NEXT: s_endpgm
;
@@ -4948,8 +4948,8 @@ define amdgpu_kernel void @flat_agent_seq_cst_monotonic_ret_cmpxchg(
; GFX11-CU-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-CU-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] offset:16 glc
; GFX11-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: flat_store_b32 v[0:1], v2
; GFX11-CU-NEXT: s_endpgm
ptr %out, i32 %in, i32 %old) {
@@ -4992,8 +4992,8 @@ define amdgpu_kernel void @flat_agent_monotonic_acquire_ret_cmpxchg(
; GFX10-WGP-NEXT: v_mov_b32_e32 v3, s3
; GFX10-WGP-NEXT: flat_atomic_cmpswap v2, v[0:1], v[2:3] glc
; GFX10-WGP-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: v_mov_b32_e32 v0, s0
; GFX10-WGP-NEXT: v_mov_b32_e32 v1, s1
; GFX10-WGP-NEXT: flat_store_dword v[0:1], v2
@@ -5011,8 +5011,8 @@ define amdgpu_kernel void @flat_agent_monotonic_acquire_ret_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v3, s3
; GFX10-CU-NEXT: flat_atomic_cmpswap v2, v[0:1], v[2:3] glc
; GFX10-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s1
; GFX10-CU-NEXT: flat_store_dword v[0:1], v2
@@ -5091,8 +5091,8 @@ define amdgpu_kernel void @flat_agent_monotonic_acquire_ret_cmpxchg(
; GFX11-WGP-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-WGP-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] offset:16 glc
; GFX11-WGP-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: flat_store_b32 v[0:1], v2
; GFX11-WGP-NEXT: s_endpgm
;
@@ -5104,8 +5104,8 @@ define amdgpu_kernel void @flat_agent_monotonic_acquire_ret_cmpxchg(
; GFX11-CU-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-CU-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] offset:16 glc
; GFX11-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: flat_store_b32 v[0:1], v2
; GFX11-CU-NEXT: s_endpgm
ptr %out, i32 %in, i32 %old) {
@@ -5148,8 +5148,8 @@ define amdgpu_kernel void @flat_agent_acquire_acquire_ret_cmpxchg(
; GFX10-WGP-NEXT: v_mov_b32_e32 v3, s3
; GFX10-WGP-NEXT: flat_atomic_cmpswap v2, v[0:1], v[2:3] glc
; GFX10-WGP-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: v_mov_b32_e32 v0, s0
; GFX10-WGP-NEXT: v_mov_b32_e32 v1, s1
; GFX10-WGP-NEXT: flat_store_dword v[0:1], v2
@@ -5167,8 +5167,8 @@ define amdgpu_kernel void @flat_agent_acquire_acquire_ret_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v3, s3
; GFX10-CU-NEXT: flat_atomic_cmpswap v2, v[0:1], v[2:3] glc
; GFX10-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s1
; GFX10-CU-NEXT: flat_store_dword v[0:1], v2
@@ -5247,8 +5247,8 @@ define amdgpu_kernel void @flat_agent_acquire_acquire_ret_cmpxchg(
; GFX11-WGP-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-WGP-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] offset:16 glc
; GFX11-WGP-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: flat_store_b32 v[0:1], v2
; GFX11-WGP-NEXT: s_endpgm
;
@@ -5260,8 +5260,8 @@ define amdgpu_kernel void @flat_agent_acquire_acquire_ret_cmpxchg(
; GFX11-CU-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-CU-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] offset:16 glc
; GFX11-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: flat_store_b32 v[0:1], v2
; GFX11-CU-NEXT: s_endpgm
ptr %out, i32 %in, i32 %old) {
@@ -5304,8 +5304,8 @@ define amdgpu_kernel void @flat_agent_release_acquire_ret_cmpxchg(
; GFX10-WGP-NEXT: v_mov_b32_e32 v3, s3
; GFX10-WGP-NEXT: flat_atomic_cmpswap v2, v[0:1], v[2:3] glc
; GFX10-WGP-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: v_mov_b32_e32 v0, s0
; GFX10-WGP-NEXT: v_mov_b32_e32 v1, s1
; GFX10-WGP-NEXT: flat_store_dword v[0:1], v2
@@ -5323,8 +5323,8 @@ define amdgpu_kernel void @flat_agent_release_acquire_ret_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v3, s3
; GFX10-CU-NEXT: flat_atomic_cmpswap v2, v[0:1], v[2:3] glc
; GFX10-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s1
; GFX10-CU-NEXT: flat_store_dword v[0:1], v2
@@ -5405,8 +5405,8 @@ define amdgpu_kernel void @flat_agent_release_acquire_ret_cmpxchg(
; GFX11-WGP-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-WGP-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] offset:16 glc
; GFX11-WGP-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: flat_store_b32 v[0:1], v2
; GFX11-WGP-NEXT: s_endpgm
;
@@ -5418,8 +5418,8 @@ define amdgpu_kernel void @flat_agent_release_acquire_ret_cmpxchg(
; GFX11-CU-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-CU-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] offset:16 glc
; GFX11-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: flat_store_b32 v[0:1], v2
; GFX11-CU-NEXT: s_endpgm
ptr %out, i32 %in, i32 %old) {
@@ -5462,8 +5462,8 @@ define amdgpu_kernel void @flat_agent_acq_rel_acquire_ret_cmpxchg(
; GFX10-WGP-NEXT: v_mov_b32_e32 v3, s3
; GFX10-WGP-NEXT: flat_atomic_cmpswap v2, v[0:1], v[2:3] glc
; GFX10-WGP-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: v_mov_b32_e32 v0, s0
; GFX10-WGP-NEXT: v_mov_b32_e32 v1, s1
; GFX10-WGP-NEXT: flat_store_dword v[0:1], v2
@@ -5481,8 +5481,8 @@ define amdgpu_kernel void @flat_agent_acq_rel_acquire_ret_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v3, s3
; GFX10-CU-NEXT: flat_atomic_cmpswap v2, v[0:1], v[2:3] glc
; GFX10-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s1
; GFX10-CU-NEXT: flat_store_dword v[0:1], v2
@@ -5563,8 +5563,8 @@ define amdgpu_kernel void @flat_agent_acq_rel_acquire_ret_cmpxchg(
; GFX11-WGP-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-WGP-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] offset:16 glc
; GFX11-WGP-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: flat_store_b32 v[0:1], v2
; GFX11-WGP-NEXT: s_endpgm
;
@@ -5576,8 +5576,8 @@ define amdgpu_kernel void @flat_agent_acq_rel_acquire_ret_cmpxchg(
; GFX11-CU-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-CU-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] offset:16 glc
; GFX11-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: flat_store_b32 v[0:1], v2
; GFX11-CU-NEXT: s_endpgm
ptr %out, i32 %in, i32 %old) {
@@ -5620,8 +5620,8 @@ define amdgpu_kernel void @flat_agent_seq_cst_acquire_ret_cmpxchg(
; GFX10-WGP-NEXT: v_mov_b32_e32 v3, s3
; GFX10-WGP-NEXT: flat_atomic_cmpswap v2, v[0:1], v[2:3] glc
; GFX10-WGP-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: v_mov_b32_e32 v0, s0
; GFX10-WGP-NEXT: v_mov_b32_e32 v1, s1
; GFX10-WGP-NEXT: flat_store_dword v[0:1], v2
@@ -5639,8 +5639,8 @@ define amdgpu_kernel void @flat_agent_seq_cst_acquire_ret_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v3, s3
; GFX10-CU-NEXT: flat_atomic_cmpswap v2, v[0:1], v[2:3] glc
; GFX10-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s1
; GFX10-CU-NEXT: flat_store_dword v[0:1], v2
@@ -5721,8 +5721,8 @@ define amdgpu_kernel void @flat_agent_seq_cst_acquire_ret_cmpxchg(
; GFX11-WGP-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-WGP-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] offset:16 glc
; GFX11-WGP-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: flat_store_b32 v[0:1], v2
; GFX11-WGP-NEXT: s_endpgm
;
@@ -5734,8 +5734,8 @@ define amdgpu_kernel void @flat_agent_seq_cst_acquire_ret_cmpxchg(
; GFX11-CU-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-CU-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] offset:16 glc
; GFX11-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: flat_store_b32 v[0:1], v2
; GFX11-CU-NEXT: s_endpgm
ptr %out, i32 %in, i32 %old) {
@@ -5778,8 +5778,8 @@ define amdgpu_kernel void @flat_agent_monotonic_seq_cst_ret_cmpxchg(
; GFX10-WGP-NEXT: v_mov_b32_e32 v3, s3
; GFX10-WGP-NEXT: flat_atomic_cmpswap v2, v[0:1], v[2:3] glc
; GFX10-WGP-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: v_mov_b32_e32 v0, s0
; GFX10-WGP-NEXT: v_mov_b32_e32 v1, s1
; GFX10-WGP-NEXT: flat_store_dword v[0:1], v2
@@ -5797,8 +5797,8 @@ define amdgpu_kernel void @flat_agent_monotonic_seq_cst_ret_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v3, s3
; GFX10-CU-NEXT: flat_atomic_cmpswap v2, v[0:1], v[2:3] glc
; GFX10-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s1
; GFX10-CU-NEXT: flat_store_dword v[0:1], v2
@@ -5879,8 +5879,8 @@ define amdgpu_kernel void @flat_agent_monotonic_seq_cst_ret_cmpxchg(
; GFX11-WGP-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-WGP-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] offset:16 glc
; GFX11-WGP-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: flat_store_b32 v[0:1], v2
; GFX11-WGP-NEXT: s_endpgm
;
@@ -5892,8 +5892,8 @@ define amdgpu_kernel void @flat_agent_monotonic_seq_cst_ret_cmpxchg(
; GFX11-CU-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-CU-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] offset:16 glc
; GFX11-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: flat_store_b32 v[0:1], v2
; GFX11-CU-NEXT: s_endpgm
ptr %out, i32 %in, i32 %old) {
@@ -5936,8 +5936,8 @@ define amdgpu_kernel void @flat_agent_acquire_seq_cst_ret_cmpxchg(
; GFX10-WGP-NEXT: v_mov_b32_e32 v3, s3
; GFX10-WGP-NEXT: flat_atomic_cmpswap v2, v[0:1], v[2:3] glc
; GFX10-WGP-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: v_mov_b32_e32 v0, s0
; GFX10-WGP-NEXT: v_mov_b32_e32 v1, s1
; GFX10-WGP-NEXT: flat_store_dword v[0:1], v2
@@ -5955,8 +5955,8 @@ define amdgpu_kernel void @flat_agent_acquire_seq_cst_ret_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v3, s3
; GFX10-CU-NEXT: flat_atomic_cmpswap v2, v[0:1], v[2:3] glc
; GFX10-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s1
; GFX10-CU-NEXT: flat_store_dword v[0:1], v2
@@ -6037,8 +6037,8 @@ define amdgpu_kernel void @flat_agent_acquire_seq_cst_ret_cmpxchg(
; GFX11-WGP-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-WGP-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] offset:16 glc
; GFX11-WGP-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: flat_store_b32 v[0:1], v2
; GFX11-WGP-NEXT: s_endpgm
;
@@ -6050,8 +6050,8 @@ define amdgpu_kernel void @flat_agent_acquire_seq_cst_ret_cmpxchg(
; GFX11-CU-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-CU-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] offset:16 glc
; GFX11-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: flat_store_b32 v[0:1], v2
; GFX11-CU-NEXT: s_endpgm
ptr %out, i32 %in, i32 %old) {
@@ -6094,8 +6094,8 @@ define amdgpu_kernel void @flat_agent_release_seq_cst_ret_cmpxchg(
; GFX10-WGP-NEXT: v_mov_b32_e32 v3, s3
; GFX10-WGP-NEXT: flat_atomic_cmpswap v2, v[0:1], v[2:3] glc
; GFX10-WGP-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: v_mov_b32_e32 v0, s0
; GFX10-WGP-NEXT: v_mov_b32_e32 v1, s1
; GFX10-WGP-NEXT: flat_store_dword v[0:1], v2
@@ -6113,8 +6113,8 @@ define amdgpu_kernel void @flat_agent_release_seq_cst_ret_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v3, s3
; GFX10-CU-NEXT: flat_atomic_cmpswap v2, v[0:1], v[2:3] glc
; GFX10-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s1
; GFX10-CU-NEXT: flat_store_dword v[0:1], v2
@@ -6195,8 +6195,8 @@ define amdgpu_kernel void @flat_agent_release_seq_cst_ret_cmpxchg(
; GFX11-WGP-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-WGP-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] offset:16 glc
; GFX11-WGP-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: flat_store_b32 v[0:1], v2
; GFX11-WGP-NEXT: s_endpgm
;
@@ -6208,8 +6208,8 @@ define amdgpu_kernel void @flat_agent_release_seq_cst_ret_cmpxchg(
; GFX11-CU-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-CU-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] offset:16 glc
; GFX11-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: flat_store_b32 v[0:1], v2
; GFX11-CU-NEXT: s_endpgm
ptr %out, i32 %in, i32 %old) {
@@ -6252,8 +6252,8 @@ define amdgpu_kernel void @flat_agent_acq_rel_seq_cst_ret_cmpxchg(
; GFX10-WGP-NEXT: v_mov_b32_e32 v3, s3
; GFX10-WGP-NEXT: flat_atomic_cmpswap v2, v[0:1], v[2:3] glc
; GFX10-WGP-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: v_mov_b32_e32 v0, s0
; GFX10-WGP-NEXT: v_mov_b32_e32 v1, s1
; GFX10-WGP-NEXT: flat_store_dword v[0:1], v2
@@ -6271,8 +6271,8 @@ define amdgpu_kernel void @flat_agent_acq_rel_seq_cst_ret_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v3, s3
; GFX10-CU-NEXT: flat_atomic_cmpswap v2, v[0:1], v[2:3] glc
; GFX10-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s1
; GFX10-CU-NEXT: flat_store_dword v[0:1], v2
@@ -6353,8 +6353,8 @@ define amdgpu_kernel void @flat_agent_acq_rel_seq_cst_ret_cmpxchg(
; GFX11-WGP-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-WGP-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] offset:16 glc
; GFX11-WGP-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: flat_store_b32 v[0:1], v2
; GFX11-WGP-NEXT: s_endpgm
;
@@ -6366,8 +6366,8 @@ define amdgpu_kernel void @flat_agent_acq_rel_seq_cst_ret_cmpxchg(
; GFX11-CU-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-CU-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] offset:16 glc
; GFX11-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: flat_store_b32 v[0:1], v2
; GFX11-CU-NEXT: s_endpgm
ptr %out, i32 %in, i32 %old) {
@@ -6410,8 +6410,8 @@ define amdgpu_kernel void @flat_agent_seq_cst_seq_cst_ret_cmpxchg(
; GFX10-WGP-NEXT: v_mov_b32_e32 v3, s3
; GFX10-WGP-NEXT: flat_atomic_cmpswap v2, v[0:1], v[2:3] glc
; GFX10-WGP-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: v_mov_b32_e32 v0, s0
; GFX10-WGP-NEXT: v_mov_b32_e32 v1, s1
; GFX10-WGP-NEXT: flat_store_dword v[0:1], v2
@@ -6429,8 +6429,8 @@ define amdgpu_kernel void @flat_agent_seq_cst_seq_cst_ret_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v3, s3
; GFX10-CU-NEXT: flat_atomic_cmpswap v2, v[0:1], v[2:3] glc
; GFX10-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s1
; GFX10-CU-NEXT: flat_store_dword v[0:1], v2
@@ -6511,8 +6511,8 @@ define amdgpu_kernel void @flat_agent_seq_cst_seq_cst_ret_cmpxchg(
; GFX11-WGP-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-WGP-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] offset:16 glc
; GFX11-WGP-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: flat_store_b32 v[0:1], v2
; GFX11-WGP-NEXT: s_endpgm
;
@@ -6524,8 +6524,8 @@ define amdgpu_kernel void @flat_agent_seq_cst_seq_cst_ret_cmpxchg(
; GFX11-CU-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-CU-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] offset:16 glc
; GFX11-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: flat_store_b32 v[0:1], v2
; GFX11-CU-NEXT: s_endpgm
ptr %out, i32 %in, i32 %old) {
@@ -6827,8 +6827,8 @@ define amdgpu_kernel void @flat_agent_one_as_acquire_load(
; GFX10-WGP-NEXT: v_mov_b32_e32 v1, s1
; GFX10-WGP-NEXT: flat_load_dword v2, v[0:1] glc dlc
; GFX10-WGP-NEXT: s_waitcnt vmcnt(0)
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: v_mov_b32_e32 v0, s2
; GFX10-WGP-NEXT: v_mov_b32_e32 v1, s3
; GFX10-WGP-NEXT: s_waitcnt lgkmcnt(0)
@@ -6843,8 +6843,8 @@ define amdgpu_kernel void @flat_agent_one_as_acquire_load(
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s1
; GFX10-CU-NEXT: flat_load_dword v2, v[0:1] glc dlc
; GFX10-CU-NEXT: s_waitcnt vmcnt(0)
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s2
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s3
; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
@@ -6930,8 +6930,8 @@ define amdgpu_kernel void @flat_agent_one_as_acquire_load(
; GFX11-WGP-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-WGP-NEXT: flat_load_b32 v2, v[0:1] glc
; GFX11-WGP-NEXT: s_waitcnt vmcnt(0)
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
; GFX11-WGP-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-WGP-NEXT: flat_store_b32 v[0:1], v2
@@ -6944,8 +6944,8 @@ define amdgpu_kernel void @flat_agent_one_as_acquire_load(
; GFX11-CU-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-CU-NEXT: flat_load_b32 v2, v[0:1] glc
; GFX11-CU-NEXT: s_waitcnt vmcnt(0)
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-CU-NEXT: flat_store_b32 v[0:1], v2
@@ -6981,8 +6981,8 @@ define amdgpu_kernel void @flat_agent_one_as_seq_cst_load(
; GFX10-WGP-NEXT: v_mov_b32_e32 v1, s1
; GFX10-WGP-NEXT: flat_load_dword v2, v[0:1] glc dlc
; GFX10-WGP-NEXT: s_waitcnt vmcnt(0)
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: v_mov_b32_e32 v0, s2
; GFX10-WGP-NEXT: v_mov_b32_e32 v1, s3
; GFX10-WGP-NEXT: s_waitcnt lgkmcnt(0)
@@ -6997,8 +6997,8 @@ define amdgpu_kernel void @flat_agent_one_as_seq_cst_load(
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s1
; GFX10-CU-NEXT: flat_load_dword v2, v[0:1] glc dlc
; GFX10-CU-NEXT: s_waitcnt vmcnt(0)
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s2
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s3
; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
@@ -7084,8 +7084,8 @@ define amdgpu_kernel void @flat_agent_one_as_seq_cst_load(
; GFX11-WGP-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-WGP-NEXT: flat_load_b32 v2, v[0:1] glc
; GFX11-WGP-NEXT: s_waitcnt vmcnt(0)
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
; GFX11-WGP-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-WGP-NEXT: flat_store_b32 v[0:1], v2
@@ -7098,8 +7098,8 @@ define amdgpu_kernel void @flat_agent_one_as_seq_cst_load(
; GFX11-CU-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-CU-NEXT: flat_load_b32 v2, v[0:1] glc
; GFX11-CU-NEXT: s_waitcnt vmcnt(0)
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-CU-NEXT: flat_store_b32 v[0:1], v2
@@ -7710,8 +7710,8 @@ define amdgpu_kernel void @flat_agent_one_as_acquire_atomicrmw(
; GFX10-WGP-NEXT: v_mov_b32_e32 v2, s2
; GFX10-WGP-NEXT: flat_atomic_swap v[0:1], v2
; GFX10-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: s_endpgm
;
; GFX10-CU-LABEL: flat_agent_one_as_acquire_atomicrmw:
@@ -7725,8 +7725,8 @@ define amdgpu_kernel void @flat_agent_one_as_acquire_atomicrmw(
; GFX10-CU-NEXT: v_mov_b32_e32 v2, s2
; GFX10-CU-NEXT: flat_atomic_swap v[0:1], v2
; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: s_endpgm
;
; SKIP-CACHE-INV-LABEL: flat_agent_one_as_acquire_atomicrmw:
@@ -7799,8 +7799,8 @@ define amdgpu_kernel void @flat_agent_one_as_acquire_atomicrmw(
; GFX11-WGP-NEXT: v_mov_b32_e32 v2, s0
; GFX11-WGP-NEXT: flat_atomic_swap_b32 v[0:1], v2
; GFX11-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: s_endpgm
;
; GFX11-CU-LABEL: flat_agent_one_as_acquire_atomicrmw:
@@ -7813,8 +7813,8 @@ define amdgpu_kernel void @flat_agent_one_as_acquire_atomicrmw(
; GFX11-CU-NEXT: v_mov_b32_e32 v2, s0
; GFX11-CU-NEXT: flat_atomic_swap_b32 v[0:1], v2
; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: s_endpgm
ptr %out, i32 %in) {
entry:
@@ -7963,8 +7963,8 @@ define amdgpu_kernel void @flat_agent_one_as_acq_rel_atomicrmw(
; GFX10-WGP-NEXT: v_mov_b32_e32 v2, s2
; GFX10-WGP-NEXT: flat_atomic_swap v[0:1], v2
; GFX10-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: s_endpgm
;
; GFX10-CU-LABEL: flat_agent_one_as_acq_rel_atomicrmw:
@@ -7978,8 +7978,8 @@ define amdgpu_kernel void @flat_agent_one_as_acq_rel_atomicrmw(
; GFX10-CU-NEXT: v_mov_b32_e32 v2, s2
; GFX10-CU-NEXT: flat_atomic_swap v[0:1], v2
; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: s_endpgm
;
; SKIP-CACHE-INV-LABEL: flat_agent_one_as_acq_rel_atomicrmw:
@@ -8054,8 +8054,8 @@ define amdgpu_kernel void @flat_agent_one_as_acq_rel_atomicrmw(
; GFX11-WGP-NEXT: v_mov_b32_e32 v2, s0
; GFX11-WGP-NEXT: flat_atomic_swap_b32 v[0:1], v2
; GFX11-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: s_endpgm
;
; GFX11-CU-LABEL: flat_agent_one_as_acq_rel_atomicrmw:
@@ -8068,8 +8068,8 @@ define amdgpu_kernel void @flat_agent_one_as_acq_rel_atomicrmw(
; GFX11-CU-NEXT: v_mov_b32_e32 v2, s0
; GFX11-CU-NEXT: flat_atomic_swap_b32 v[0:1], v2
; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: s_endpgm
ptr %out, i32 %in) {
entry:
@@ -8102,8 +8102,8 @@ define amdgpu_kernel void @flat_agent_one_as_seq_cst_atomicrmw(
; GFX10-WGP-NEXT: v_mov_b32_e32 v2, s2
; GFX10-WGP-NEXT: flat_atomic_swap v[0:1], v2
; GFX10-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: s_endpgm
;
; GFX10-CU-LABEL: flat_agent_one_as_seq_cst_atomicrmw:
@@ -8117,8 +8117,8 @@ define amdgpu_kernel void @flat_agent_one_as_seq_cst_atomicrmw(
; GFX10-CU-NEXT: v_mov_b32_e32 v2, s2
; GFX10-CU-NEXT: flat_atomic_swap v[0:1], v2
; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: s_endpgm
;
; SKIP-CACHE-INV-LABEL: flat_agent_one_as_seq_cst_atomicrmw:
@@ -8193,8 +8193,8 @@ define amdgpu_kernel void @flat_agent_one_as_seq_cst_atomicrmw(
; GFX11-WGP-NEXT: v_mov_b32_e32 v2, s0
; GFX11-WGP-NEXT: flat_atomic_swap_b32 v[0:1], v2
; GFX11-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: s_endpgm
;
; GFX11-CU-LABEL: flat_agent_one_as_seq_cst_atomicrmw:
@@ -8207,8 +8207,8 @@ define amdgpu_kernel void @flat_agent_one_as_seq_cst_atomicrmw(
; GFX11-CU-NEXT: v_mov_b32_e32 v2, s0
; GFX11-CU-NEXT: flat_atomic_swap_b32 v[0:1], v2
; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: s_endpgm
ptr %out, i32 %in) {
entry:
@@ -8243,8 +8243,8 @@ define amdgpu_kernel void @flat_agent_one_as_acquire_ret_atomicrmw(
; GFX10-WGP-NEXT: v_mov_b32_e32 v2, s2
; GFX10-WGP-NEXT: flat_atomic_swap v2, v[0:1], v2 glc
; GFX10-WGP-NEXT: s_waitcnt vmcnt(0)
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-WGP-NEXT: flat_store_dword v[0:1], v2
; GFX10-WGP-NEXT: s_endpgm
@@ -8260,8 +8260,8 @@ define amdgpu_kernel void @flat_agent_one_as_acquire_ret_atomicrmw(
; GFX10-CU-NEXT: v_mov_b32_e32 v2, s2
; GFX10-CU-NEXT: flat_atomic_swap v2, v[0:1], v2 glc
; GFX10-CU-NEXT: s_waitcnt vmcnt(0)
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-CU-NEXT: flat_store_dword v[0:1], v2
; GFX10-CU-NEXT: s_endpgm
@@ -8343,8 +8343,8 @@ define amdgpu_kernel void @flat_agent_one_as_acquire_ret_atomicrmw(
; GFX11-WGP-NEXT: v_mov_b32_e32 v2, s0
; GFX11-WGP-NEXT: flat_atomic_swap_b32 v2, v[0:1], v2 glc
; GFX11-WGP-NEXT: s_waitcnt vmcnt(0)
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-WGP-NEXT: flat_store_b32 v[0:1], v2
; GFX11-WGP-NEXT: s_endpgm
@@ -8359,8 +8359,8 @@ define amdgpu_kernel void @flat_agent_one_as_acquire_ret_atomicrmw(
; GFX11-CU-NEXT: v_mov_b32_e32 v2, s0
; GFX11-CU-NEXT: flat_atomic_swap_b32 v2, v[0:1], v2 glc
; GFX11-CU-NEXT: s_waitcnt vmcnt(0)
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-CU-NEXT: flat_store_b32 v[0:1], v2
; GFX11-CU-NEXT: s_endpgm
@@ -8398,8 +8398,8 @@ define amdgpu_kernel void @flat_agent_one_as_acq_rel_ret_atomicrmw(
; GFX10-WGP-NEXT: v_mov_b32_e32 v2, s2
; GFX10-WGP-NEXT: flat_atomic_swap v2, v[0:1], v2 glc
; GFX10-WGP-NEXT: s_waitcnt vmcnt(0)
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-WGP-NEXT: flat_store_dword v[0:1], v2
; GFX10-WGP-NEXT: s_endpgm
@@ -8415,8 +8415,8 @@ define amdgpu_kernel void @flat_agent_one_as_acq_rel_ret_atomicrmw(
; GFX10-CU-NEXT: v_mov_b32_e32 v2, s2
; GFX10-CU-NEXT: flat_atomic_swap v2, v[0:1], v2 glc
; GFX10-CU-NEXT: s_waitcnt vmcnt(0)
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-CU-NEXT: flat_store_dword v[0:1], v2
; GFX10-CU-NEXT: s_endpgm
@@ -8500,8 +8500,8 @@ define amdgpu_kernel void @flat_agent_one_as_acq_rel_ret_atomicrmw(
; GFX11-WGP-NEXT: v_mov_b32_e32 v2, s0
; GFX11-WGP-NEXT: flat_atomic_swap_b32 v2, v[0:1], v2 glc
; GFX11-WGP-NEXT: s_waitcnt vmcnt(0)
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-WGP-NEXT: flat_store_b32 v[0:1], v2
; GFX11-WGP-NEXT: s_endpgm
@@ -8516,8 +8516,8 @@ define amdgpu_kernel void @flat_agent_one_as_acq_rel_ret_atomicrmw(
; GFX11-CU-NEXT: v_mov_b32_e32 v2, s0
; GFX11-CU-NEXT: flat_atomic_swap_b32 v2, v[0:1], v2 glc
; GFX11-CU-NEXT: s_waitcnt vmcnt(0)
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-CU-NEXT: flat_store_b32 v[0:1], v2
; GFX11-CU-NEXT: s_endpgm
@@ -8555,8 +8555,8 @@ define amdgpu_kernel void @flat_agent_one_as_seq_cst_ret_atomicrmw(
; GFX10-WGP-NEXT: v_mov_b32_e32 v2, s2
; GFX10-WGP-NEXT: flat_atomic_swap v2, v[0:1], v2 glc
; GFX10-WGP-NEXT: s_waitcnt vmcnt(0)
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-WGP-NEXT: flat_store_dword v[0:1], v2
; GFX10-WGP-NEXT: s_endpgm
@@ -8572,8 +8572,8 @@ define amdgpu_kernel void @flat_agent_one_as_seq_cst_ret_atomicrmw(
; GFX10-CU-NEXT: v_mov_b32_e32 v2, s2
; GFX10-CU-NEXT: flat_atomic_swap v2, v[0:1], v2 glc
; GFX10-CU-NEXT: s_waitcnt vmcnt(0)
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-CU-NEXT: flat_store_dword v[0:1], v2
; GFX10-CU-NEXT: s_endpgm
@@ -8657,8 +8657,8 @@ define amdgpu_kernel void @flat_agent_one_as_seq_cst_ret_atomicrmw(
; GFX11-WGP-NEXT: v_mov_b32_e32 v2, s0
; GFX11-WGP-NEXT: flat_atomic_swap_b32 v2, v[0:1], v2 glc
; GFX11-WGP-NEXT: s_waitcnt vmcnt(0)
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-WGP-NEXT: flat_store_b32 v[0:1], v2
; GFX11-WGP-NEXT: s_endpgm
@@ -8673,8 +8673,8 @@ define amdgpu_kernel void @flat_agent_one_as_seq_cst_ret_atomicrmw(
; GFX11-CU-NEXT: v_mov_b32_e32 v2, s0
; GFX11-CU-NEXT: flat_atomic_swap_b32 v2, v[0:1], v2 glc
; GFX11-CU-NEXT: s_waitcnt vmcnt(0)
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-CU-NEXT: flat_store_b32 v[0:1], v2
; GFX11-CU-NEXT: s_endpgm
@@ -8826,8 +8826,8 @@ define amdgpu_kernel void @flat_agent_one_as_acquire_monotonic_cmpxchg(
; GFX10-WGP-NEXT: v_mov_b32_e32 v3, s3
; GFX10-WGP-NEXT: flat_atomic_cmpswap v[0:1], v[2:3]
; GFX10-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: s_endpgm
;
; GFX10-CU-LABEL: flat_agent_one_as_acquire_monotonic_cmpxchg:
@@ -8842,8 +8842,8 @@ define amdgpu_kernel void @flat_agent_one_as_acquire_monotonic_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v3, s3
; GFX10-CU-NEXT: flat_atomic_cmpswap v[0:1], v[2:3]
; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: s_endpgm
;
; SKIP-CACHE-INV-LABEL: flat_agent_one_as_acquire_monotonic_cmpxchg:
@@ -8912,8 +8912,8 @@ define amdgpu_kernel void @flat_agent_one_as_acquire_monotonic_cmpxchg(
; GFX11-WGP-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-WGP-NEXT: flat_atomic_cmpswap_b32 v[0:1], v[2:3] offset:16
; GFX11-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: s_endpgm
;
; GFX11-CU-LABEL: flat_agent_one_as_acquire_monotonic_cmpxchg:
@@ -8924,8 +8924,8 @@ define amdgpu_kernel void @flat_agent_one_as_acquire_monotonic_cmpxchg(
; GFX11-CU-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-CU-NEXT: flat_atomic_cmpswap_b32 v[0:1], v[2:3] offset:16
; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: s_endpgm
ptr %out, i32 %in, i32 %old) {
entry:
@@ -9077,8 +9077,8 @@ define amdgpu_kernel void @flat_agent_one_as_acq_rel_monotonic_cmpxchg(
; GFX10-WGP-NEXT: v_mov_b32_e32 v3, s3
; GFX10-WGP-NEXT: flat_atomic_cmpswap v[0:1], v[2:3]
; GFX10-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: s_endpgm
;
; GFX10-CU-LABEL: flat_agent_one_as_acq_rel_monotonic_cmpxchg:
@@ -9093,8 +9093,8 @@ define amdgpu_kernel void @flat_agent_one_as_acq_rel_monotonic_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v3, s3
; GFX10-CU-NEXT: flat_atomic_cmpswap v[0:1], v[2:3]
; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: s_endpgm
;
; SKIP-CACHE-INV-LABEL: flat_agent_one_as_acq_rel_monotonic_cmpxchg:
@@ -9165,8 +9165,8 @@ define amdgpu_kernel void @flat_agent_one_as_acq_rel_monotonic_cmpxchg(
; GFX11-WGP-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-WGP-NEXT: flat_atomic_cmpswap_b32 v[0:1], v[2:3] offset:16
; GFX11-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: s_endpgm
;
; GFX11-CU-LABEL: flat_agent_one_as_acq_rel_monotonic_cmpxchg:
@@ -9177,8 +9177,8 @@ define amdgpu_kernel void @flat_agent_one_as_acq_rel_monotonic_cmpxchg(
; GFX11-CU-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-CU-NEXT: flat_atomic_cmpswap_b32 v[0:1], v[2:3] offset:16
; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: s_endpgm
ptr %out, i32 %in, i32 %old) {
entry:
@@ -9215,8 +9215,8 @@ define amdgpu_kernel void @flat_agent_one_as_seq_cst_monotonic_cmpxchg(
; GFX10-WGP-NEXT: v_mov_b32_e32 v3, s3
; GFX10-WGP-NEXT: flat_atomic_cmpswap v[0:1], v[2:3]
; GFX10-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: s_endpgm
;
; GFX10-CU-LABEL: flat_agent_one_as_seq_cst_monotonic_cmpxchg:
@@ -9231,8 +9231,8 @@ define amdgpu_kernel void @flat_agent_one_as_seq_cst_monotonic_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v3, s3
; GFX10-CU-NEXT: flat_atomic_cmpswap v[0:1], v[2:3]
; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: s_endpgm
;
; SKIP-CACHE-INV-LABEL: flat_agent_one_as_seq_cst_monotonic_cmpxchg:
@@ -9303,8 +9303,8 @@ define amdgpu_kernel void @flat_agent_one_as_seq_cst_monotonic_cmpxchg(
; GFX11-WGP-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-WGP-NEXT: flat_atomic_cmpswap_b32 v[0:1], v[2:3] offset:16
; GFX11-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: s_endpgm
;
; GFX11-CU-LABEL: flat_agent_one_as_seq_cst_monotonic_cmpxchg:
@@ -9315,8 +9315,8 @@ define amdgpu_kernel void @flat_agent_one_as_seq_cst_monotonic_cmpxchg(
; GFX11-CU-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-CU-NEXT: flat_atomic_cmpswap_b32 v[0:1], v[2:3] offset:16
; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: s_endpgm
ptr %out, i32 %in, i32 %old) {
entry:
@@ -9353,8 +9353,8 @@ define amdgpu_kernel void @flat_agent_one_as_monotonic_acquire_cmpxchg(
; GFX10-WGP-NEXT: v_mov_b32_e32 v3, s3
; GFX10-WGP-NEXT: flat_atomic_cmpswap v[0:1], v[2:3]
; GFX10-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: s_endpgm
;
; GFX10-CU-LABEL: flat_agent_one_as_monotonic_acquire_cmpxchg:
@@ -9369,8 +9369,8 @@ define amdgpu_kernel void @flat_agent_one_as_monotonic_acquire_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v3, s3
; GFX10-CU-NEXT: flat_atomic_cmpswap v[0:1], v[2:3]
; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: s_endpgm
;
; SKIP-CACHE-INV-LABEL: flat_agent_one_as_monotonic_acquire_cmpxchg:
@@ -9439,8 +9439,8 @@ define amdgpu_kernel void @flat_agent_one_as_monotonic_acquire_cmpxchg(
; GFX11-WGP-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-WGP-NEXT: flat_atomic_cmpswap_b32 v[0:1], v[2:3] offset:16
; GFX11-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: s_endpgm
;
; GFX11-CU-LABEL: flat_agent_one_as_monotonic_acquire_cmpxchg:
@@ -9451,8 +9451,8 @@ define amdgpu_kernel void @flat_agent_one_as_monotonic_acquire_cmpxchg(
; GFX11-CU-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-CU-NEXT: flat_atomic_cmpswap_b32 v[0:1], v[2:3] offset:16
; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: s_endpgm
ptr %out, i32 %in, i32 %old) {
entry:
@@ -9489,8 +9489,8 @@ define amdgpu_kernel void @flat_agent_one_as_acquire_acquire_cmpxchg(
; GFX10-WGP-NEXT: v_mov_b32_e32 v3, s3
; GFX10-WGP-NEXT: flat_atomic_cmpswap v[0:1], v[2:3]
; GFX10-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: s_endpgm
;
; GFX10-CU-LABEL: flat_agent_one_as_acquire_acquire_cmpxchg:
@@ -9505,8 +9505,8 @@ define amdgpu_kernel void @flat_agent_one_as_acquire_acquire_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v3, s3
; GFX10-CU-NEXT: flat_atomic_cmpswap v[0:1], v[2:3]
; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: s_endpgm
;
; SKIP-CACHE-INV-LABEL: flat_agent_one_as_acquire_acquire_cmpxchg:
@@ -9575,8 +9575,8 @@ define amdgpu_kernel void @flat_agent_one_as_acquire_acquire_cmpxchg(
; GFX11-WGP-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-WGP-NEXT: flat_atomic_cmpswap_b32 v[0:1], v[2:3] offset:16
; GFX11-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: s_endpgm
;
; GFX11-CU-LABEL: flat_agent_one_as_acquire_acquire_cmpxchg:
@@ -9587,8 +9587,8 @@ define amdgpu_kernel void @flat_agent_one_as_acquire_acquire_cmpxchg(
; GFX11-CU-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-CU-NEXT: flat_atomic_cmpswap_b32 v[0:1], v[2:3] offset:16
; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: s_endpgm
ptr %out, i32 %in, i32 %old) {
entry:
@@ -9625,8 +9625,8 @@ define amdgpu_kernel void @flat_agent_one_as_release_acquire_cmpxchg(
; GFX10-WGP-NEXT: v_mov_b32_e32 v3, s3
; GFX10-WGP-NEXT: flat_atomic_cmpswap v[0:1], v[2:3]
; GFX10-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: s_endpgm
;
; GFX10-CU-LABEL: flat_agent_one_as_release_acquire_cmpxchg:
@@ -9641,8 +9641,8 @@ define amdgpu_kernel void @flat_agent_one_as_release_acquire_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v3, s3
; GFX10-CU-NEXT: flat_atomic_cmpswap v[0:1], v[2:3]
; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: s_endpgm
;
; SKIP-CACHE-INV-LABEL: flat_agent_one_as_release_acquire_cmpxchg:
@@ -9713,8 +9713,8 @@ define amdgpu_kernel void @flat_agent_one_as_release_acquire_cmpxchg(
; GFX11-WGP-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-WGP-NEXT: flat_atomic_cmpswap_b32 v[0:1], v[2:3] offset:16
; GFX11-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: s_endpgm
;
; GFX11-CU-LABEL: flat_agent_one_as_release_acquire_cmpxchg:
@@ -9725,8 +9725,8 @@ define amdgpu_kernel void @flat_agent_one_as_release_acquire_cmpxchg(
; GFX11-CU-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-CU-NEXT: flat_atomic_cmpswap_b32 v[0:1], v[2:3] offset:16
; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: s_endpgm
ptr %out, i32 %in, i32 %old) {
entry:
@@ -9763,8 +9763,8 @@ define amdgpu_kernel void @flat_agent_one_as_acq_rel_acquire_cmpxchg(
; GFX10-WGP-NEXT: v_mov_b32_e32 v3, s3
; GFX10-WGP-NEXT: flat_atomic_cmpswap v[0:1], v[2:3]
; GFX10-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: s_endpgm
;
; GFX10-CU-LABEL: flat_agent_one_as_acq_rel_acquire_cmpxchg:
@@ -9779,8 +9779,8 @@ define amdgpu_kernel void @flat_agent_one_as_acq_rel_acquire_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v3, s3
; GFX10-CU-NEXT: flat_atomic_cmpswap v[0:1], v[2:3]
; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: s_endpgm
;
; SKIP-CACHE-INV-LABEL: flat_agent_one_as_acq_rel_acquire_cmpxchg:
@@ -9851,8 +9851,8 @@ define amdgpu_kernel void @flat_agent_one_as_acq_rel_acquire_cmpxchg(
; GFX11-WGP-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-WGP-NEXT: flat_atomic_cmpswap_b32 v[0:1], v[2:3] offset:16
; GFX11-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: s_endpgm
;
; GFX11-CU-LABEL: flat_agent_one_as_acq_rel_acquire_cmpxchg:
@@ -9863,8 +9863,8 @@ define amdgpu_kernel void @flat_agent_one_as_acq_rel_acquire_cmpxchg(
; GFX11-CU-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-CU-NEXT: flat_atomic_cmpswap_b32 v[0:1], v[2:3] offset:16
; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: s_endpgm
ptr %out, i32 %in, i32 %old) {
entry:
@@ -9901,8 +9901,8 @@ define amdgpu_kernel void @flat_agent_one_as_seq_cst_acquire_cmpxchg(
; GFX10-WGP-NEXT: v_mov_b32_e32 v3, s3
; GFX10-WGP-NEXT: flat_atomic_cmpswap v[0:1], v[2:3]
; GFX10-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: s_endpgm
;
; GFX10-CU-LABEL: flat_agent_one_as_seq_cst_acquire_cmpxchg:
@@ -9917,8 +9917,8 @@ define amdgpu_kernel void @flat_agent_one_as_seq_cst_acquire_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v3, s3
; GFX10-CU-NEXT: flat_atomic_cmpswap v[0:1], v[2:3]
; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: s_endpgm
;
; SKIP-CACHE-INV-LABEL: flat_agent_one_as_seq_cst_acquire_cmpxchg:
@@ -9989,8 +9989,8 @@ define amdgpu_kernel void @flat_agent_one_as_seq_cst_acquire_cmpxchg(
; GFX11-WGP-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-WGP-NEXT: flat_atomic_cmpswap_b32 v[0:1], v[2:3] offset:16
; GFX11-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: s_endpgm
;
; GFX11-CU-LABEL: flat_agent_one_as_seq_cst_acquire_cmpxchg:
@@ -10001,8 +10001,8 @@ define amdgpu_kernel void @flat_agent_one_as_seq_cst_acquire_cmpxchg(
; GFX11-CU-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-CU-NEXT: flat_atomic_cmpswap_b32 v[0:1], v[2:3] offset:16
; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: s_endpgm
ptr %out, i32 %in, i32 %old) {
entry:
@@ -10039,8 +10039,8 @@ define amdgpu_kernel void @flat_agent_one_as_monotonic_seq_cst_cmpxchg(
; GFX10-WGP-NEXT: v_mov_b32_e32 v3, s3
; GFX10-WGP-NEXT: flat_atomic_cmpswap v[0:1], v[2:3]
; GFX10-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: s_endpgm
;
; GFX10-CU-LABEL: flat_agent_one_as_monotonic_seq_cst_cmpxchg:
@@ -10055,8 +10055,8 @@ define amdgpu_kernel void @flat_agent_one_as_monotonic_seq_cst_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v3, s3
; GFX10-CU-NEXT: flat_atomic_cmpswap v[0:1], v[2:3]
; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: s_endpgm
;
; SKIP-CACHE-INV-LABEL: flat_agent_one_as_monotonic_seq_cst_cmpxchg:
@@ -10127,8 +10127,8 @@ define amdgpu_kernel void @flat_agent_one_as_monotonic_seq_cst_cmpxchg(
; GFX11-WGP-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-WGP-NEXT: flat_atomic_cmpswap_b32 v[0:1], v[2:3] offset:16
; GFX11-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: s_endpgm
;
; GFX11-CU-LABEL: flat_agent_one_as_monotonic_seq_cst_cmpxchg:
@@ -10139,8 +10139,8 @@ define amdgpu_kernel void @flat_agent_one_as_monotonic_seq_cst_cmpxchg(
; GFX11-CU-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-CU-NEXT: flat_atomic_cmpswap_b32 v[0:1], v[2:3] offset:16
; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: s_endpgm
ptr %out, i32 %in, i32 %old) {
entry:
@@ -10177,8 +10177,8 @@ define amdgpu_kernel void @flat_agent_one_as_acquire_seq_cst_cmpxchg(
; GFX10-WGP-NEXT: v_mov_b32_e32 v3, s3
; GFX10-WGP-NEXT: flat_atomic_cmpswap v[0:1], v[2:3]
; GFX10-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: s_endpgm
;
; GFX10-CU-LABEL: flat_agent_one_as_acquire_seq_cst_cmpxchg:
@@ -10193,8 +10193,8 @@ define amdgpu_kernel void @flat_agent_one_as_acquire_seq_cst_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v3, s3
; GFX10-CU-NEXT: flat_atomic_cmpswap v[0:1], v[2:3]
; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: s_endpgm
;
; SKIP-CACHE-INV-LABEL: flat_agent_one_as_acquire_seq_cst_cmpxchg:
@@ -10265,8 +10265,8 @@ define amdgpu_kernel void @flat_agent_one_as_acquire_seq_cst_cmpxchg(
; GFX11-WGP-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-WGP-NEXT: flat_atomic_cmpswap_b32 v[0:1], v[2:3] offset:16
; GFX11-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: s_endpgm
;
; GFX11-CU-LABEL: flat_agent_one_as_acquire_seq_cst_cmpxchg:
@@ -10277,8 +10277,8 @@ define amdgpu_kernel void @flat_agent_one_as_acquire_seq_cst_cmpxchg(
; GFX11-CU-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-CU-NEXT: flat_atomic_cmpswap_b32 v[0:1], v[2:3] offset:16
; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: s_endpgm
ptr %out, i32 %in, i32 %old) {
entry:
@@ -10315,8 +10315,8 @@ define amdgpu_kernel void @flat_agent_one_as_release_seq_cst_cmpxchg(
; GFX10-WGP-NEXT: v_mov_b32_e32 v3, s3
; GFX10-WGP-NEXT: flat_atomic_cmpswap v[0:1], v[2:3]
; GFX10-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: s_endpgm
;
; GFX10-CU-LABEL: flat_agent_one_as_release_seq_cst_cmpxchg:
@@ -10331,8 +10331,8 @@ define amdgpu_kernel void @flat_agent_one_as_release_seq_cst_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v3, s3
; GFX10-CU-NEXT: flat_atomic_cmpswap v[0:1], v[2:3]
; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: s_endpgm
;
; SKIP-CACHE-INV-LABEL: flat_agent_one_as_release_seq_cst_cmpxchg:
@@ -10403,8 +10403,8 @@ define amdgpu_kernel void @flat_agent_one_as_release_seq_cst_cmpxchg(
; GFX11-WGP-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-WGP-NEXT: flat_atomic_cmpswap_b32 v[0:1], v[2:3] offset:16
; GFX11-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: s_endpgm
;
; GFX11-CU-LABEL: flat_agent_one_as_release_seq_cst_cmpxchg:
@@ -10415,8 +10415,8 @@ define amdgpu_kernel void @flat_agent_one_as_release_seq_cst_cmpxchg(
; GFX11-CU-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-CU-NEXT: flat_atomic_cmpswap_b32 v[0:1], v[2:3] offset:16
; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: s_endpgm
ptr %out, i32 %in, i32 %old) {
entry:
@@ -10453,8 +10453,8 @@ define amdgpu_kernel void @flat_agent_one_as_acq_rel_seq_cst_cmpxchg(
; GFX10-WGP-NEXT: v_mov_b32_e32 v3, s3
; GFX10-WGP-NEXT: flat_atomic_cmpswap v[0:1], v[2:3]
; GFX10-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: s_endpgm
;
; GFX10-CU-LABEL: flat_agent_one_as_acq_rel_seq_cst_cmpxchg:
@@ -10469,8 +10469,8 @@ define amdgpu_kernel void @flat_agent_one_as_acq_rel_seq_cst_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v3, s3
; GFX10-CU-NEXT: flat_atomic_cmpswap v[0:1], v[2:3]
; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: s_endpgm
;
; SKIP-CACHE-INV-LABEL: flat_agent_one_as_acq_rel_seq_cst_cmpxchg:
@@ -10541,8 +10541,8 @@ define amdgpu_kernel void @flat_agent_one_as_acq_rel_seq_cst_cmpxchg(
; GFX11-WGP-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-WGP-NEXT: flat_atomic_cmpswap_b32 v[0:1], v[2:3] offset:16
; GFX11-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: s_endpgm
;
; GFX11-CU-LABEL: flat_agent_one_as_acq_rel_seq_cst_cmpxchg:
@@ -10553,8 +10553,8 @@ define amdgpu_kernel void @flat_agent_one_as_acq_rel_seq_cst_cmpxchg(
; GFX11-CU-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-CU-NEXT: flat_atomic_cmpswap_b32 v[0:1], v[2:3] offset:16
; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: s_endpgm
ptr %out, i32 %in, i32 %old) {
entry:
@@ -10591,8 +10591,8 @@ define amdgpu_kernel void @flat_agent_one_as_seq_cst_seq_cst_cmpxchg(
; GFX10-WGP-NEXT: v_mov_b32_e32 v3, s3
; GFX10-WGP-NEXT: flat_atomic_cmpswap v[0:1], v[2:3]
; GFX10-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: s_endpgm
;
; GFX10-CU-LABEL: flat_agent_one_as_seq_cst_seq_cst_cmpxchg:
@@ -10607,8 +10607,8 @@ define amdgpu_kernel void @flat_agent_one_as_seq_cst_seq_cst_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v3, s3
; GFX10-CU-NEXT: flat_atomic_cmpswap v[0:1], v[2:3]
; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: s_endpgm
;
; SKIP-CACHE-INV-LABEL: flat_agent_one_as_seq_cst_seq_cst_cmpxchg:
@@ -10679,8 +10679,8 @@ define amdgpu_kernel void @flat_agent_one_as_seq_cst_seq_cst_cmpxchg(
; GFX11-WGP-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-WGP-NEXT: flat_atomic_cmpswap_b32 v[0:1], v[2:3] offset:16
; GFX11-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: s_endpgm
;
; GFX11-CU-LABEL: flat_agent_one_as_seq_cst_seq_cst_cmpxchg:
@@ -10691,8 +10691,8 @@ define amdgpu_kernel void @flat_agent_one_as_seq_cst_seq_cst_cmpxchg(
; GFX11-CU-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-CU-NEXT: flat_atomic_cmpswap_b32 v[0:1], v[2:3] offset:16
; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: s_endpgm
ptr %out, i32 %in, i32 %old) {
entry:
@@ -10876,8 +10876,8 @@ define amdgpu_kernel void @flat_agent_one_as_acquire_monotonic_ret_cmpxchg(
; GFX10-WGP-NEXT: v_mov_b32_e32 v3, s3
; GFX10-WGP-NEXT: flat_atomic_cmpswap v2, v[0:1], v[2:3] glc
; GFX10-WGP-NEXT: s_waitcnt vmcnt(0)
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: v_mov_b32_e32 v0, s0
; GFX10-WGP-NEXT: v_mov_b32_e32 v1, s1
; GFX10-WGP-NEXT: s_waitcnt lgkmcnt(0)
@@ -10896,8 +10896,8 @@ define amdgpu_kernel void @flat_agent_one_as_acquire_monotonic_ret_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v3, s3
; GFX10-CU-NEXT: flat_atomic_cmpswap v2, v[0:1], v[2:3] glc
; GFX10-CU-NEXT: s_waitcnt vmcnt(0)
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s1
; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
@@ -10980,8 +10980,8 @@ define amdgpu_kernel void @flat_agent_one_as_acquire_monotonic_ret_cmpxchg(
; GFX11-WGP-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-WGP-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] offset:16 glc
; GFX11-WGP-NEXT: s_waitcnt vmcnt(0)
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-WGP-NEXT: flat_store_b32 v[0:1], v2
; GFX11-WGP-NEXT: s_endpgm
@@ -10994,8 +10994,8 @@ define amdgpu_kernel void @flat_agent_one_as_acquire_monotonic_ret_cmpxchg(
; GFX11-CU-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-CU-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] offset:16 glc
; GFX11-CU-NEXT: s_waitcnt vmcnt(0)
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-CU-NEXT: flat_store_b32 v[0:1], v2
; GFX11-CU-NEXT: s_endpgm
@@ -11185,8 +11185,8 @@ define amdgpu_kernel void @flat_agent_one_as_acq_rel_monotonic_ret_cmpxchg(
; GFX10-WGP-NEXT: v_mov_b32_e32 v3, s3
; GFX10-WGP-NEXT: flat_atomic_cmpswap v2, v[0:1], v[2:3] glc
; GFX10-WGP-NEXT: s_waitcnt vmcnt(0)
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: v_mov_b32_e32 v0, s0
; GFX10-WGP-NEXT: v_mov_b32_e32 v1, s1
; GFX10-WGP-NEXT: s_waitcnt lgkmcnt(0)
@@ -11205,8 +11205,8 @@ define amdgpu_kernel void @flat_agent_one_as_acq_rel_monotonic_ret_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v3, s3
; GFX10-CU-NEXT: flat_atomic_cmpswap v2, v[0:1], v[2:3] glc
; GFX10-CU-NEXT: s_waitcnt vmcnt(0)
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s1
; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
@@ -11291,8 +11291,8 @@ define amdgpu_kernel void @flat_agent_one_as_acq_rel_monotonic_ret_cmpxchg(
; GFX11-WGP-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-WGP-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] offset:16 glc
; GFX11-WGP-NEXT: s_waitcnt vmcnt(0)
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-WGP-NEXT: flat_store_b32 v[0:1], v2
; GFX11-WGP-NEXT: s_endpgm
@@ -11305,8 +11305,8 @@ define amdgpu_kernel void @flat_agent_one_as_acq_rel_monotonic_ret_cmpxchg(
; GFX11-CU-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-CU-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] offset:16 glc
; GFX11-CU-NEXT: s_waitcnt vmcnt(0)
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-CU-NEXT: flat_store_b32 v[0:1], v2
; GFX11-CU-NEXT: s_endpgm
@@ -11351,8 +11351,8 @@ define amdgpu_kernel void @flat_agent_one_as_seq_cst_monotonic_ret_cmpxchg(
; GFX10-WGP-NEXT: v_mov_b32_e32 v3, s3
; GFX10-WGP-NEXT: flat_atomic_cmpswap v2, v[0:1], v[2:3] glc
; GFX10-WGP-NEXT: s_waitcnt vmcnt(0)
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: v_mov_b32_e32 v0, s0
; GFX10-WGP-NEXT: v_mov_b32_e32 v1, s1
; GFX10-WGP-NEXT: s_waitcnt lgkmcnt(0)
@@ -11371,8 +11371,8 @@ define amdgpu_kernel void @flat_agent_one_as_seq_cst_monotonic_ret_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v3, s3
; GFX10-CU-NEXT: flat_atomic_cmpswap v2, v[0:1], v[2:3] glc
; GFX10-CU-NEXT: s_waitcnt vmcnt(0)
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s1
; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
@@ -11457,8 +11457,8 @@ define amdgpu_kernel void @flat_agent_one_as_seq_cst_monotonic_ret_cmpxchg(
; GFX11-WGP-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-WGP-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] offset:16 glc
; GFX11-WGP-NEXT: s_waitcnt vmcnt(0)
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-WGP-NEXT: flat_store_b32 v[0:1], v2
; GFX11-WGP-NEXT: s_endpgm
@@ -11471,8 +11471,8 @@ define amdgpu_kernel void @flat_agent_one_as_seq_cst_monotonic_ret_cmpxchg(
; GFX11-CU-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-CU-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] offset:16 glc
; GFX11-CU-NEXT: s_waitcnt vmcnt(0)
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-CU-NEXT: flat_store_b32 v[0:1], v2
; GFX11-CU-NEXT: s_endpgm
@@ -11517,8 +11517,8 @@ define amdgpu_kernel void @flat_agent_one_as_monotonic_acquire_ret_cmpxchg(
; GFX10-WGP-NEXT: v_mov_b32_e32 v3, s3
; GFX10-WGP-NEXT: flat_atomic_cmpswap v2, v[0:1], v[2:3] glc
; GFX10-WGP-NEXT: s_waitcnt vmcnt(0)
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: v_mov_b32_e32 v0, s0
; GFX10-WGP-NEXT: v_mov_b32_e32 v1, s1
; GFX10-WGP-NEXT: s_waitcnt lgkmcnt(0)
@@ -11537,8 +11537,8 @@ define amdgpu_kernel void @flat_agent_one_as_monotonic_acquire_ret_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v3, s3
; GFX10-CU-NEXT: flat_atomic_cmpswap v2, v[0:1], v[2:3] glc
; GFX10-CU-NEXT: s_waitcnt vmcnt(0)
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s1
; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
@@ -11621,8 +11621,8 @@ define amdgpu_kernel void @flat_agent_one_as_monotonic_acquire_ret_cmpxchg(
; GFX11-WGP-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-WGP-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] offset:16 glc
; GFX11-WGP-NEXT: s_waitcnt vmcnt(0)
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-WGP-NEXT: flat_store_b32 v[0:1], v2
; GFX11-WGP-NEXT: s_endpgm
@@ -11635,8 +11635,8 @@ define amdgpu_kernel void @flat_agent_one_as_monotonic_acquire_ret_cmpxchg(
; GFX11-CU-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-CU-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] offset:16 glc
; GFX11-CU-NEXT: s_waitcnt vmcnt(0)
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-CU-NEXT: flat_store_b32 v[0:1], v2
; GFX11-CU-NEXT: s_endpgm
@@ -11681,8 +11681,8 @@ define amdgpu_kernel void @flat_agent_one_as_acquire_acquire_ret_cmpxchg(
; GFX10-WGP-NEXT: v_mov_b32_e32 v3, s3
; GFX10-WGP-NEXT: flat_atomic_cmpswap v2, v[0:1], v[2:3] glc
; GFX10-WGP-NEXT: s_waitcnt vmcnt(0)
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: v_mov_b32_e32 v0, s0
; GFX10-WGP-NEXT: v_mov_b32_e32 v1, s1
; GFX10-WGP-NEXT: s_waitcnt lgkmcnt(0)
@@ -11701,8 +11701,8 @@ define amdgpu_kernel void @flat_agent_one_as_acquire_acquire_ret_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v3, s3
; GFX10-CU-NEXT: flat_atomic_cmpswap v2, v[0:1], v[2:3] glc
; GFX10-CU-NEXT: s_waitcnt vmcnt(0)
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s1
; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
@@ -11785,8 +11785,8 @@ define amdgpu_kernel void @flat_agent_one_as_acquire_acquire_ret_cmpxchg(
; GFX11-WGP-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-WGP-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] offset:16 glc
; GFX11-WGP-NEXT: s_waitcnt vmcnt(0)
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-WGP-NEXT: flat_store_b32 v[0:1], v2
; GFX11-WGP-NEXT: s_endpgm
@@ -11799,8 +11799,8 @@ define amdgpu_kernel void @flat_agent_one_as_acquire_acquire_ret_cmpxchg(
; GFX11-CU-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-CU-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] offset:16 glc
; GFX11-CU-NEXT: s_waitcnt vmcnt(0)
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-CU-NEXT: flat_store_b32 v[0:1], v2
; GFX11-CU-NEXT: s_endpgm
@@ -11845,8 +11845,8 @@ define amdgpu_kernel void @flat_agent_one_as_release_acquire_ret_cmpxchg(
; GFX10-WGP-NEXT: v_mov_b32_e32 v3, s3
; GFX10-WGP-NEXT: flat_atomic_cmpswap v2, v[0:1], v[2:3] glc
; GFX10-WGP-NEXT: s_waitcnt vmcnt(0)
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: v_mov_b32_e32 v0, s0
; GFX10-WGP-NEXT: v_mov_b32_e32 v1, s1
; GFX10-WGP-NEXT: s_waitcnt lgkmcnt(0)
@@ -11865,8 +11865,8 @@ define amdgpu_kernel void @flat_agent_one_as_release_acquire_ret_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v3, s3
; GFX10-CU-NEXT: flat_atomic_cmpswap v2, v[0:1], v[2:3] glc
; GFX10-CU-NEXT: s_waitcnt vmcnt(0)
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s1
; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
@@ -11951,8 +11951,8 @@ define amdgpu_kernel void @flat_agent_one_as_release_acquire_ret_cmpxchg(
; GFX11-WGP-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-WGP-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] offset:16 glc
; GFX11-WGP-NEXT: s_waitcnt vmcnt(0)
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-WGP-NEXT: flat_store_b32 v[0:1], v2
; GFX11-WGP-NEXT: s_endpgm
@@ -11965,8 +11965,8 @@ define amdgpu_kernel void @flat_agent_one_as_release_acquire_ret_cmpxchg(
; GFX11-CU-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-CU-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] offset:16 glc
; GFX11-CU-NEXT: s_waitcnt vmcnt(0)
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-CU-NEXT: flat_store_b32 v[0:1], v2
; GFX11-CU-NEXT: s_endpgm
@@ -12011,8 +12011,8 @@ define amdgpu_kernel void @flat_agent_one_as_acq_rel_acquire_ret_cmpxchg(
; GFX10-WGP-NEXT: v_mov_b32_e32 v3, s3
; GFX10-WGP-NEXT: flat_atomic_cmpswap v2, v[0:1], v[2:3] glc
; GFX10-WGP-NEXT: s_waitcnt vmcnt(0)
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: v_mov_b32_e32 v0, s0
; GFX10-WGP-NEXT: v_mov_b32_e32 v1, s1
; GFX10-WGP-NEXT: s_waitcnt lgkmcnt(0)
@@ -12031,8 +12031,8 @@ define amdgpu_kernel void @flat_agent_one_as_acq_rel_acquire_ret_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v3, s3
; GFX10-CU-NEXT: flat_atomic_cmpswap v2, v[0:1], v[2:3] glc
; GFX10-CU-NEXT: s_waitcnt vmcnt(0)
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s1
; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
@@ -12117,8 +12117,8 @@ define amdgpu_kernel void @flat_agent_one_as_acq_rel_acquire_ret_cmpxchg(
; GFX11-WGP-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-WGP-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] offset:16 glc
; GFX11-WGP-NEXT: s_waitcnt vmcnt(0)
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-WGP-NEXT: flat_store_b32 v[0:1], v2
; GFX11-WGP-NEXT: s_endpgm
@@ -12131,8 +12131,8 @@ define amdgpu_kernel void @flat_agent_one_as_acq_rel_acquire_ret_cmpxchg(
; GFX11-CU-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-CU-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] offset:16 glc
; GFX11-CU-NEXT: s_waitcnt vmcnt(0)
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-CU-NEXT: flat_store_b32 v[0:1], v2
; GFX11-CU-NEXT: s_endpgm
@@ -12177,8 +12177,8 @@ define amdgpu_kernel void @flat_agent_one_as_seq_cst_acquire_ret_cmpxchg(
; GFX10-WGP-NEXT: v_mov_b32_e32 v3, s3
; GFX10-WGP-NEXT: flat_atomic_cmpswap v2, v[0:1], v[2:3] glc
; GFX10-WGP-NEXT: s_waitcnt vmcnt(0)
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: v_mov_b32_e32 v0, s0
; GFX10-WGP-NEXT: v_mov_b32_e32 v1, s1
; GFX10-WGP-NEXT: s_waitcnt lgkmcnt(0)
@@ -12197,8 +12197,8 @@ define amdgpu_kernel void @flat_agent_one_as_seq_cst_acquire_ret_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v3, s3
; GFX10-CU-NEXT: flat_atomic_cmpswap v2, v[0:1], v[2:3] glc
; GFX10-CU-NEXT: s_waitcnt vmcnt(0)
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s1
; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
@@ -12283,8 +12283,8 @@ define amdgpu_kernel void @flat_agent_one_as_seq_cst_acquire_ret_cmpxchg(
; GFX11-WGP-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-WGP-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] offset:16 glc
; GFX11-WGP-NEXT: s_waitcnt vmcnt(0)
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-WGP-NEXT: flat_store_b32 v[0:1], v2
; GFX11-WGP-NEXT: s_endpgm
@@ -12297,8 +12297,8 @@ define amdgpu_kernel void @flat_agent_one_as_seq_cst_acquire_ret_cmpxchg(
; GFX11-CU-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-CU-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] offset:16 glc
; GFX11-CU-NEXT: s_waitcnt vmcnt(0)
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-CU-NEXT: flat_store_b32 v[0:1], v2
; GFX11-CU-NEXT: s_endpgm
@@ -12343,8 +12343,8 @@ define amdgpu_kernel void @flat_agent_one_as_monotonic_seq_cst_ret_cmpxchg(
; GFX10-WGP-NEXT: v_mov_b32_e32 v3, s3
; GFX10-WGP-NEXT: flat_atomic_cmpswap v2, v[0:1], v[2:3] glc
; GFX10-WGP-NEXT: s_waitcnt vmcnt(0)
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: v_mov_b32_e32 v0, s0
; GFX10-WGP-NEXT: v_mov_b32_e32 v1, s1
; GFX10-WGP-NEXT: s_waitcnt lgkmcnt(0)
@@ -12363,8 +12363,8 @@ define amdgpu_kernel void @flat_agent_one_as_monotonic_seq_cst_ret_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v3, s3
; GFX10-CU-NEXT: flat_atomic_cmpswap v2, v[0:1], v[2:3] glc
; GFX10-CU-NEXT: s_waitcnt vmcnt(0)
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s1
; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
@@ -12449,8 +12449,8 @@ define amdgpu_kernel void @flat_agent_one_as_monotonic_seq_cst_ret_cmpxchg(
; GFX11-WGP-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-WGP-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] offset:16 glc
; GFX11-WGP-NEXT: s_waitcnt vmcnt(0)
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-WGP-NEXT: flat_store_b32 v[0:1], v2
; GFX11-WGP-NEXT: s_endpgm
@@ -12463,8 +12463,8 @@ define amdgpu_kernel void @flat_agent_one_as_monotonic_seq_cst_ret_cmpxchg(
; GFX11-CU-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-CU-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] offset:16 glc
; GFX11-CU-NEXT: s_waitcnt vmcnt(0)
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-CU-NEXT: flat_store_b32 v[0:1], v2
; GFX11-CU-NEXT: s_endpgm
@@ -12509,8 +12509,8 @@ define amdgpu_kernel void @flat_agent_one_as_acquire_seq_cst_ret_cmpxchg(
; GFX10-WGP-NEXT: v_mov_b32_e32 v3, s3
; GFX10-WGP-NEXT: flat_atomic_cmpswap v2, v[0:1], v[2:3] glc
; GFX10-WGP-NEXT: s_waitcnt vmcnt(0)
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: v_mov_b32_e32 v0, s0
; GFX10-WGP-NEXT: v_mov_b32_e32 v1, s1
; GFX10-WGP-NEXT: s_waitcnt lgkmcnt(0)
@@ -12529,8 +12529,8 @@ define amdgpu_kernel void @flat_agent_one_as_acquire_seq_cst_ret_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v3, s3
; GFX10-CU-NEXT: flat_atomic_cmpswap v2, v[0:1], v[2:3] glc
; GFX10-CU-NEXT: s_waitcnt vmcnt(0)
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s1
; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
@@ -12615,8 +12615,8 @@ define amdgpu_kernel void @flat_agent_one_as_acquire_seq_cst_ret_cmpxchg(
; GFX11-WGP-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-WGP-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] offset:16 glc
; GFX11-WGP-NEXT: s_waitcnt vmcnt(0)
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-WGP-NEXT: flat_store_b32 v[0:1], v2
; GFX11-WGP-NEXT: s_endpgm
@@ -12629,8 +12629,8 @@ define amdgpu_kernel void @flat_agent_one_as_acquire_seq_cst_ret_cmpxchg(
; GFX11-CU-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-CU-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] offset:16 glc
; GFX11-CU-NEXT: s_waitcnt vmcnt(0)
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-CU-NEXT: flat_store_b32 v[0:1], v2
; GFX11-CU-NEXT: s_endpgm
@@ -12675,8 +12675,8 @@ define amdgpu_kernel void @flat_agent_one_as_release_seq_cst_ret_cmpxchg(
; GFX10-WGP-NEXT: v_mov_b32_e32 v3, s3
; GFX10-WGP-NEXT: flat_atomic_cmpswap v2, v[0:1], v[2:3] glc
; GFX10-WGP-NEXT: s_waitcnt vmcnt(0)
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: v_mov_b32_e32 v0, s0
; GFX10-WGP-NEXT: v_mov_b32_e32 v1, s1
; GFX10-WGP-NEXT: s_waitcnt lgkmcnt(0)
@@ -12695,8 +12695,8 @@ define amdgpu_kernel void @flat_agent_one_as_release_seq_cst_ret_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v3, s3
; GFX10-CU-NEXT: flat_atomic_cmpswap v2, v[0:1], v[2:3] glc
; GFX10-CU-NEXT: s_waitcnt vmcnt(0)
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s1
; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
@@ -12781,8 +12781,8 @@ define amdgpu_kernel void @flat_agent_one_as_release_seq_cst_ret_cmpxchg(
; GFX11-WGP-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-WGP-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] offset:16 glc
; GFX11-WGP-NEXT: s_waitcnt vmcnt(0)
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-WGP-NEXT: flat_store_b32 v[0:1], v2
; GFX11-WGP-NEXT: s_endpgm
@@ -12795,8 +12795,8 @@ define amdgpu_kernel void @flat_agent_one_as_release_seq_cst_ret_cmpxchg(
; GFX11-CU-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-CU-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] offset:16 glc
; GFX11-CU-NEXT: s_waitcnt vmcnt(0)
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-CU-NEXT: flat_store_b32 v[0:1], v2
; GFX11-CU-NEXT: s_endpgm
@@ -12841,8 +12841,8 @@ define amdgpu_kernel void @flat_agent_one_as_acq_rel_seq_cst_ret_cmpxchg(
; GFX10-WGP-NEXT: v_mov_b32_e32 v3, s3
; GFX10-WGP-NEXT: flat_atomic_cmpswap v2, v[0:1], v[2:3] glc
; GFX10-WGP-NEXT: s_waitcnt vmcnt(0)
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: v_mov_b32_e32 v0, s0
; GFX10-WGP-NEXT: v_mov_b32_e32 v1, s1
; GFX10-WGP-NEXT: s_waitcnt lgkmcnt(0)
@@ -12861,8 +12861,8 @@ define amdgpu_kernel void @flat_agent_one_as_acq_rel_seq_cst_ret_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v3, s3
; GFX10-CU-NEXT: flat_atomic_cmpswap v2, v[0:1], v[2:3] glc
; GFX10-CU-NEXT: s_waitcnt vmcnt(0)
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s1
; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
@@ -12947,8 +12947,8 @@ define amdgpu_kernel void @flat_agent_one_as_acq_rel_seq_cst_ret_cmpxchg(
; GFX11-WGP-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-WGP-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] offset:16 glc
; GFX11-WGP-NEXT: s_waitcnt vmcnt(0)
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-WGP-NEXT: flat_store_b32 v[0:1], v2
; GFX11-WGP-NEXT: s_endpgm
@@ -12961,8 +12961,8 @@ define amdgpu_kernel void @flat_agent_one_as_acq_rel_seq_cst_ret_cmpxchg(
; GFX11-CU-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-CU-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] offset:16 glc
; GFX11-CU-NEXT: s_waitcnt vmcnt(0)
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-CU-NEXT: flat_store_b32 v[0:1], v2
; GFX11-CU-NEXT: s_endpgm
@@ -13007,8 +13007,8 @@ define amdgpu_kernel void @flat_agent_one_as_seq_cst_seq_cst_ret_cmpxchg(
; GFX10-WGP-NEXT: v_mov_b32_e32 v3, s3
; GFX10-WGP-NEXT: flat_atomic_cmpswap v2, v[0:1], v[2:3] glc
; GFX10-WGP-NEXT: s_waitcnt vmcnt(0)
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: v_mov_b32_e32 v0, s0
; GFX10-WGP-NEXT: v_mov_b32_e32 v1, s1
; GFX10-WGP-NEXT: s_waitcnt lgkmcnt(0)
@@ -13027,8 +13027,8 @@ define amdgpu_kernel void @flat_agent_one_as_seq_cst_seq_cst_ret_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v3, s3
; GFX10-CU-NEXT: flat_atomic_cmpswap v2, v[0:1], v[2:3] glc
; GFX10-CU-NEXT: s_waitcnt vmcnt(0)
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s1
; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
@@ -13113,8 +13113,8 @@ define amdgpu_kernel void @flat_agent_one_as_seq_cst_seq_cst_ret_cmpxchg(
; GFX11-WGP-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-WGP-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] offset:16 glc
; GFX11-WGP-NEXT: s_waitcnt vmcnt(0)
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-WGP-NEXT: flat_store_b32 v[0:1], v2
; GFX11-WGP-NEXT: s_endpgm
@@ -13127,8 +13127,8 @@ define amdgpu_kernel void @flat_agent_one_as_seq_cst_seq_cst_ret_cmpxchg(
; GFX11-CU-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-CU-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] offset:16 glc
; GFX11-CU-NEXT: s_waitcnt vmcnt(0)
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-CU-NEXT: flat_store_b32 v[0:1], v2
; GFX11-CU-NEXT: s_endpgm
diff --git a/llvm/test/CodeGen/AMDGPU/memory-legalizer-flat-system.ll b/llvm/test/CodeGen/AMDGPU/memory-legalizer-flat-system.ll
index 461fe8b1..372f9ad 100644
--- a/llvm/test/CodeGen/AMDGPU/memory-legalizer-flat-system.ll
+++ b/llvm/test/CodeGen/AMDGPU/memory-legalizer-flat-system.ll
@@ -299,8 +299,8 @@ define amdgpu_kernel void @flat_system_acquire_load(
; GFX10-WGP-NEXT: v_mov_b32_e32 v1, s1
; GFX10-WGP-NEXT: flat_load_dword v2, v[0:1] glc dlc
; GFX10-WGP-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: v_mov_b32_e32 v0, s2
; GFX10-WGP-NEXT: v_mov_b32_e32 v1, s3
; GFX10-WGP-NEXT: flat_store_dword v[0:1], v2
@@ -314,8 +314,8 @@ define amdgpu_kernel void @flat_system_acquire_load(
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s1
; GFX10-CU-NEXT: flat_load_dword v2, v[0:1] glc dlc
; GFX10-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s2
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s3
; GFX10-CU-NEXT: flat_store_dword v[0:1], v2
@@ -399,8 +399,8 @@ define amdgpu_kernel void @flat_system_acquire_load(
; GFX11-WGP-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-WGP-NEXT: flat_load_b32 v2, v[0:1] glc
; GFX11-WGP-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
; GFX11-WGP-NEXT: flat_store_b32 v[0:1], v2
; GFX11-WGP-NEXT: s_endpgm
@@ -412,8 +412,8 @@ define amdgpu_kernel void @flat_system_acquire_load(
; GFX11-CU-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-CU-NEXT: flat_load_b32 v2, v[0:1] glc
; GFX11-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
; GFX11-CU-NEXT: flat_store_b32 v[0:1], v2
; GFX11-CU-NEXT: s_endpgm
@@ -447,8 +447,8 @@ define amdgpu_kernel void @flat_system_seq_cst_load(
; GFX10-WGP-NEXT: v_mov_b32_e32 v1, s1
; GFX10-WGP-NEXT: flat_load_dword v2, v[0:1] glc dlc
; GFX10-WGP-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: v_mov_b32_e32 v0, s2
; GFX10-WGP-NEXT: v_mov_b32_e32 v1, s3
; GFX10-WGP-NEXT: flat_store_dword v[0:1], v2
@@ -462,8 +462,8 @@ define amdgpu_kernel void @flat_system_seq_cst_load(
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s1
; GFX10-CU-NEXT: flat_load_dword v2, v[0:1] glc dlc
; GFX10-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s2
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s3
; GFX10-CU-NEXT: flat_store_dword v[0:1], v2
@@ -547,8 +547,8 @@ define amdgpu_kernel void @flat_system_seq_cst_load(
; GFX11-WGP-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-WGP-NEXT: flat_load_b32 v2, v[0:1] glc
; GFX11-WGP-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
; GFX11-WGP-NEXT: flat_store_b32 v[0:1], v2
; GFX11-WGP-NEXT: s_endpgm
@@ -560,8 +560,8 @@ define amdgpu_kernel void @flat_system_seq_cst_load(
; GFX11-CU-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-CU-NEXT: flat_load_b32 v2, v[0:1] glc
; GFX11-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
; GFX11-CU-NEXT: flat_store_b32 v[0:1], v2
; GFX11-CU-NEXT: s_endpgm
@@ -1176,8 +1176,8 @@ define amdgpu_kernel void @flat_system_acquire_atomicrmw(
; GFX10-WGP-NEXT: flat_atomic_swap v[0:1], v2
; GFX10-WGP-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: s_endpgm
;
; GFX10-CU-LABEL: flat_system_acquire_atomicrmw:
@@ -1192,8 +1192,8 @@ define amdgpu_kernel void @flat_system_acquire_atomicrmw(
; GFX10-CU-NEXT: flat_atomic_swap v[0:1], v2
; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: s_endpgm
;
; SKIP-CACHE-INV-LABEL: flat_system_acquire_atomicrmw:
@@ -1269,8 +1269,8 @@ define amdgpu_kernel void @flat_system_acquire_atomicrmw(
; GFX11-WGP-NEXT: flat_atomic_swap_b32 v[0:1], v2
; GFX11-WGP-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: s_endpgm
;
; GFX11-CU-LABEL: flat_system_acquire_atomicrmw:
@@ -1284,8 +1284,8 @@ define amdgpu_kernel void @flat_system_acquire_atomicrmw(
; GFX11-CU-NEXT: flat_atomic_swap_b32 v[0:1], v2
; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: s_endpgm
ptr %out, i32 %in) {
entry:
@@ -1437,8 +1437,8 @@ define amdgpu_kernel void @flat_system_acq_rel_atomicrmw(
; GFX10-WGP-NEXT: flat_atomic_swap v[0:1], v2
; GFX10-WGP-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: s_endpgm
;
; GFX10-CU-LABEL: flat_system_acq_rel_atomicrmw:
@@ -1453,8 +1453,8 @@ define amdgpu_kernel void @flat_system_acq_rel_atomicrmw(
; GFX10-CU-NEXT: flat_atomic_swap v[0:1], v2
; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: s_endpgm
;
; SKIP-CACHE-INV-LABEL: flat_system_acq_rel_atomicrmw:
@@ -1534,8 +1534,8 @@ define amdgpu_kernel void @flat_system_acq_rel_atomicrmw(
; GFX11-WGP-NEXT: flat_atomic_swap_b32 v[0:1], v2
; GFX11-WGP-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: s_endpgm
;
; GFX11-CU-LABEL: flat_system_acq_rel_atomicrmw:
@@ -1549,8 +1549,8 @@ define amdgpu_kernel void @flat_system_acq_rel_atomicrmw(
; GFX11-CU-NEXT: flat_atomic_swap_b32 v[0:1], v2
; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: s_endpgm
ptr %out, i32 %in) {
entry:
@@ -1584,8 +1584,8 @@ define amdgpu_kernel void @flat_system_seq_cst_atomicrmw(
; GFX10-WGP-NEXT: flat_atomic_swap v[0:1], v2
; GFX10-WGP-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: s_endpgm
;
; GFX10-CU-LABEL: flat_system_seq_cst_atomicrmw:
@@ -1600,8 +1600,8 @@ define amdgpu_kernel void @flat_system_seq_cst_atomicrmw(
; GFX10-CU-NEXT: flat_atomic_swap v[0:1], v2
; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: s_endpgm
;
; SKIP-CACHE-INV-LABEL: flat_system_seq_cst_atomicrmw:
@@ -1681,8 +1681,8 @@ define amdgpu_kernel void @flat_system_seq_cst_atomicrmw(
; GFX11-WGP-NEXT: flat_atomic_swap_b32 v[0:1], v2
; GFX11-WGP-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: s_endpgm
;
; GFX11-CU-LABEL: flat_system_seq_cst_atomicrmw:
@@ -1696,8 +1696,8 @@ define amdgpu_kernel void @flat_system_seq_cst_atomicrmw(
; GFX11-CU-NEXT: flat_atomic_swap_b32 v[0:1], v2
; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: s_endpgm
ptr %out, i32 %in) {
entry:
@@ -1731,8 +1731,8 @@ define amdgpu_kernel void @flat_system_acquire_ret_atomicrmw(
; GFX10-WGP-NEXT: v_mov_b32_e32 v2, s2
; GFX10-WGP-NEXT: flat_atomic_swap v2, v[0:1], v2 glc
; GFX10-WGP-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: flat_store_dword v[0:1], v2
; GFX10-WGP-NEXT: s_endpgm
;
@@ -1747,8 +1747,8 @@ define amdgpu_kernel void @flat_system_acquire_ret_atomicrmw(
; GFX10-CU-NEXT: v_mov_b32_e32 v2, s2
; GFX10-CU-NEXT: flat_atomic_swap v2, v[0:1], v2 glc
; GFX10-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: flat_store_dword v[0:1], v2
; GFX10-CU-NEXT: s_endpgm
;
@@ -1829,8 +1829,8 @@ define amdgpu_kernel void @flat_system_acquire_ret_atomicrmw(
; GFX11-WGP-NEXT: v_mov_b32_e32 v2, s0
; GFX11-WGP-NEXT: flat_atomic_swap_b32 v2, v[0:1], v2 glc
; GFX11-WGP-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: flat_store_b32 v[0:1], v2
; GFX11-WGP-NEXT: s_endpgm
;
@@ -1844,8 +1844,8 @@ define amdgpu_kernel void @flat_system_acquire_ret_atomicrmw(
; GFX11-CU-NEXT: v_mov_b32_e32 v2, s0
; GFX11-CU-NEXT: flat_atomic_swap_b32 v2, v[0:1], v2 glc
; GFX11-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: flat_store_b32 v[0:1], v2
; GFX11-CU-NEXT: s_endpgm
ptr %out, i32 %in) {
@@ -1881,8 +1881,8 @@ define amdgpu_kernel void @flat_system_acq_rel_ret_atomicrmw(
; GFX10-WGP-NEXT: v_mov_b32_e32 v2, s2
; GFX10-WGP-NEXT: flat_atomic_swap v2, v[0:1], v2 glc
; GFX10-WGP-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: flat_store_dword v[0:1], v2
; GFX10-WGP-NEXT: s_endpgm
;
@@ -1897,8 +1897,8 @@ define amdgpu_kernel void @flat_system_acq_rel_ret_atomicrmw(
; GFX10-CU-NEXT: v_mov_b32_e32 v2, s2
; GFX10-CU-NEXT: flat_atomic_swap v2, v[0:1], v2 glc
; GFX10-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: flat_store_dword v[0:1], v2
; GFX10-CU-NEXT: s_endpgm
;
@@ -1983,8 +1983,8 @@ define amdgpu_kernel void @flat_system_acq_rel_ret_atomicrmw(
; GFX11-WGP-NEXT: v_mov_b32_e32 v2, s0
; GFX11-WGP-NEXT: flat_atomic_swap_b32 v2, v[0:1], v2 glc
; GFX11-WGP-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: flat_store_b32 v[0:1], v2
; GFX11-WGP-NEXT: s_endpgm
;
@@ -1998,8 +1998,8 @@ define amdgpu_kernel void @flat_system_acq_rel_ret_atomicrmw(
; GFX11-CU-NEXT: v_mov_b32_e32 v2, s0
; GFX11-CU-NEXT: flat_atomic_swap_b32 v2, v[0:1], v2 glc
; GFX11-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: flat_store_b32 v[0:1], v2
; GFX11-CU-NEXT: s_endpgm
ptr %out, i32 %in) {
@@ -2035,8 +2035,8 @@ define amdgpu_kernel void @flat_system_seq_cst_ret_atomicrmw(
; GFX10-WGP-NEXT: v_mov_b32_e32 v2, s2
; GFX10-WGP-NEXT: flat_atomic_swap v2, v[0:1], v2 glc
; GFX10-WGP-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: flat_store_dword v[0:1], v2
; GFX10-WGP-NEXT: s_endpgm
;
@@ -2051,8 +2051,8 @@ define amdgpu_kernel void @flat_system_seq_cst_ret_atomicrmw(
; GFX10-CU-NEXT: v_mov_b32_e32 v2, s2
; GFX10-CU-NEXT: flat_atomic_swap v2, v[0:1], v2 glc
; GFX10-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: flat_store_dword v[0:1], v2
; GFX10-CU-NEXT: s_endpgm
;
@@ -2137,8 +2137,8 @@ define amdgpu_kernel void @flat_system_seq_cst_ret_atomicrmw(
; GFX11-WGP-NEXT: v_mov_b32_e32 v2, s0
; GFX11-WGP-NEXT: flat_atomic_swap_b32 v2, v[0:1], v2 glc
; GFX11-WGP-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: flat_store_b32 v[0:1], v2
; GFX11-WGP-NEXT: s_endpgm
;
@@ -2152,8 +2152,8 @@ define amdgpu_kernel void @flat_system_seq_cst_ret_atomicrmw(
; GFX11-CU-NEXT: v_mov_b32_e32 v2, s0
; GFX11-CU-NEXT: flat_atomic_swap_b32 v2, v[0:1], v2 glc
; GFX11-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: flat_store_b32 v[0:1], v2
; GFX11-CU-NEXT: s_endpgm
ptr %out, i32 %in) {
@@ -2305,8 +2305,8 @@ define amdgpu_kernel void @flat_system_acquire_monotonic_cmpxchg(
; GFX10-WGP-NEXT: flat_atomic_cmpswap v[0:1], v[2:3]
; GFX10-WGP-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: s_endpgm
;
; GFX10-CU-LABEL: flat_system_acquire_monotonic_cmpxchg:
@@ -2322,8 +2322,8 @@ define amdgpu_kernel void @flat_system_acquire_monotonic_cmpxchg(
; GFX10-CU-NEXT: flat_atomic_cmpswap v[0:1], v[2:3]
; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: s_endpgm
;
; SKIP-CACHE-INV-LABEL: flat_system_acquire_monotonic_cmpxchg:
@@ -2395,8 +2395,8 @@ define amdgpu_kernel void @flat_system_acquire_monotonic_cmpxchg(
; GFX11-WGP-NEXT: flat_atomic_cmpswap_b32 v[0:1], v[2:3] offset:16
; GFX11-WGP-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: s_endpgm
;
; GFX11-CU-LABEL: flat_system_acquire_monotonic_cmpxchg:
@@ -2408,8 +2408,8 @@ define amdgpu_kernel void @flat_system_acquire_monotonic_cmpxchg(
; GFX11-CU-NEXT: flat_atomic_cmpswap_b32 v[0:1], v[2:3] offset:16
; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: s_endpgm
ptr %out, i32 %in, i32 %old) {
entry:
@@ -2564,8 +2564,8 @@ define amdgpu_kernel void @flat_system_acq_rel_monotonic_cmpxchg(
; GFX10-WGP-NEXT: flat_atomic_cmpswap v[0:1], v[2:3]
; GFX10-WGP-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: s_endpgm
;
; GFX10-CU-LABEL: flat_system_acq_rel_monotonic_cmpxchg:
@@ -2581,8 +2581,8 @@ define amdgpu_kernel void @flat_system_acq_rel_monotonic_cmpxchg(
; GFX10-CU-NEXT: flat_atomic_cmpswap v[0:1], v[2:3]
; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: s_endpgm
;
; SKIP-CACHE-INV-LABEL: flat_system_acq_rel_monotonic_cmpxchg:
@@ -2658,8 +2658,8 @@ define amdgpu_kernel void @flat_system_acq_rel_monotonic_cmpxchg(
; GFX11-WGP-NEXT: flat_atomic_cmpswap_b32 v[0:1], v[2:3] offset:16
; GFX11-WGP-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: s_endpgm
;
; GFX11-CU-LABEL: flat_system_acq_rel_monotonic_cmpxchg:
@@ -2671,8 +2671,8 @@ define amdgpu_kernel void @flat_system_acq_rel_monotonic_cmpxchg(
; GFX11-CU-NEXT: flat_atomic_cmpswap_b32 v[0:1], v[2:3] offset:16
; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: s_endpgm
ptr %out, i32 %in, i32 %old) {
entry:
@@ -2710,8 +2710,8 @@ define amdgpu_kernel void @flat_system_seq_cst_monotonic_cmpxchg(
; GFX10-WGP-NEXT: flat_atomic_cmpswap v[0:1], v[2:3]
; GFX10-WGP-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: s_endpgm
;
; GFX10-CU-LABEL: flat_system_seq_cst_monotonic_cmpxchg:
@@ -2727,8 +2727,8 @@ define amdgpu_kernel void @flat_system_seq_cst_monotonic_cmpxchg(
; GFX10-CU-NEXT: flat_atomic_cmpswap v[0:1], v[2:3]
; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: s_endpgm
;
; SKIP-CACHE-INV-LABEL: flat_system_seq_cst_monotonic_cmpxchg:
@@ -2804,8 +2804,8 @@ define amdgpu_kernel void @flat_system_seq_cst_monotonic_cmpxchg(
; GFX11-WGP-NEXT: flat_atomic_cmpswap_b32 v[0:1], v[2:3] offset:16
; GFX11-WGP-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: s_endpgm
;
; GFX11-CU-LABEL: flat_system_seq_cst_monotonic_cmpxchg:
@@ -2817,8 +2817,8 @@ define amdgpu_kernel void @flat_system_seq_cst_monotonic_cmpxchg(
; GFX11-CU-NEXT: flat_atomic_cmpswap_b32 v[0:1], v[2:3] offset:16
; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: s_endpgm
ptr %out, i32 %in, i32 %old) {
entry:
@@ -2856,8 +2856,8 @@ define amdgpu_kernel void @flat_system_monotonic_acquire_cmpxchg(
; GFX10-WGP-NEXT: flat_atomic_cmpswap v[0:1], v[2:3]
; GFX10-WGP-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: s_endpgm
;
; GFX10-CU-LABEL: flat_system_monotonic_acquire_cmpxchg:
@@ -2873,8 +2873,8 @@ define amdgpu_kernel void @flat_system_monotonic_acquire_cmpxchg(
; GFX10-CU-NEXT: flat_atomic_cmpswap v[0:1], v[2:3]
; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: s_endpgm
;
; SKIP-CACHE-INV-LABEL: flat_system_monotonic_acquire_cmpxchg:
@@ -2946,8 +2946,8 @@ define amdgpu_kernel void @flat_system_monotonic_acquire_cmpxchg(
; GFX11-WGP-NEXT: flat_atomic_cmpswap_b32 v[0:1], v[2:3] offset:16
; GFX11-WGP-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: s_endpgm
;
; GFX11-CU-LABEL: flat_system_monotonic_acquire_cmpxchg:
@@ -2959,8 +2959,8 @@ define amdgpu_kernel void @flat_system_monotonic_acquire_cmpxchg(
; GFX11-CU-NEXT: flat_atomic_cmpswap_b32 v[0:1], v[2:3] offset:16
; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: s_endpgm
ptr %out, i32 %in, i32 %old) {
entry:
@@ -2998,8 +2998,8 @@ define amdgpu_kernel void @flat_system_acquire_acquire_cmpxchg(
; GFX10-WGP-NEXT: flat_atomic_cmpswap v[0:1], v[2:3]
; GFX10-WGP-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: s_endpgm
;
; GFX10-CU-LABEL: flat_system_acquire_acquire_cmpxchg:
@@ -3015,8 +3015,8 @@ define amdgpu_kernel void @flat_system_acquire_acquire_cmpxchg(
; GFX10-CU-NEXT: flat_atomic_cmpswap v[0:1], v[2:3]
; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: s_endpgm
;
; SKIP-CACHE-INV-LABEL: flat_system_acquire_acquire_cmpxchg:
@@ -3088,8 +3088,8 @@ define amdgpu_kernel void @flat_system_acquire_acquire_cmpxchg(
; GFX11-WGP-NEXT: flat_atomic_cmpswap_b32 v[0:1], v[2:3] offset:16
; GFX11-WGP-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: s_endpgm
;
; GFX11-CU-LABEL: flat_system_acquire_acquire_cmpxchg:
@@ -3101,8 +3101,8 @@ define amdgpu_kernel void @flat_system_acquire_acquire_cmpxchg(
; GFX11-CU-NEXT: flat_atomic_cmpswap_b32 v[0:1], v[2:3] offset:16
; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: s_endpgm
ptr %out, i32 %in, i32 %old) {
entry:
@@ -3140,8 +3140,8 @@ define amdgpu_kernel void @flat_system_release_acquire_cmpxchg(
; GFX10-WGP-NEXT: flat_atomic_cmpswap v[0:1], v[2:3]
; GFX10-WGP-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: s_endpgm
;
; GFX10-CU-LABEL: flat_system_release_acquire_cmpxchg:
@@ -3157,8 +3157,8 @@ define amdgpu_kernel void @flat_system_release_acquire_cmpxchg(
; GFX10-CU-NEXT: flat_atomic_cmpswap v[0:1], v[2:3]
; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: s_endpgm
;
; SKIP-CACHE-INV-LABEL: flat_system_release_acquire_cmpxchg:
@@ -3234,8 +3234,8 @@ define amdgpu_kernel void @flat_system_release_acquire_cmpxchg(
; GFX11-WGP-NEXT: flat_atomic_cmpswap_b32 v[0:1], v[2:3] offset:16
; GFX11-WGP-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: s_endpgm
;
; GFX11-CU-LABEL: flat_system_release_acquire_cmpxchg:
@@ -3247,8 +3247,8 @@ define amdgpu_kernel void @flat_system_release_acquire_cmpxchg(
; GFX11-CU-NEXT: flat_atomic_cmpswap_b32 v[0:1], v[2:3] offset:16
; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: s_endpgm
ptr %out, i32 %in, i32 %old) {
entry:
@@ -3286,8 +3286,8 @@ define amdgpu_kernel void @flat_system_acq_rel_acquire_cmpxchg(
; GFX10-WGP-NEXT: flat_atomic_cmpswap v[0:1], v[2:3]
; GFX10-WGP-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: s_endpgm
;
; GFX10-CU-LABEL: flat_system_acq_rel_acquire_cmpxchg:
@@ -3303,8 +3303,8 @@ define amdgpu_kernel void @flat_system_acq_rel_acquire_cmpxchg(
; GFX10-CU-NEXT: flat_atomic_cmpswap v[0:1], v[2:3]
; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: s_endpgm
;
; SKIP-CACHE-INV-LABEL: flat_system_acq_rel_acquire_cmpxchg:
@@ -3380,8 +3380,8 @@ define amdgpu_kernel void @flat_system_acq_rel_acquire_cmpxchg(
; GFX11-WGP-NEXT: flat_atomic_cmpswap_b32 v[0:1], v[2:3] offset:16
; GFX11-WGP-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: s_endpgm
;
; GFX11-CU-LABEL: flat_system_acq_rel_acquire_cmpxchg:
@@ -3393,8 +3393,8 @@ define amdgpu_kernel void @flat_system_acq_rel_acquire_cmpxchg(
; GFX11-CU-NEXT: flat_atomic_cmpswap_b32 v[0:1], v[2:3] offset:16
; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: s_endpgm
ptr %out, i32 %in, i32 %old) {
entry:
@@ -3432,8 +3432,8 @@ define amdgpu_kernel void @flat_system_seq_cst_acquire_cmpxchg(
; GFX10-WGP-NEXT: flat_atomic_cmpswap v[0:1], v[2:3]
; GFX10-WGP-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: s_endpgm
;
; GFX10-CU-LABEL: flat_system_seq_cst_acquire_cmpxchg:
@@ -3449,8 +3449,8 @@ define amdgpu_kernel void @flat_system_seq_cst_acquire_cmpxchg(
; GFX10-CU-NEXT: flat_atomic_cmpswap v[0:1], v[2:3]
; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: s_endpgm
;
; SKIP-CACHE-INV-LABEL: flat_system_seq_cst_acquire_cmpxchg:
@@ -3526,8 +3526,8 @@ define amdgpu_kernel void @flat_system_seq_cst_acquire_cmpxchg(
; GFX11-WGP-NEXT: flat_atomic_cmpswap_b32 v[0:1], v[2:3] offset:16
; GFX11-WGP-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: s_endpgm
;
; GFX11-CU-LABEL: flat_system_seq_cst_acquire_cmpxchg:
@@ -3539,8 +3539,8 @@ define amdgpu_kernel void @flat_system_seq_cst_acquire_cmpxchg(
; GFX11-CU-NEXT: flat_atomic_cmpswap_b32 v[0:1], v[2:3] offset:16
; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: s_endpgm
ptr %out, i32 %in, i32 %old) {
entry:
@@ -3578,8 +3578,8 @@ define amdgpu_kernel void @flat_system_monotonic_seq_cst_cmpxchg(
; GFX10-WGP-NEXT: flat_atomic_cmpswap v[0:1], v[2:3]
; GFX10-WGP-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: s_endpgm
;
; GFX10-CU-LABEL: flat_system_monotonic_seq_cst_cmpxchg:
@@ -3595,8 +3595,8 @@ define amdgpu_kernel void @flat_system_monotonic_seq_cst_cmpxchg(
; GFX10-CU-NEXT: flat_atomic_cmpswap v[0:1], v[2:3]
; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: s_endpgm
;
; SKIP-CACHE-INV-LABEL: flat_system_monotonic_seq_cst_cmpxchg:
@@ -3672,8 +3672,8 @@ define amdgpu_kernel void @flat_system_monotonic_seq_cst_cmpxchg(
; GFX11-WGP-NEXT: flat_atomic_cmpswap_b32 v[0:1], v[2:3] offset:16
; GFX11-WGP-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: s_endpgm
;
; GFX11-CU-LABEL: flat_system_monotonic_seq_cst_cmpxchg:
@@ -3685,8 +3685,8 @@ define amdgpu_kernel void @flat_system_monotonic_seq_cst_cmpxchg(
; GFX11-CU-NEXT: flat_atomic_cmpswap_b32 v[0:1], v[2:3] offset:16
; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: s_endpgm
ptr %out, i32 %in, i32 %old) {
entry:
@@ -3724,8 +3724,8 @@ define amdgpu_kernel void @flat_system_acquire_seq_cst_cmpxchg(
; GFX10-WGP-NEXT: flat_atomic_cmpswap v[0:1], v[2:3]
; GFX10-WGP-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: s_endpgm
;
; GFX10-CU-LABEL: flat_system_acquire_seq_cst_cmpxchg:
@@ -3741,8 +3741,8 @@ define amdgpu_kernel void @flat_system_acquire_seq_cst_cmpxchg(
; GFX10-CU-NEXT: flat_atomic_cmpswap v[0:1], v[2:3]
; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: s_endpgm
;
; SKIP-CACHE-INV-LABEL: flat_system_acquire_seq_cst_cmpxchg:
@@ -3818,8 +3818,8 @@ define amdgpu_kernel void @flat_system_acquire_seq_cst_cmpxchg(
; GFX11-WGP-NEXT: flat_atomic_cmpswap_b32 v[0:1], v[2:3] offset:16
; GFX11-WGP-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: s_endpgm
;
; GFX11-CU-LABEL: flat_system_acquire_seq_cst_cmpxchg:
@@ -3831,8 +3831,8 @@ define amdgpu_kernel void @flat_system_acquire_seq_cst_cmpxchg(
; GFX11-CU-NEXT: flat_atomic_cmpswap_b32 v[0:1], v[2:3] offset:16
; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: s_endpgm
ptr %out, i32 %in, i32 %old) {
entry:
@@ -3870,8 +3870,8 @@ define amdgpu_kernel void @flat_system_release_seq_cst_cmpxchg(
; GFX10-WGP-NEXT: flat_atomic_cmpswap v[0:1], v[2:3]
; GFX10-WGP-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: s_endpgm
;
; GFX10-CU-LABEL: flat_system_release_seq_cst_cmpxchg:
@@ -3887,8 +3887,8 @@ define amdgpu_kernel void @flat_system_release_seq_cst_cmpxchg(
; GFX10-CU-NEXT: flat_atomic_cmpswap v[0:1], v[2:3]
; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: s_endpgm
;
; SKIP-CACHE-INV-LABEL: flat_system_release_seq_cst_cmpxchg:
@@ -3964,8 +3964,8 @@ define amdgpu_kernel void @flat_system_release_seq_cst_cmpxchg(
; GFX11-WGP-NEXT: flat_atomic_cmpswap_b32 v[0:1], v[2:3] offset:16
; GFX11-WGP-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: s_endpgm
;
; GFX11-CU-LABEL: flat_system_release_seq_cst_cmpxchg:
@@ -3977,8 +3977,8 @@ define amdgpu_kernel void @flat_system_release_seq_cst_cmpxchg(
; GFX11-CU-NEXT: flat_atomic_cmpswap_b32 v[0:1], v[2:3] offset:16
; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: s_endpgm
ptr %out, i32 %in, i32 %old) {
entry:
@@ -4016,8 +4016,8 @@ define amdgpu_kernel void @flat_system_acq_rel_seq_cst_cmpxchg(
; GFX10-WGP-NEXT: flat_atomic_cmpswap v[0:1], v[2:3]
; GFX10-WGP-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: s_endpgm
;
; GFX10-CU-LABEL: flat_system_acq_rel_seq_cst_cmpxchg:
@@ -4033,8 +4033,8 @@ define amdgpu_kernel void @flat_system_acq_rel_seq_cst_cmpxchg(
; GFX10-CU-NEXT: flat_atomic_cmpswap v[0:1], v[2:3]
; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: s_endpgm
;
; SKIP-CACHE-INV-LABEL: flat_system_acq_rel_seq_cst_cmpxchg:
@@ -4110,8 +4110,8 @@ define amdgpu_kernel void @flat_system_acq_rel_seq_cst_cmpxchg(
; GFX11-WGP-NEXT: flat_atomic_cmpswap_b32 v[0:1], v[2:3] offset:16
; GFX11-WGP-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: s_endpgm
;
; GFX11-CU-LABEL: flat_system_acq_rel_seq_cst_cmpxchg:
@@ -4123,8 +4123,8 @@ define amdgpu_kernel void @flat_system_acq_rel_seq_cst_cmpxchg(
; GFX11-CU-NEXT: flat_atomic_cmpswap_b32 v[0:1], v[2:3] offset:16
; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: s_endpgm
ptr %out, i32 %in, i32 %old) {
entry:
@@ -4162,8 +4162,8 @@ define amdgpu_kernel void @flat_system_seq_cst_seq_cst_cmpxchg(
; GFX10-WGP-NEXT: flat_atomic_cmpswap v[0:1], v[2:3]
; GFX10-WGP-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: s_endpgm
;
; GFX10-CU-LABEL: flat_system_seq_cst_seq_cst_cmpxchg:
@@ -4179,8 +4179,8 @@ define amdgpu_kernel void @flat_system_seq_cst_seq_cst_cmpxchg(
; GFX10-CU-NEXT: flat_atomic_cmpswap v[0:1], v[2:3]
; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: s_endpgm
;
; SKIP-CACHE-INV-LABEL: flat_system_seq_cst_seq_cst_cmpxchg:
@@ -4256,8 +4256,8 @@ define amdgpu_kernel void @flat_system_seq_cst_seq_cst_cmpxchg(
; GFX11-WGP-NEXT: flat_atomic_cmpswap_b32 v[0:1], v[2:3] offset:16
; GFX11-WGP-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: s_endpgm
;
; GFX11-CU-LABEL: flat_system_seq_cst_seq_cst_cmpxchg:
@@ -4269,8 +4269,8 @@ define amdgpu_kernel void @flat_system_seq_cst_seq_cst_cmpxchg(
; GFX11-CU-NEXT: flat_atomic_cmpswap_b32 v[0:1], v[2:3] offset:16
; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: s_endpgm
ptr %out, i32 %in, i32 %old) {
entry:
@@ -4453,8 +4453,8 @@ define amdgpu_kernel void @flat_system_acquire_monotonic_ret_cmpxchg(
; GFX10-WGP-NEXT: v_mov_b32_e32 v3, s3
; GFX10-WGP-NEXT: flat_atomic_cmpswap v2, v[0:1], v[2:3] glc
; GFX10-WGP-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: v_mov_b32_e32 v0, s0
; GFX10-WGP-NEXT: v_mov_b32_e32 v1, s1
; GFX10-WGP-NEXT: flat_store_dword v[0:1], v2
@@ -4472,8 +4472,8 @@ define amdgpu_kernel void @flat_system_acquire_monotonic_ret_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v3, s3
; GFX10-CU-NEXT: flat_atomic_cmpswap v2, v[0:1], v[2:3] glc
; GFX10-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s1
; GFX10-CU-NEXT: flat_store_dword v[0:1], v2
@@ -4554,8 +4554,8 @@ define amdgpu_kernel void @flat_system_acquire_monotonic_ret_cmpxchg(
; GFX11-WGP-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-WGP-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] offset:16 glc
; GFX11-WGP-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: flat_store_b32 v[0:1], v2
; GFX11-WGP-NEXT: s_endpgm
;
@@ -4567,8 +4567,8 @@ define amdgpu_kernel void @flat_system_acquire_monotonic_ret_cmpxchg(
; GFX11-CU-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-CU-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] offset:16 glc
; GFX11-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: flat_store_b32 v[0:1], v2
; GFX11-CU-NEXT: s_endpgm
ptr %out, i32 %in, i32 %old) {
@@ -4758,8 +4758,8 @@ define amdgpu_kernel void @flat_system_acq_rel_monotonic_ret_cmpxchg(
; GFX10-WGP-NEXT: v_mov_b32_e32 v3, s3
; GFX10-WGP-NEXT: flat_atomic_cmpswap v2, v[0:1], v[2:3] glc
; GFX10-WGP-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: v_mov_b32_e32 v0, s0
; GFX10-WGP-NEXT: v_mov_b32_e32 v1, s1
; GFX10-WGP-NEXT: flat_store_dword v[0:1], v2
@@ -4777,8 +4777,8 @@ define amdgpu_kernel void @flat_system_acq_rel_monotonic_ret_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v3, s3
; GFX10-CU-NEXT: flat_atomic_cmpswap v2, v[0:1], v[2:3] glc
; GFX10-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s1
; GFX10-CU-NEXT: flat_store_dword v[0:1], v2
@@ -4863,8 +4863,8 @@ define amdgpu_kernel void @flat_system_acq_rel_monotonic_ret_cmpxchg(
; GFX11-WGP-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-WGP-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] offset:16 glc
; GFX11-WGP-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: flat_store_b32 v[0:1], v2
; GFX11-WGP-NEXT: s_endpgm
;
@@ -4876,8 +4876,8 @@ define amdgpu_kernel void @flat_system_acq_rel_monotonic_ret_cmpxchg(
; GFX11-CU-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-CU-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] offset:16 glc
; GFX11-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: flat_store_b32 v[0:1], v2
; GFX11-CU-NEXT: s_endpgm
ptr %out, i32 %in, i32 %old) {
@@ -4920,8 +4920,8 @@ define amdgpu_kernel void @flat_system_seq_cst_monotonic_ret_cmpxchg(
; GFX10-WGP-NEXT: v_mov_b32_e32 v3, s3
; GFX10-WGP-NEXT: flat_atomic_cmpswap v2, v[0:1], v[2:3] glc
; GFX10-WGP-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: v_mov_b32_e32 v0, s0
; GFX10-WGP-NEXT: v_mov_b32_e32 v1, s1
; GFX10-WGP-NEXT: flat_store_dword v[0:1], v2
@@ -4939,8 +4939,8 @@ define amdgpu_kernel void @flat_system_seq_cst_monotonic_ret_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v3, s3
; GFX10-CU-NEXT: flat_atomic_cmpswap v2, v[0:1], v[2:3] glc
; GFX10-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s1
; GFX10-CU-NEXT: flat_store_dword v[0:1], v2
@@ -5025,8 +5025,8 @@ define amdgpu_kernel void @flat_system_seq_cst_monotonic_ret_cmpxchg(
; GFX11-WGP-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-WGP-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] offset:16 glc
; GFX11-WGP-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: flat_store_b32 v[0:1], v2
; GFX11-WGP-NEXT: s_endpgm
;
@@ -5038,8 +5038,8 @@ define amdgpu_kernel void @flat_system_seq_cst_monotonic_ret_cmpxchg(
; GFX11-CU-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-CU-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] offset:16 glc
; GFX11-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: flat_store_b32 v[0:1], v2
; GFX11-CU-NEXT: s_endpgm
ptr %out, i32 %in, i32 %old) {
@@ -5082,8 +5082,8 @@ define amdgpu_kernel void @flat_system_monotonic_acquire_ret_cmpxchg(
; GFX10-WGP-NEXT: v_mov_b32_e32 v3, s3
; GFX10-WGP-NEXT: flat_atomic_cmpswap v2, v[0:1], v[2:3] glc
; GFX10-WGP-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: v_mov_b32_e32 v0, s0
; GFX10-WGP-NEXT: v_mov_b32_e32 v1, s1
; GFX10-WGP-NEXT: flat_store_dword v[0:1], v2
@@ -5101,8 +5101,8 @@ define amdgpu_kernel void @flat_system_monotonic_acquire_ret_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v3, s3
; GFX10-CU-NEXT: flat_atomic_cmpswap v2, v[0:1], v[2:3] glc
; GFX10-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s1
; GFX10-CU-NEXT: flat_store_dword v[0:1], v2
@@ -5183,8 +5183,8 @@ define amdgpu_kernel void @flat_system_monotonic_acquire_ret_cmpxchg(
; GFX11-WGP-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-WGP-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] offset:16 glc
; GFX11-WGP-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: flat_store_b32 v[0:1], v2
; GFX11-WGP-NEXT: s_endpgm
;
@@ -5196,8 +5196,8 @@ define amdgpu_kernel void @flat_system_monotonic_acquire_ret_cmpxchg(
; GFX11-CU-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-CU-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] offset:16 glc
; GFX11-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: flat_store_b32 v[0:1], v2
; GFX11-CU-NEXT: s_endpgm
ptr %out, i32 %in, i32 %old) {
@@ -5240,8 +5240,8 @@ define amdgpu_kernel void @flat_system_acquire_acquire_ret_cmpxchg(
; GFX10-WGP-NEXT: v_mov_b32_e32 v3, s3
; GFX10-WGP-NEXT: flat_atomic_cmpswap v2, v[0:1], v[2:3] glc
; GFX10-WGP-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: v_mov_b32_e32 v0, s0
; GFX10-WGP-NEXT: v_mov_b32_e32 v1, s1
; GFX10-WGP-NEXT: flat_store_dword v[0:1], v2
@@ -5259,8 +5259,8 @@ define amdgpu_kernel void @flat_system_acquire_acquire_ret_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v3, s3
; GFX10-CU-NEXT: flat_atomic_cmpswap v2, v[0:1], v[2:3] glc
; GFX10-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s1
; GFX10-CU-NEXT: flat_store_dword v[0:1], v2
@@ -5341,8 +5341,8 @@ define amdgpu_kernel void @flat_system_acquire_acquire_ret_cmpxchg(
; GFX11-WGP-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-WGP-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] offset:16 glc
; GFX11-WGP-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: flat_store_b32 v[0:1], v2
; GFX11-WGP-NEXT: s_endpgm
;
@@ -5354,8 +5354,8 @@ define amdgpu_kernel void @flat_system_acquire_acquire_ret_cmpxchg(
; GFX11-CU-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-CU-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] offset:16 glc
; GFX11-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: flat_store_b32 v[0:1], v2
; GFX11-CU-NEXT: s_endpgm
ptr %out, i32 %in, i32 %old) {
@@ -5398,8 +5398,8 @@ define amdgpu_kernel void @flat_system_release_acquire_ret_cmpxchg(
; GFX10-WGP-NEXT: v_mov_b32_e32 v3, s3
; GFX10-WGP-NEXT: flat_atomic_cmpswap v2, v[0:1], v[2:3] glc
; GFX10-WGP-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: v_mov_b32_e32 v0, s0
; GFX10-WGP-NEXT: v_mov_b32_e32 v1, s1
; GFX10-WGP-NEXT: flat_store_dword v[0:1], v2
@@ -5417,8 +5417,8 @@ define amdgpu_kernel void @flat_system_release_acquire_ret_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v3, s3
; GFX10-CU-NEXT: flat_atomic_cmpswap v2, v[0:1], v[2:3] glc
; GFX10-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s1
; GFX10-CU-NEXT: flat_store_dword v[0:1], v2
@@ -5503,8 +5503,8 @@ define amdgpu_kernel void @flat_system_release_acquire_ret_cmpxchg(
; GFX11-WGP-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-WGP-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] offset:16 glc
; GFX11-WGP-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: flat_store_b32 v[0:1], v2
; GFX11-WGP-NEXT: s_endpgm
;
@@ -5516,8 +5516,8 @@ define amdgpu_kernel void @flat_system_release_acquire_ret_cmpxchg(
; GFX11-CU-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-CU-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] offset:16 glc
; GFX11-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: flat_store_b32 v[0:1], v2
; GFX11-CU-NEXT: s_endpgm
ptr %out, i32 %in, i32 %old) {
@@ -5560,8 +5560,8 @@ define amdgpu_kernel void @flat_system_acq_rel_acquire_ret_cmpxchg(
; GFX10-WGP-NEXT: v_mov_b32_e32 v3, s3
; GFX10-WGP-NEXT: flat_atomic_cmpswap v2, v[0:1], v[2:3] glc
; GFX10-WGP-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: v_mov_b32_e32 v0, s0
; GFX10-WGP-NEXT: v_mov_b32_e32 v1, s1
; GFX10-WGP-NEXT: flat_store_dword v[0:1], v2
@@ -5579,8 +5579,8 @@ define amdgpu_kernel void @flat_system_acq_rel_acquire_ret_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v3, s3
; GFX10-CU-NEXT: flat_atomic_cmpswap v2, v[0:1], v[2:3] glc
; GFX10-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s1
; GFX10-CU-NEXT: flat_store_dword v[0:1], v2
@@ -5665,8 +5665,8 @@ define amdgpu_kernel void @flat_system_acq_rel_acquire_ret_cmpxchg(
; GFX11-WGP-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-WGP-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] offset:16 glc
; GFX11-WGP-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: flat_store_b32 v[0:1], v2
; GFX11-WGP-NEXT: s_endpgm
;
@@ -5678,8 +5678,8 @@ define amdgpu_kernel void @flat_system_acq_rel_acquire_ret_cmpxchg(
; GFX11-CU-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-CU-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] offset:16 glc
; GFX11-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: flat_store_b32 v[0:1], v2
; GFX11-CU-NEXT: s_endpgm
ptr %out, i32 %in, i32 %old) {
@@ -5722,8 +5722,8 @@ define amdgpu_kernel void @flat_system_seq_cst_acquire_ret_cmpxchg(
; GFX10-WGP-NEXT: v_mov_b32_e32 v3, s3
; GFX10-WGP-NEXT: flat_atomic_cmpswap v2, v[0:1], v[2:3] glc
; GFX10-WGP-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: v_mov_b32_e32 v0, s0
; GFX10-WGP-NEXT: v_mov_b32_e32 v1, s1
; GFX10-WGP-NEXT: flat_store_dword v[0:1], v2
@@ -5741,8 +5741,8 @@ define amdgpu_kernel void @flat_system_seq_cst_acquire_ret_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v3, s3
; GFX10-CU-NEXT: flat_atomic_cmpswap v2, v[0:1], v[2:3] glc
; GFX10-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s1
; GFX10-CU-NEXT: flat_store_dword v[0:1], v2
@@ -5827,8 +5827,8 @@ define amdgpu_kernel void @flat_system_seq_cst_acquire_ret_cmpxchg(
; GFX11-WGP-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-WGP-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] offset:16 glc
; GFX11-WGP-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: flat_store_b32 v[0:1], v2
; GFX11-WGP-NEXT: s_endpgm
;
@@ -5840,8 +5840,8 @@ define amdgpu_kernel void @flat_system_seq_cst_acquire_ret_cmpxchg(
; GFX11-CU-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-CU-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] offset:16 glc
; GFX11-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: flat_store_b32 v[0:1], v2
; GFX11-CU-NEXT: s_endpgm
ptr %out, i32 %in, i32 %old) {
@@ -5884,8 +5884,8 @@ define amdgpu_kernel void @flat_system_monotonic_seq_cst_ret_cmpxchg(
; GFX10-WGP-NEXT: v_mov_b32_e32 v3, s3
; GFX10-WGP-NEXT: flat_atomic_cmpswap v2, v[0:1], v[2:3] glc
; GFX10-WGP-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: v_mov_b32_e32 v0, s0
; GFX10-WGP-NEXT: v_mov_b32_e32 v1, s1
; GFX10-WGP-NEXT: flat_store_dword v[0:1], v2
@@ -5903,8 +5903,8 @@ define amdgpu_kernel void @flat_system_monotonic_seq_cst_ret_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v3, s3
; GFX10-CU-NEXT: flat_atomic_cmpswap v2, v[0:1], v[2:3] glc
; GFX10-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s1
; GFX10-CU-NEXT: flat_store_dword v[0:1], v2
@@ -5989,8 +5989,8 @@ define amdgpu_kernel void @flat_system_monotonic_seq_cst_ret_cmpxchg(
; GFX11-WGP-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-WGP-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] offset:16 glc
; GFX11-WGP-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: flat_store_b32 v[0:1], v2
; GFX11-WGP-NEXT: s_endpgm
;
@@ -6002,8 +6002,8 @@ define amdgpu_kernel void @flat_system_monotonic_seq_cst_ret_cmpxchg(
; GFX11-CU-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-CU-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] offset:16 glc
; GFX11-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: flat_store_b32 v[0:1], v2
; GFX11-CU-NEXT: s_endpgm
ptr %out, i32 %in, i32 %old) {
@@ -6046,8 +6046,8 @@ define amdgpu_kernel void @flat_system_acquire_seq_cst_ret_cmpxchg(
; GFX10-WGP-NEXT: v_mov_b32_e32 v3, s3
; GFX10-WGP-NEXT: flat_atomic_cmpswap v2, v[0:1], v[2:3] glc
; GFX10-WGP-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: v_mov_b32_e32 v0, s0
; GFX10-WGP-NEXT: v_mov_b32_e32 v1, s1
; GFX10-WGP-NEXT: flat_store_dword v[0:1], v2
@@ -6065,8 +6065,8 @@ define amdgpu_kernel void @flat_system_acquire_seq_cst_ret_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v3, s3
; GFX10-CU-NEXT: flat_atomic_cmpswap v2, v[0:1], v[2:3] glc
; GFX10-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s1
; GFX10-CU-NEXT: flat_store_dword v[0:1], v2
@@ -6151,8 +6151,8 @@ define amdgpu_kernel void @flat_system_acquire_seq_cst_ret_cmpxchg(
; GFX11-WGP-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-WGP-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] offset:16 glc
; GFX11-WGP-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: flat_store_b32 v[0:1], v2
; GFX11-WGP-NEXT: s_endpgm
;
@@ -6164,8 +6164,8 @@ define amdgpu_kernel void @flat_system_acquire_seq_cst_ret_cmpxchg(
; GFX11-CU-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-CU-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] offset:16 glc
; GFX11-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: flat_store_b32 v[0:1], v2
; GFX11-CU-NEXT: s_endpgm
ptr %out, i32 %in, i32 %old) {
@@ -6208,8 +6208,8 @@ define amdgpu_kernel void @flat_system_release_seq_cst_ret_cmpxchg(
; GFX10-WGP-NEXT: v_mov_b32_e32 v3, s3
; GFX10-WGP-NEXT: flat_atomic_cmpswap v2, v[0:1], v[2:3] glc
; GFX10-WGP-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: v_mov_b32_e32 v0, s0
; GFX10-WGP-NEXT: v_mov_b32_e32 v1, s1
; GFX10-WGP-NEXT: flat_store_dword v[0:1], v2
@@ -6227,8 +6227,8 @@ define amdgpu_kernel void @flat_system_release_seq_cst_ret_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v3, s3
; GFX10-CU-NEXT: flat_atomic_cmpswap v2, v[0:1], v[2:3] glc
; GFX10-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s1
; GFX10-CU-NEXT: flat_store_dword v[0:1], v2
@@ -6313,8 +6313,8 @@ define amdgpu_kernel void @flat_system_release_seq_cst_ret_cmpxchg(
; GFX11-WGP-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-WGP-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] offset:16 glc
; GFX11-WGP-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: flat_store_b32 v[0:1], v2
; GFX11-WGP-NEXT: s_endpgm
;
@@ -6326,8 +6326,8 @@ define amdgpu_kernel void @flat_system_release_seq_cst_ret_cmpxchg(
; GFX11-CU-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-CU-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] offset:16 glc
; GFX11-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: flat_store_b32 v[0:1], v2
; GFX11-CU-NEXT: s_endpgm
ptr %out, i32 %in, i32 %old) {
@@ -6370,8 +6370,8 @@ define amdgpu_kernel void @flat_system_acq_rel_seq_cst_ret_cmpxchg(
; GFX10-WGP-NEXT: v_mov_b32_e32 v3, s3
; GFX10-WGP-NEXT: flat_atomic_cmpswap v2, v[0:1], v[2:3] glc
; GFX10-WGP-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: v_mov_b32_e32 v0, s0
; GFX10-WGP-NEXT: v_mov_b32_e32 v1, s1
; GFX10-WGP-NEXT: flat_store_dword v[0:1], v2
@@ -6389,8 +6389,8 @@ define amdgpu_kernel void @flat_system_acq_rel_seq_cst_ret_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v3, s3
; GFX10-CU-NEXT: flat_atomic_cmpswap v2, v[0:1], v[2:3] glc
; GFX10-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s1
; GFX10-CU-NEXT: flat_store_dword v[0:1], v2
@@ -6475,8 +6475,8 @@ define amdgpu_kernel void @flat_system_acq_rel_seq_cst_ret_cmpxchg(
; GFX11-WGP-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-WGP-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] offset:16 glc
; GFX11-WGP-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: flat_store_b32 v[0:1], v2
; GFX11-WGP-NEXT: s_endpgm
;
@@ -6488,8 +6488,8 @@ define amdgpu_kernel void @flat_system_acq_rel_seq_cst_ret_cmpxchg(
; GFX11-CU-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-CU-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] offset:16 glc
; GFX11-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: flat_store_b32 v[0:1], v2
; GFX11-CU-NEXT: s_endpgm
ptr %out, i32 %in, i32 %old) {
@@ -6532,8 +6532,8 @@ define amdgpu_kernel void @flat_system_seq_cst_seq_cst_ret_cmpxchg(
; GFX10-WGP-NEXT: v_mov_b32_e32 v3, s3
; GFX10-WGP-NEXT: flat_atomic_cmpswap v2, v[0:1], v[2:3] glc
; GFX10-WGP-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: v_mov_b32_e32 v0, s0
; GFX10-WGP-NEXT: v_mov_b32_e32 v1, s1
; GFX10-WGP-NEXT: flat_store_dword v[0:1], v2
@@ -6551,8 +6551,8 @@ define amdgpu_kernel void @flat_system_seq_cst_seq_cst_ret_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v3, s3
; GFX10-CU-NEXT: flat_atomic_cmpswap v2, v[0:1], v[2:3] glc
; GFX10-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s1
; GFX10-CU-NEXT: flat_store_dword v[0:1], v2
@@ -6637,8 +6637,8 @@ define amdgpu_kernel void @flat_system_seq_cst_seq_cst_ret_cmpxchg(
; GFX11-WGP-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-WGP-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] offset:16 glc
; GFX11-WGP-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: flat_store_b32 v[0:1], v2
; GFX11-WGP-NEXT: s_endpgm
;
@@ -6650,8 +6650,8 @@ define amdgpu_kernel void @flat_system_seq_cst_seq_cst_ret_cmpxchg(
; GFX11-CU-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-CU-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] offset:16 glc
; GFX11-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: flat_store_b32 v[0:1], v2
; GFX11-CU-NEXT: s_endpgm
ptr %out, i32 %in, i32 %old) {
@@ -6953,8 +6953,8 @@ define amdgpu_kernel void @flat_system_one_as_acquire_load(
; GFX10-WGP-NEXT: v_mov_b32_e32 v1, s1
; GFX10-WGP-NEXT: flat_load_dword v2, v[0:1] glc dlc
; GFX10-WGP-NEXT: s_waitcnt vmcnt(0)
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: v_mov_b32_e32 v0, s2
; GFX10-WGP-NEXT: v_mov_b32_e32 v1, s3
; GFX10-WGP-NEXT: s_waitcnt lgkmcnt(0)
@@ -6969,8 +6969,8 @@ define amdgpu_kernel void @flat_system_one_as_acquire_load(
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s1
; GFX10-CU-NEXT: flat_load_dword v2, v[0:1] glc dlc
; GFX10-CU-NEXT: s_waitcnt vmcnt(0)
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s2
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s3
; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
@@ -7058,8 +7058,8 @@ define amdgpu_kernel void @flat_system_one_as_acquire_load(
; GFX11-WGP-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-WGP-NEXT: flat_load_b32 v2, v[0:1] glc
; GFX11-WGP-NEXT: s_waitcnt vmcnt(0)
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
; GFX11-WGP-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-WGP-NEXT: flat_store_b32 v[0:1], v2
@@ -7072,8 +7072,8 @@ define amdgpu_kernel void @flat_system_one_as_acquire_load(
; GFX11-CU-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-CU-NEXT: flat_load_b32 v2, v[0:1] glc
; GFX11-CU-NEXT: s_waitcnt vmcnt(0)
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-CU-NEXT: flat_store_b32 v[0:1], v2
@@ -7109,8 +7109,8 @@ define amdgpu_kernel void @flat_system_one_as_seq_cst_load(
; GFX10-WGP-NEXT: v_mov_b32_e32 v1, s1
; GFX10-WGP-NEXT: flat_load_dword v2, v[0:1] glc dlc
; GFX10-WGP-NEXT: s_waitcnt vmcnt(0)
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: v_mov_b32_e32 v0, s2
; GFX10-WGP-NEXT: v_mov_b32_e32 v1, s3
; GFX10-WGP-NEXT: s_waitcnt lgkmcnt(0)
@@ -7125,8 +7125,8 @@ define amdgpu_kernel void @flat_system_one_as_seq_cst_load(
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s1
; GFX10-CU-NEXT: flat_load_dword v2, v[0:1] glc dlc
; GFX10-CU-NEXT: s_waitcnt vmcnt(0)
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s2
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s3
; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
@@ -7214,8 +7214,8 @@ define amdgpu_kernel void @flat_system_one_as_seq_cst_load(
; GFX11-WGP-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-WGP-NEXT: flat_load_b32 v2, v[0:1] glc
; GFX11-WGP-NEXT: s_waitcnt vmcnt(0)
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
; GFX11-WGP-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-WGP-NEXT: flat_store_b32 v[0:1], v2
@@ -7228,8 +7228,8 @@ define amdgpu_kernel void @flat_system_one_as_seq_cst_load(
; GFX11-CU-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-CU-NEXT: flat_load_b32 v2, v[0:1] glc
; GFX11-CU-NEXT: s_waitcnt vmcnt(0)
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-CU-NEXT: flat_store_b32 v[0:1], v2
@@ -7844,8 +7844,8 @@ define amdgpu_kernel void @flat_system_one_as_acquire_atomicrmw(
; GFX10-WGP-NEXT: v_mov_b32_e32 v2, s2
; GFX10-WGP-NEXT: flat_atomic_swap v[0:1], v2
; GFX10-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: s_endpgm
;
; GFX10-CU-LABEL: flat_system_one_as_acquire_atomicrmw:
@@ -7859,8 +7859,8 @@ define amdgpu_kernel void @flat_system_one_as_acquire_atomicrmw(
; GFX10-CU-NEXT: v_mov_b32_e32 v2, s2
; GFX10-CU-NEXT: flat_atomic_swap v[0:1], v2
; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: s_endpgm
;
; SKIP-CACHE-INV-LABEL: flat_system_one_as_acquire_atomicrmw:
@@ -7935,8 +7935,8 @@ define amdgpu_kernel void @flat_system_one_as_acquire_atomicrmw(
; GFX11-WGP-NEXT: v_mov_b32_e32 v2, s0
; GFX11-WGP-NEXT: flat_atomic_swap_b32 v[0:1], v2
; GFX11-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: s_endpgm
;
; GFX11-CU-LABEL: flat_system_one_as_acquire_atomicrmw:
@@ -7949,8 +7949,8 @@ define amdgpu_kernel void @flat_system_one_as_acquire_atomicrmw(
; GFX11-CU-NEXT: v_mov_b32_e32 v2, s0
; GFX11-CU-NEXT: flat_atomic_swap_b32 v[0:1], v2
; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: s_endpgm
ptr %out, i32 %in) {
entry:
@@ -8101,8 +8101,8 @@ define amdgpu_kernel void @flat_system_one_as_acq_rel_atomicrmw(
; GFX10-WGP-NEXT: v_mov_b32_e32 v2, s2
; GFX10-WGP-NEXT: flat_atomic_swap v[0:1], v2
; GFX10-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: s_endpgm
;
; GFX10-CU-LABEL: flat_system_one_as_acq_rel_atomicrmw:
@@ -8116,8 +8116,8 @@ define amdgpu_kernel void @flat_system_one_as_acq_rel_atomicrmw(
; GFX10-CU-NEXT: v_mov_b32_e32 v2, s2
; GFX10-CU-NEXT: flat_atomic_swap v[0:1], v2
; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: s_endpgm
;
; SKIP-CACHE-INV-LABEL: flat_system_one_as_acq_rel_atomicrmw:
@@ -8196,8 +8196,8 @@ define amdgpu_kernel void @flat_system_one_as_acq_rel_atomicrmw(
; GFX11-WGP-NEXT: v_mov_b32_e32 v2, s0
; GFX11-WGP-NEXT: flat_atomic_swap_b32 v[0:1], v2
; GFX11-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: s_endpgm
;
; GFX11-CU-LABEL: flat_system_one_as_acq_rel_atomicrmw:
@@ -8210,8 +8210,8 @@ define amdgpu_kernel void @flat_system_one_as_acq_rel_atomicrmw(
; GFX11-CU-NEXT: v_mov_b32_e32 v2, s0
; GFX11-CU-NEXT: flat_atomic_swap_b32 v[0:1], v2
; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: s_endpgm
ptr %out, i32 %in) {
entry:
@@ -8244,8 +8244,8 @@ define amdgpu_kernel void @flat_system_one_as_seq_cst_atomicrmw(
; GFX10-WGP-NEXT: v_mov_b32_e32 v2, s2
; GFX10-WGP-NEXT: flat_atomic_swap v[0:1], v2
; GFX10-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: s_endpgm
;
; GFX10-CU-LABEL: flat_system_one_as_seq_cst_atomicrmw:
@@ -8259,8 +8259,8 @@ define amdgpu_kernel void @flat_system_one_as_seq_cst_atomicrmw(
; GFX10-CU-NEXT: v_mov_b32_e32 v2, s2
; GFX10-CU-NEXT: flat_atomic_swap v[0:1], v2
; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: s_endpgm
;
; SKIP-CACHE-INV-LABEL: flat_system_one_as_seq_cst_atomicrmw:
@@ -8339,8 +8339,8 @@ define amdgpu_kernel void @flat_system_one_as_seq_cst_atomicrmw(
; GFX11-WGP-NEXT: v_mov_b32_e32 v2, s0
; GFX11-WGP-NEXT: flat_atomic_swap_b32 v[0:1], v2
; GFX11-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: s_endpgm
;
; GFX11-CU-LABEL: flat_system_one_as_seq_cst_atomicrmw:
@@ -8353,8 +8353,8 @@ define amdgpu_kernel void @flat_system_one_as_seq_cst_atomicrmw(
; GFX11-CU-NEXT: v_mov_b32_e32 v2, s0
; GFX11-CU-NEXT: flat_atomic_swap_b32 v[0:1], v2
; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: s_endpgm
ptr %out, i32 %in) {
entry:
@@ -8389,8 +8389,8 @@ define amdgpu_kernel void @flat_system_one_as_acquire_ret_atomicrmw(
; GFX10-WGP-NEXT: v_mov_b32_e32 v2, s2
; GFX10-WGP-NEXT: flat_atomic_swap v2, v[0:1], v2 glc
; GFX10-WGP-NEXT: s_waitcnt vmcnt(0)
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-WGP-NEXT: flat_store_dword v[0:1], v2
; GFX10-WGP-NEXT: s_endpgm
@@ -8406,8 +8406,8 @@ define amdgpu_kernel void @flat_system_one_as_acquire_ret_atomicrmw(
; GFX10-CU-NEXT: v_mov_b32_e32 v2, s2
; GFX10-CU-NEXT: flat_atomic_swap v2, v[0:1], v2 glc
; GFX10-CU-NEXT: s_waitcnt vmcnt(0)
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-CU-NEXT: flat_store_dword v[0:1], v2
; GFX10-CU-NEXT: s_endpgm
@@ -8491,8 +8491,8 @@ define amdgpu_kernel void @flat_system_one_as_acquire_ret_atomicrmw(
; GFX11-WGP-NEXT: v_mov_b32_e32 v2, s0
; GFX11-WGP-NEXT: flat_atomic_swap_b32 v2, v[0:1], v2 glc
; GFX11-WGP-NEXT: s_waitcnt vmcnt(0)
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-WGP-NEXT: flat_store_b32 v[0:1], v2
; GFX11-WGP-NEXT: s_endpgm
@@ -8507,8 +8507,8 @@ define amdgpu_kernel void @flat_system_one_as_acquire_ret_atomicrmw(
; GFX11-CU-NEXT: v_mov_b32_e32 v2, s0
; GFX11-CU-NEXT: flat_atomic_swap_b32 v2, v[0:1], v2 glc
; GFX11-CU-NEXT: s_waitcnt vmcnt(0)
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-CU-NEXT: flat_store_b32 v[0:1], v2
; GFX11-CU-NEXT: s_endpgm
@@ -8546,8 +8546,8 @@ define amdgpu_kernel void @flat_system_one_as_acq_rel_ret_atomicrmw(
; GFX10-WGP-NEXT: v_mov_b32_e32 v2, s2
; GFX10-WGP-NEXT: flat_atomic_swap v2, v[0:1], v2 glc
; GFX10-WGP-NEXT: s_waitcnt vmcnt(0)
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-WGP-NEXT: flat_store_dword v[0:1], v2
; GFX10-WGP-NEXT: s_endpgm
@@ -8563,8 +8563,8 @@ define amdgpu_kernel void @flat_system_one_as_acq_rel_ret_atomicrmw(
; GFX10-CU-NEXT: v_mov_b32_e32 v2, s2
; GFX10-CU-NEXT: flat_atomic_swap v2, v[0:1], v2 glc
; GFX10-CU-NEXT: s_waitcnt vmcnt(0)
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-CU-NEXT: flat_store_dword v[0:1], v2
; GFX10-CU-NEXT: s_endpgm
@@ -8652,8 +8652,8 @@ define amdgpu_kernel void @flat_system_one_as_acq_rel_ret_atomicrmw(
; GFX11-WGP-NEXT: v_mov_b32_e32 v2, s0
; GFX11-WGP-NEXT: flat_atomic_swap_b32 v2, v[0:1], v2 glc
; GFX11-WGP-NEXT: s_waitcnt vmcnt(0)
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-WGP-NEXT: flat_store_b32 v[0:1], v2
; GFX11-WGP-NEXT: s_endpgm
@@ -8668,8 +8668,8 @@ define amdgpu_kernel void @flat_system_one_as_acq_rel_ret_atomicrmw(
; GFX11-CU-NEXT: v_mov_b32_e32 v2, s0
; GFX11-CU-NEXT: flat_atomic_swap_b32 v2, v[0:1], v2 glc
; GFX11-CU-NEXT: s_waitcnt vmcnt(0)
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-CU-NEXT: flat_store_b32 v[0:1], v2
; GFX11-CU-NEXT: s_endpgm
@@ -8707,8 +8707,8 @@ define amdgpu_kernel void @flat_system_one_as_seq_cst_ret_atomicrmw(
; GFX10-WGP-NEXT: v_mov_b32_e32 v2, s2
; GFX10-WGP-NEXT: flat_atomic_swap v2, v[0:1], v2 glc
; GFX10-WGP-NEXT: s_waitcnt vmcnt(0)
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-WGP-NEXT: flat_store_dword v[0:1], v2
; GFX10-WGP-NEXT: s_endpgm
@@ -8724,8 +8724,8 @@ define amdgpu_kernel void @flat_system_one_as_seq_cst_ret_atomicrmw(
; GFX10-CU-NEXT: v_mov_b32_e32 v2, s2
; GFX10-CU-NEXT: flat_atomic_swap v2, v[0:1], v2 glc
; GFX10-CU-NEXT: s_waitcnt vmcnt(0)
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-CU-NEXT: flat_store_dword v[0:1], v2
; GFX10-CU-NEXT: s_endpgm
@@ -8813,8 +8813,8 @@ define amdgpu_kernel void @flat_system_one_as_seq_cst_ret_atomicrmw(
; GFX11-WGP-NEXT: v_mov_b32_e32 v2, s0
; GFX11-WGP-NEXT: flat_atomic_swap_b32 v2, v[0:1], v2 glc
; GFX11-WGP-NEXT: s_waitcnt vmcnt(0)
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-WGP-NEXT: flat_store_b32 v[0:1], v2
; GFX11-WGP-NEXT: s_endpgm
@@ -8829,8 +8829,8 @@ define amdgpu_kernel void @flat_system_one_as_seq_cst_ret_atomicrmw(
; GFX11-CU-NEXT: v_mov_b32_e32 v2, s0
; GFX11-CU-NEXT: flat_atomic_swap_b32 v2, v[0:1], v2 glc
; GFX11-CU-NEXT: s_waitcnt vmcnt(0)
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-CU-NEXT: flat_store_b32 v[0:1], v2
; GFX11-CU-NEXT: s_endpgm
@@ -8982,8 +8982,8 @@ define amdgpu_kernel void @flat_system_one_as_acquire_monotonic_cmpxchg(
; GFX10-WGP-NEXT: v_mov_b32_e32 v3, s3
; GFX10-WGP-NEXT: flat_atomic_cmpswap v[0:1], v[2:3]
; GFX10-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: s_endpgm
;
; GFX10-CU-LABEL: flat_system_one_as_acquire_monotonic_cmpxchg:
@@ -8998,8 +8998,8 @@ define amdgpu_kernel void @flat_system_one_as_acquire_monotonic_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v3, s3
; GFX10-CU-NEXT: flat_atomic_cmpswap v[0:1], v[2:3]
; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: s_endpgm
;
; SKIP-CACHE-INV-LABEL: flat_system_one_as_acquire_monotonic_cmpxchg:
@@ -9070,8 +9070,8 @@ define amdgpu_kernel void @flat_system_one_as_acquire_monotonic_cmpxchg(
; GFX11-WGP-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-WGP-NEXT: flat_atomic_cmpswap_b32 v[0:1], v[2:3] offset:16
; GFX11-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: s_endpgm
;
; GFX11-CU-LABEL: flat_system_one_as_acquire_monotonic_cmpxchg:
@@ -9082,8 +9082,8 @@ define amdgpu_kernel void @flat_system_one_as_acquire_monotonic_cmpxchg(
; GFX11-CU-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-CU-NEXT: flat_atomic_cmpswap_b32 v[0:1], v[2:3] offset:16
; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: s_endpgm
ptr %out, i32 %in, i32 %old) {
entry:
@@ -9237,8 +9237,8 @@ define amdgpu_kernel void @flat_system_one_as_acq_rel_monotonic_cmpxchg(
; GFX10-WGP-NEXT: v_mov_b32_e32 v3, s3
; GFX10-WGP-NEXT: flat_atomic_cmpswap v[0:1], v[2:3]
; GFX10-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: s_endpgm
;
; GFX10-CU-LABEL: flat_system_one_as_acq_rel_monotonic_cmpxchg:
@@ -9253,8 +9253,8 @@ define amdgpu_kernel void @flat_system_one_as_acq_rel_monotonic_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v3, s3
; GFX10-CU-NEXT: flat_atomic_cmpswap v[0:1], v[2:3]
; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: s_endpgm
;
; SKIP-CACHE-INV-LABEL: flat_system_one_as_acq_rel_monotonic_cmpxchg:
@@ -9329,8 +9329,8 @@ define amdgpu_kernel void @flat_system_one_as_acq_rel_monotonic_cmpxchg(
; GFX11-WGP-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-WGP-NEXT: flat_atomic_cmpswap_b32 v[0:1], v[2:3] offset:16
; GFX11-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: s_endpgm
;
; GFX11-CU-LABEL: flat_system_one_as_acq_rel_monotonic_cmpxchg:
@@ -9341,8 +9341,8 @@ define amdgpu_kernel void @flat_system_one_as_acq_rel_monotonic_cmpxchg(
; GFX11-CU-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-CU-NEXT: flat_atomic_cmpswap_b32 v[0:1], v[2:3] offset:16
; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: s_endpgm
ptr %out, i32 %in, i32 %old) {
entry:
@@ -9379,8 +9379,8 @@ define amdgpu_kernel void @flat_system_one_as_seq_cst_monotonic_cmpxchg(
; GFX10-WGP-NEXT: v_mov_b32_e32 v3, s3
; GFX10-WGP-NEXT: flat_atomic_cmpswap v[0:1], v[2:3]
; GFX10-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: s_endpgm
;
; GFX10-CU-LABEL: flat_system_one_as_seq_cst_monotonic_cmpxchg:
@@ -9395,8 +9395,8 @@ define amdgpu_kernel void @flat_system_one_as_seq_cst_monotonic_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v3, s3
; GFX10-CU-NEXT: flat_atomic_cmpswap v[0:1], v[2:3]
; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: s_endpgm
;
; SKIP-CACHE-INV-LABEL: flat_system_one_as_seq_cst_monotonic_cmpxchg:
@@ -9471,8 +9471,8 @@ define amdgpu_kernel void @flat_system_one_as_seq_cst_monotonic_cmpxchg(
; GFX11-WGP-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-WGP-NEXT: flat_atomic_cmpswap_b32 v[0:1], v[2:3] offset:16
; GFX11-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: s_endpgm
;
; GFX11-CU-LABEL: flat_system_one_as_seq_cst_monotonic_cmpxchg:
@@ -9483,8 +9483,8 @@ define amdgpu_kernel void @flat_system_one_as_seq_cst_monotonic_cmpxchg(
; GFX11-CU-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-CU-NEXT: flat_atomic_cmpswap_b32 v[0:1], v[2:3] offset:16
; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: s_endpgm
ptr %out, i32 %in, i32 %old) {
entry:
@@ -9521,8 +9521,8 @@ define amdgpu_kernel void @flat_system_one_as_monotonic_acquire_cmpxchg(
; GFX10-WGP-NEXT: v_mov_b32_e32 v3, s3
; GFX10-WGP-NEXT: flat_atomic_cmpswap v[0:1], v[2:3]
; GFX10-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: s_endpgm
;
; GFX10-CU-LABEL: flat_system_one_as_monotonic_acquire_cmpxchg:
@@ -9537,8 +9537,8 @@ define amdgpu_kernel void @flat_system_one_as_monotonic_acquire_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v3, s3
; GFX10-CU-NEXT: flat_atomic_cmpswap v[0:1], v[2:3]
; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: s_endpgm
;
; SKIP-CACHE-INV-LABEL: flat_system_one_as_monotonic_acquire_cmpxchg:
@@ -9609,8 +9609,8 @@ define amdgpu_kernel void @flat_system_one_as_monotonic_acquire_cmpxchg(
; GFX11-WGP-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-WGP-NEXT: flat_atomic_cmpswap_b32 v[0:1], v[2:3] offset:16
; GFX11-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: s_endpgm
;
; GFX11-CU-LABEL: flat_system_one_as_monotonic_acquire_cmpxchg:
@@ -9621,8 +9621,8 @@ define amdgpu_kernel void @flat_system_one_as_monotonic_acquire_cmpxchg(
; GFX11-CU-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-CU-NEXT: flat_atomic_cmpswap_b32 v[0:1], v[2:3] offset:16
; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: s_endpgm
ptr %out, i32 %in, i32 %old) {
entry:
@@ -9659,8 +9659,8 @@ define amdgpu_kernel void @flat_system_one_as_acquire_acquire_cmpxchg(
; GFX10-WGP-NEXT: v_mov_b32_e32 v3, s3
; GFX10-WGP-NEXT: flat_atomic_cmpswap v[0:1], v[2:3]
; GFX10-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: s_endpgm
;
; GFX10-CU-LABEL: flat_system_one_as_acquire_acquire_cmpxchg:
@@ -9675,8 +9675,8 @@ define amdgpu_kernel void @flat_system_one_as_acquire_acquire_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v3, s3
; GFX10-CU-NEXT: flat_atomic_cmpswap v[0:1], v[2:3]
; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: s_endpgm
;
; SKIP-CACHE-INV-LABEL: flat_system_one_as_acquire_acquire_cmpxchg:
@@ -9747,8 +9747,8 @@ define amdgpu_kernel void @flat_system_one_as_acquire_acquire_cmpxchg(
; GFX11-WGP-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-WGP-NEXT: flat_atomic_cmpswap_b32 v[0:1], v[2:3] offset:16
; GFX11-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: s_endpgm
;
; GFX11-CU-LABEL: flat_system_one_as_acquire_acquire_cmpxchg:
@@ -9759,8 +9759,8 @@ define amdgpu_kernel void @flat_system_one_as_acquire_acquire_cmpxchg(
; GFX11-CU-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-CU-NEXT: flat_atomic_cmpswap_b32 v[0:1], v[2:3] offset:16
; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: s_endpgm
ptr %out, i32 %in, i32 %old) {
entry:
@@ -9797,8 +9797,8 @@ define amdgpu_kernel void @flat_system_one_as_release_acquire_cmpxchg(
; GFX10-WGP-NEXT: v_mov_b32_e32 v3, s3
; GFX10-WGP-NEXT: flat_atomic_cmpswap v[0:1], v[2:3]
; GFX10-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: s_endpgm
;
; GFX10-CU-LABEL: flat_system_one_as_release_acquire_cmpxchg:
@@ -9813,8 +9813,8 @@ define amdgpu_kernel void @flat_system_one_as_release_acquire_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v3, s3
; GFX10-CU-NEXT: flat_atomic_cmpswap v[0:1], v[2:3]
; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: s_endpgm
;
; SKIP-CACHE-INV-LABEL: flat_system_one_as_release_acquire_cmpxchg:
@@ -9889,8 +9889,8 @@ define amdgpu_kernel void @flat_system_one_as_release_acquire_cmpxchg(
; GFX11-WGP-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-WGP-NEXT: flat_atomic_cmpswap_b32 v[0:1], v[2:3] offset:16
; GFX11-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: s_endpgm
;
; GFX11-CU-LABEL: flat_system_one_as_release_acquire_cmpxchg:
@@ -9901,8 +9901,8 @@ define amdgpu_kernel void @flat_system_one_as_release_acquire_cmpxchg(
; GFX11-CU-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-CU-NEXT: flat_atomic_cmpswap_b32 v[0:1], v[2:3] offset:16
; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: s_endpgm
ptr %out, i32 %in, i32 %old) {
entry:
@@ -9939,8 +9939,8 @@ define amdgpu_kernel void @flat_system_one_as_acq_rel_acquire_cmpxchg(
; GFX10-WGP-NEXT: v_mov_b32_e32 v3, s3
; GFX10-WGP-NEXT: flat_atomic_cmpswap v[0:1], v[2:3]
; GFX10-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: s_endpgm
;
; GFX10-CU-LABEL: flat_system_one_as_acq_rel_acquire_cmpxchg:
@@ -9955,8 +9955,8 @@ define amdgpu_kernel void @flat_system_one_as_acq_rel_acquire_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v3, s3
; GFX10-CU-NEXT: flat_atomic_cmpswap v[0:1], v[2:3]
; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: s_endpgm
;
; SKIP-CACHE-INV-LABEL: flat_system_one_as_acq_rel_acquire_cmpxchg:
@@ -10031,8 +10031,8 @@ define amdgpu_kernel void @flat_system_one_as_acq_rel_acquire_cmpxchg(
; GFX11-WGP-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-WGP-NEXT: flat_atomic_cmpswap_b32 v[0:1], v[2:3] offset:16
; GFX11-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: s_endpgm
;
; GFX11-CU-LABEL: flat_system_one_as_acq_rel_acquire_cmpxchg:
@@ -10043,8 +10043,8 @@ define amdgpu_kernel void @flat_system_one_as_acq_rel_acquire_cmpxchg(
; GFX11-CU-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-CU-NEXT: flat_atomic_cmpswap_b32 v[0:1], v[2:3] offset:16
; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: s_endpgm
ptr %out, i32 %in, i32 %old) {
entry:
@@ -10081,8 +10081,8 @@ define amdgpu_kernel void @flat_system_one_as_seq_cst_acquire_cmpxchg(
; GFX10-WGP-NEXT: v_mov_b32_e32 v3, s3
; GFX10-WGP-NEXT: flat_atomic_cmpswap v[0:1], v[2:3]
; GFX10-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: s_endpgm
;
; GFX10-CU-LABEL: flat_system_one_as_seq_cst_acquire_cmpxchg:
@@ -10097,8 +10097,8 @@ define amdgpu_kernel void @flat_system_one_as_seq_cst_acquire_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v3, s3
; GFX10-CU-NEXT: flat_atomic_cmpswap v[0:1], v[2:3]
; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: s_endpgm
;
; SKIP-CACHE-INV-LABEL: flat_system_one_as_seq_cst_acquire_cmpxchg:
@@ -10173,8 +10173,8 @@ define amdgpu_kernel void @flat_system_one_as_seq_cst_acquire_cmpxchg(
; GFX11-WGP-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-WGP-NEXT: flat_atomic_cmpswap_b32 v[0:1], v[2:3] offset:16
; GFX11-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: s_endpgm
;
; GFX11-CU-LABEL: flat_system_one_as_seq_cst_acquire_cmpxchg:
@@ -10185,8 +10185,8 @@ define amdgpu_kernel void @flat_system_one_as_seq_cst_acquire_cmpxchg(
; GFX11-CU-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-CU-NEXT: flat_atomic_cmpswap_b32 v[0:1], v[2:3] offset:16
; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: s_endpgm
ptr %out, i32 %in, i32 %old) {
entry:
@@ -10223,8 +10223,8 @@ define amdgpu_kernel void @flat_system_one_as_monotonic_seq_cst_cmpxchg(
; GFX10-WGP-NEXT: v_mov_b32_e32 v3, s3
; GFX10-WGP-NEXT: flat_atomic_cmpswap v[0:1], v[2:3]
; GFX10-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: s_endpgm
;
; GFX10-CU-LABEL: flat_system_one_as_monotonic_seq_cst_cmpxchg:
@@ -10239,8 +10239,8 @@ define amdgpu_kernel void @flat_system_one_as_monotonic_seq_cst_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v3, s3
; GFX10-CU-NEXT: flat_atomic_cmpswap v[0:1], v[2:3]
; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: s_endpgm
;
; SKIP-CACHE-INV-LABEL: flat_system_one_as_monotonic_seq_cst_cmpxchg:
@@ -10315,8 +10315,8 @@ define amdgpu_kernel void @flat_system_one_as_monotonic_seq_cst_cmpxchg(
; GFX11-WGP-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-WGP-NEXT: flat_atomic_cmpswap_b32 v[0:1], v[2:3] offset:16
; GFX11-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: s_endpgm
;
; GFX11-CU-LABEL: flat_system_one_as_monotonic_seq_cst_cmpxchg:
@@ -10327,8 +10327,8 @@ define amdgpu_kernel void @flat_system_one_as_monotonic_seq_cst_cmpxchg(
; GFX11-CU-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-CU-NEXT: flat_atomic_cmpswap_b32 v[0:1], v[2:3] offset:16
; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: s_endpgm
ptr %out, i32 %in, i32 %old) {
entry:
@@ -10365,8 +10365,8 @@ define amdgpu_kernel void @flat_system_one_as_acquire_seq_cst_cmpxchg(
; GFX10-WGP-NEXT: v_mov_b32_e32 v3, s3
; GFX10-WGP-NEXT: flat_atomic_cmpswap v[0:1], v[2:3]
; GFX10-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: s_endpgm
;
; GFX10-CU-LABEL: flat_system_one_as_acquire_seq_cst_cmpxchg:
@@ -10381,8 +10381,8 @@ define amdgpu_kernel void @flat_system_one_as_acquire_seq_cst_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v3, s3
; GFX10-CU-NEXT: flat_atomic_cmpswap v[0:1], v[2:3]
; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: s_endpgm
;
; SKIP-CACHE-INV-LABEL: flat_system_one_as_acquire_seq_cst_cmpxchg:
@@ -10457,8 +10457,8 @@ define amdgpu_kernel void @flat_system_one_as_acquire_seq_cst_cmpxchg(
; GFX11-WGP-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-WGP-NEXT: flat_atomic_cmpswap_b32 v[0:1], v[2:3] offset:16
; GFX11-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: s_endpgm
;
; GFX11-CU-LABEL: flat_system_one_as_acquire_seq_cst_cmpxchg:
@@ -10469,8 +10469,8 @@ define amdgpu_kernel void @flat_system_one_as_acquire_seq_cst_cmpxchg(
; GFX11-CU-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-CU-NEXT: flat_atomic_cmpswap_b32 v[0:1], v[2:3] offset:16
; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: s_endpgm
ptr %out, i32 %in, i32 %old) {
entry:
@@ -10507,8 +10507,8 @@ define amdgpu_kernel void @flat_system_one_as_release_seq_cst_cmpxchg(
; GFX10-WGP-NEXT: v_mov_b32_e32 v3, s3
; GFX10-WGP-NEXT: flat_atomic_cmpswap v[0:1], v[2:3]
; GFX10-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: s_endpgm
;
; GFX10-CU-LABEL: flat_system_one_as_release_seq_cst_cmpxchg:
@@ -10523,8 +10523,8 @@ define amdgpu_kernel void @flat_system_one_as_release_seq_cst_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v3, s3
; GFX10-CU-NEXT: flat_atomic_cmpswap v[0:1], v[2:3]
; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: s_endpgm
;
; SKIP-CACHE-INV-LABEL: flat_system_one_as_release_seq_cst_cmpxchg:
@@ -10599,8 +10599,8 @@ define amdgpu_kernel void @flat_system_one_as_release_seq_cst_cmpxchg(
; GFX11-WGP-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-WGP-NEXT: flat_atomic_cmpswap_b32 v[0:1], v[2:3] offset:16
; GFX11-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: s_endpgm
;
; GFX11-CU-LABEL: flat_system_one_as_release_seq_cst_cmpxchg:
@@ -10611,8 +10611,8 @@ define amdgpu_kernel void @flat_system_one_as_release_seq_cst_cmpxchg(
; GFX11-CU-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-CU-NEXT: flat_atomic_cmpswap_b32 v[0:1], v[2:3] offset:16
; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: s_endpgm
ptr %out, i32 %in, i32 %old) {
entry:
@@ -10649,8 +10649,8 @@ define amdgpu_kernel void @flat_system_one_as_acq_rel_seq_cst_cmpxchg(
; GFX10-WGP-NEXT: v_mov_b32_e32 v3, s3
; GFX10-WGP-NEXT: flat_atomic_cmpswap v[0:1], v[2:3]
; GFX10-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: s_endpgm
;
; GFX10-CU-LABEL: flat_system_one_as_acq_rel_seq_cst_cmpxchg:
@@ -10665,8 +10665,8 @@ define amdgpu_kernel void @flat_system_one_as_acq_rel_seq_cst_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v3, s3
; GFX10-CU-NEXT: flat_atomic_cmpswap v[0:1], v[2:3]
; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: s_endpgm
;
; SKIP-CACHE-INV-LABEL: flat_system_one_as_acq_rel_seq_cst_cmpxchg:
@@ -10741,8 +10741,8 @@ define amdgpu_kernel void @flat_system_one_as_acq_rel_seq_cst_cmpxchg(
; GFX11-WGP-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-WGP-NEXT: flat_atomic_cmpswap_b32 v[0:1], v[2:3] offset:16
; GFX11-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: s_endpgm
;
; GFX11-CU-LABEL: flat_system_one_as_acq_rel_seq_cst_cmpxchg:
@@ -10753,8 +10753,8 @@ define amdgpu_kernel void @flat_system_one_as_acq_rel_seq_cst_cmpxchg(
; GFX11-CU-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-CU-NEXT: flat_atomic_cmpswap_b32 v[0:1], v[2:3] offset:16
; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: s_endpgm
ptr %out, i32 %in, i32 %old) {
entry:
@@ -10791,8 +10791,8 @@ define amdgpu_kernel void @flat_system_one_as_seq_cst_seq_cst_cmpxchg(
; GFX10-WGP-NEXT: v_mov_b32_e32 v3, s3
; GFX10-WGP-NEXT: flat_atomic_cmpswap v[0:1], v[2:3]
; GFX10-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: s_endpgm
;
; GFX10-CU-LABEL: flat_system_one_as_seq_cst_seq_cst_cmpxchg:
@@ -10807,8 +10807,8 @@ define amdgpu_kernel void @flat_system_one_as_seq_cst_seq_cst_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v3, s3
; GFX10-CU-NEXT: flat_atomic_cmpswap v[0:1], v[2:3]
; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: s_endpgm
;
; SKIP-CACHE-INV-LABEL: flat_system_one_as_seq_cst_seq_cst_cmpxchg:
@@ -10883,8 +10883,8 @@ define amdgpu_kernel void @flat_system_one_as_seq_cst_seq_cst_cmpxchg(
; GFX11-WGP-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-WGP-NEXT: flat_atomic_cmpswap_b32 v[0:1], v[2:3] offset:16
; GFX11-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: s_endpgm
;
; GFX11-CU-LABEL: flat_system_one_as_seq_cst_seq_cst_cmpxchg:
@@ -10895,8 +10895,8 @@ define amdgpu_kernel void @flat_system_one_as_seq_cst_seq_cst_cmpxchg(
; GFX11-CU-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-CU-NEXT: flat_atomic_cmpswap_b32 v[0:1], v[2:3] offset:16
; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: s_endpgm
ptr %out, i32 %in, i32 %old) {
entry:
@@ -11080,8 +11080,8 @@ define amdgpu_kernel void @flat_system_one_as_acquire_monotonic_ret_cmpxchg(
; GFX10-WGP-NEXT: v_mov_b32_e32 v3, s3
; GFX10-WGP-NEXT: flat_atomic_cmpswap v2, v[0:1], v[2:3] glc
; GFX10-WGP-NEXT: s_waitcnt vmcnt(0)
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: v_mov_b32_e32 v0, s0
; GFX10-WGP-NEXT: v_mov_b32_e32 v1, s1
; GFX10-WGP-NEXT: s_waitcnt lgkmcnt(0)
@@ -11100,8 +11100,8 @@ define amdgpu_kernel void @flat_system_one_as_acquire_monotonic_ret_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v3, s3
; GFX10-CU-NEXT: flat_atomic_cmpswap v2, v[0:1], v[2:3] glc
; GFX10-CU-NEXT: s_waitcnt vmcnt(0)
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s1
; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
@@ -11186,8 +11186,8 @@ define amdgpu_kernel void @flat_system_one_as_acquire_monotonic_ret_cmpxchg(
; GFX11-WGP-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-WGP-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] offset:16 glc
; GFX11-WGP-NEXT: s_waitcnt vmcnt(0)
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-WGP-NEXT: flat_store_b32 v[0:1], v2
; GFX11-WGP-NEXT: s_endpgm
@@ -11200,8 +11200,8 @@ define amdgpu_kernel void @flat_system_one_as_acquire_monotonic_ret_cmpxchg(
; GFX11-CU-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-CU-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] offset:16 glc
; GFX11-CU-NEXT: s_waitcnt vmcnt(0)
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-CU-NEXT: flat_store_b32 v[0:1], v2
; GFX11-CU-NEXT: s_endpgm
@@ -11393,8 +11393,8 @@ define amdgpu_kernel void @flat_system_one_as_acq_rel_monotonic_ret_cmpxchg(
; GFX10-WGP-NEXT: v_mov_b32_e32 v3, s3
; GFX10-WGP-NEXT: flat_atomic_cmpswap v2, v[0:1], v[2:3] glc
; GFX10-WGP-NEXT: s_waitcnt vmcnt(0)
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: v_mov_b32_e32 v0, s0
; GFX10-WGP-NEXT: v_mov_b32_e32 v1, s1
; GFX10-WGP-NEXT: s_waitcnt lgkmcnt(0)
@@ -11413,8 +11413,8 @@ define amdgpu_kernel void @flat_system_one_as_acq_rel_monotonic_ret_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v3, s3
; GFX10-CU-NEXT: flat_atomic_cmpswap v2, v[0:1], v[2:3] glc
; GFX10-CU-NEXT: s_waitcnt vmcnt(0)
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s1
; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
@@ -11503,8 +11503,8 @@ define amdgpu_kernel void @flat_system_one_as_acq_rel_monotonic_ret_cmpxchg(
; GFX11-WGP-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-WGP-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] offset:16 glc
; GFX11-WGP-NEXT: s_waitcnt vmcnt(0)
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-WGP-NEXT: flat_store_b32 v[0:1], v2
; GFX11-WGP-NEXT: s_endpgm
@@ -11517,8 +11517,8 @@ define amdgpu_kernel void @flat_system_one_as_acq_rel_monotonic_ret_cmpxchg(
; GFX11-CU-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-CU-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] offset:16 glc
; GFX11-CU-NEXT: s_waitcnt vmcnt(0)
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-CU-NEXT: flat_store_b32 v[0:1], v2
; GFX11-CU-NEXT: s_endpgm
@@ -11563,8 +11563,8 @@ define amdgpu_kernel void @flat_system_one_as_seq_cst_monotonic_ret_cmpxchg(
; GFX10-WGP-NEXT: v_mov_b32_e32 v3, s3
; GFX10-WGP-NEXT: flat_atomic_cmpswap v2, v[0:1], v[2:3] glc
; GFX10-WGP-NEXT: s_waitcnt vmcnt(0)
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: v_mov_b32_e32 v0, s0
; GFX10-WGP-NEXT: v_mov_b32_e32 v1, s1
; GFX10-WGP-NEXT: s_waitcnt lgkmcnt(0)
@@ -11583,8 +11583,8 @@ define amdgpu_kernel void @flat_system_one_as_seq_cst_monotonic_ret_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v3, s3
; GFX10-CU-NEXT: flat_atomic_cmpswap v2, v[0:1], v[2:3] glc
; GFX10-CU-NEXT: s_waitcnt vmcnt(0)
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s1
; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
@@ -11673,8 +11673,8 @@ define amdgpu_kernel void @flat_system_one_as_seq_cst_monotonic_ret_cmpxchg(
; GFX11-WGP-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-WGP-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] offset:16 glc
; GFX11-WGP-NEXT: s_waitcnt vmcnt(0)
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-WGP-NEXT: flat_store_b32 v[0:1], v2
; GFX11-WGP-NEXT: s_endpgm
@@ -11687,8 +11687,8 @@ define amdgpu_kernel void @flat_system_one_as_seq_cst_monotonic_ret_cmpxchg(
; GFX11-CU-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-CU-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] offset:16 glc
; GFX11-CU-NEXT: s_waitcnt vmcnt(0)
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-CU-NEXT: flat_store_b32 v[0:1], v2
; GFX11-CU-NEXT: s_endpgm
@@ -11733,8 +11733,8 @@ define amdgpu_kernel void @flat_system_one_as_monotonic_acquire_ret_cmpxchg(
; GFX10-WGP-NEXT: v_mov_b32_e32 v3, s3
; GFX10-WGP-NEXT: flat_atomic_cmpswap v2, v[0:1], v[2:3] glc
; GFX10-WGP-NEXT: s_waitcnt vmcnt(0)
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: v_mov_b32_e32 v0, s0
; GFX10-WGP-NEXT: v_mov_b32_e32 v1, s1
; GFX10-WGP-NEXT: s_waitcnt lgkmcnt(0)
@@ -11753,8 +11753,8 @@ define amdgpu_kernel void @flat_system_one_as_monotonic_acquire_ret_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v3, s3
; GFX10-CU-NEXT: flat_atomic_cmpswap v2, v[0:1], v[2:3] glc
; GFX10-CU-NEXT: s_waitcnt vmcnt(0)
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s1
; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
@@ -11839,8 +11839,8 @@ define amdgpu_kernel void @flat_system_one_as_monotonic_acquire_ret_cmpxchg(
; GFX11-WGP-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-WGP-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] offset:16 glc
; GFX11-WGP-NEXT: s_waitcnt vmcnt(0)
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-WGP-NEXT: flat_store_b32 v[0:1], v2
; GFX11-WGP-NEXT: s_endpgm
@@ -11853,8 +11853,8 @@ define amdgpu_kernel void @flat_system_one_as_monotonic_acquire_ret_cmpxchg(
; GFX11-CU-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-CU-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] offset:16 glc
; GFX11-CU-NEXT: s_waitcnt vmcnt(0)
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-CU-NEXT: flat_store_b32 v[0:1], v2
; GFX11-CU-NEXT: s_endpgm
@@ -11899,8 +11899,8 @@ define amdgpu_kernel void @flat_system_one_as_acquire_acquire_ret_cmpxchg(
; GFX10-WGP-NEXT: v_mov_b32_e32 v3, s3
; GFX10-WGP-NEXT: flat_atomic_cmpswap v2, v[0:1], v[2:3] glc
; GFX10-WGP-NEXT: s_waitcnt vmcnt(0)
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: v_mov_b32_e32 v0, s0
; GFX10-WGP-NEXT: v_mov_b32_e32 v1, s1
; GFX10-WGP-NEXT: s_waitcnt lgkmcnt(0)
@@ -11919,8 +11919,8 @@ define amdgpu_kernel void @flat_system_one_as_acquire_acquire_ret_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v3, s3
; GFX10-CU-NEXT: flat_atomic_cmpswap v2, v[0:1], v[2:3] glc
; GFX10-CU-NEXT: s_waitcnt vmcnt(0)
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s1
; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
@@ -12005,8 +12005,8 @@ define amdgpu_kernel void @flat_system_one_as_acquire_acquire_ret_cmpxchg(
; GFX11-WGP-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-WGP-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] offset:16 glc
; GFX11-WGP-NEXT: s_waitcnt vmcnt(0)
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-WGP-NEXT: flat_store_b32 v[0:1], v2
; GFX11-WGP-NEXT: s_endpgm
@@ -12019,8 +12019,8 @@ define amdgpu_kernel void @flat_system_one_as_acquire_acquire_ret_cmpxchg(
; GFX11-CU-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-CU-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] offset:16 glc
; GFX11-CU-NEXT: s_waitcnt vmcnt(0)
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-CU-NEXT: flat_store_b32 v[0:1], v2
; GFX11-CU-NEXT: s_endpgm
@@ -12065,8 +12065,8 @@ define amdgpu_kernel void @flat_system_one_as_release_acquire_ret_cmpxchg(
; GFX10-WGP-NEXT: v_mov_b32_e32 v3, s3
; GFX10-WGP-NEXT: flat_atomic_cmpswap v2, v[0:1], v[2:3] glc
; GFX10-WGP-NEXT: s_waitcnt vmcnt(0)
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: v_mov_b32_e32 v0, s0
; GFX10-WGP-NEXT: v_mov_b32_e32 v1, s1
; GFX10-WGP-NEXT: s_waitcnt lgkmcnt(0)
@@ -12085,8 +12085,8 @@ define amdgpu_kernel void @flat_system_one_as_release_acquire_ret_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v3, s3
; GFX10-CU-NEXT: flat_atomic_cmpswap v2, v[0:1], v[2:3] glc
; GFX10-CU-NEXT: s_waitcnt vmcnt(0)
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s1
; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
@@ -12175,8 +12175,8 @@ define amdgpu_kernel void @flat_system_one_as_release_acquire_ret_cmpxchg(
; GFX11-WGP-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-WGP-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] offset:16 glc
; GFX11-WGP-NEXT: s_waitcnt vmcnt(0)
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-WGP-NEXT: flat_store_b32 v[0:1], v2
; GFX11-WGP-NEXT: s_endpgm
@@ -12189,8 +12189,8 @@ define amdgpu_kernel void @flat_system_one_as_release_acquire_ret_cmpxchg(
; GFX11-CU-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-CU-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] offset:16 glc
; GFX11-CU-NEXT: s_waitcnt vmcnt(0)
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-CU-NEXT: flat_store_b32 v[0:1], v2
; GFX11-CU-NEXT: s_endpgm
@@ -12235,8 +12235,8 @@ define amdgpu_kernel void @flat_system_one_as_acq_rel_acquire_ret_cmpxchg(
; GFX10-WGP-NEXT: v_mov_b32_e32 v3, s3
; GFX10-WGP-NEXT: flat_atomic_cmpswap v2, v[0:1], v[2:3] glc
; GFX10-WGP-NEXT: s_waitcnt vmcnt(0)
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: v_mov_b32_e32 v0, s0
; GFX10-WGP-NEXT: v_mov_b32_e32 v1, s1
; GFX10-WGP-NEXT: s_waitcnt lgkmcnt(0)
@@ -12255,8 +12255,8 @@ define amdgpu_kernel void @flat_system_one_as_acq_rel_acquire_ret_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v3, s3
; GFX10-CU-NEXT: flat_atomic_cmpswap v2, v[0:1], v[2:3] glc
; GFX10-CU-NEXT: s_waitcnt vmcnt(0)
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s1
; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
@@ -12345,8 +12345,8 @@ define amdgpu_kernel void @flat_system_one_as_acq_rel_acquire_ret_cmpxchg(
; GFX11-WGP-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-WGP-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] offset:16 glc
; GFX11-WGP-NEXT: s_waitcnt vmcnt(0)
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-WGP-NEXT: flat_store_b32 v[0:1], v2
; GFX11-WGP-NEXT: s_endpgm
@@ -12359,8 +12359,8 @@ define amdgpu_kernel void @flat_system_one_as_acq_rel_acquire_ret_cmpxchg(
; GFX11-CU-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-CU-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] offset:16 glc
; GFX11-CU-NEXT: s_waitcnt vmcnt(0)
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-CU-NEXT: flat_store_b32 v[0:1], v2
; GFX11-CU-NEXT: s_endpgm
@@ -12405,8 +12405,8 @@ define amdgpu_kernel void @flat_system_one_as_seq_cst_acquire_ret_cmpxchg(
; GFX10-WGP-NEXT: v_mov_b32_e32 v3, s3
; GFX10-WGP-NEXT: flat_atomic_cmpswap v2, v[0:1], v[2:3] glc
; GFX10-WGP-NEXT: s_waitcnt vmcnt(0)
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: v_mov_b32_e32 v0, s0
; GFX10-WGP-NEXT: v_mov_b32_e32 v1, s1
; GFX10-WGP-NEXT: s_waitcnt lgkmcnt(0)
@@ -12425,8 +12425,8 @@ define amdgpu_kernel void @flat_system_one_as_seq_cst_acquire_ret_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v3, s3
; GFX10-CU-NEXT: flat_atomic_cmpswap v2, v[0:1], v[2:3] glc
; GFX10-CU-NEXT: s_waitcnt vmcnt(0)
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s1
; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
@@ -12515,8 +12515,8 @@ define amdgpu_kernel void @flat_system_one_as_seq_cst_acquire_ret_cmpxchg(
; GFX11-WGP-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-WGP-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] offset:16 glc
; GFX11-WGP-NEXT: s_waitcnt vmcnt(0)
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-WGP-NEXT: flat_store_b32 v[0:1], v2
; GFX11-WGP-NEXT: s_endpgm
@@ -12529,8 +12529,8 @@ define amdgpu_kernel void @flat_system_one_as_seq_cst_acquire_ret_cmpxchg(
; GFX11-CU-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-CU-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] offset:16 glc
; GFX11-CU-NEXT: s_waitcnt vmcnt(0)
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-CU-NEXT: flat_store_b32 v[0:1], v2
; GFX11-CU-NEXT: s_endpgm
@@ -12575,8 +12575,8 @@ define amdgpu_kernel void @flat_system_one_as_monotonic_seq_cst_ret_cmpxchg(
; GFX10-WGP-NEXT: v_mov_b32_e32 v3, s3
; GFX10-WGP-NEXT: flat_atomic_cmpswap v2, v[0:1], v[2:3] glc
; GFX10-WGP-NEXT: s_waitcnt vmcnt(0)
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: v_mov_b32_e32 v0, s0
; GFX10-WGP-NEXT: v_mov_b32_e32 v1, s1
; GFX10-WGP-NEXT: s_waitcnt lgkmcnt(0)
@@ -12595,8 +12595,8 @@ define amdgpu_kernel void @flat_system_one_as_monotonic_seq_cst_ret_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v3, s3
; GFX10-CU-NEXT: flat_atomic_cmpswap v2, v[0:1], v[2:3] glc
; GFX10-CU-NEXT: s_waitcnt vmcnt(0)
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s1
; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
@@ -12685,8 +12685,8 @@ define amdgpu_kernel void @flat_system_one_as_monotonic_seq_cst_ret_cmpxchg(
; GFX11-WGP-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-WGP-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] offset:16 glc
; GFX11-WGP-NEXT: s_waitcnt vmcnt(0)
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-WGP-NEXT: flat_store_b32 v[0:1], v2
; GFX11-WGP-NEXT: s_endpgm
@@ -12699,8 +12699,8 @@ define amdgpu_kernel void @flat_system_one_as_monotonic_seq_cst_ret_cmpxchg(
; GFX11-CU-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-CU-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] offset:16 glc
; GFX11-CU-NEXT: s_waitcnt vmcnt(0)
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-CU-NEXT: flat_store_b32 v[0:1], v2
; GFX11-CU-NEXT: s_endpgm
@@ -12745,8 +12745,8 @@ define amdgpu_kernel void @flat_system_one_as_acquire_seq_cst_ret_cmpxchg(
; GFX10-WGP-NEXT: v_mov_b32_e32 v3, s3
; GFX10-WGP-NEXT: flat_atomic_cmpswap v2, v[0:1], v[2:3] glc
; GFX10-WGP-NEXT: s_waitcnt vmcnt(0)
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: v_mov_b32_e32 v0, s0
; GFX10-WGP-NEXT: v_mov_b32_e32 v1, s1
; GFX10-WGP-NEXT: s_waitcnt lgkmcnt(0)
@@ -12765,8 +12765,8 @@ define amdgpu_kernel void @flat_system_one_as_acquire_seq_cst_ret_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v3, s3
; GFX10-CU-NEXT: flat_atomic_cmpswap v2, v[0:1], v[2:3] glc
; GFX10-CU-NEXT: s_waitcnt vmcnt(0)
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s1
; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
@@ -12855,8 +12855,8 @@ define amdgpu_kernel void @flat_system_one_as_acquire_seq_cst_ret_cmpxchg(
; GFX11-WGP-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-WGP-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] offset:16 glc
; GFX11-WGP-NEXT: s_waitcnt vmcnt(0)
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-WGP-NEXT: flat_store_b32 v[0:1], v2
; GFX11-WGP-NEXT: s_endpgm
@@ -12869,8 +12869,8 @@ define amdgpu_kernel void @flat_system_one_as_acquire_seq_cst_ret_cmpxchg(
; GFX11-CU-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-CU-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] offset:16 glc
; GFX11-CU-NEXT: s_waitcnt vmcnt(0)
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-CU-NEXT: flat_store_b32 v[0:1], v2
; GFX11-CU-NEXT: s_endpgm
@@ -12915,8 +12915,8 @@ define amdgpu_kernel void @flat_system_one_as_release_seq_cst_ret_cmpxchg(
; GFX10-WGP-NEXT: v_mov_b32_e32 v3, s3
; GFX10-WGP-NEXT: flat_atomic_cmpswap v2, v[0:1], v[2:3] glc
; GFX10-WGP-NEXT: s_waitcnt vmcnt(0)
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: v_mov_b32_e32 v0, s0
; GFX10-WGP-NEXT: v_mov_b32_e32 v1, s1
; GFX10-WGP-NEXT: s_waitcnt lgkmcnt(0)
@@ -12935,8 +12935,8 @@ define amdgpu_kernel void @flat_system_one_as_release_seq_cst_ret_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v3, s3
; GFX10-CU-NEXT: flat_atomic_cmpswap v2, v[0:1], v[2:3] glc
; GFX10-CU-NEXT: s_waitcnt vmcnt(0)
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s1
; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
@@ -13025,8 +13025,8 @@ define amdgpu_kernel void @flat_system_one_as_release_seq_cst_ret_cmpxchg(
; GFX11-WGP-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-WGP-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] offset:16 glc
; GFX11-WGP-NEXT: s_waitcnt vmcnt(0)
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-WGP-NEXT: flat_store_b32 v[0:1], v2
; GFX11-WGP-NEXT: s_endpgm
@@ -13039,8 +13039,8 @@ define amdgpu_kernel void @flat_system_one_as_release_seq_cst_ret_cmpxchg(
; GFX11-CU-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-CU-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] offset:16 glc
; GFX11-CU-NEXT: s_waitcnt vmcnt(0)
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-CU-NEXT: flat_store_b32 v[0:1], v2
; GFX11-CU-NEXT: s_endpgm
@@ -13085,8 +13085,8 @@ define amdgpu_kernel void @flat_system_one_as_acq_rel_seq_cst_ret_cmpxchg(
; GFX10-WGP-NEXT: v_mov_b32_e32 v3, s3
; GFX10-WGP-NEXT: flat_atomic_cmpswap v2, v[0:1], v[2:3] glc
; GFX10-WGP-NEXT: s_waitcnt vmcnt(0)
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: v_mov_b32_e32 v0, s0
; GFX10-WGP-NEXT: v_mov_b32_e32 v1, s1
; GFX10-WGP-NEXT: s_waitcnt lgkmcnt(0)
@@ -13105,8 +13105,8 @@ define amdgpu_kernel void @flat_system_one_as_acq_rel_seq_cst_ret_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v3, s3
; GFX10-CU-NEXT: flat_atomic_cmpswap v2, v[0:1], v[2:3] glc
; GFX10-CU-NEXT: s_waitcnt vmcnt(0)
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s1
; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
@@ -13195,8 +13195,8 @@ define amdgpu_kernel void @flat_system_one_as_acq_rel_seq_cst_ret_cmpxchg(
; GFX11-WGP-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-WGP-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] offset:16 glc
; GFX11-WGP-NEXT: s_waitcnt vmcnt(0)
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-WGP-NEXT: flat_store_b32 v[0:1], v2
; GFX11-WGP-NEXT: s_endpgm
@@ -13209,8 +13209,8 @@ define amdgpu_kernel void @flat_system_one_as_acq_rel_seq_cst_ret_cmpxchg(
; GFX11-CU-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-CU-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] offset:16 glc
; GFX11-CU-NEXT: s_waitcnt vmcnt(0)
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-CU-NEXT: flat_store_b32 v[0:1], v2
; GFX11-CU-NEXT: s_endpgm
@@ -13255,8 +13255,8 @@ define amdgpu_kernel void @flat_system_one_as_seq_cst_seq_cst_ret_cmpxchg(
; GFX10-WGP-NEXT: v_mov_b32_e32 v3, s3
; GFX10-WGP-NEXT: flat_atomic_cmpswap v2, v[0:1], v[2:3] glc
; GFX10-WGP-NEXT: s_waitcnt vmcnt(0)
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: v_mov_b32_e32 v0, s0
; GFX10-WGP-NEXT: v_mov_b32_e32 v1, s1
; GFX10-WGP-NEXT: s_waitcnt lgkmcnt(0)
@@ -13275,8 +13275,8 @@ define amdgpu_kernel void @flat_system_one_as_seq_cst_seq_cst_ret_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v3, s3
; GFX10-CU-NEXT: flat_atomic_cmpswap v2, v[0:1], v[2:3] glc
; GFX10-CU-NEXT: s_waitcnt vmcnt(0)
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s1
; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
@@ -13365,8 +13365,8 @@ define amdgpu_kernel void @flat_system_one_as_seq_cst_seq_cst_ret_cmpxchg(
; GFX11-WGP-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-WGP-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] offset:16 glc
; GFX11-WGP-NEXT: s_waitcnt vmcnt(0)
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-WGP-NEXT: flat_store_b32 v[0:1], v2
; GFX11-WGP-NEXT: s_endpgm
@@ -13379,8 +13379,8 @@ define amdgpu_kernel void @flat_system_one_as_seq_cst_seq_cst_ret_cmpxchg(
; GFX11-CU-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-CU-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] offset:16 glc
; GFX11-CU-NEXT: s_waitcnt vmcnt(0)
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-CU-NEXT: flat_store_b32 v[0:1], v2
; GFX11-CU-NEXT: s_endpgm
diff --git a/llvm/test/CodeGen/AMDGPU/memory-legalizer-global-agent.ll b/llvm/test/CodeGen/AMDGPU/memory-legalizer-global-agent.ll
index 0a7a07e..0449e41 100644
--- a/llvm/test/CodeGen/AMDGPU/memory-legalizer-global-agent.ll
+++ b/llvm/test/CodeGen/AMDGPU/memory-legalizer-global-agent.ll
@@ -317,8 +317,8 @@ define amdgpu_kernel void @global_agent_acquire_load(
; GFX10-WGP-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-WGP-NEXT: global_load_dword v1, v0, s[0:1] glc dlc
; GFX10-WGP-NEXT: s_waitcnt vmcnt(0)
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: global_store_dword v0, v1, s[2:3]
; GFX10-WGP-NEXT: s_endpgm
;
@@ -329,8 +329,8 @@ define amdgpu_kernel void @global_agent_acquire_load(
; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-CU-NEXT: global_load_dword v1, v0, s[0:1] glc dlc
; GFX10-CU-NEXT: s_waitcnt vmcnt(0)
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: global_store_dword v0, v1, s[2:3]
; GFX10-CU-NEXT: s_endpgm
;
@@ -400,8 +400,8 @@ define amdgpu_kernel void @global_agent_acquire_load(
; GFX11-WGP-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-WGP-NEXT: global_load_b32 v1, v0, s[0:1] glc
; GFX11-WGP-NEXT: s_waitcnt vmcnt(0)
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: global_store_b32 v0, v1, s[2:3]
; GFX11-WGP-NEXT: s_nop 0
; GFX11-WGP-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -414,8 +414,8 @@ define amdgpu_kernel void @global_agent_acquire_load(
; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-CU-NEXT: global_load_b32 v1, v0, s[0:1] glc
; GFX11-CU-NEXT: s_waitcnt vmcnt(0)
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: global_store_b32 v0, v1, s[2:3]
; GFX11-CU-NEXT: s_nop 0
; GFX11-CU-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -465,8 +465,8 @@ define amdgpu_kernel void @global_agent_seq_cst_load(
; GFX10-WGP-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-WGP-NEXT: global_load_dword v1, v0, s[0:1] glc dlc
; GFX10-WGP-NEXT: s_waitcnt vmcnt(0)
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: global_store_dword v0, v1, s[2:3]
; GFX10-WGP-NEXT: s_endpgm
;
@@ -477,8 +477,8 @@ define amdgpu_kernel void @global_agent_seq_cst_load(
; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-CU-NEXT: global_load_dword v1, v0, s[0:1] glc dlc
; GFX10-CU-NEXT: s_waitcnt vmcnt(0)
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: global_store_dword v0, v1, s[2:3]
; GFX10-CU-NEXT: s_endpgm
;
@@ -548,8 +548,8 @@ define amdgpu_kernel void @global_agent_seq_cst_load(
; GFX11-WGP-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-WGP-NEXT: global_load_b32 v1, v0, s[0:1] glc
; GFX11-WGP-NEXT: s_waitcnt vmcnt(0)
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: global_store_b32 v0, v1, s[2:3]
; GFX11-WGP-NEXT: s_nop 0
; GFX11-WGP-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -562,8 +562,8 @@ define amdgpu_kernel void @global_agent_seq_cst_load(
; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-CU-NEXT: global_load_b32 v1, v0, s[0:1] glc
; GFX11-CU-NEXT: s_waitcnt vmcnt(0)
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: global_store_b32 v0, v1, s[2:3]
; GFX11-CU-NEXT: s_nop 0
; GFX11-CU-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -1241,8 +1241,8 @@ define amdgpu_kernel void @global_agent_acquire_atomicrmw(
; GFX10-WGP-NEXT: v_mov_b32_e32 v1, s2
; GFX10-WGP-NEXT: global_atomic_swap v0, v1, s[0:1]
; GFX10-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: s_endpgm
;
; GFX10-CU-LABEL: global_agent_acquire_atomicrmw:
@@ -1255,8 +1255,8 @@ define amdgpu_kernel void @global_agent_acquire_atomicrmw(
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s2
; GFX10-CU-NEXT: global_atomic_swap v0, v1, s[0:1]
; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: s_endpgm
;
; SKIP-CACHE-INV-LABEL: global_agent_acquire_atomicrmw:
@@ -1328,8 +1328,8 @@ define amdgpu_kernel void @global_agent_acquire_atomicrmw(
; GFX11-WGP-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, s2
; GFX11-WGP-NEXT: global_atomic_swap_b32 v0, v1, s[0:1]
; GFX11-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: s_endpgm
;
; GFX11-CU-LABEL: global_agent_acquire_atomicrmw:
@@ -1341,8 +1341,8 @@ define amdgpu_kernel void @global_agent_acquire_atomicrmw(
; GFX11-CU-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, s2
; GFX11-CU-NEXT: global_atomic_swap_b32 v0, v1, s[0:1]
; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: s_endpgm
ptr addrspace(1) %out, i32 %in) {
entry:
@@ -1514,8 +1514,8 @@ define amdgpu_kernel void @global_agent_acq_rel_atomicrmw(
; GFX10-WGP-NEXT: v_mov_b32_e32 v1, s2
; GFX10-WGP-NEXT: global_atomic_swap v0, v1, s[0:1]
; GFX10-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: s_endpgm
;
; GFX10-CU-LABEL: global_agent_acq_rel_atomicrmw:
@@ -1528,8 +1528,8 @@ define amdgpu_kernel void @global_agent_acq_rel_atomicrmw(
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s2
; GFX10-CU-NEXT: global_atomic_swap v0, v1, s[0:1]
; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: s_endpgm
;
; SKIP-CACHE-INV-LABEL: global_agent_acq_rel_atomicrmw:
@@ -1603,8 +1603,8 @@ define amdgpu_kernel void @global_agent_acq_rel_atomicrmw(
; GFX11-WGP-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, s2
; GFX11-WGP-NEXT: global_atomic_swap_b32 v0, v1, s[0:1]
; GFX11-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: s_endpgm
;
; GFX11-CU-LABEL: global_agent_acq_rel_atomicrmw:
@@ -1616,8 +1616,8 @@ define amdgpu_kernel void @global_agent_acq_rel_atomicrmw(
; GFX11-CU-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, s2
; GFX11-CU-NEXT: global_atomic_swap_b32 v0, v1, s[0:1]
; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: s_endpgm
ptr addrspace(1) %out, i32 %in) {
entry:
@@ -1662,8 +1662,8 @@ define amdgpu_kernel void @global_agent_seq_cst_atomicrmw(
; GFX10-WGP-NEXT: v_mov_b32_e32 v1, s2
; GFX10-WGP-NEXT: global_atomic_swap v0, v1, s[0:1]
; GFX10-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: s_endpgm
;
; GFX10-CU-LABEL: global_agent_seq_cst_atomicrmw:
@@ -1676,8 +1676,8 @@ define amdgpu_kernel void @global_agent_seq_cst_atomicrmw(
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s2
; GFX10-CU-NEXT: global_atomic_swap v0, v1, s[0:1]
; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: s_endpgm
;
; SKIP-CACHE-INV-LABEL: global_agent_seq_cst_atomicrmw:
@@ -1751,8 +1751,8 @@ define amdgpu_kernel void @global_agent_seq_cst_atomicrmw(
; GFX11-WGP-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, s2
; GFX11-WGP-NEXT: global_atomic_swap_b32 v0, v1, s[0:1]
; GFX11-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: s_endpgm
;
; GFX11-CU-LABEL: global_agent_seq_cst_atomicrmw:
@@ -1764,8 +1764,8 @@ define amdgpu_kernel void @global_agent_seq_cst_atomicrmw(
; GFX11-CU-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, s2
; GFX11-CU-NEXT: global_atomic_swap_b32 v0, v1, s[0:1]
; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: s_endpgm
ptr addrspace(1) %out, i32 %in) {
entry:
@@ -1812,8 +1812,8 @@ define amdgpu_kernel void @global_agent_acquire_ret_atomicrmw(
; GFX10-WGP-NEXT: v_mov_b32_e32 v1, s2
; GFX10-WGP-NEXT: global_atomic_swap v1, v0, v1, s[0:1] glc
; GFX10-WGP-NEXT: s_waitcnt vmcnt(0)
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: global_store_dword v0, v1, s[0:1]
; GFX10-WGP-NEXT: s_endpgm
;
@@ -1827,8 +1827,8 @@ define amdgpu_kernel void @global_agent_acquire_ret_atomicrmw(
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s2
; GFX10-CU-NEXT: global_atomic_swap v1, v0, v1, s[0:1] glc
; GFX10-CU-NEXT: s_waitcnt vmcnt(0)
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: global_store_dword v0, v1, s[0:1]
; GFX10-CU-NEXT: s_endpgm
;
@@ -1906,8 +1906,8 @@ define amdgpu_kernel void @global_agent_acquire_ret_atomicrmw(
; GFX11-WGP-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, s2
; GFX11-WGP-NEXT: global_atomic_swap_b32 v1, v0, v1, s[0:1] glc
; GFX11-WGP-NEXT: s_waitcnt vmcnt(0)
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: global_store_b32 v0, v1, s[0:1]
; GFX11-WGP-NEXT: s_nop 0
; GFX11-WGP-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -1922,8 +1922,8 @@ define amdgpu_kernel void @global_agent_acquire_ret_atomicrmw(
; GFX11-CU-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, s2
; GFX11-CU-NEXT: global_atomic_swap_b32 v1, v0, v1, s[0:1] glc
; GFX11-CU-NEXT: s_waitcnt vmcnt(0)
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: global_store_b32 v0, v1, s[0:1]
; GFX11-CU-NEXT: s_nop 0
; GFX11-CU-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -1974,8 +1974,8 @@ define amdgpu_kernel void @global_agent_acq_rel_ret_atomicrmw(
; GFX10-WGP-NEXT: v_mov_b32_e32 v1, s2
; GFX10-WGP-NEXT: global_atomic_swap v1, v0, v1, s[0:1] glc
; GFX10-WGP-NEXT: s_waitcnt vmcnt(0)
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: global_store_dword v0, v1, s[0:1]
; GFX10-WGP-NEXT: s_endpgm
;
@@ -1989,8 +1989,8 @@ define amdgpu_kernel void @global_agent_acq_rel_ret_atomicrmw(
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s2
; GFX10-CU-NEXT: global_atomic_swap v1, v0, v1, s[0:1] glc
; GFX10-CU-NEXT: s_waitcnt vmcnt(0)
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: global_store_dword v0, v1, s[0:1]
; GFX10-CU-NEXT: s_endpgm
;
@@ -2070,8 +2070,8 @@ define amdgpu_kernel void @global_agent_acq_rel_ret_atomicrmw(
; GFX11-WGP-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, s2
; GFX11-WGP-NEXT: global_atomic_swap_b32 v1, v0, v1, s[0:1] glc
; GFX11-WGP-NEXT: s_waitcnt vmcnt(0)
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: global_store_b32 v0, v1, s[0:1]
; GFX11-WGP-NEXT: s_nop 0
; GFX11-WGP-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -2086,8 +2086,8 @@ define amdgpu_kernel void @global_agent_acq_rel_ret_atomicrmw(
; GFX11-CU-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, s2
; GFX11-CU-NEXT: global_atomic_swap_b32 v1, v0, v1, s[0:1] glc
; GFX11-CU-NEXT: s_waitcnt vmcnt(0)
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: global_store_b32 v0, v1, s[0:1]
; GFX11-CU-NEXT: s_nop 0
; GFX11-CU-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -2138,8 +2138,8 @@ define amdgpu_kernel void @global_agent_seq_cst_ret_atomicrmw(
; GFX10-WGP-NEXT: v_mov_b32_e32 v1, s2
; GFX10-WGP-NEXT: global_atomic_swap v1, v0, v1, s[0:1] glc
; GFX10-WGP-NEXT: s_waitcnt vmcnt(0)
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: global_store_dword v0, v1, s[0:1]
; GFX10-WGP-NEXT: s_endpgm
;
@@ -2153,8 +2153,8 @@ define amdgpu_kernel void @global_agent_seq_cst_ret_atomicrmw(
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s2
; GFX10-CU-NEXT: global_atomic_swap v1, v0, v1, s[0:1] glc
; GFX10-CU-NEXT: s_waitcnt vmcnt(0)
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: global_store_dword v0, v1, s[0:1]
; GFX10-CU-NEXT: s_endpgm
;
@@ -2234,8 +2234,8 @@ define amdgpu_kernel void @global_agent_seq_cst_ret_atomicrmw(
; GFX11-WGP-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, s2
; GFX11-WGP-NEXT: global_atomic_swap_b32 v1, v0, v1, s[0:1] glc
; GFX11-WGP-NEXT: s_waitcnt vmcnt(0)
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: global_store_b32 v0, v1, s[0:1]
; GFX11-WGP-NEXT: s_nop 0
; GFX11-WGP-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -2250,8 +2250,8 @@ define amdgpu_kernel void @global_agent_seq_cst_ret_atomicrmw(
; GFX11-CU-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, s2
; GFX11-CU-NEXT: global_atomic_swap_b32 v1, v0, v1, s[0:1] glc
; GFX11-CU-NEXT: s_waitcnt vmcnt(0)
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: global_store_b32 v0, v1, s[0:1]
; GFX11-CU-NEXT: s_nop 0
; GFX11-CU-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -2427,8 +2427,8 @@ define amdgpu_kernel void @global_agent_acquire_monotonic_cmpxchg(
; GFX10-WGP-NEXT: v_mov_b32_e32 v1, s3
; GFX10-WGP-NEXT: global_atomic_cmpswap v2, v[0:1], s[0:1] offset:16
; GFX10-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: s_endpgm
;
; GFX10-CU-LABEL: global_agent_acquire_monotonic_cmpxchg:
@@ -2440,8 +2440,8 @@ define amdgpu_kernel void @global_agent_acquire_monotonic_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s3
; GFX10-CU-NEXT: global_atomic_cmpswap v2, v[0:1], s[0:1] offset:16
; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: s_endpgm
;
; SKIP-CACHE-INV-LABEL: global_agent_acquire_monotonic_cmpxchg:
@@ -2510,8 +2510,8 @@ define amdgpu_kernel void @global_agent_acquire_monotonic_cmpxchg(
; GFX11-WGP-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
; GFX11-WGP-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], s[0:1] offset:16
; GFX11-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: s_endpgm
;
; GFX11-CU-LABEL: global_agent_acquire_monotonic_cmpxchg:
@@ -2522,8 +2522,8 @@ define amdgpu_kernel void @global_agent_acquire_monotonic_cmpxchg(
; GFX11-CU-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
; GFX11-CU-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], s[0:1] offset:16
; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: s_endpgm
ptr addrspace(1) %out, i32 %in, i32 %old) {
entry:
@@ -2698,8 +2698,8 @@ define amdgpu_kernel void @global_agent_acq_rel_monotonic_cmpxchg(
; GFX10-WGP-NEXT: v_mov_b32_e32 v1, s3
; GFX10-WGP-NEXT: global_atomic_cmpswap v2, v[0:1], s[0:1] offset:16
; GFX10-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: s_endpgm
;
; GFX10-CU-LABEL: global_agent_acq_rel_monotonic_cmpxchg:
@@ -2711,8 +2711,8 @@ define amdgpu_kernel void @global_agent_acq_rel_monotonic_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s3
; GFX10-CU-NEXT: global_atomic_cmpswap v2, v[0:1], s[0:1] offset:16
; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: s_endpgm
;
; SKIP-CACHE-INV-LABEL: global_agent_acq_rel_monotonic_cmpxchg:
@@ -2783,8 +2783,8 @@ define amdgpu_kernel void @global_agent_acq_rel_monotonic_cmpxchg(
; GFX11-WGP-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
; GFX11-WGP-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], s[0:1] offset:16
; GFX11-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: s_endpgm
;
; GFX11-CU-LABEL: global_agent_acq_rel_monotonic_cmpxchg:
@@ -2795,8 +2795,8 @@ define amdgpu_kernel void @global_agent_acq_rel_monotonic_cmpxchg(
; GFX11-CU-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
; GFX11-CU-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], s[0:1] offset:16
; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: s_endpgm
ptr addrspace(1) %out, i32 %in, i32 %old) {
entry:
@@ -2845,8 +2845,8 @@ define amdgpu_kernel void @global_agent_seq_cst_monotonic_cmpxchg(
; GFX10-WGP-NEXT: v_mov_b32_e32 v1, s3
; GFX10-WGP-NEXT: global_atomic_cmpswap v2, v[0:1], s[0:1] offset:16
; GFX10-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: s_endpgm
;
; GFX10-CU-LABEL: global_agent_seq_cst_monotonic_cmpxchg:
@@ -2858,8 +2858,8 @@ define amdgpu_kernel void @global_agent_seq_cst_monotonic_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s3
; GFX10-CU-NEXT: global_atomic_cmpswap v2, v[0:1], s[0:1] offset:16
; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: s_endpgm
;
; SKIP-CACHE-INV-LABEL: global_agent_seq_cst_monotonic_cmpxchg:
@@ -2930,8 +2930,8 @@ define amdgpu_kernel void @global_agent_seq_cst_monotonic_cmpxchg(
; GFX11-WGP-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
; GFX11-WGP-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], s[0:1] offset:16
; GFX11-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: s_endpgm
;
; GFX11-CU-LABEL: global_agent_seq_cst_monotonic_cmpxchg:
@@ -2942,8 +2942,8 @@ define amdgpu_kernel void @global_agent_seq_cst_monotonic_cmpxchg(
; GFX11-CU-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
; GFX11-CU-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], s[0:1] offset:16
; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: s_endpgm
ptr addrspace(1) %out, i32 %in, i32 %old) {
entry:
@@ -2992,8 +2992,8 @@ define amdgpu_kernel void @global_agent_monotonic_acquire_cmpxchg(
; GFX10-WGP-NEXT: v_mov_b32_e32 v1, s3
; GFX10-WGP-NEXT: global_atomic_cmpswap v2, v[0:1], s[0:1] offset:16
; GFX10-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: s_endpgm
;
; GFX10-CU-LABEL: global_agent_monotonic_acquire_cmpxchg:
@@ -3005,8 +3005,8 @@ define amdgpu_kernel void @global_agent_monotonic_acquire_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s3
; GFX10-CU-NEXT: global_atomic_cmpswap v2, v[0:1], s[0:1] offset:16
; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: s_endpgm
;
; SKIP-CACHE-INV-LABEL: global_agent_monotonic_acquire_cmpxchg:
@@ -3075,8 +3075,8 @@ define amdgpu_kernel void @global_agent_monotonic_acquire_cmpxchg(
; GFX11-WGP-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
; GFX11-WGP-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], s[0:1] offset:16
; GFX11-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: s_endpgm
;
; GFX11-CU-LABEL: global_agent_monotonic_acquire_cmpxchg:
@@ -3087,8 +3087,8 @@ define amdgpu_kernel void @global_agent_monotonic_acquire_cmpxchg(
; GFX11-CU-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
; GFX11-CU-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], s[0:1] offset:16
; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: s_endpgm
ptr addrspace(1) %out, i32 %in, i32 %old) {
entry:
@@ -3137,8 +3137,8 @@ define amdgpu_kernel void @global_agent_acquire_acquire_cmpxchg(
; GFX10-WGP-NEXT: v_mov_b32_e32 v1, s3
; GFX10-WGP-NEXT: global_atomic_cmpswap v2, v[0:1], s[0:1] offset:16
; GFX10-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: s_endpgm
;
; GFX10-CU-LABEL: global_agent_acquire_acquire_cmpxchg:
@@ -3150,8 +3150,8 @@ define amdgpu_kernel void @global_agent_acquire_acquire_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s3
; GFX10-CU-NEXT: global_atomic_cmpswap v2, v[0:1], s[0:1] offset:16
; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: s_endpgm
;
; SKIP-CACHE-INV-LABEL: global_agent_acquire_acquire_cmpxchg:
@@ -3220,8 +3220,8 @@ define amdgpu_kernel void @global_agent_acquire_acquire_cmpxchg(
; GFX11-WGP-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
; GFX11-WGP-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], s[0:1] offset:16
; GFX11-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: s_endpgm
;
; GFX11-CU-LABEL: global_agent_acquire_acquire_cmpxchg:
@@ -3232,8 +3232,8 @@ define amdgpu_kernel void @global_agent_acquire_acquire_cmpxchg(
; GFX11-CU-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
; GFX11-CU-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], s[0:1] offset:16
; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: s_endpgm
ptr addrspace(1) %out, i32 %in, i32 %old) {
entry:
@@ -3282,8 +3282,8 @@ define amdgpu_kernel void @global_agent_release_acquire_cmpxchg(
; GFX10-WGP-NEXT: v_mov_b32_e32 v1, s3
; GFX10-WGP-NEXT: global_atomic_cmpswap v2, v[0:1], s[0:1] offset:16
; GFX10-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: s_endpgm
;
; GFX10-CU-LABEL: global_agent_release_acquire_cmpxchg:
@@ -3295,8 +3295,8 @@ define amdgpu_kernel void @global_agent_release_acquire_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s3
; GFX10-CU-NEXT: global_atomic_cmpswap v2, v[0:1], s[0:1] offset:16
; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: s_endpgm
;
; SKIP-CACHE-INV-LABEL: global_agent_release_acquire_cmpxchg:
@@ -3367,8 +3367,8 @@ define amdgpu_kernel void @global_agent_release_acquire_cmpxchg(
; GFX11-WGP-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
; GFX11-WGP-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], s[0:1] offset:16
; GFX11-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: s_endpgm
;
; GFX11-CU-LABEL: global_agent_release_acquire_cmpxchg:
@@ -3379,8 +3379,8 @@ define amdgpu_kernel void @global_agent_release_acquire_cmpxchg(
; GFX11-CU-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
; GFX11-CU-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], s[0:1] offset:16
; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: s_endpgm
ptr addrspace(1) %out, i32 %in, i32 %old) {
entry:
@@ -3429,8 +3429,8 @@ define amdgpu_kernel void @global_agent_acq_rel_acquire_cmpxchg(
; GFX10-WGP-NEXT: v_mov_b32_e32 v1, s3
; GFX10-WGP-NEXT: global_atomic_cmpswap v2, v[0:1], s[0:1] offset:16
; GFX10-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: s_endpgm
;
; GFX10-CU-LABEL: global_agent_acq_rel_acquire_cmpxchg:
@@ -3442,8 +3442,8 @@ define amdgpu_kernel void @global_agent_acq_rel_acquire_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s3
; GFX10-CU-NEXT: global_atomic_cmpswap v2, v[0:1], s[0:1] offset:16
; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: s_endpgm
;
; SKIP-CACHE-INV-LABEL: global_agent_acq_rel_acquire_cmpxchg:
@@ -3514,8 +3514,8 @@ define amdgpu_kernel void @global_agent_acq_rel_acquire_cmpxchg(
; GFX11-WGP-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
; GFX11-WGP-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], s[0:1] offset:16
; GFX11-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: s_endpgm
;
; GFX11-CU-LABEL: global_agent_acq_rel_acquire_cmpxchg:
@@ -3526,8 +3526,8 @@ define amdgpu_kernel void @global_agent_acq_rel_acquire_cmpxchg(
; GFX11-CU-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
; GFX11-CU-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], s[0:1] offset:16
; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: s_endpgm
ptr addrspace(1) %out, i32 %in, i32 %old) {
entry:
@@ -3576,8 +3576,8 @@ define amdgpu_kernel void @global_agent_seq_cst_acquire_cmpxchg(
; GFX10-WGP-NEXT: v_mov_b32_e32 v1, s3
; GFX10-WGP-NEXT: global_atomic_cmpswap v2, v[0:1], s[0:1] offset:16
; GFX10-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: s_endpgm
;
; GFX10-CU-LABEL: global_agent_seq_cst_acquire_cmpxchg:
@@ -3589,8 +3589,8 @@ define amdgpu_kernel void @global_agent_seq_cst_acquire_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s3
; GFX10-CU-NEXT: global_atomic_cmpswap v2, v[0:1], s[0:1] offset:16
; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: s_endpgm
;
; SKIP-CACHE-INV-LABEL: global_agent_seq_cst_acquire_cmpxchg:
@@ -3661,8 +3661,8 @@ define amdgpu_kernel void @global_agent_seq_cst_acquire_cmpxchg(
; GFX11-WGP-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
; GFX11-WGP-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], s[0:1] offset:16
; GFX11-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: s_endpgm
;
; GFX11-CU-LABEL: global_agent_seq_cst_acquire_cmpxchg:
@@ -3673,8 +3673,8 @@ define amdgpu_kernel void @global_agent_seq_cst_acquire_cmpxchg(
; GFX11-CU-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
; GFX11-CU-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], s[0:1] offset:16
; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: s_endpgm
ptr addrspace(1) %out, i32 %in, i32 %old) {
entry:
@@ -3723,8 +3723,8 @@ define amdgpu_kernel void @global_agent_monotonic_seq_cst_cmpxchg(
; GFX10-WGP-NEXT: v_mov_b32_e32 v1, s3
; GFX10-WGP-NEXT: global_atomic_cmpswap v2, v[0:1], s[0:1] offset:16
; GFX10-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: s_endpgm
;
; GFX10-CU-LABEL: global_agent_monotonic_seq_cst_cmpxchg:
@@ -3736,8 +3736,8 @@ define amdgpu_kernel void @global_agent_monotonic_seq_cst_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s3
; GFX10-CU-NEXT: global_atomic_cmpswap v2, v[0:1], s[0:1] offset:16
; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: s_endpgm
;
; SKIP-CACHE-INV-LABEL: global_agent_monotonic_seq_cst_cmpxchg:
@@ -3808,8 +3808,8 @@ define amdgpu_kernel void @global_agent_monotonic_seq_cst_cmpxchg(
; GFX11-WGP-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
; GFX11-WGP-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], s[0:1] offset:16
; GFX11-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: s_endpgm
;
; GFX11-CU-LABEL: global_agent_monotonic_seq_cst_cmpxchg:
@@ -3820,8 +3820,8 @@ define amdgpu_kernel void @global_agent_monotonic_seq_cst_cmpxchg(
; GFX11-CU-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
; GFX11-CU-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], s[0:1] offset:16
; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: s_endpgm
ptr addrspace(1) %out, i32 %in, i32 %old) {
entry:
@@ -3870,8 +3870,8 @@ define amdgpu_kernel void @global_agent_acquire_seq_cst_cmpxchg(
; GFX10-WGP-NEXT: v_mov_b32_e32 v1, s3
; GFX10-WGP-NEXT: global_atomic_cmpswap v2, v[0:1], s[0:1] offset:16
; GFX10-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: s_endpgm
;
; GFX10-CU-LABEL: global_agent_acquire_seq_cst_cmpxchg:
@@ -3883,8 +3883,8 @@ define amdgpu_kernel void @global_agent_acquire_seq_cst_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s3
; GFX10-CU-NEXT: global_atomic_cmpswap v2, v[0:1], s[0:1] offset:16
; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: s_endpgm
;
; SKIP-CACHE-INV-LABEL: global_agent_acquire_seq_cst_cmpxchg:
@@ -3955,8 +3955,8 @@ define amdgpu_kernel void @global_agent_acquire_seq_cst_cmpxchg(
; GFX11-WGP-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
; GFX11-WGP-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], s[0:1] offset:16
; GFX11-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: s_endpgm
;
; GFX11-CU-LABEL: global_agent_acquire_seq_cst_cmpxchg:
@@ -3967,8 +3967,8 @@ define amdgpu_kernel void @global_agent_acquire_seq_cst_cmpxchg(
; GFX11-CU-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
; GFX11-CU-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], s[0:1] offset:16
; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: s_endpgm
ptr addrspace(1) %out, i32 %in, i32 %old) {
entry:
@@ -4017,8 +4017,8 @@ define amdgpu_kernel void @global_agent_release_seq_cst_cmpxchg(
; GFX10-WGP-NEXT: v_mov_b32_e32 v1, s3
; GFX10-WGP-NEXT: global_atomic_cmpswap v2, v[0:1], s[0:1] offset:16
; GFX10-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: s_endpgm
;
; GFX10-CU-LABEL: global_agent_release_seq_cst_cmpxchg:
@@ -4030,8 +4030,8 @@ define amdgpu_kernel void @global_agent_release_seq_cst_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s3
; GFX10-CU-NEXT: global_atomic_cmpswap v2, v[0:1], s[0:1] offset:16
; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: s_endpgm
;
; SKIP-CACHE-INV-LABEL: global_agent_release_seq_cst_cmpxchg:
@@ -4102,8 +4102,8 @@ define amdgpu_kernel void @global_agent_release_seq_cst_cmpxchg(
; GFX11-WGP-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
; GFX11-WGP-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], s[0:1] offset:16
; GFX11-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: s_endpgm
;
; GFX11-CU-LABEL: global_agent_release_seq_cst_cmpxchg:
@@ -4114,8 +4114,8 @@ define amdgpu_kernel void @global_agent_release_seq_cst_cmpxchg(
; GFX11-CU-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
; GFX11-CU-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], s[0:1] offset:16
; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: s_endpgm
ptr addrspace(1) %out, i32 %in, i32 %old) {
entry:
@@ -4164,8 +4164,8 @@ define amdgpu_kernel void @global_agent_acq_rel_seq_cst_cmpxchg(
; GFX10-WGP-NEXT: v_mov_b32_e32 v1, s3
; GFX10-WGP-NEXT: global_atomic_cmpswap v2, v[0:1], s[0:1] offset:16
; GFX10-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: s_endpgm
;
; GFX10-CU-LABEL: global_agent_acq_rel_seq_cst_cmpxchg:
@@ -4177,8 +4177,8 @@ define amdgpu_kernel void @global_agent_acq_rel_seq_cst_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s3
; GFX10-CU-NEXT: global_atomic_cmpswap v2, v[0:1], s[0:1] offset:16
; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: s_endpgm
;
; SKIP-CACHE-INV-LABEL: global_agent_acq_rel_seq_cst_cmpxchg:
@@ -4249,8 +4249,8 @@ define amdgpu_kernel void @global_agent_acq_rel_seq_cst_cmpxchg(
; GFX11-WGP-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
; GFX11-WGP-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], s[0:1] offset:16
; GFX11-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: s_endpgm
;
; GFX11-CU-LABEL: global_agent_acq_rel_seq_cst_cmpxchg:
@@ -4261,8 +4261,8 @@ define amdgpu_kernel void @global_agent_acq_rel_seq_cst_cmpxchg(
; GFX11-CU-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
; GFX11-CU-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], s[0:1] offset:16
; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: s_endpgm
ptr addrspace(1) %out, i32 %in, i32 %old) {
entry:
@@ -4311,8 +4311,8 @@ define amdgpu_kernel void @global_agent_seq_cst_seq_cst_cmpxchg(
; GFX10-WGP-NEXT: v_mov_b32_e32 v1, s3
; GFX10-WGP-NEXT: global_atomic_cmpswap v2, v[0:1], s[0:1] offset:16
; GFX10-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: s_endpgm
;
; GFX10-CU-LABEL: global_agent_seq_cst_seq_cst_cmpxchg:
@@ -4324,8 +4324,8 @@ define amdgpu_kernel void @global_agent_seq_cst_seq_cst_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s3
; GFX10-CU-NEXT: global_atomic_cmpswap v2, v[0:1], s[0:1] offset:16
; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: s_endpgm
;
; SKIP-CACHE-INV-LABEL: global_agent_seq_cst_seq_cst_cmpxchg:
@@ -4396,8 +4396,8 @@ define amdgpu_kernel void @global_agent_seq_cst_seq_cst_cmpxchg(
; GFX11-WGP-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
; GFX11-WGP-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], s[0:1] offset:16
; GFX11-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: s_endpgm
;
; GFX11-CU-LABEL: global_agent_seq_cst_seq_cst_cmpxchg:
@@ -4408,8 +4408,8 @@ define amdgpu_kernel void @global_agent_seq_cst_seq_cst_cmpxchg(
; GFX11-CU-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
; GFX11-CU-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], s[0:1] offset:16
; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: s_endpgm
ptr addrspace(1) %out, i32 %in, i32 %old) {
entry:
@@ -4612,8 +4612,8 @@ define amdgpu_kernel void @global_agent_acquire_monotonic_ret_cmpxchg(
; GFX10-WGP-NEXT: v_mov_b32_e32 v1, s3
; GFX10-WGP-NEXT: global_atomic_cmpswap v0, v2, v[0:1], s[0:1] offset:16 glc
; GFX10-WGP-NEXT: s_waitcnt vmcnt(0)
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: global_store_dword v2, v0, s[0:1]
; GFX10-WGP-NEXT: s_endpgm
;
@@ -4626,8 +4626,8 @@ define amdgpu_kernel void @global_agent_acquire_monotonic_ret_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s3
; GFX10-CU-NEXT: global_atomic_cmpswap v0, v2, v[0:1], s[0:1] offset:16 glc
; GFX10-CU-NEXT: s_waitcnt vmcnt(0)
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: global_store_dword v2, v0, s[0:1]
; GFX10-CU-NEXT: s_endpgm
;
@@ -4702,8 +4702,8 @@ define amdgpu_kernel void @global_agent_acquire_monotonic_ret_cmpxchg(
; GFX11-WGP-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
; GFX11-WGP-NEXT: global_atomic_cmpswap_b32 v0, v2, v[0:1], s[0:1] offset:16 glc
; GFX11-WGP-NEXT: s_waitcnt vmcnt(0)
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: global_store_b32 v2, v0, s[0:1]
; GFX11-WGP-NEXT: s_nop 0
; GFX11-WGP-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -4717,8 +4717,8 @@ define amdgpu_kernel void @global_agent_acquire_monotonic_ret_cmpxchg(
; GFX11-CU-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
; GFX11-CU-NEXT: global_atomic_cmpswap_b32 v0, v2, v[0:1], s[0:1] offset:16 glc
; GFX11-CU-NEXT: s_waitcnt vmcnt(0)
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: global_store_b32 v2, v0, s[0:1]
; GFX11-CU-NEXT: s_nop 0
; GFX11-CU-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -4928,8 +4928,8 @@ define amdgpu_kernel void @global_agent_acq_rel_monotonic_ret_cmpxchg(
; GFX10-WGP-NEXT: v_mov_b32_e32 v1, s3
; GFX10-WGP-NEXT: global_atomic_cmpswap v0, v2, v[0:1], s[0:1] offset:16 glc
; GFX10-WGP-NEXT: s_waitcnt vmcnt(0)
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: global_store_dword v2, v0, s[0:1]
; GFX10-WGP-NEXT: s_endpgm
;
@@ -4942,8 +4942,8 @@ define amdgpu_kernel void @global_agent_acq_rel_monotonic_ret_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s3
; GFX10-CU-NEXT: global_atomic_cmpswap v0, v2, v[0:1], s[0:1] offset:16 glc
; GFX10-CU-NEXT: s_waitcnt vmcnt(0)
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: global_store_dword v2, v0, s[0:1]
; GFX10-CU-NEXT: s_endpgm
;
@@ -5020,8 +5020,8 @@ define amdgpu_kernel void @global_agent_acq_rel_monotonic_ret_cmpxchg(
; GFX11-WGP-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
; GFX11-WGP-NEXT: global_atomic_cmpswap_b32 v0, v2, v[0:1], s[0:1] offset:16 glc
; GFX11-WGP-NEXT: s_waitcnt vmcnt(0)
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: global_store_b32 v2, v0, s[0:1]
; GFX11-WGP-NEXT: s_nop 0
; GFX11-WGP-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -5035,8 +5035,8 @@ define amdgpu_kernel void @global_agent_acq_rel_monotonic_ret_cmpxchg(
; GFX11-CU-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
; GFX11-CU-NEXT: global_atomic_cmpswap_b32 v0, v2, v[0:1], s[0:1] offset:16 glc
; GFX11-CU-NEXT: s_waitcnt vmcnt(0)
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: global_store_b32 v2, v0, s[0:1]
; GFX11-CU-NEXT: s_nop 0
; GFX11-CU-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -5094,8 +5094,8 @@ define amdgpu_kernel void @global_agent_seq_cst_monotonic_ret_cmpxchg(
; GFX10-WGP-NEXT: v_mov_b32_e32 v1, s3
; GFX10-WGP-NEXT: global_atomic_cmpswap v0, v2, v[0:1], s[0:1] offset:16 glc
; GFX10-WGP-NEXT: s_waitcnt vmcnt(0)
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: global_store_dword v2, v0, s[0:1]
; GFX10-WGP-NEXT: s_endpgm
;
@@ -5108,8 +5108,8 @@ define amdgpu_kernel void @global_agent_seq_cst_monotonic_ret_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s3
; GFX10-CU-NEXT: global_atomic_cmpswap v0, v2, v[0:1], s[0:1] offset:16 glc
; GFX10-CU-NEXT: s_waitcnt vmcnt(0)
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: global_store_dword v2, v0, s[0:1]
; GFX10-CU-NEXT: s_endpgm
;
@@ -5186,8 +5186,8 @@ define amdgpu_kernel void @global_agent_seq_cst_monotonic_ret_cmpxchg(
; GFX11-WGP-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
; GFX11-WGP-NEXT: global_atomic_cmpswap_b32 v0, v2, v[0:1], s[0:1] offset:16 glc
; GFX11-WGP-NEXT: s_waitcnt vmcnt(0)
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: global_store_b32 v2, v0, s[0:1]
; GFX11-WGP-NEXT: s_nop 0
; GFX11-WGP-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -5201,8 +5201,8 @@ define amdgpu_kernel void @global_agent_seq_cst_monotonic_ret_cmpxchg(
; GFX11-CU-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
; GFX11-CU-NEXT: global_atomic_cmpswap_b32 v0, v2, v[0:1], s[0:1] offset:16 glc
; GFX11-CU-NEXT: s_waitcnt vmcnt(0)
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: global_store_b32 v2, v0, s[0:1]
; GFX11-CU-NEXT: s_nop 0
; GFX11-CU-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -5260,8 +5260,8 @@ define amdgpu_kernel void @global_agent_monotonic_acquire_ret_cmpxchg(
; GFX10-WGP-NEXT: v_mov_b32_e32 v1, s3
; GFX10-WGP-NEXT: global_atomic_cmpswap v0, v2, v[0:1], s[0:1] offset:16 glc
; GFX10-WGP-NEXT: s_waitcnt vmcnt(0)
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: global_store_dword v2, v0, s[0:1]
; GFX10-WGP-NEXT: s_endpgm
;
@@ -5274,8 +5274,8 @@ define amdgpu_kernel void @global_agent_monotonic_acquire_ret_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s3
; GFX10-CU-NEXT: global_atomic_cmpswap v0, v2, v[0:1], s[0:1] offset:16 glc
; GFX10-CU-NEXT: s_waitcnt vmcnt(0)
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: global_store_dword v2, v0, s[0:1]
; GFX10-CU-NEXT: s_endpgm
;
@@ -5350,8 +5350,8 @@ define amdgpu_kernel void @global_agent_monotonic_acquire_ret_cmpxchg(
; GFX11-WGP-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
; GFX11-WGP-NEXT: global_atomic_cmpswap_b32 v0, v2, v[0:1], s[0:1] offset:16 glc
; GFX11-WGP-NEXT: s_waitcnt vmcnt(0)
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: global_store_b32 v2, v0, s[0:1]
; GFX11-WGP-NEXT: s_nop 0
; GFX11-WGP-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -5365,8 +5365,8 @@ define amdgpu_kernel void @global_agent_monotonic_acquire_ret_cmpxchg(
; GFX11-CU-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
; GFX11-CU-NEXT: global_atomic_cmpswap_b32 v0, v2, v[0:1], s[0:1] offset:16 glc
; GFX11-CU-NEXT: s_waitcnt vmcnt(0)
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: global_store_b32 v2, v0, s[0:1]
; GFX11-CU-NEXT: s_nop 0
; GFX11-CU-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -5424,8 +5424,8 @@ define amdgpu_kernel void @global_agent_acquire_acquire_ret_cmpxchg(
; GFX10-WGP-NEXT: v_mov_b32_e32 v1, s3
; GFX10-WGP-NEXT: global_atomic_cmpswap v0, v2, v[0:1], s[0:1] offset:16 glc
; GFX10-WGP-NEXT: s_waitcnt vmcnt(0)
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: global_store_dword v2, v0, s[0:1]
; GFX10-WGP-NEXT: s_endpgm
;
@@ -5438,8 +5438,8 @@ define amdgpu_kernel void @global_agent_acquire_acquire_ret_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s3
; GFX10-CU-NEXT: global_atomic_cmpswap v0, v2, v[0:1], s[0:1] offset:16 glc
; GFX10-CU-NEXT: s_waitcnt vmcnt(0)
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: global_store_dword v2, v0, s[0:1]
; GFX10-CU-NEXT: s_endpgm
;
@@ -5514,8 +5514,8 @@ define amdgpu_kernel void @global_agent_acquire_acquire_ret_cmpxchg(
; GFX11-WGP-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
; GFX11-WGP-NEXT: global_atomic_cmpswap_b32 v0, v2, v[0:1], s[0:1] offset:16 glc
; GFX11-WGP-NEXT: s_waitcnt vmcnt(0)
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: global_store_b32 v2, v0, s[0:1]
; GFX11-WGP-NEXT: s_nop 0
; GFX11-WGP-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -5529,8 +5529,8 @@ define amdgpu_kernel void @global_agent_acquire_acquire_ret_cmpxchg(
; GFX11-CU-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
; GFX11-CU-NEXT: global_atomic_cmpswap_b32 v0, v2, v[0:1], s[0:1] offset:16 glc
; GFX11-CU-NEXT: s_waitcnt vmcnt(0)
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: global_store_b32 v2, v0, s[0:1]
; GFX11-CU-NEXT: s_nop 0
; GFX11-CU-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -5588,8 +5588,8 @@ define amdgpu_kernel void @global_agent_release_acquire_ret_cmpxchg(
; GFX10-WGP-NEXT: v_mov_b32_e32 v1, s3
; GFX10-WGP-NEXT: global_atomic_cmpswap v0, v2, v[0:1], s[0:1] offset:16 glc
; GFX10-WGP-NEXT: s_waitcnt vmcnt(0)
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: global_store_dword v2, v0, s[0:1]
; GFX10-WGP-NEXT: s_endpgm
;
@@ -5602,8 +5602,8 @@ define amdgpu_kernel void @global_agent_release_acquire_ret_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s3
; GFX10-CU-NEXT: global_atomic_cmpswap v0, v2, v[0:1], s[0:1] offset:16 glc
; GFX10-CU-NEXT: s_waitcnt vmcnt(0)
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: global_store_dword v2, v0, s[0:1]
; GFX10-CU-NEXT: s_endpgm
;
@@ -5680,8 +5680,8 @@ define amdgpu_kernel void @global_agent_release_acquire_ret_cmpxchg(
; GFX11-WGP-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
; GFX11-WGP-NEXT: global_atomic_cmpswap_b32 v0, v2, v[0:1], s[0:1] offset:16 glc
; GFX11-WGP-NEXT: s_waitcnt vmcnt(0)
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: global_store_b32 v2, v0, s[0:1]
; GFX11-WGP-NEXT: s_nop 0
; GFX11-WGP-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -5695,8 +5695,8 @@ define amdgpu_kernel void @global_agent_release_acquire_ret_cmpxchg(
; GFX11-CU-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
; GFX11-CU-NEXT: global_atomic_cmpswap_b32 v0, v2, v[0:1], s[0:1] offset:16 glc
; GFX11-CU-NEXT: s_waitcnt vmcnt(0)
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: global_store_b32 v2, v0, s[0:1]
; GFX11-CU-NEXT: s_nop 0
; GFX11-CU-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -5754,8 +5754,8 @@ define amdgpu_kernel void @global_agent_acq_rel_acquire_ret_cmpxchg(
; GFX10-WGP-NEXT: v_mov_b32_e32 v1, s3
; GFX10-WGP-NEXT: global_atomic_cmpswap v0, v2, v[0:1], s[0:1] offset:16 glc
; GFX10-WGP-NEXT: s_waitcnt vmcnt(0)
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: global_store_dword v2, v0, s[0:1]
; GFX10-WGP-NEXT: s_endpgm
;
@@ -5768,8 +5768,8 @@ define amdgpu_kernel void @global_agent_acq_rel_acquire_ret_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s3
; GFX10-CU-NEXT: global_atomic_cmpswap v0, v2, v[0:1], s[0:1] offset:16 glc
; GFX10-CU-NEXT: s_waitcnt vmcnt(0)
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: global_store_dword v2, v0, s[0:1]
; GFX10-CU-NEXT: s_endpgm
;
@@ -5846,8 +5846,8 @@ define amdgpu_kernel void @global_agent_acq_rel_acquire_ret_cmpxchg(
; GFX11-WGP-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
; GFX11-WGP-NEXT: global_atomic_cmpswap_b32 v0, v2, v[0:1], s[0:1] offset:16 glc
; GFX11-WGP-NEXT: s_waitcnt vmcnt(0)
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: global_store_b32 v2, v0, s[0:1]
; GFX11-WGP-NEXT: s_nop 0
; GFX11-WGP-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -5861,8 +5861,8 @@ define amdgpu_kernel void @global_agent_acq_rel_acquire_ret_cmpxchg(
; GFX11-CU-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
; GFX11-CU-NEXT: global_atomic_cmpswap_b32 v0, v2, v[0:1], s[0:1] offset:16 glc
; GFX11-CU-NEXT: s_waitcnt vmcnt(0)
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: global_store_b32 v2, v0, s[0:1]
; GFX11-CU-NEXT: s_nop 0
; GFX11-CU-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -5920,8 +5920,8 @@ define amdgpu_kernel void @global_agent_seq_cst_acquire_ret_cmpxchg(
; GFX10-WGP-NEXT: v_mov_b32_e32 v1, s3
; GFX10-WGP-NEXT: global_atomic_cmpswap v0, v2, v[0:1], s[0:1] offset:16 glc
; GFX10-WGP-NEXT: s_waitcnt vmcnt(0)
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: global_store_dword v2, v0, s[0:1]
; GFX10-WGP-NEXT: s_endpgm
;
@@ -5934,8 +5934,8 @@ define amdgpu_kernel void @global_agent_seq_cst_acquire_ret_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s3
; GFX10-CU-NEXT: global_atomic_cmpswap v0, v2, v[0:1], s[0:1] offset:16 glc
; GFX10-CU-NEXT: s_waitcnt vmcnt(0)
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: global_store_dword v2, v0, s[0:1]
; GFX10-CU-NEXT: s_endpgm
;
@@ -6012,8 +6012,8 @@ define amdgpu_kernel void @global_agent_seq_cst_acquire_ret_cmpxchg(
; GFX11-WGP-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
; GFX11-WGP-NEXT: global_atomic_cmpswap_b32 v0, v2, v[0:1], s[0:1] offset:16 glc
; GFX11-WGP-NEXT: s_waitcnt vmcnt(0)
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: global_store_b32 v2, v0, s[0:1]
; GFX11-WGP-NEXT: s_nop 0
; GFX11-WGP-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -6027,8 +6027,8 @@ define amdgpu_kernel void @global_agent_seq_cst_acquire_ret_cmpxchg(
; GFX11-CU-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
; GFX11-CU-NEXT: global_atomic_cmpswap_b32 v0, v2, v[0:1], s[0:1] offset:16 glc
; GFX11-CU-NEXT: s_waitcnt vmcnt(0)
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: global_store_b32 v2, v0, s[0:1]
; GFX11-CU-NEXT: s_nop 0
; GFX11-CU-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -6086,8 +6086,8 @@ define amdgpu_kernel void @global_agent_monotonic_seq_cst_ret_cmpxchg(
; GFX10-WGP-NEXT: v_mov_b32_e32 v1, s3
; GFX10-WGP-NEXT: global_atomic_cmpswap v0, v2, v[0:1], s[0:1] offset:16 glc
; GFX10-WGP-NEXT: s_waitcnt vmcnt(0)
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: global_store_dword v2, v0, s[0:1]
; GFX10-WGP-NEXT: s_endpgm
;
@@ -6100,8 +6100,8 @@ define amdgpu_kernel void @global_agent_monotonic_seq_cst_ret_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s3
; GFX10-CU-NEXT: global_atomic_cmpswap v0, v2, v[0:1], s[0:1] offset:16 glc
; GFX10-CU-NEXT: s_waitcnt vmcnt(0)
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: global_store_dword v2, v0, s[0:1]
; GFX10-CU-NEXT: s_endpgm
;
@@ -6178,8 +6178,8 @@ define amdgpu_kernel void @global_agent_monotonic_seq_cst_ret_cmpxchg(
; GFX11-WGP-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
; GFX11-WGP-NEXT: global_atomic_cmpswap_b32 v0, v2, v[0:1], s[0:1] offset:16 glc
; GFX11-WGP-NEXT: s_waitcnt vmcnt(0)
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: global_store_b32 v2, v0, s[0:1]
; GFX11-WGP-NEXT: s_nop 0
; GFX11-WGP-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -6193,8 +6193,8 @@ define amdgpu_kernel void @global_agent_monotonic_seq_cst_ret_cmpxchg(
; GFX11-CU-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
; GFX11-CU-NEXT: global_atomic_cmpswap_b32 v0, v2, v[0:1], s[0:1] offset:16 glc
; GFX11-CU-NEXT: s_waitcnt vmcnt(0)
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: global_store_b32 v2, v0, s[0:1]
; GFX11-CU-NEXT: s_nop 0
; GFX11-CU-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -6252,8 +6252,8 @@ define amdgpu_kernel void @global_agent_acquire_seq_cst_ret_cmpxchg(
; GFX10-WGP-NEXT: v_mov_b32_e32 v1, s3
; GFX10-WGP-NEXT: global_atomic_cmpswap v0, v2, v[0:1], s[0:1] offset:16 glc
; GFX10-WGP-NEXT: s_waitcnt vmcnt(0)
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: global_store_dword v2, v0, s[0:1]
; GFX10-WGP-NEXT: s_endpgm
;
@@ -6266,8 +6266,8 @@ define amdgpu_kernel void @global_agent_acquire_seq_cst_ret_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s3
; GFX10-CU-NEXT: global_atomic_cmpswap v0, v2, v[0:1], s[0:1] offset:16 glc
; GFX10-CU-NEXT: s_waitcnt vmcnt(0)
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: global_store_dword v2, v0, s[0:1]
; GFX10-CU-NEXT: s_endpgm
;
@@ -6344,8 +6344,8 @@ define amdgpu_kernel void @global_agent_acquire_seq_cst_ret_cmpxchg(
; GFX11-WGP-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
; GFX11-WGP-NEXT: global_atomic_cmpswap_b32 v0, v2, v[0:1], s[0:1] offset:16 glc
; GFX11-WGP-NEXT: s_waitcnt vmcnt(0)
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: global_store_b32 v2, v0, s[0:1]
; GFX11-WGP-NEXT: s_nop 0
; GFX11-WGP-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -6359,8 +6359,8 @@ define amdgpu_kernel void @global_agent_acquire_seq_cst_ret_cmpxchg(
; GFX11-CU-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
; GFX11-CU-NEXT: global_atomic_cmpswap_b32 v0, v2, v[0:1], s[0:1] offset:16 glc
; GFX11-CU-NEXT: s_waitcnt vmcnt(0)
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: global_store_b32 v2, v0, s[0:1]
; GFX11-CU-NEXT: s_nop 0
; GFX11-CU-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -6418,8 +6418,8 @@ define amdgpu_kernel void @global_agent_release_seq_cst_ret_cmpxchg(
; GFX10-WGP-NEXT: v_mov_b32_e32 v1, s3
; GFX10-WGP-NEXT: global_atomic_cmpswap v0, v2, v[0:1], s[0:1] offset:16 glc
; GFX10-WGP-NEXT: s_waitcnt vmcnt(0)
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: global_store_dword v2, v0, s[0:1]
; GFX10-WGP-NEXT: s_endpgm
;
@@ -6432,8 +6432,8 @@ define amdgpu_kernel void @global_agent_release_seq_cst_ret_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s3
; GFX10-CU-NEXT: global_atomic_cmpswap v0, v2, v[0:1], s[0:1] offset:16 glc
; GFX10-CU-NEXT: s_waitcnt vmcnt(0)
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: global_store_dword v2, v0, s[0:1]
; GFX10-CU-NEXT: s_endpgm
;
@@ -6510,8 +6510,8 @@ define amdgpu_kernel void @global_agent_release_seq_cst_ret_cmpxchg(
; GFX11-WGP-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
; GFX11-WGP-NEXT: global_atomic_cmpswap_b32 v0, v2, v[0:1], s[0:1] offset:16 glc
; GFX11-WGP-NEXT: s_waitcnt vmcnt(0)
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: global_store_b32 v2, v0, s[0:1]
; GFX11-WGP-NEXT: s_nop 0
; GFX11-WGP-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -6525,8 +6525,8 @@ define amdgpu_kernel void @global_agent_release_seq_cst_ret_cmpxchg(
; GFX11-CU-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
; GFX11-CU-NEXT: global_atomic_cmpswap_b32 v0, v2, v[0:1], s[0:1] offset:16 glc
; GFX11-CU-NEXT: s_waitcnt vmcnt(0)
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: global_store_b32 v2, v0, s[0:1]
; GFX11-CU-NEXT: s_nop 0
; GFX11-CU-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -6584,8 +6584,8 @@ define amdgpu_kernel void @global_agent_acq_rel_seq_cst_ret_cmpxchg(
; GFX10-WGP-NEXT: v_mov_b32_e32 v1, s3
; GFX10-WGP-NEXT: global_atomic_cmpswap v0, v2, v[0:1], s[0:1] offset:16 glc
; GFX10-WGP-NEXT: s_waitcnt vmcnt(0)
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: global_store_dword v2, v0, s[0:1]
; GFX10-WGP-NEXT: s_endpgm
;
@@ -6598,8 +6598,8 @@ define amdgpu_kernel void @global_agent_acq_rel_seq_cst_ret_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s3
; GFX10-CU-NEXT: global_atomic_cmpswap v0, v2, v[0:1], s[0:1] offset:16 glc
; GFX10-CU-NEXT: s_waitcnt vmcnt(0)
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: global_store_dword v2, v0, s[0:1]
; GFX10-CU-NEXT: s_endpgm
;
@@ -6676,8 +6676,8 @@ define amdgpu_kernel void @global_agent_acq_rel_seq_cst_ret_cmpxchg(
; GFX11-WGP-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
; GFX11-WGP-NEXT: global_atomic_cmpswap_b32 v0, v2, v[0:1], s[0:1] offset:16 glc
; GFX11-WGP-NEXT: s_waitcnt vmcnt(0)
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: global_store_b32 v2, v0, s[0:1]
; GFX11-WGP-NEXT: s_nop 0
; GFX11-WGP-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -6691,8 +6691,8 @@ define amdgpu_kernel void @global_agent_acq_rel_seq_cst_ret_cmpxchg(
; GFX11-CU-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
; GFX11-CU-NEXT: global_atomic_cmpswap_b32 v0, v2, v[0:1], s[0:1] offset:16 glc
; GFX11-CU-NEXT: s_waitcnt vmcnt(0)
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: global_store_b32 v2, v0, s[0:1]
; GFX11-CU-NEXT: s_nop 0
; GFX11-CU-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -6750,8 +6750,8 @@ define amdgpu_kernel void @global_agent_seq_cst_seq_cst_ret_cmpxchg(
; GFX10-WGP-NEXT: v_mov_b32_e32 v1, s3
; GFX10-WGP-NEXT: global_atomic_cmpswap v0, v2, v[0:1], s[0:1] offset:16 glc
; GFX10-WGP-NEXT: s_waitcnt vmcnt(0)
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: global_store_dword v2, v0, s[0:1]
; GFX10-WGP-NEXT: s_endpgm
;
@@ -6764,8 +6764,8 @@ define amdgpu_kernel void @global_agent_seq_cst_seq_cst_ret_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s3
; GFX10-CU-NEXT: global_atomic_cmpswap v0, v2, v[0:1], s[0:1] offset:16 glc
; GFX10-CU-NEXT: s_waitcnt vmcnt(0)
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: global_store_dword v2, v0, s[0:1]
; GFX10-CU-NEXT: s_endpgm
;
@@ -6842,8 +6842,8 @@ define amdgpu_kernel void @global_agent_seq_cst_seq_cst_ret_cmpxchg(
; GFX11-WGP-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
; GFX11-WGP-NEXT: global_atomic_cmpswap_b32 v0, v2, v[0:1], s[0:1] offset:16 glc
; GFX11-WGP-NEXT: s_waitcnt vmcnt(0)
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: global_store_b32 v2, v0, s[0:1]
; GFX11-WGP-NEXT: s_nop 0
; GFX11-WGP-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -6857,8 +6857,8 @@ define amdgpu_kernel void @global_agent_seq_cst_seq_cst_ret_cmpxchg(
; GFX11-CU-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
; GFX11-CU-NEXT: global_atomic_cmpswap_b32 v0, v2, v[0:1], s[0:1] offset:16 glc
; GFX11-CU-NEXT: s_waitcnt vmcnt(0)
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: global_store_b32 v2, v0, s[0:1]
; GFX11-CU-NEXT: s_nop 0
; GFX11-CU-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -7178,8 +7178,8 @@ define amdgpu_kernel void @global_agent_one_as_acquire_load(
; GFX10-WGP-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-WGP-NEXT: global_load_dword v1, v0, s[0:1] glc dlc
; GFX10-WGP-NEXT: s_waitcnt vmcnt(0)
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: global_store_dword v0, v1, s[2:3]
; GFX10-WGP-NEXT: s_endpgm
;
@@ -7190,8 +7190,8 @@ define amdgpu_kernel void @global_agent_one_as_acquire_load(
; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-CU-NEXT: global_load_dword v1, v0, s[0:1] glc dlc
; GFX10-CU-NEXT: s_waitcnt vmcnt(0)
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: global_store_dword v0, v1, s[2:3]
; GFX10-CU-NEXT: s_endpgm
;
@@ -7261,8 +7261,8 @@ define amdgpu_kernel void @global_agent_one_as_acquire_load(
; GFX11-WGP-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-WGP-NEXT: global_load_b32 v1, v0, s[0:1] glc
; GFX11-WGP-NEXT: s_waitcnt vmcnt(0)
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: global_store_b32 v0, v1, s[2:3]
; GFX11-WGP-NEXT: s_nop 0
; GFX11-WGP-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -7275,8 +7275,8 @@ define amdgpu_kernel void @global_agent_one_as_acquire_load(
; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-CU-NEXT: global_load_b32 v1, v0, s[0:1] glc
; GFX11-CU-NEXT: s_waitcnt vmcnt(0)
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: global_store_b32 v0, v1, s[2:3]
; GFX11-CU-NEXT: s_nop 0
; GFX11-CU-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -7326,8 +7326,8 @@ define amdgpu_kernel void @global_agent_one_as_seq_cst_load(
; GFX10-WGP-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-WGP-NEXT: global_load_dword v1, v0, s[0:1] glc dlc
; GFX10-WGP-NEXT: s_waitcnt vmcnt(0)
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: global_store_dword v0, v1, s[2:3]
; GFX10-WGP-NEXT: s_endpgm
;
@@ -7338,8 +7338,8 @@ define amdgpu_kernel void @global_agent_one_as_seq_cst_load(
; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-CU-NEXT: global_load_dword v1, v0, s[0:1] glc dlc
; GFX10-CU-NEXT: s_waitcnt vmcnt(0)
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: global_store_dword v0, v1, s[2:3]
; GFX10-CU-NEXT: s_endpgm
;
@@ -7409,8 +7409,8 @@ define amdgpu_kernel void @global_agent_one_as_seq_cst_load(
; GFX11-WGP-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-WGP-NEXT: global_load_b32 v1, v0, s[0:1] glc
; GFX11-WGP-NEXT: s_waitcnt vmcnt(0)
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: global_store_b32 v0, v1, s[2:3]
; GFX11-WGP-NEXT: s_nop 0
; GFX11-WGP-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -7423,8 +7423,8 @@ define amdgpu_kernel void @global_agent_one_as_seq_cst_load(
; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-CU-NEXT: global_load_b32 v1, v0, s[0:1] glc
; GFX11-CU-NEXT: s_waitcnt vmcnt(0)
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: global_store_b32 v0, v1, s[2:3]
; GFX11-CU-NEXT: s_nop 0
; GFX11-CU-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -8102,8 +8102,8 @@ define amdgpu_kernel void @global_agent_one_as_acquire_atomicrmw(
; GFX10-WGP-NEXT: v_mov_b32_e32 v1, s2
; GFX10-WGP-NEXT: global_atomic_swap v0, v1, s[0:1]
; GFX10-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: s_endpgm
;
; GFX10-CU-LABEL: global_agent_one_as_acquire_atomicrmw:
@@ -8116,8 +8116,8 @@ define amdgpu_kernel void @global_agent_one_as_acquire_atomicrmw(
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s2
; GFX10-CU-NEXT: global_atomic_swap v0, v1, s[0:1]
; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: s_endpgm
;
; SKIP-CACHE-INV-LABEL: global_agent_one_as_acquire_atomicrmw:
@@ -8189,8 +8189,8 @@ define amdgpu_kernel void @global_agent_one_as_acquire_atomicrmw(
; GFX11-WGP-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, s2
; GFX11-WGP-NEXT: global_atomic_swap_b32 v0, v1, s[0:1]
; GFX11-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: s_endpgm
;
; GFX11-CU-LABEL: global_agent_one_as_acquire_atomicrmw:
@@ -8202,8 +8202,8 @@ define amdgpu_kernel void @global_agent_one_as_acquire_atomicrmw(
; GFX11-CU-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, s2
; GFX11-CU-NEXT: global_atomic_swap_b32 v0, v1, s[0:1]
; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: s_endpgm
ptr addrspace(1) %out, i32 %in) {
entry:
@@ -8375,8 +8375,8 @@ define amdgpu_kernel void @global_agent_one_as_acq_rel_atomicrmw(
; GFX10-WGP-NEXT: v_mov_b32_e32 v1, s2
; GFX10-WGP-NEXT: global_atomic_swap v0, v1, s[0:1]
; GFX10-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: s_endpgm
;
; GFX10-CU-LABEL: global_agent_one_as_acq_rel_atomicrmw:
@@ -8389,8 +8389,8 @@ define amdgpu_kernel void @global_agent_one_as_acq_rel_atomicrmw(
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s2
; GFX10-CU-NEXT: global_atomic_swap v0, v1, s[0:1]
; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: s_endpgm
;
; SKIP-CACHE-INV-LABEL: global_agent_one_as_acq_rel_atomicrmw:
@@ -8464,8 +8464,8 @@ define amdgpu_kernel void @global_agent_one_as_acq_rel_atomicrmw(
; GFX11-WGP-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, s2
; GFX11-WGP-NEXT: global_atomic_swap_b32 v0, v1, s[0:1]
; GFX11-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: s_endpgm
;
; GFX11-CU-LABEL: global_agent_one_as_acq_rel_atomicrmw:
@@ -8477,8 +8477,8 @@ define amdgpu_kernel void @global_agent_one_as_acq_rel_atomicrmw(
; GFX11-CU-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, s2
; GFX11-CU-NEXT: global_atomic_swap_b32 v0, v1, s[0:1]
; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: s_endpgm
ptr addrspace(1) %out, i32 %in) {
entry:
@@ -8523,8 +8523,8 @@ define amdgpu_kernel void @global_agent_one_as_seq_cst_atomicrmw(
; GFX10-WGP-NEXT: v_mov_b32_e32 v1, s2
; GFX10-WGP-NEXT: global_atomic_swap v0, v1, s[0:1]
; GFX10-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: s_endpgm
;
; GFX10-CU-LABEL: global_agent_one_as_seq_cst_atomicrmw:
@@ -8537,8 +8537,8 @@ define amdgpu_kernel void @global_agent_one_as_seq_cst_atomicrmw(
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s2
; GFX10-CU-NEXT: global_atomic_swap v0, v1, s[0:1]
; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: s_endpgm
;
; SKIP-CACHE-INV-LABEL: global_agent_one_as_seq_cst_atomicrmw:
@@ -8612,8 +8612,8 @@ define amdgpu_kernel void @global_agent_one_as_seq_cst_atomicrmw(
; GFX11-WGP-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, s2
; GFX11-WGP-NEXT: global_atomic_swap_b32 v0, v1, s[0:1]
; GFX11-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: s_endpgm
;
; GFX11-CU-LABEL: global_agent_one_as_seq_cst_atomicrmw:
@@ -8625,8 +8625,8 @@ define amdgpu_kernel void @global_agent_one_as_seq_cst_atomicrmw(
; GFX11-CU-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, s2
; GFX11-CU-NEXT: global_atomic_swap_b32 v0, v1, s[0:1]
; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: s_endpgm
ptr addrspace(1) %out, i32 %in) {
entry:
@@ -8673,8 +8673,8 @@ define amdgpu_kernel void @global_agent_one_as_acquire_ret_atomicrmw(
; GFX10-WGP-NEXT: v_mov_b32_e32 v1, s2
; GFX10-WGP-NEXT: global_atomic_swap v1, v0, v1, s[0:1] glc
; GFX10-WGP-NEXT: s_waitcnt vmcnt(0)
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: global_store_dword v0, v1, s[0:1]
; GFX10-WGP-NEXT: s_endpgm
;
@@ -8688,8 +8688,8 @@ define amdgpu_kernel void @global_agent_one_as_acquire_ret_atomicrmw(
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s2
; GFX10-CU-NEXT: global_atomic_swap v1, v0, v1, s[0:1] glc
; GFX10-CU-NEXT: s_waitcnt vmcnt(0)
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: global_store_dword v0, v1, s[0:1]
; GFX10-CU-NEXT: s_endpgm
;
@@ -8767,8 +8767,8 @@ define amdgpu_kernel void @global_agent_one_as_acquire_ret_atomicrmw(
; GFX11-WGP-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, s2
; GFX11-WGP-NEXT: global_atomic_swap_b32 v1, v0, v1, s[0:1] glc
; GFX11-WGP-NEXT: s_waitcnt vmcnt(0)
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: global_store_b32 v0, v1, s[0:1]
; GFX11-WGP-NEXT: s_nop 0
; GFX11-WGP-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -8783,8 +8783,8 @@ define amdgpu_kernel void @global_agent_one_as_acquire_ret_atomicrmw(
; GFX11-CU-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, s2
; GFX11-CU-NEXT: global_atomic_swap_b32 v1, v0, v1, s[0:1] glc
; GFX11-CU-NEXT: s_waitcnt vmcnt(0)
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: global_store_b32 v0, v1, s[0:1]
; GFX11-CU-NEXT: s_nop 0
; GFX11-CU-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -8835,8 +8835,8 @@ define amdgpu_kernel void @global_agent_one_as_acq_rel_ret_atomicrmw(
; GFX10-WGP-NEXT: v_mov_b32_e32 v1, s2
; GFX10-WGP-NEXT: global_atomic_swap v1, v0, v1, s[0:1] glc
; GFX10-WGP-NEXT: s_waitcnt vmcnt(0)
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: global_store_dword v0, v1, s[0:1]
; GFX10-WGP-NEXT: s_endpgm
;
@@ -8850,8 +8850,8 @@ define amdgpu_kernel void @global_agent_one_as_acq_rel_ret_atomicrmw(
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s2
; GFX10-CU-NEXT: global_atomic_swap v1, v0, v1, s[0:1] glc
; GFX10-CU-NEXT: s_waitcnt vmcnt(0)
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: global_store_dword v0, v1, s[0:1]
; GFX10-CU-NEXT: s_endpgm
;
@@ -8931,8 +8931,8 @@ define amdgpu_kernel void @global_agent_one_as_acq_rel_ret_atomicrmw(
; GFX11-WGP-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, s2
; GFX11-WGP-NEXT: global_atomic_swap_b32 v1, v0, v1, s[0:1] glc
; GFX11-WGP-NEXT: s_waitcnt vmcnt(0)
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: global_store_b32 v0, v1, s[0:1]
; GFX11-WGP-NEXT: s_nop 0
; GFX11-WGP-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -8947,8 +8947,8 @@ define amdgpu_kernel void @global_agent_one_as_acq_rel_ret_atomicrmw(
; GFX11-CU-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, s2
; GFX11-CU-NEXT: global_atomic_swap_b32 v1, v0, v1, s[0:1] glc
; GFX11-CU-NEXT: s_waitcnt vmcnt(0)
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: global_store_b32 v0, v1, s[0:1]
; GFX11-CU-NEXT: s_nop 0
; GFX11-CU-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -8999,8 +8999,8 @@ define amdgpu_kernel void @global_agent_one_as_seq_cst_ret_atomicrmw(
; GFX10-WGP-NEXT: v_mov_b32_e32 v1, s2
; GFX10-WGP-NEXT: global_atomic_swap v1, v0, v1, s[0:1] glc
; GFX10-WGP-NEXT: s_waitcnt vmcnt(0)
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: global_store_dword v0, v1, s[0:1]
; GFX10-WGP-NEXT: s_endpgm
;
@@ -9014,8 +9014,8 @@ define amdgpu_kernel void @global_agent_one_as_seq_cst_ret_atomicrmw(
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s2
; GFX10-CU-NEXT: global_atomic_swap v1, v0, v1, s[0:1] glc
; GFX10-CU-NEXT: s_waitcnt vmcnt(0)
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: global_store_dword v0, v1, s[0:1]
; GFX10-CU-NEXT: s_endpgm
;
@@ -9095,8 +9095,8 @@ define amdgpu_kernel void @global_agent_one_as_seq_cst_ret_atomicrmw(
; GFX11-WGP-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, s2
; GFX11-WGP-NEXT: global_atomic_swap_b32 v1, v0, v1, s[0:1] glc
; GFX11-WGP-NEXT: s_waitcnt vmcnt(0)
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: global_store_b32 v0, v1, s[0:1]
; GFX11-WGP-NEXT: s_nop 0
; GFX11-WGP-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -9111,8 +9111,8 @@ define amdgpu_kernel void @global_agent_one_as_seq_cst_ret_atomicrmw(
; GFX11-CU-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, s2
; GFX11-CU-NEXT: global_atomic_swap_b32 v1, v0, v1, s[0:1] glc
; GFX11-CU-NEXT: s_waitcnt vmcnt(0)
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: global_store_b32 v0, v1, s[0:1]
; GFX11-CU-NEXT: s_nop 0
; GFX11-CU-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -9288,8 +9288,8 @@ define amdgpu_kernel void @global_agent_one_as_acquire_monotonic_cmpxchg(
; GFX10-WGP-NEXT: v_mov_b32_e32 v1, s3
; GFX10-WGP-NEXT: global_atomic_cmpswap v2, v[0:1], s[0:1] offset:16
; GFX10-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: s_endpgm
;
; GFX10-CU-LABEL: global_agent_one_as_acquire_monotonic_cmpxchg:
@@ -9301,8 +9301,8 @@ define amdgpu_kernel void @global_agent_one_as_acquire_monotonic_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s3
; GFX10-CU-NEXT: global_atomic_cmpswap v2, v[0:1], s[0:1] offset:16
; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: s_endpgm
;
; SKIP-CACHE-INV-LABEL: global_agent_one_as_acquire_monotonic_cmpxchg:
@@ -9371,8 +9371,8 @@ define amdgpu_kernel void @global_agent_one_as_acquire_monotonic_cmpxchg(
; GFX11-WGP-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
; GFX11-WGP-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], s[0:1] offset:16
; GFX11-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: s_endpgm
;
; GFX11-CU-LABEL: global_agent_one_as_acquire_monotonic_cmpxchg:
@@ -9383,8 +9383,8 @@ define amdgpu_kernel void @global_agent_one_as_acquire_monotonic_cmpxchg(
; GFX11-CU-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
; GFX11-CU-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], s[0:1] offset:16
; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: s_endpgm
ptr addrspace(1) %out, i32 %in, i32 %old) {
entry:
@@ -9559,8 +9559,8 @@ define amdgpu_kernel void @global_agent_one_as_acq_rel_monotonic_cmpxchg(
; GFX10-WGP-NEXT: v_mov_b32_e32 v1, s3
; GFX10-WGP-NEXT: global_atomic_cmpswap v2, v[0:1], s[0:1] offset:16
; GFX10-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: s_endpgm
;
; GFX10-CU-LABEL: global_agent_one_as_acq_rel_monotonic_cmpxchg:
@@ -9572,8 +9572,8 @@ define amdgpu_kernel void @global_agent_one_as_acq_rel_monotonic_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s3
; GFX10-CU-NEXT: global_atomic_cmpswap v2, v[0:1], s[0:1] offset:16
; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: s_endpgm
;
; SKIP-CACHE-INV-LABEL: global_agent_one_as_acq_rel_monotonic_cmpxchg:
@@ -9644,8 +9644,8 @@ define amdgpu_kernel void @global_agent_one_as_acq_rel_monotonic_cmpxchg(
; GFX11-WGP-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
; GFX11-WGP-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], s[0:1] offset:16
; GFX11-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: s_endpgm
;
; GFX11-CU-LABEL: global_agent_one_as_acq_rel_monotonic_cmpxchg:
@@ -9656,8 +9656,8 @@ define amdgpu_kernel void @global_agent_one_as_acq_rel_monotonic_cmpxchg(
; GFX11-CU-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
; GFX11-CU-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], s[0:1] offset:16
; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: s_endpgm
ptr addrspace(1) %out, i32 %in, i32 %old) {
entry:
@@ -9706,8 +9706,8 @@ define amdgpu_kernel void @global_agent_one_as_seq_cst_monotonic_cmpxchg(
; GFX10-WGP-NEXT: v_mov_b32_e32 v1, s3
; GFX10-WGP-NEXT: global_atomic_cmpswap v2, v[0:1], s[0:1] offset:16
; GFX10-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: s_endpgm
;
; GFX10-CU-LABEL: global_agent_one_as_seq_cst_monotonic_cmpxchg:
@@ -9719,8 +9719,8 @@ define amdgpu_kernel void @global_agent_one_as_seq_cst_monotonic_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s3
; GFX10-CU-NEXT: global_atomic_cmpswap v2, v[0:1], s[0:1] offset:16
; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: s_endpgm
;
; SKIP-CACHE-INV-LABEL: global_agent_one_as_seq_cst_monotonic_cmpxchg:
@@ -9791,8 +9791,8 @@ define amdgpu_kernel void @global_agent_one_as_seq_cst_monotonic_cmpxchg(
; GFX11-WGP-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
; GFX11-WGP-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], s[0:1] offset:16
; GFX11-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: s_endpgm
;
; GFX11-CU-LABEL: global_agent_one_as_seq_cst_monotonic_cmpxchg:
@@ -9803,8 +9803,8 @@ define amdgpu_kernel void @global_agent_one_as_seq_cst_monotonic_cmpxchg(
; GFX11-CU-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
; GFX11-CU-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], s[0:1] offset:16
; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: s_endpgm
ptr addrspace(1) %out, i32 %in, i32 %old) {
entry:
@@ -9853,8 +9853,8 @@ define amdgpu_kernel void @global_agent_one_as_monotonic_acquire_cmpxchg(
; GFX10-WGP-NEXT: v_mov_b32_e32 v1, s3
; GFX10-WGP-NEXT: global_atomic_cmpswap v2, v[0:1], s[0:1] offset:16
; GFX10-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: s_endpgm
;
; GFX10-CU-LABEL: global_agent_one_as_monotonic_acquire_cmpxchg:
@@ -9866,8 +9866,8 @@ define amdgpu_kernel void @global_agent_one_as_monotonic_acquire_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s3
; GFX10-CU-NEXT: global_atomic_cmpswap v2, v[0:1], s[0:1] offset:16
; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: s_endpgm
;
; SKIP-CACHE-INV-LABEL: global_agent_one_as_monotonic_acquire_cmpxchg:
@@ -9936,8 +9936,8 @@ define amdgpu_kernel void @global_agent_one_as_monotonic_acquire_cmpxchg(
; GFX11-WGP-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
; GFX11-WGP-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], s[0:1] offset:16
; GFX11-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: s_endpgm
;
; GFX11-CU-LABEL: global_agent_one_as_monotonic_acquire_cmpxchg:
@@ -9948,8 +9948,8 @@ define amdgpu_kernel void @global_agent_one_as_monotonic_acquire_cmpxchg(
; GFX11-CU-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
; GFX11-CU-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], s[0:1] offset:16
; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: s_endpgm
ptr addrspace(1) %out, i32 %in, i32 %old) {
entry:
@@ -9998,8 +9998,8 @@ define amdgpu_kernel void @global_agent_one_as_acquire_acquire_cmpxchg(
; GFX10-WGP-NEXT: v_mov_b32_e32 v1, s3
; GFX10-WGP-NEXT: global_atomic_cmpswap v2, v[0:1], s[0:1] offset:16
; GFX10-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: s_endpgm
;
; GFX10-CU-LABEL: global_agent_one_as_acquire_acquire_cmpxchg:
@@ -10011,8 +10011,8 @@ define amdgpu_kernel void @global_agent_one_as_acquire_acquire_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s3
; GFX10-CU-NEXT: global_atomic_cmpswap v2, v[0:1], s[0:1] offset:16
; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: s_endpgm
;
; SKIP-CACHE-INV-LABEL: global_agent_one_as_acquire_acquire_cmpxchg:
@@ -10081,8 +10081,8 @@ define amdgpu_kernel void @global_agent_one_as_acquire_acquire_cmpxchg(
; GFX11-WGP-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
; GFX11-WGP-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], s[0:1] offset:16
; GFX11-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: s_endpgm
;
; GFX11-CU-LABEL: global_agent_one_as_acquire_acquire_cmpxchg:
@@ -10093,8 +10093,8 @@ define amdgpu_kernel void @global_agent_one_as_acquire_acquire_cmpxchg(
; GFX11-CU-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
; GFX11-CU-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], s[0:1] offset:16
; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: s_endpgm
ptr addrspace(1) %out, i32 %in, i32 %old) {
entry:
@@ -10143,8 +10143,8 @@ define amdgpu_kernel void @global_agent_one_as_release_acquire_cmpxchg(
; GFX10-WGP-NEXT: v_mov_b32_e32 v1, s3
; GFX10-WGP-NEXT: global_atomic_cmpswap v2, v[0:1], s[0:1] offset:16
; GFX10-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: s_endpgm
;
; GFX10-CU-LABEL: global_agent_one_as_release_acquire_cmpxchg:
@@ -10156,8 +10156,8 @@ define amdgpu_kernel void @global_agent_one_as_release_acquire_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s3
; GFX10-CU-NEXT: global_atomic_cmpswap v2, v[0:1], s[0:1] offset:16
; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: s_endpgm
;
; SKIP-CACHE-INV-LABEL: global_agent_one_as_release_acquire_cmpxchg:
@@ -10228,8 +10228,8 @@ define amdgpu_kernel void @global_agent_one_as_release_acquire_cmpxchg(
; GFX11-WGP-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
; GFX11-WGP-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], s[0:1] offset:16
; GFX11-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: s_endpgm
;
; GFX11-CU-LABEL: global_agent_one_as_release_acquire_cmpxchg:
@@ -10240,8 +10240,8 @@ define amdgpu_kernel void @global_agent_one_as_release_acquire_cmpxchg(
; GFX11-CU-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
; GFX11-CU-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], s[0:1] offset:16
; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: s_endpgm
ptr addrspace(1) %out, i32 %in, i32 %old) {
entry:
@@ -10290,8 +10290,8 @@ define amdgpu_kernel void @global_agent_one_as_acq_rel_acquire_cmpxchg(
; GFX10-WGP-NEXT: v_mov_b32_e32 v1, s3
; GFX10-WGP-NEXT: global_atomic_cmpswap v2, v[0:1], s[0:1] offset:16
; GFX10-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: s_endpgm
;
; GFX10-CU-LABEL: global_agent_one_as_acq_rel_acquire_cmpxchg:
@@ -10303,8 +10303,8 @@ define amdgpu_kernel void @global_agent_one_as_acq_rel_acquire_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s3
; GFX10-CU-NEXT: global_atomic_cmpswap v2, v[0:1], s[0:1] offset:16
; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: s_endpgm
;
; SKIP-CACHE-INV-LABEL: global_agent_one_as_acq_rel_acquire_cmpxchg:
@@ -10375,8 +10375,8 @@ define amdgpu_kernel void @global_agent_one_as_acq_rel_acquire_cmpxchg(
; GFX11-WGP-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
; GFX11-WGP-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], s[0:1] offset:16
; GFX11-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: s_endpgm
;
; GFX11-CU-LABEL: global_agent_one_as_acq_rel_acquire_cmpxchg:
@@ -10387,8 +10387,8 @@ define amdgpu_kernel void @global_agent_one_as_acq_rel_acquire_cmpxchg(
; GFX11-CU-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
; GFX11-CU-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], s[0:1] offset:16
; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: s_endpgm
ptr addrspace(1) %out, i32 %in, i32 %old) {
entry:
@@ -10437,8 +10437,8 @@ define amdgpu_kernel void @global_agent_one_as_seq_cst_acquire_cmpxchg(
; GFX10-WGP-NEXT: v_mov_b32_e32 v1, s3
; GFX10-WGP-NEXT: global_atomic_cmpswap v2, v[0:1], s[0:1] offset:16
; GFX10-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: s_endpgm
;
; GFX10-CU-LABEL: global_agent_one_as_seq_cst_acquire_cmpxchg:
@@ -10450,8 +10450,8 @@ define amdgpu_kernel void @global_agent_one_as_seq_cst_acquire_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s3
; GFX10-CU-NEXT: global_atomic_cmpswap v2, v[0:1], s[0:1] offset:16
; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: s_endpgm
;
; SKIP-CACHE-INV-LABEL: global_agent_one_as_seq_cst_acquire_cmpxchg:
@@ -10522,8 +10522,8 @@ define amdgpu_kernel void @global_agent_one_as_seq_cst_acquire_cmpxchg(
; GFX11-WGP-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
; GFX11-WGP-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], s[0:1] offset:16
; GFX11-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: s_endpgm
;
; GFX11-CU-LABEL: global_agent_one_as_seq_cst_acquire_cmpxchg:
@@ -10534,8 +10534,8 @@ define amdgpu_kernel void @global_agent_one_as_seq_cst_acquire_cmpxchg(
; GFX11-CU-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
; GFX11-CU-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], s[0:1] offset:16
; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: s_endpgm
ptr addrspace(1) %out, i32 %in, i32 %old) {
entry:
@@ -10584,8 +10584,8 @@ define amdgpu_kernel void @global_agent_one_as_monotonic_seq_cst_cmpxchg(
; GFX10-WGP-NEXT: v_mov_b32_e32 v1, s3
; GFX10-WGP-NEXT: global_atomic_cmpswap v2, v[0:1], s[0:1] offset:16
; GFX10-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: s_endpgm
;
; GFX10-CU-LABEL: global_agent_one_as_monotonic_seq_cst_cmpxchg:
@@ -10597,8 +10597,8 @@ define amdgpu_kernel void @global_agent_one_as_monotonic_seq_cst_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s3
; GFX10-CU-NEXT: global_atomic_cmpswap v2, v[0:1], s[0:1] offset:16
; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: s_endpgm
;
; SKIP-CACHE-INV-LABEL: global_agent_one_as_monotonic_seq_cst_cmpxchg:
@@ -10669,8 +10669,8 @@ define amdgpu_kernel void @global_agent_one_as_monotonic_seq_cst_cmpxchg(
; GFX11-WGP-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
; GFX11-WGP-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], s[0:1] offset:16
; GFX11-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: s_endpgm
;
; GFX11-CU-LABEL: global_agent_one_as_monotonic_seq_cst_cmpxchg:
@@ -10681,8 +10681,8 @@ define amdgpu_kernel void @global_agent_one_as_monotonic_seq_cst_cmpxchg(
; GFX11-CU-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
; GFX11-CU-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], s[0:1] offset:16
; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: s_endpgm
ptr addrspace(1) %out, i32 %in, i32 %old) {
entry:
@@ -10731,8 +10731,8 @@ define amdgpu_kernel void @global_agent_one_as_acquire_seq_cst_cmpxchg(
; GFX10-WGP-NEXT: v_mov_b32_e32 v1, s3
; GFX10-WGP-NEXT: global_atomic_cmpswap v2, v[0:1], s[0:1] offset:16
; GFX10-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: s_endpgm
;
; GFX10-CU-LABEL: global_agent_one_as_acquire_seq_cst_cmpxchg:
@@ -10744,8 +10744,8 @@ define amdgpu_kernel void @global_agent_one_as_acquire_seq_cst_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s3
; GFX10-CU-NEXT: global_atomic_cmpswap v2, v[0:1], s[0:1] offset:16
; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: s_endpgm
;
; SKIP-CACHE-INV-LABEL: global_agent_one_as_acquire_seq_cst_cmpxchg:
@@ -10816,8 +10816,8 @@ define amdgpu_kernel void @global_agent_one_as_acquire_seq_cst_cmpxchg(
; GFX11-WGP-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
; GFX11-WGP-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], s[0:1] offset:16
; GFX11-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: s_endpgm
;
; GFX11-CU-LABEL: global_agent_one_as_acquire_seq_cst_cmpxchg:
@@ -10828,8 +10828,8 @@ define amdgpu_kernel void @global_agent_one_as_acquire_seq_cst_cmpxchg(
; GFX11-CU-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
; GFX11-CU-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], s[0:1] offset:16
; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: s_endpgm
ptr addrspace(1) %out, i32 %in, i32 %old) {
entry:
@@ -10878,8 +10878,8 @@ define amdgpu_kernel void @global_agent_one_as_release_seq_cst_cmpxchg(
; GFX10-WGP-NEXT: v_mov_b32_e32 v1, s3
; GFX10-WGP-NEXT: global_atomic_cmpswap v2, v[0:1], s[0:1] offset:16
; GFX10-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: s_endpgm
;
; GFX10-CU-LABEL: global_agent_one_as_release_seq_cst_cmpxchg:
@@ -10891,8 +10891,8 @@ define amdgpu_kernel void @global_agent_one_as_release_seq_cst_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s3
; GFX10-CU-NEXT: global_atomic_cmpswap v2, v[0:1], s[0:1] offset:16
; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: s_endpgm
;
; SKIP-CACHE-INV-LABEL: global_agent_one_as_release_seq_cst_cmpxchg:
@@ -10963,8 +10963,8 @@ define amdgpu_kernel void @global_agent_one_as_release_seq_cst_cmpxchg(
; GFX11-WGP-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
; GFX11-WGP-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], s[0:1] offset:16
; GFX11-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: s_endpgm
;
; GFX11-CU-LABEL: global_agent_one_as_release_seq_cst_cmpxchg:
@@ -10975,8 +10975,8 @@ define amdgpu_kernel void @global_agent_one_as_release_seq_cst_cmpxchg(
; GFX11-CU-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
; GFX11-CU-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], s[0:1] offset:16
; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: s_endpgm
ptr addrspace(1) %out, i32 %in, i32 %old) {
entry:
@@ -11025,8 +11025,8 @@ define amdgpu_kernel void @global_agent_one_as_acq_rel_seq_cst_cmpxchg(
; GFX10-WGP-NEXT: v_mov_b32_e32 v1, s3
; GFX10-WGP-NEXT: global_atomic_cmpswap v2, v[0:1], s[0:1] offset:16
; GFX10-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: s_endpgm
;
; GFX10-CU-LABEL: global_agent_one_as_acq_rel_seq_cst_cmpxchg:
@@ -11038,8 +11038,8 @@ define amdgpu_kernel void @global_agent_one_as_acq_rel_seq_cst_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s3
; GFX10-CU-NEXT: global_atomic_cmpswap v2, v[0:1], s[0:1] offset:16
; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: s_endpgm
;
; SKIP-CACHE-INV-LABEL: global_agent_one_as_acq_rel_seq_cst_cmpxchg:
@@ -11110,8 +11110,8 @@ define amdgpu_kernel void @global_agent_one_as_acq_rel_seq_cst_cmpxchg(
; GFX11-WGP-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
; GFX11-WGP-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], s[0:1] offset:16
; GFX11-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: s_endpgm
;
; GFX11-CU-LABEL: global_agent_one_as_acq_rel_seq_cst_cmpxchg:
@@ -11122,8 +11122,8 @@ define amdgpu_kernel void @global_agent_one_as_acq_rel_seq_cst_cmpxchg(
; GFX11-CU-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
; GFX11-CU-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], s[0:1] offset:16
; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: s_endpgm
ptr addrspace(1) %out, i32 %in, i32 %old) {
entry:
@@ -11172,8 +11172,8 @@ define amdgpu_kernel void @global_agent_one_as_seq_cst_seq_cst_cmpxchg(
; GFX10-WGP-NEXT: v_mov_b32_e32 v1, s3
; GFX10-WGP-NEXT: global_atomic_cmpswap v2, v[0:1], s[0:1] offset:16
; GFX10-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: s_endpgm
;
; GFX10-CU-LABEL: global_agent_one_as_seq_cst_seq_cst_cmpxchg:
@@ -11185,8 +11185,8 @@ define amdgpu_kernel void @global_agent_one_as_seq_cst_seq_cst_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s3
; GFX10-CU-NEXT: global_atomic_cmpswap v2, v[0:1], s[0:1] offset:16
; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: s_endpgm
;
; SKIP-CACHE-INV-LABEL: global_agent_one_as_seq_cst_seq_cst_cmpxchg:
@@ -11257,8 +11257,8 @@ define amdgpu_kernel void @global_agent_one_as_seq_cst_seq_cst_cmpxchg(
; GFX11-WGP-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
; GFX11-WGP-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], s[0:1] offset:16
; GFX11-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: s_endpgm
;
; GFX11-CU-LABEL: global_agent_one_as_seq_cst_seq_cst_cmpxchg:
@@ -11269,8 +11269,8 @@ define amdgpu_kernel void @global_agent_one_as_seq_cst_seq_cst_cmpxchg(
; GFX11-CU-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
; GFX11-CU-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], s[0:1] offset:16
; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: s_endpgm
ptr addrspace(1) %out, i32 %in, i32 %old) {
entry:
@@ -11473,8 +11473,8 @@ define amdgpu_kernel void @global_agent_one_as_acquire_monotonic_ret_cmpxchg(
; GFX10-WGP-NEXT: v_mov_b32_e32 v1, s3
; GFX10-WGP-NEXT: global_atomic_cmpswap v0, v2, v[0:1], s[0:1] offset:16 glc
; GFX10-WGP-NEXT: s_waitcnt vmcnt(0)
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: global_store_dword v2, v0, s[0:1]
; GFX10-WGP-NEXT: s_endpgm
;
@@ -11487,8 +11487,8 @@ define amdgpu_kernel void @global_agent_one_as_acquire_monotonic_ret_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s3
; GFX10-CU-NEXT: global_atomic_cmpswap v0, v2, v[0:1], s[0:1] offset:16 glc
; GFX10-CU-NEXT: s_waitcnt vmcnt(0)
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: global_store_dword v2, v0, s[0:1]
; GFX10-CU-NEXT: s_endpgm
;
@@ -11563,8 +11563,8 @@ define amdgpu_kernel void @global_agent_one_as_acquire_monotonic_ret_cmpxchg(
; GFX11-WGP-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
; GFX11-WGP-NEXT: global_atomic_cmpswap_b32 v0, v2, v[0:1], s[0:1] offset:16 glc
; GFX11-WGP-NEXT: s_waitcnt vmcnt(0)
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: global_store_b32 v2, v0, s[0:1]
; GFX11-WGP-NEXT: s_nop 0
; GFX11-WGP-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -11578,8 +11578,8 @@ define amdgpu_kernel void @global_agent_one_as_acquire_monotonic_ret_cmpxchg(
; GFX11-CU-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
; GFX11-CU-NEXT: global_atomic_cmpswap_b32 v0, v2, v[0:1], s[0:1] offset:16 glc
; GFX11-CU-NEXT: s_waitcnt vmcnt(0)
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: global_store_b32 v2, v0, s[0:1]
; GFX11-CU-NEXT: s_nop 0
; GFX11-CU-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -11637,8 +11637,8 @@ define amdgpu_kernel void @global_agent_one_as_acq_rel_monotonic_ret_cmpxchg(
; GFX10-WGP-NEXT: v_mov_b32_e32 v1, s3
; GFX10-WGP-NEXT: global_atomic_cmpswap v0, v2, v[0:1], s[0:1] offset:16 glc
; GFX10-WGP-NEXT: s_waitcnt vmcnt(0)
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: global_store_dword v2, v0, s[0:1]
; GFX10-WGP-NEXT: s_endpgm
;
@@ -11651,8 +11651,8 @@ define amdgpu_kernel void @global_agent_one_as_acq_rel_monotonic_ret_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s3
; GFX10-CU-NEXT: global_atomic_cmpswap v0, v2, v[0:1], s[0:1] offset:16 glc
; GFX10-CU-NEXT: s_waitcnt vmcnt(0)
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: global_store_dword v2, v0, s[0:1]
; GFX10-CU-NEXT: s_endpgm
;
@@ -11729,8 +11729,8 @@ define amdgpu_kernel void @global_agent_one_as_acq_rel_monotonic_ret_cmpxchg(
; GFX11-WGP-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
; GFX11-WGP-NEXT: global_atomic_cmpswap_b32 v0, v2, v[0:1], s[0:1] offset:16 glc
; GFX11-WGP-NEXT: s_waitcnt vmcnt(0)
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: global_store_b32 v2, v0, s[0:1]
; GFX11-WGP-NEXT: s_nop 0
; GFX11-WGP-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -11744,8 +11744,8 @@ define amdgpu_kernel void @global_agent_one_as_acq_rel_monotonic_ret_cmpxchg(
; GFX11-CU-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
; GFX11-CU-NEXT: global_atomic_cmpswap_b32 v0, v2, v[0:1], s[0:1] offset:16 glc
; GFX11-CU-NEXT: s_waitcnt vmcnt(0)
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: global_store_b32 v2, v0, s[0:1]
; GFX11-CU-NEXT: s_nop 0
; GFX11-CU-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -11803,8 +11803,8 @@ define amdgpu_kernel void @global_agent_one_as_seq_cst_monotonic_ret_cmpxchg(
; GFX10-WGP-NEXT: v_mov_b32_e32 v1, s3
; GFX10-WGP-NEXT: global_atomic_cmpswap v0, v2, v[0:1], s[0:1] offset:16 glc
; GFX10-WGP-NEXT: s_waitcnt vmcnt(0)
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: global_store_dword v2, v0, s[0:1]
; GFX10-WGP-NEXT: s_endpgm
;
@@ -11817,8 +11817,8 @@ define amdgpu_kernel void @global_agent_one_as_seq_cst_monotonic_ret_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s3
; GFX10-CU-NEXT: global_atomic_cmpswap v0, v2, v[0:1], s[0:1] offset:16 glc
; GFX10-CU-NEXT: s_waitcnt vmcnt(0)
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: global_store_dword v2, v0, s[0:1]
; GFX10-CU-NEXT: s_endpgm
;
@@ -11895,8 +11895,8 @@ define amdgpu_kernel void @global_agent_one_as_seq_cst_monotonic_ret_cmpxchg(
; GFX11-WGP-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
; GFX11-WGP-NEXT: global_atomic_cmpswap_b32 v0, v2, v[0:1], s[0:1] offset:16 glc
; GFX11-WGP-NEXT: s_waitcnt vmcnt(0)
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: global_store_b32 v2, v0, s[0:1]
; GFX11-WGP-NEXT: s_nop 0
; GFX11-WGP-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -11910,8 +11910,8 @@ define amdgpu_kernel void @global_agent_one_as_seq_cst_monotonic_ret_cmpxchg(
; GFX11-CU-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
; GFX11-CU-NEXT: global_atomic_cmpswap_b32 v0, v2, v[0:1], s[0:1] offset:16 glc
; GFX11-CU-NEXT: s_waitcnt vmcnt(0)
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: global_store_b32 v2, v0, s[0:1]
; GFX11-CU-NEXT: s_nop 0
; GFX11-CU-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -11969,8 +11969,8 @@ define amdgpu_kernel void @global_agent_one_as_monotonic_acquire_ret_cmpxchg(
; GFX10-WGP-NEXT: v_mov_b32_e32 v1, s3
; GFX10-WGP-NEXT: global_atomic_cmpswap v0, v2, v[0:1], s[0:1] offset:16 glc
; GFX10-WGP-NEXT: s_waitcnt vmcnt(0)
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: global_store_dword v2, v0, s[0:1]
; GFX10-WGP-NEXT: s_endpgm
;
@@ -11983,8 +11983,8 @@ define amdgpu_kernel void @global_agent_one_as_monotonic_acquire_ret_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s3
; GFX10-CU-NEXT: global_atomic_cmpswap v0, v2, v[0:1], s[0:1] offset:16 glc
; GFX10-CU-NEXT: s_waitcnt vmcnt(0)
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: global_store_dword v2, v0, s[0:1]
; GFX10-CU-NEXT: s_endpgm
;
@@ -12059,8 +12059,8 @@ define amdgpu_kernel void @global_agent_one_as_monotonic_acquire_ret_cmpxchg(
; GFX11-WGP-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
; GFX11-WGP-NEXT: global_atomic_cmpswap_b32 v0, v2, v[0:1], s[0:1] offset:16 glc
; GFX11-WGP-NEXT: s_waitcnt vmcnt(0)
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: global_store_b32 v2, v0, s[0:1]
; GFX11-WGP-NEXT: s_nop 0
; GFX11-WGP-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -12074,8 +12074,8 @@ define amdgpu_kernel void @global_agent_one_as_monotonic_acquire_ret_cmpxchg(
; GFX11-CU-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
; GFX11-CU-NEXT: global_atomic_cmpswap_b32 v0, v2, v[0:1], s[0:1] offset:16 glc
; GFX11-CU-NEXT: s_waitcnt vmcnt(0)
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: global_store_b32 v2, v0, s[0:1]
; GFX11-CU-NEXT: s_nop 0
; GFX11-CU-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -12133,8 +12133,8 @@ define amdgpu_kernel void @global_agent_one_as_acquire_acquire_ret_cmpxchg(
; GFX10-WGP-NEXT: v_mov_b32_e32 v1, s3
; GFX10-WGP-NEXT: global_atomic_cmpswap v0, v2, v[0:1], s[0:1] offset:16 glc
; GFX10-WGP-NEXT: s_waitcnt vmcnt(0)
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: global_store_dword v2, v0, s[0:1]
; GFX10-WGP-NEXT: s_endpgm
;
@@ -12147,8 +12147,8 @@ define amdgpu_kernel void @global_agent_one_as_acquire_acquire_ret_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s3
; GFX10-CU-NEXT: global_atomic_cmpswap v0, v2, v[0:1], s[0:1] offset:16 glc
; GFX10-CU-NEXT: s_waitcnt vmcnt(0)
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: global_store_dword v2, v0, s[0:1]
; GFX10-CU-NEXT: s_endpgm
;
@@ -12223,8 +12223,8 @@ define amdgpu_kernel void @global_agent_one_as_acquire_acquire_ret_cmpxchg(
; GFX11-WGP-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
; GFX11-WGP-NEXT: global_atomic_cmpswap_b32 v0, v2, v[0:1], s[0:1] offset:16 glc
; GFX11-WGP-NEXT: s_waitcnt vmcnt(0)
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: global_store_b32 v2, v0, s[0:1]
; GFX11-WGP-NEXT: s_nop 0
; GFX11-WGP-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -12238,8 +12238,8 @@ define amdgpu_kernel void @global_agent_one_as_acquire_acquire_ret_cmpxchg(
; GFX11-CU-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
; GFX11-CU-NEXT: global_atomic_cmpswap_b32 v0, v2, v[0:1], s[0:1] offset:16 glc
; GFX11-CU-NEXT: s_waitcnt vmcnt(0)
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: global_store_b32 v2, v0, s[0:1]
; GFX11-CU-NEXT: s_nop 0
; GFX11-CU-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -12297,8 +12297,8 @@ define amdgpu_kernel void @global_agent_one_as_release_acquire_ret_cmpxchg(
; GFX10-WGP-NEXT: v_mov_b32_e32 v1, s3
; GFX10-WGP-NEXT: global_atomic_cmpswap v0, v2, v[0:1], s[0:1] offset:16 glc
; GFX10-WGP-NEXT: s_waitcnt vmcnt(0)
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: global_store_dword v2, v0, s[0:1]
; GFX10-WGP-NEXT: s_endpgm
;
@@ -12311,8 +12311,8 @@ define amdgpu_kernel void @global_agent_one_as_release_acquire_ret_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s3
; GFX10-CU-NEXT: global_atomic_cmpswap v0, v2, v[0:1], s[0:1] offset:16 glc
; GFX10-CU-NEXT: s_waitcnt vmcnt(0)
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: global_store_dword v2, v0, s[0:1]
; GFX10-CU-NEXT: s_endpgm
;
@@ -12389,8 +12389,8 @@ define amdgpu_kernel void @global_agent_one_as_release_acquire_ret_cmpxchg(
; GFX11-WGP-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
; GFX11-WGP-NEXT: global_atomic_cmpswap_b32 v0, v2, v[0:1], s[0:1] offset:16 glc
; GFX11-WGP-NEXT: s_waitcnt vmcnt(0)
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: global_store_b32 v2, v0, s[0:1]
; GFX11-WGP-NEXT: s_nop 0
; GFX11-WGP-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -12404,8 +12404,8 @@ define amdgpu_kernel void @global_agent_one_as_release_acquire_ret_cmpxchg(
; GFX11-CU-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
; GFX11-CU-NEXT: global_atomic_cmpswap_b32 v0, v2, v[0:1], s[0:1] offset:16 glc
; GFX11-CU-NEXT: s_waitcnt vmcnt(0)
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: global_store_b32 v2, v0, s[0:1]
; GFX11-CU-NEXT: s_nop 0
; GFX11-CU-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -12463,8 +12463,8 @@ define amdgpu_kernel void @global_agent_one_as_acq_rel_acquire_ret_cmpxchg(
; GFX10-WGP-NEXT: v_mov_b32_e32 v1, s3
; GFX10-WGP-NEXT: global_atomic_cmpswap v0, v2, v[0:1], s[0:1] offset:16 glc
; GFX10-WGP-NEXT: s_waitcnt vmcnt(0)
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: global_store_dword v2, v0, s[0:1]
; GFX10-WGP-NEXT: s_endpgm
;
@@ -12477,8 +12477,8 @@ define amdgpu_kernel void @global_agent_one_as_acq_rel_acquire_ret_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s3
; GFX10-CU-NEXT: global_atomic_cmpswap v0, v2, v[0:1], s[0:1] offset:16 glc
; GFX10-CU-NEXT: s_waitcnt vmcnt(0)
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: global_store_dword v2, v0, s[0:1]
; GFX10-CU-NEXT: s_endpgm
;
@@ -12555,8 +12555,8 @@ define amdgpu_kernel void @global_agent_one_as_acq_rel_acquire_ret_cmpxchg(
; GFX11-WGP-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
; GFX11-WGP-NEXT: global_atomic_cmpswap_b32 v0, v2, v[0:1], s[0:1] offset:16 glc
; GFX11-WGP-NEXT: s_waitcnt vmcnt(0)
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: global_store_b32 v2, v0, s[0:1]
; GFX11-WGP-NEXT: s_nop 0
; GFX11-WGP-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -12570,8 +12570,8 @@ define amdgpu_kernel void @global_agent_one_as_acq_rel_acquire_ret_cmpxchg(
; GFX11-CU-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
; GFX11-CU-NEXT: global_atomic_cmpswap_b32 v0, v2, v[0:1], s[0:1] offset:16 glc
; GFX11-CU-NEXT: s_waitcnt vmcnt(0)
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: global_store_b32 v2, v0, s[0:1]
; GFX11-CU-NEXT: s_nop 0
; GFX11-CU-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -12629,8 +12629,8 @@ define amdgpu_kernel void @global_agent_one_as_seq_cst_acquire_ret_cmpxchg(
; GFX10-WGP-NEXT: v_mov_b32_e32 v1, s3
; GFX10-WGP-NEXT: global_atomic_cmpswap v0, v2, v[0:1], s[0:1] offset:16 glc
; GFX10-WGP-NEXT: s_waitcnt vmcnt(0)
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: global_store_dword v2, v0, s[0:1]
; GFX10-WGP-NEXT: s_endpgm
;
@@ -12643,8 +12643,8 @@ define amdgpu_kernel void @global_agent_one_as_seq_cst_acquire_ret_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s3
; GFX10-CU-NEXT: global_atomic_cmpswap v0, v2, v[0:1], s[0:1] offset:16 glc
; GFX10-CU-NEXT: s_waitcnt vmcnt(0)
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: global_store_dword v2, v0, s[0:1]
; GFX10-CU-NEXT: s_endpgm
;
@@ -12721,8 +12721,8 @@ define amdgpu_kernel void @global_agent_one_as_seq_cst_acquire_ret_cmpxchg(
; GFX11-WGP-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
; GFX11-WGP-NEXT: global_atomic_cmpswap_b32 v0, v2, v[0:1], s[0:1] offset:16 glc
; GFX11-WGP-NEXT: s_waitcnt vmcnt(0)
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: global_store_b32 v2, v0, s[0:1]
; GFX11-WGP-NEXT: s_nop 0
; GFX11-WGP-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -12736,8 +12736,8 @@ define amdgpu_kernel void @global_agent_one_as_seq_cst_acquire_ret_cmpxchg(
; GFX11-CU-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
; GFX11-CU-NEXT: global_atomic_cmpswap_b32 v0, v2, v[0:1], s[0:1] offset:16 glc
; GFX11-CU-NEXT: s_waitcnt vmcnt(0)
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: global_store_b32 v2, v0, s[0:1]
; GFX11-CU-NEXT: s_nop 0
; GFX11-CU-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -12795,8 +12795,8 @@ define amdgpu_kernel void @global_agent_one_as_monotonic_seq_cst_ret_cmpxchg(
; GFX10-WGP-NEXT: v_mov_b32_e32 v1, s3
; GFX10-WGP-NEXT: global_atomic_cmpswap v0, v2, v[0:1], s[0:1] offset:16 glc
; GFX10-WGP-NEXT: s_waitcnt vmcnt(0)
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: global_store_dword v2, v0, s[0:1]
; GFX10-WGP-NEXT: s_endpgm
;
@@ -12809,8 +12809,8 @@ define amdgpu_kernel void @global_agent_one_as_monotonic_seq_cst_ret_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s3
; GFX10-CU-NEXT: global_atomic_cmpswap v0, v2, v[0:1], s[0:1] offset:16 glc
; GFX10-CU-NEXT: s_waitcnt vmcnt(0)
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: global_store_dword v2, v0, s[0:1]
; GFX10-CU-NEXT: s_endpgm
;
@@ -12887,8 +12887,8 @@ define amdgpu_kernel void @global_agent_one_as_monotonic_seq_cst_ret_cmpxchg(
; GFX11-WGP-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
; GFX11-WGP-NEXT: global_atomic_cmpswap_b32 v0, v2, v[0:1], s[0:1] offset:16 glc
; GFX11-WGP-NEXT: s_waitcnt vmcnt(0)
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: global_store_b32 v2, v0, s[0:1]
; GFX11-WGP-NEXT: s_nop 0
; GFX11-WGP-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -12902,8 +12902,8 @@ define amdgpu_kernel void @global_agent_one_as_monotonic_seq_cst_ret_cmpxchg(
; GFX11-CU-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
; GFX11-CU-NEXT: global_atomic_cmpswap_b32 v0, v2, v[0:1], s[0:1] offset:16 glc
; GFX11-CU-NEXT: s_waitcnt vmcnt(0)
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: global_store_b32 v2, v0, s[0:1]
; GFX11-CU-NEXT: s_nop 0
; GFX11-CU-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -12961,8 +12961,8 @@ define amdgpu_kernel void @global_agent_one_as_acquire_seq_cst_ret_cmpxchg(
; GFX10-WGP-NEXT: v_mov_b32_e32 v1, s3
; GFX10-WGP-NEXT: global_atomic_cmpswap v0, v2, v[0:1], s[0:1] offset:16 glc
; GFX10-WGP-NEXT: s_waitcnt vmcnt(0)
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: global_store_dword v2, v0, s[0:1]
; GFX10-WGP-NEXT: s_endpgm
;
@@ -12975,8 +12975,8 @@ define amdgpu_kernel void @global_agent_one_as_acquire_seq_cst_ret_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s3
; GFX10-CU-NEXT: global_atomic_cmpswap v0, v2, v[0:1], s[0:1] offset:16 glc
; GFX10-CU-NEXT: s_waitcnt vmcnt(0)
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: global_store_dword v2, v0, s[0:1]
; GFX10-CU-NEXT: s_endpgm
;
@@ -13053,8 +13053,8 @@ define amdgpu_kernel void @global_agent_one_as_acquire_seq_cst_ret_cmpxchg(
; GFX11-WGP-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
; GFX11-WGP-NEXT: global_atomic_cmpswap_b32 v0, v2, v[0:1], s[0:1] offset:16 glc
; GFX11-WGP-NEXT: s_waitcnt vmcnt(0)
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: global_store_b32 v2, v0, s[0:1]
; GFX11-WGP-NEXT: s_nop 0
; GFX11-WGP-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -13068,8 +13068,8 @@ define amdgpu_kernel void @global_agent_one_as_acquire_seq_cst_ret_cmpxchg(
; GFX11-CU-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
; GFX11-CU-NEXT: global_atomic_cmpswap_b32 v0, v2, v[0:1], s[0:1] offset:16 glc
; GFX11-CU-NEXT: s_waitcnt vmcnt(0)
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: global_store_b32 v2, v0, s[0:1]
; GFX11-CU-NEXT: s_nop 0
; GFX11-CU-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -13127,8 +13127,8 @@ define amdgpu_kernel void @global_agent_one_as_release_seq_cst_ret_cmpxchg(
; GFX10-WGP-NEXT: v_mov_b32_e32 v1, s3
; GFX10-WGP-NEXT: global_atomic_cmpswap v0, v2, v[0:1], s[0:1] offset:16 glc
; GFX10-WGP-NEXT: s_waitcnt vmcnt(0)
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: global_store_dword v2, v0, s[0:1]
; GFX10-WGP-NEXT: s_endpgm
;
@@ -13141,8 +13141,8 @@ define amdgpu_kernel void @global_agent_one_as_release_seq_cst_ret_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s3
; GFX10-CU-NEXT: global_atomic_cmpswap v0, v2, v[0:1], s[0:1] offset:16 glc
; GFX10-CU-NEXT: s_waitcnt vmcnt(0)
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: global_store_dword v2, v0, s[0:1]
; GFX10-CU-NEXT: s_endpgm
;
@@ -13219,8 +13219,8 @@ define amdgpu_kernel void @global_agent_one_as_release_seq_cst_ret_cmpxchg(
; GFX11-WGP-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
; GFX11-WGP-NEXT: global_atomic_cmpswap_b32 v0, v2, v[0:1], s[0:1] offset:16 glc
; GFX11-WGP-NEXT: s_waitcnt vmcnt(0)
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: global_store_b32 v2, v0, s[0:1]
; GFX11-WGP-NEXT: s_nop 0
; GFX11-WGP-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -13234,8 +13234,8 @@ define amdgpu_kernel void @global_agent_one_as_release_seq_cst_ret_cmpxchg(
; GFX11-CU-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
; GFX11-CU-NEXT: global_atomic_cmpswap_b32 v0, v2, v[0:1], s[0:1] offset:16 glc
; GFX11-CU-NEXT: s_waitcnt vmcnt(0)
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: global_store_b32 v2, v0, s[0:1]
; GFX11-CU-NEXT: s_nop 0
; GFX11-CU-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -13293,8 +13293,8 @@ define amdgpu_kernel void @global_agent_one_as_acq_rel_seq_cst_ret_cmpxchg(
; GFX10-WGP-NEXT: v_mov_b32_e32 v1, s3
; GFX10-WGP-NEXT: global_atomic_cmpswap v0, v2, v[0:1], s[0:1] offset:16 glc
; GFX10-WGP-NEXT: s_waitcnt vmcnt(0)
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: global_store_dword v2, v0, s[0:1]
; GFX10-WGP-NEXT: s_endpgm
;
@@ -13307,8 +13307,8 @@ define amdgpu_kernel void @global_agent_one_as_acq_rel_seq_cst_ret_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s3
; GFX10-CU-NEXT: global_atomic_cmpswap v0, v2, v[0:1], s[0:1] offset:16 glc
; GFX10-CU-NEXT: s_waitcnt vmcnt(0)
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: global_store_dword v2, v0, s[0:1]
; GFX10-CU-NEXT: s_endpgm
;
@@ -13385,8 +13385,8 @@ define amdgpu_kernel void @global_agent_one_as_acq_rel_seq_cst_ret_cmpxchg(
; GFX11-WGP-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
; GFX11-WGP-NEXT: global_atomic_cmpswap_b32 v0, v2, v[0:1], s[0:1] offset:16 glc
; GFX11-WGP-NEXT: s_waitcnt vmcnt(0)
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: global_store_b32 v2, v0, s[0:1]
; GFX11-WGP-NEXT: s_nop 0
; GFX11-WGP-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -13400,8 +13400,8 @@ define amdgpu_kernel void @global_agent_one_as_acq_rel_seq_cst_ret_cmpxchg(
; GFX11-CU-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
; GFX11-CU-NEXT: global_atomic_cmpswap_b32 v0, v2, v[0:1], s[0:1] offset:16 glc
; GFX11-CU-NEXT: s_waitcnt vmcnt(0)
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: global_store_b32 v2, v0, s[0:1]
; GFX11-CU-NEXT: s_nop 0
; GFX11-CU-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -13459,8 +13459,8 @@ define amdgpu_kernel void @global_agent_one_as_seq_cst_seq_cst_ret_cmpxchg(
; GFX10-WGP-NEXT: v_mov_b32_e32 v1, s3
; GFX10-WGP-NEXT: global_atomic_cmpswap v0, v2, v[0:1], s[0:1] offset:16 glc
; GFX10-WGP-NEXT: s_waitcnt vmcnt(0)
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: global_store_dword v2, v0, s[0:1]
; GFX10-WGP-NEXT: s_endpgm
;
@@ -13473,8 +13473,8 @@ define amdgpu_kernel void @global_agent_one_as_seq_cst_seq_cst_ret_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s3
; GFX10-CU-NEXT: global_atomic_cmpswap v0, v2, v[0:1], s[0:1] offset:16 glc
; GFX10-CU-NEXT: s_waitcnt vmcnt(0)
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: global_store_dword v2, v0, s[0:1]
; GFX10-CU-NEXT: s_endpgm
;
@@ -13551,8 +13551,8 @@ define amdgpu_kernel void @global_agent_one_as_seq_cst_seq_cst_ret_cmpxchg(
; GFX11-WGP-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
; GFX11-WGP-NEXT: global_atomic_cmpswap_b32 v0, v2, v[0:1], s[0:1] offset:16 glc
; GFX11-WGP-NEXT: s_waitcnt vmcnt(0)
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: global_store_b32 v2, v0, s[0:1]
; GFX11-WGP-NEXT: s_nop 0
; GFX11-WGP-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -13566,8 +13566,8 @@ define amdgpu_kernel void @global_agent_one_as_seq_cst_seq_cst_ret_cmpxchg(
; GFX11-CU-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
; GFX11-CU-NEXT: global_atomic_cmpswap_b32 v0, v2, v[0:1], s[0:1] offset:16 glc
; GFX11-CU-NEXT: s_waitcnt vmcnt(0)
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: global_store_b32 v2, v0, s[0:1]
; GFX11-CU-NEXT: s_nop 0
; GFX11-CU-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
diff --git a/llvm/test/CodeGen/AMDGPU/memory-legalizer-global-system.ll b/llvm/test/CodeGen/AMDGPU/memory-legalizer-global-system.ll
index 97b2f08..f611003 100644
--- a/llvm/test/CodeGen/AMDGPU/memory-legalizer-global-system.ll
+++ b/llvm/test/CodeGen/AMDGPU/memory-legalizer-global-system.ll
@@ -317,8 +317,8 @@ define amdgpu_kernel void @global_system_acquire_load(
; GFX10-WGP-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-WGP-NEXT: global_load_dword v1, v0, s[0:1] glc dlc
; GFX10-WGP-NEXT: s_waitcnt vmcnt(0)
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: global_store_dword v0, v1, s[2:3]
; GFX10-WGP-NEXT: s_endpgm
;
@@ -329,8 +329,8 @@ define amdgpu_kernel void @global_system_acquire_load(
; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-CU-NEXT: global_load_dword v1, v0, s[0:1] glc dlc
; GFX10-CU-NEXT: s_waitcnt vmcnt(0)
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: global_store_dword v0, v1, s[2:3]
; GFX10-CU-NEXT: s_endpgm
;
@@ -402,8 +402,8 @@ define amdgpu_kernel void @global_system_acquire_load(
; GFX11-WGP-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-WGP-NEXT: global_load_b32 v1, v0, s[0:1] glc
; GFX11-WGP-NEXT: s_waitcnt vmcnt(0)
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: global_store_b32 v0, v1, s[2:3]
; GFX11-WGP-NEXT: s_nop 0
; GFX11-WGP-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -416,8 +416,8 @@ define amdgpu_kernel void @global_system_acquire_load(
; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-CU-NEXT: global_load_b32 v1, v0, s[0:1] glc
; GFX11-CU-NEXT: s_waitcnt vmcnt(0)
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: global_store_b32 v0, v1, s[2:3]
; GFX11-CU-NEXT: s_nop 0
; GFX11-CU-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -467,8 +467,8 @@ define amdgpu_kernel void @global_system_seq_cst_load(
; GFX10-WGP-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-WGP-NEXT: global_load_dword v1, v0, s[0:1] glc dlc
; GFX10-WGP-NEXT: s_waitcnt vmcnt(0)
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: global_store_dword v0, v1, s[2:3]
; GFX10-WGP-NEXT: s_endpgm
;
@@ -479,8 +479,8 @@ define amdgpu_kernel void @global_system_seq_cst_load(
; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-CU-NEXT: global_load_dword v1, v0, s[0:1] glc dlc
; GFX10-CU-NEXT: s_waitcnt vmcnt(0)
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: global_store_dword v0, v1, s[2:3]
; GFX10-CU-NEXT: s_endpgm
;
@@ -552,8 +552,8 @@ define amdgpu_kernel void @global_system_seq_cst_load(
; GFX11-WGP-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-WGP-NEXT: global_load_b32 v1, v0, s[0:1] glc
; GFX11-WGP-NEXT: s_waitcnt vmcnt(0)
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: global_store_b32 v0, v1, s[2:3]
; GFX11-WGP-NEXT: s_nop 0
; GFX11-WGP-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -566,8 +566,8 @@ define amdgpu_kernel void @global_system_seq_cst_load(
; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-CU-NEXT: global_load_b32 v1, v0, s[0:1] glc
; GFX11-CU-NEXT: s_waitcnt vmcnt(0)
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: global_store_b32 v0, v1, s[2:3]
; GFX11-CU-NEXT: s_nop 0
; GFX11-CU-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -1249,8 +1249,8 @@ define amdgpu_kernel void @global_system_acquire_atomicrmw(
; GFX10-WGP-NEXT: v_mov_b32_e32 v1, s2
; GFX10-WGP-NEXT: global_atomic_swap v0, v1, s[0:1]
; GFX10-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: s_endpgm
;
; GFX10-CU-LABEL: global_system_acquire_atomicrmw:
@@ -1263,8 +1263,8 @@ define amdgpu_kernel void @global_system_acquire_atomicrmw(
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s2
; GFX10-CU-NEXT: global_atomic_swap v0, v1, s[0:1]
; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: s_endpgm
;
; SKIP-CACHE-INV-LABEL: global_system_acquire_atomicrmw:
@@ -1338,8 +1338,8 @@ define amdgpu_kernel void @global_system_acquire_atomicrmw(
; GFX11-WGP-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, s2
; GFX11-WGP-NEXT: global_atomic_swap_b32 v0, v1, s[0:1]
; GFX11-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: s_endpgm
;
; GFX11-CU-LABEL: global_system_acquire_atomicrmw:
@@ -1351,8 +1351,8 @@ define amdgpu_kernel void @global_system_acquire_atomicrmw(
; GFX11-CU-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, s2
; GFX11-CU-NEXT: global_atomic_swap_b32 v0, v1, s[0:1]
; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: s_endpgm
ptr addrspace(1) %out, i32 %in) {
entry:
@@ -1526,8 +1526,8 @@ define amdgpu_kernel void @global_system_acq_rel_atomicrmw(
; GFX10-WGP-NEXT: v_mov_b32_e32 v1, s2
; GFX10-WGP-NEXT: global_atomic_swap v0, v1, s[0:1]
; GFX10-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: s_endpgm
;
; GFX10-CU-LABEL: global_system_acq_rel_atomicrmw:
@@ -1540,8 +1540,8 @@ define amdgpu_kernel void @global_system_acq_rel_atomicrmw(
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s2
; GFX10-CU-NEXT: global_atomic_swap v0, v1, s[0:1]
; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: s_endpgm
;
; SKIP-CACHE-INV-LABEL: global_system_acq_rel_atomicrmw:
@@ -1619,8 +1619,8 @@ define amdgpu_kernel void @global_system_acq_rel_atomicrmw(
; GFX11-WGP-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, s2
; GFX11-WGP-NEXT: global_atomic_swap_b32 v0, v1, s[0:1]
; GFX11-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: s_endpgm
;
; GFX11-CU-LABEL: global_system_acq_rel_atomicrmw:
@@ -1632,8 +1632,8 @@ define amdgpu_kernel void @global_system_acq_rel_atomicrmw(
; GFX11-CU-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, s2
; GFX11-CU-NEXT: global_atomic_swap_b32 v0, v1, s[0:1]
; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: s_endpgm
ptr addrspace(1) %out, i32 %in) {
entry:
@@ -1678,8 +1678,8 @@ define amdgpu_kernel void @global_system_seq_cst_atomicrmw(
; GFX10-WGP-NEXT: v_mov_b32_e32 v1, s2
; GFX10-WGP-NEXT: global_atomic_swap v0, v1, s[0:1]
; GFX10-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: s_endpgm
;
; GFX10-CU-LABEL: global_system_seq_cst_atomicrmw:
@@ -1692,8 +1692,8 @@ define amdgpu_kernel void @global_system_seq_cst_atomicrmw(
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s2
; GFX10-CU-NEXT: global_atomic_swap v0, v1, s[0:1]
; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: s_endpgm
;
; SKIP-CACHE-INV-LABEL: global_system_seq_cst_atomicrmw:
@@ -1771,8 +1771,8 @@ define amdgpu_kernel void @global_system_seq_cst_atomicrmw(
; GFX11-WGP-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, s2
; GFX11-WGP-NEXT: global_atomic_swap_b32 v0, v1, s[0:1]
; GFX11-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: s_endpgm
;
; GFX11-CU-LABEL: global_system_seq_cst_atomicrmw:
@@ -1784,8 +1784,8 @@ define amdgpu_kernel void @global_system_seq_cst_atomicrmw(
; GFX11-CU-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, s2
; GFX11-CU-NEXT: global_atomic_swap_b32 v0, v1, s[0:1]
; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: s_endpgm
ptr addrspace(1) %out, i32 %in) {
entry:
@@ -1832,8 +1832,8 @@ define amdgpu_kernel void @global_system_acquire_ret_atomicrmw(
; GFX10-WGP-NEXT: v_mov_b32_e32 v1, s2
; GFX10-WGP-NEXT: global_atomic_swap v1, v0, v1, s[0:1] glc
; GFX10-WGP-NEXT: s_waitcnt vmcnt(0)
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: global_store_dword v0, v1, s[0:1]
; GFX10-WGP-NEXT: s_endpgm
;
@@ -1847,8 +1847,8 @@ define amdgpu_kernel void @global_system_acquire_ret_atomicrmw(
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s2
; GFX10-CU-NEXT: global_atomic_swap v1, v0, v1, s[0:1] glc
; GFX10-CU-NEXT: s_waitcnt vmcnt(0)
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: global_store_dword v0, v1, s[0:1]
; GFX10-CU-NEXT: s_endpgm
;
@@ -1928,8 +1928,8 @@ define amdgpu_kernel void @global_system_acquire_ret_atomicrmw(
; GFX11-WGP-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, s2
; GFX11-WGP-NEXT: global_atomic_swap_b32 v1, v0, v1, s[0:1] glc
; GFX11-WGP-NEXT: s_waitcnt vmcnt(0)
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: global_store_b32 v0, v1, s[0:1]
; GFX11-WGP-NEXT: s_nop 0
; GFX11-WGP-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -1944,8 +1944,8 @@ define amdgpu_kernel void @global_system_acquire_ret_atomicrmw(
; GFX11-CU-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, s2
; GFX11-CU-NEXT: global_atomic_swap_b32 v1, v0, v1, s[0:1] glc
; GFX11-CU-NEXT: s_waitcnt vmcnt(0)
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: global_store_b32 v0, v1, s[0:1]
; GFX11-CU-NEXT: s_nop 0
; GFX11-CU-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -1996,8 +1996,8 @@ define amdgpu_kernel void @global_system_acq_rel_ret_atomicrmw(
; GFX10-WGP-NEXT: v_mov_b32_e32 v1, s2
; GFX10-WGP-NEXT: global_atomic_swap v1, v0, v1, s[0:1] glc
; GFX10-WGP-NEXT: s_waitcnt vmcnt(0)
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: global_store_dword v0, v1, s[0:1]
; GFX10-WGP-NEXT: s_endpgm
;
@@ -2011,8 +2011,8 @@ define amdgpu_kernel void @global_system_acq_rel_ret_atomicrmw(
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s2
; GFX10-CU-NEXT: global_atomic_swap v1, v0, v1, s[0:1] glc
; GFX10-CU-NEXT: s_waitcnt vmcnt(0)
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: global_store_dword v0, v1, s[0:1]
; GFX10-CU-NEXT: s_endpgm
;
@@ -2096,8 +2096,8 @@ define amdgpu_kernel void @global_system_acq_rel_ret_atomicrmw(
; GFX11-WGP-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, s2
; GFX11-WGP-NEXT: global_atomic_swap_b32 v1, v0, v1, s[0:1] glc
; GFX11-WGP-NEXT: s_waitcnt vmcnt(0)
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: global_store_b32 v0, v1, s[0:1]
; GFX11-WGP-NEXT: s_nop 0
; GFX11-WGP-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -2112,8 +2112,8 @@ define amdgpu_kernel void @global_system_acq_rel_ret_atomicrmw(
; GFX11-CU-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, s2
; GFX11-CU-NEXT: global_atomic_swap_b32 v1, v0, v1, s[0:1] glc
; GFX11-CU-NEXT: s_waitcnt vmcnt(0)
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: global_store_b32 v0, v1, s[0:1]
; GFX11-CU-NEXT: s_nop 0
; GFX11-CU-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -2164,8 +2164,8 @@ define amdgpu_kernel void @global_system_seq_cst_ret_atomicrmw(
; GFX10-WGP-NEXT: v_mov_b32_e32 v1, s2
; GFX10-WGP-NEXT: global_atomic_swap v1, v0, v1, s[0:1] glc
; GFX10-WGP-NEXT: s_waitcnt vmcnt(0)
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: global_store_dword v0, v1, s[0:1]
; GFX10-WGP-NEXT: s_endpgm
;
@@ -2179,8 +2179,8 @@ define amdgpu_kernel void @global_system_seq_cst_ret_atomicrmw(
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s2
; GFX10-CU-NEXT: global_atomic_swap v1, v0, v1, s[0:1] glc
; GFX10-CU-NEXT: s_waitcnt vmcnt(0)
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: global_store_dword v0, v1, s[0:1]
; GFX10-CU-NEXT: s_endpgm
;
@@ -2264,8 +2264,8 @@ define amdgpu_kernel void @global_system_seq_cst_ret_atomicrmw(
; GFX11-WGP-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, s2
; GFX11-WGP-NEXT: global_atomic_swap_b32 v1, v0, v1, s[0:1] glc
; GFX11-WGP-NEXT: s_waitcnt vmcnt(0)
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: global_store_b32 v0, v1, s[0:1]
; GFX11-WGP-NEXT: s_nop 0
; GFX11-WGP-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -2280,8 +2280,8 @@ define amdgpu_kernel void @global_system_seq_cst_ret_atomicrmw(
; GFX11-CU-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, s2
; GFX11-CU-NEXT: global_atomic_swap_b32 v1, v0, v1, s[0:1] glc
; GFX11-CU-NEXT: s_waitcnt vmcnt(0)
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: global_store_b32 v0, v1, s[0:1]
; GFX11-CU-NEXT: s_nop 0
; GFX11-CU-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -2457,8 +2457,8 @@ define amdgpu_kernel void @global_system_acquire_monotonic_cmpxchg(
; GFX10-WGP-NEXT: v_mov_b32_e32 v1, s3
; GFX10-WGP-NEXT: global_atomic_cmpswap v2, v[0:1], s[0:1] offset:16
; GFX10-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: s_endpgm
;
; GFX10-CU-LABEL: global_system_acquire_monotonic_cmpxchg:
@@ -2470,8 +2470,8 @@ define amdgpu_kernel void @global_system_acquire_monotonic_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s3
; GFX10-CU-NEXT: global_atomic_cmpswap v2, v[0:1], s[0:1] offset:16
; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: s_endpgm
;
; SKIP-CACHE-INV-LABEL: global_system_acquire_monotonic_cmpxchg:
@@ -2542,8 +2542,8 @@ define amdgpu_kernel void @global_system_acquire_monotonic_cmpxchg(
; GFX11-WGP-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
; GFX11-WGP-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], s[0:1] offset:16
; GFX11-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: s_endpgm
;
; GFX11-CU-LABEL: global_system_acquire_monotonic_cmpxchg:
@@ -2554,8 +2554,8 @@ define amdgpu_kernel void @global_system_acquire_monotonic_cmpxchg(
; GFX11-CU-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
; GFX11-CU-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], s[0:1] offset:16
; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: s_endpgm
ptr addrspace(1) %out, i32 %in, i32 %old) {
entry:
@@ -2732,8 +2732,8 @@ define amdgpu_kernel void @global_system_acq_rel_monotonic_cmpxchg(
; GFX10-WGP-NEXT: v_mov_b32_e32 v1, s3
; GFX10-WGP-NEXT: global_atomic_cmpswap v2, v[0:1], s[0:1] offset:16
; GFX10-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: s_endpgm
;
; GFX10-CU-LABEL: global_system_acq_rel_monotonic_cmpxchg:
@@ -2745,8 +2745,8 @@ define amdgpu_kernel void @global_system_acq_rel_monotonic_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s3
; GFX10-CU-NEXT: global_atomic_cmpswap v2, v[0:1], s[0:1] offset:16
; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: s_endpgm
;
; SKIP-CACHE-INV-LABEL: global_system_acq_rel_monotonic_cmpxchg:
@@ -2821,8 +2821,8 @@ define amdgpu_kernel void @global_system_acq_rel_monotonic_cmpxchg(
; GFX11-WGP-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
; GFX11-WGP-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], s[0:1] offset:16
; GFX11-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: s_endpgm
;
; GFX11-CU-LABEL: global_system_acq_rel_monotonic_cmpxchg:
@@ -2833,8 +2833,8 @@ define amdgpu_kernel void @global_system_acq_rel_monotonic_cmpxchg(
; GFX11-CU-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
; GFX11-CU-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], s[0:1] offset:16
; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: s_endpgm
ptr addrspace(1) %out, i32 %in, i32 %old) {
entry:
@@ -2883,8 +2883,8 @@ define amdgpu_kernel void @global_system_seq_cst_monotonic_cmpxchg(
; GFX10-WGP-NEXT: v_mov_b32_e32 v1, s3
; GFX10-WGP-NEXT: global_atomic_cmpswap v2, v[0:1], s[0:1] offset:16
; GFX10-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: s_endpgm
;
; GFX10-CU-LABEL: global_system_seq_cst_monotonic_cmpxchg:
@@ -2896,8 +2896,8 @@ define amdgpu_kernel void @global_system_seq_cst_monotonic_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s3
; GFX10-CU-NEXT: global_atomic_cmpswap v2, v[0:1], s[0:1] offset:16
; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: s_endpgm
;
; SKIP-CACHE-INV-LABEL: global_system_seq_cst_monotonic_cmpxchg:
@@ -2972,8 +2972,8 @@ define amdgpu_kernel void @global_system_seq_cst_monotonic_cmpxchg(
; GFX11-WGP-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
; GFX11-WGP-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], s[0:1] offset:16
; GFX11-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: s_endpgm
;
; GFX11-CU-LABEL: global_system_seq_cst_monotonic_cmpxchg:
@@ -2984,8 +2984,8 @@ define amdgpu_kernel void @global_system_seq_cst_monotonic_cmpxchg(
; GFX11-CU-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
; GFX11-CU-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], s[0:1] offset:16
; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: s_endpgm
ptr addrspace(1) %out, i32 %in, i32 %old) {
entry:
@@ -3034,8 +3034,8 @@ define amdgpu_kernel void @global_system_monotonic_acquire_cmpxchg(
; GFX10-WGP-NEXT: v_mov_b32_e32 v1, s3
; GFX10-WGP-NEXT: global_atomic_cmpswap v2, v[0:1], s[0:1] offset:16
; GFX10-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: s_endpgm
;
; GFX10-CU-LABEL: global_system_monotonic_acquire_cmpxchg:
@@ -3047,8 +3047,8 @@ define amdgpu_kernel void @global_system_monotonic_acquire_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s3
; GFX10-CU-NEXT: global_atomic_cmpswap v2, v[0:1], s[0:1] offset:16
; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: s_endpgm
;
; SKIP-CACHE-INV-LABEL: global_system_monotonic_acquire_cmpxchg:
@@ -3119,8 +3119,8 @@ define amdgpu_kernel void @global_system_monotonic_acquire_cmpxchg(
; GFX11-WGP-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
; GFX11-WGP-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], s[0:1] offset:16
; GFX11-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: s_endpgm
;
; GFX11-CU-LABEL: global_system_monotonic_acquire_cmpxchg:
@@ -3131,8 +3131,8 @@ define amdgpu_kernel void @global_system_monotonic_acquire_cmpxchg(
; GFX11-CU-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
; GFX11-CU-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], s[0:1] offset:16
; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: s_endpgm
ptr addrspace(1) %out, i32 %in, i32 %old) {
entry:
@@ -3181,8 +3181,8 @@ define amdgpu_kernel void @global_system_acquire_acquire_cmpxchg(
; GFX10-WGP-NEXT: v_mov_b32_e32 v1, s3
; GFX10-WGP-NEXT: global_atomic_cmpswap v2, v[0:1], s[0:1] offset:16
; GFX10-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: s_endpgm
;
; GFX10-CU-LABEL: global_system_acquire_acquire_cmpxchg:
@@ -3194,8 +3194,8 @@ define amdgpu_kernel void @global_system_acquire_acquire_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s3
; GFX10-CU-NEXT: global_atomic_cmpswap v2, v[0:1], s[0:1] offset:16
; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: s_endpgm
;
; SKIP-CACHE-INV-LABEL: global_system_acquire_acquire_cmpxchg:
@@ -3266,8 +3266,8 @@ define amdgpu_kernel void @global_system_acquire_acquire_cmpxchg(
; GFX11-WGP-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
; GFX11-WGP-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], s[0:1] offset:16
; GFX11-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: s_endpgm
;
; GFX11-CU-LABEL: global_system_acquire_acquire_cmpxchg:
@@ -3278,8 +3278,8 @@ define amdgpu_kernel void @global_system_acquire_acquire_cmpxchg(
; GFX11-CU-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
; GFX11-CU-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], s[0:1] offset:16
; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: s_endpgm
ptr addrspace(1) %out, i32 %in, i32 %old) {
entry:
@@ -3328,8 +3328,8 @@ define amdgpu_kernel void @global_system_release_acquire_cmpxchg(
; GFX10-WGP-NEXT: v_mov_b32_e32 v1, s3
; GFX10-WGP-NEXT: global_atomic_cmpswap v2, v[0:1], s[0:1] offset:16
; GFX10-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: s_endpgm
;
; GFX10-CU-LABEL: global_system_release_acquire_cmpxchg:
@@ -3341,8 +3341,8 @@ define amdgpu_kernel void @global_system_release_acquire_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s3
; GFX10-CU-NEXT: global_atomic_cmpswap v2, v[0:1], s[0:1] offset:16
; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: s_endpgm
;
; SKIP-CACHE-INV-LABEL: global_system_release_acquire_cmpxchg:
@@ -3417,8 +3417,8 @@ define amdgpu_kernel void @global_system_release_acquire_cmpxchg(
; GFX11-WGP-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
; GFX11-WGP-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], s[0:1] offset:16
; GFX11-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: s_endpgm
;
; GFX11-CU-LABEL: global_system_release_acquire_cmpxchg:
@@ -3429,8 +3429,8 @@ define amdgpu_kernel void @global_system_release_acquire_cmpxchg(
; GFX11-CU-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
; GFX11-CU-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], s[0:1] offset:16
; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: s_endpgm
ptr addrspace(1) %out, i32 %in, i32 %old) {
entry:
@@ -3479,8 +3479,8 @@ define amdgpu_kernel void @global_system_acq_rel_acquire_cmpxchg(
; GFX10-WGP-NEXT: v_mov_b32_e32 v1, s3
; GFX10-WGP-NEXT: global_atomic_cmpswap v2, v[0:1], s[0:1] offset:16
; GFX10-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: s_endpgm
;
; GFX10-CU-LABEL: global_system_acq_rel_acquire_cmpxchg:
@@ -3492,8 +3492,8 @@ define amdgpu_kernel void @global_system_acq_rel_acquire_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s3
; GFX10-CU-NEXT: global_atomic_cmpswap v2, v[0:1], s[0:1] offset:16
; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: s_endpgm
;
; SKIP-CACHE-INV-LABEL: global_system_acq_rel_acquire_cmpxchg:
@@ -3568,8 +3568,8 @@ define amdgpu_kernel void @global_system_acq_rel_acquire_cmpxchg(
; GFX11-WGP-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
; GFX11-WGP-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], s[0:1] offset:16
; GFX11-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: s_endpgm
;
; GFX11-CU-LABEL: global_system_acq_rel_acquire_cmpxchg:
@@ -3580,8 +3580,8 @@ define amdgpu_kernel void @global_system_acq_rel_acquire_cmpxchg(
; GFX11-CU-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
; GFX11-CU-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], s[0:1] offset:16
; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: s_endpgm
ptr addrspace(1) %out, i32 %in, i32 %old) {
entry:
@@ -3630,8 +3630,8 @@ define amdgpu_kernel void @global_system_seq_cst_acquire_cmpxchg(
; GFX10-WGP-NEXT: v_mov_b32_e32 v1, s3
; GFX10-WGP-NEXT: global_atomic_cmpswap v2, v[0:1], s[0:1] offset:16
; GFX10-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: s_endpgm
;
; GFX10-CU-LABEL: global_system_seq_cst_acquire_cmpxchg:
@@ -3643,8 +3643,8 @@ define amdgpu_kernel void @global_system_seq_cst_acquire_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s3
; GFX10-CU-NEXT: global_atomic_cmpswap v2, v[0:1], s[0:1] offset:16
; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: s_endpgm
;
; SKIP-CACHE-INV-LABEL: global_system_seq_cst_acquire_cmpxchg:
@@ -3719,8 +3719,8 @@ define amdgpu_kernel void @global_system_seq_cst_acquire_cmpxchg(
; GFX11-WGP-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
; GFX11-WGP-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], s[0:1] offset:16
; GFX11-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: s_endpgm
;
; GFX11-CU-LABEL: global_system_seq_cst_acquire_cmpxchg:
@@ -3731,8 +3731,8 @@ define amdgpu_kernel void @global_system_seq_cst_acquire_cmpxchg(
; GFX11-CU-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
; GFX11-CU-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], s[0:1] offset:16
; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: s_endpgm
ptr addrspace(1) %out, i32 %in, i32 %old) {
entry:
@@ -3781,8 +3781,8 @@ define amdgpu_kernel void @global_system_seq_cst_seq_cst_cmpxchg(
; GFX10-WGP-NEXT: v_mov_b32_e32 v1, s3
; GFX10-WGP-NEXT: global_atomic_cmpswap v2, v[0:1], s[0:1] offset:16
; GFX10-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: s_endpgm
;
; GFX10-CU-LABEL: global_system_seq_cst_seq_cst_cmpxchg:
@@ -3794,8 +3794,8 @@ define amdgpu_kernel void @global_system_seq_cst_seq_cst_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s3
; GFX10-CU-NEXT: global_atomic_cmpswap v2, v[0:1], s[0:1] offset:16
; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: s_endpgm
;
; SKIP-CACHE-INV-LABEL: global_system_seq_cst_seq_cst_cmpxchg:
@@ -3870,8 +3870,8 @@ define amdgpu_kernel void @global_system_seq_cst_seq_cst_cmpxchg(
; GFX11-WGP-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
; GFX11-WGP-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], s[0:1] offset:16
; GFX11-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: s_endpgm
;
; GFX11-CU-LABEL: global_system_seq_cst_seq_cst_cmpxchg:
@@ -3882,8 +3882,8 @@ define amdgpu_kernel void @global_system_seq_cst_seq_cst_cmpxchg(
; GFX11-CU-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
; GFX11-CU-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], s[0:1] offset:16
; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: s_endpgm
ptr addrspace(1) %out, i32 %in, i32 %old) {
entry:
@@ -4086,8 +4086,8 @@ define amdgpu_kernel void @global_system_acquire_monotonic_ret_cmpxchg(
; GFX10-WGP-NEXT: v_mov_b32_e32 v1, s3
; GFX10-WGP-NEXT: global_atomic_cmpswap v0, v2, v[0:1], s[0:1] offset:16 glc
; GFX10-WGP-NEXT: s_waitcnt vmcnt(0)
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: global_store_dword v2, v0, s[0:1]
; GFX10-WGP-NEXT: s_endpgm
;
@@ -4100,8 +4100,8 @@ define amdgpu_kernel void @global_system_acquire_monotonic_ret_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s3
; GFX10-CU-NEXT: global_atomic_cmpswap v0, v2, v[0:1], s[0:1] offset:16 glc
; GFX10-CU-NEXT: s_waitcnt vmcnt(0)
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: global_store_dword v2, v0, s[0:1]
; GFX10-CU-NEXT: s_endpgm
;
@@ -4178,8 +4178,8 @@ define amdgpu_kernel void @global_system_acquire_monotonic_ret_cmpxchg(
; GFX11-WGP-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
; GFX11-WGP-NEXT: global_atomic_cmpswap_b32 v0, v2, v[0:1], s[0:1] offset:16 glc
; GFX11-WGP-NEXT: s_waitcnt vmcnt(0)
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: global_store_b32 v2, v0, s[0:1]
; GFX11-WGP-NEXT: s_nop 0
; GFX11-WGP-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -4193,8 +4193,8 @@ define amdgpu_kernel void @global_system_acquire_monotonic_ret_cmpxchg(
; GFX11-CU-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
; GFX11-CU-NEXT: global_atomic_cmpswap_b32 v0, v2, v[0:1], s[0:1] offset:16 glc
; GFX11-CU-NEXT: s_waitcnt vmcnt(0)
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: global_store_b32 v2, v0, s[0:1]
; GFX11-CU-NEXT: s_nop 0
; GFX11-CU-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -4252,8 +4252,8 @@ define amdgpu_kernel void @global_system_acq_rel_monotonic_ret_cmpxchg(
; GFX10-WGP-NEXT: v_mov_b32_e32 v1, s3
; GFX10-WGP-NEXT: global_atomic_cmpswap v0, v2, v[0:1], s[0:1] offset:16 glc
; GFX10-WGP-NEXT: s_waitcnt vmcnt(0)
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: global_store_dword v2, v0, s[0:1]
; GFX10-WGP-NEXT: s_endpgm
;
@@ -4266,8 +4266,8 @@ define amdgpu_kernel void @global_system_acq_rel_monotonic_ret_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s3
; GFX10-CU-NEXT: global_atomic_cmpswap v0, v2, v[0:1], s[0:1] offset:16 glc
; GFX10-CU-NEXT: s_waitcnt vmcnt(0)
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: global_store_dword v2, v0, s[0:1]
; GFX10-CU-NEXT: s_endpgm
;
@@ -4348,8 +4348,8 @@ define amdgpu_kernel void @global_system_acq_rel_monotonic_ret_cmpxchg(
; GFX11-WGP-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
; GFX11-WGP-NEXT: global_atomic_cmpswap_b32 v0, v2, v[0:1], s[0:1] offset:16 glc
; GFX11-WGP-NEXT: s_waitcnt vmcnt(0)
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: global_store_b32 v2, v0, s[0:1]
; GFX11-WGP-NEXT: s_nop 0
; GFX11-WGP-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -4363,8 +4363,8 @@ define amdgpu_kernel void @global_system_acq_rel_monotonic_ret_cmpxchg(
; GFX11-CU-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
; GFX11-CU-NEXT: global_atomic_cmpswap_b32 v0, v2, v[0:1], s[0:1] offset:16 glc
; GFX11-CU-NEXT: s_waitcnt vmcnt(0)
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: global_store_b32 v2, v0, s[0:1]
; GFX11-CU-NEXT: s_nop 0
; GFX11-CU-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -4422,8 +4422,8 @@ define amdgpu_kernel void @global_system_seq_cst_monotonic_ret_cmpxchg(
; GFX10-WGP-NEXT: v_mov_b32_e32 v1, s3
; GFX10-WGP-NEXT: global_atomic_cmpswap v0, v2, v[0:1], s[0:1] offset:16 glc
; GFX10-WGP-NEXT: s_waitcnt vmcnt(0)
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: global_store_dword v2, v0, s[0:1]
; GFX10-WGP-NEXT: s_endpgm
;
@@ -4436,8 +4436,8 @@ define amdgpu_kernel void @global_system_seq_cst_monotonic_ret_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s3
; GFX10-CU-NEXT: global_atomic_cmpswap v0, v2, v[0:1], s[0:1] offset:16 glc
; GFX10-CU-NEXT: s_waitcnt vmcnt(0)
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: global_store_dword v2, v0, s[0:1]
; GFX10-CU-NEXT: s_endpgm
;
@@ -4518,8 +4518,8 @@ define amdgpu_kernel void @global_system_seq_cst_monotonic_ret_cmpxchg(
; GFX11-WGP-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
; GFX11-WGP-NEXT: global_atomic_cmpswap_b32 v0, v2, v[0:1], s[0:1] offset:16 glc
; GFX11-WGP-NEXT: s_waitcnt vmcnt(0)
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: global_store_b32 v2, v0, s[0:1]
; GFX11-WGP-NEXT: s_nop 0
; GFX11-WGP-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -4533,8 +4533,8 @@ define amdgpu_kernel void @global_system_seq_cst_monotonic_ret_cmpxchg(
; GFX11-CU-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
; GFX11-CU-NEXT: global_atomic_cmpswap_b32 v0, v2, v[0:1], s[0:1] offset:16 glc
; GFX11-CU-NEXT: s_waitcnt vmcnt(0)
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: global_store_b32 v2, v0, s[0:1]
; GFX11-CU-NEXT: s_nop 0
; GFX11-CU-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -4592,8 +4592,8 @@ define amdgpu_kernel void @global_system_monotonic_acquire_ret_cmpxchg(
; GFX10-WGP-NEXT: v_mov_b32_e32 v1, s3
; GFX10-WGP-NEXT: global_atomic_cmpswap v0, v2, v[0:1], s[0:1] offset:16 glc
; GFX10-WGP-NEXT: s_waitcnt vmcnt(0)
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: global_store_dword v2, v0, s[0:1]
; GFX10-WGP-NEXT: s_endpgm
;
@@ -4606,8 +4606,8 @@ define amdgpu_kernel void @global_system_monotonic_acquire_ret_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s3
; GFX10-CU-NEXT: global_atomic_cmpswap v0, v2, v[0:1], s[0:1] offset:16 glc
; GFX10-CU-NEXT: s_waitcnt vmcnt(0)
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: global_store_dword v2, v0, s[0:1]
; GFX10-CU-NEXT: s_endpgm
;
@@ -4684,8 +4684,8 @@ define amdgpu_kernel void @global_system_monotonic_acquire_ret_cmpxchg(
; GFX11-WGP-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
; GFX11-WGP-NEXT: global_atomic_cmpswap_b32 v0, v2, v[0:1], s[0:1] offset:16 glc
; GFX11-WGP-NEXT: s_waitcnt vmcnt(0)
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: global_store_b32 v2, v0, s[0:1]
; GFX11-WGP-NEXT: s_nop 0
; GFX11-WGP-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -4699,8 +4699,8 @@ define amdgpu_kernel void @global_system_monotonic_acquire_ret_cmpxchg(
; GFX11-CU-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
; GFX11-CU-NEXT: global_atomic_cmpswap_b32 v0, v2, v[0:1], s[0:1] offset:16 glc
; GFX11-CU-NEXT: s_waitcnt vmcnt(0)
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: global_store_b32 v2, v0, s[0:1]
; GFX11-CU-NEXT: s_nop 0
; GFX11-CU-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -4758,8 +4758,8 @@ define amdgpu_kernel void @global_system_acquire_acquire_ret_cmpxchg(
; GFX10-WGP-NEXT: v_mov_b32_e32 v1, s3
; GFX10-WGP-NEXT: global_atomic_cmpswap v0, v2, v[0:1], s[0:1] offset:16 glc
; GFX10-WGP-NEXT: s_waitcnt vmcnt(0)
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: global_store_dword v2, v0, s[0:1]
; GFX10-WGP-NEXT: s_endpgm
;
@@ -4772,8 +4772,8 @@ define amdgpu_kernel void @global_system_acquire_acquire_ret_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s3
; GFX10-CU-NEXT: global_atomic_cmpswap v0, v2, v[0:1], s[0:1] offset:16 glc
; GFX10-CU-NEXT: s_waitcnt vmcnt(0)
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: global_store_dword v2, v0, s[0:1]
; GFX10-CU-NEXT: s_endpgm
;
@@ -4850,8 +4850,8 @@ define amdgpu_kernel void @global_system_acquire_acquire_ret_cmpxchg(
; GFX11-WGP-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
; GFX11-WGP-NEXT: global_atomic_cmpswap_b32 v0, v2, v[0:1], s[0:1] offset:16 glc
; GFX11-WGP-NEXT: s_waitcnt vmcnt(0)
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: global_store_b32 v2, v0, s[0:1]
; GFX11-WGP-NEXT: s_nop 0
; GFX11-WGP-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -4865,8 +4865,8 @@ define amdgpu_kernel void @global_system_acquire_acquire_ret_cmpxchg(
; GFX11-CU-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
; GFX11-CU-NEXT: global_atomic_cmpswap_b32 v0, v2, v[0:1], s[0:1] offset:16 glc
; GFX11-CU-NEXT: s_waitcnt vmcnt(0)
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: global_store_b32 v2, v0, s[0:1]
; GFX11-CU-NEXT: s_nop 0
; GFX11-CU-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -4924,8 +4924,8 @@ define amdgpu_kernel void @global_system_release_acquire_ret_cmpxchg(
; GFX10-WGP-NEXT: v_mov_b32_e32 v1, s3
; GFX10-WGP-NEXT: global_atomic_cmpswap v0, v2, v[0:1], s[0:1] offset:16 glc
; GFX10-WGP-NEXT: s_waitcnt vmcnt(0)
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: global_store_dword v2, v0, s[0:1]
; GFX10-WGP-NEXT: s_endpgm
;
@@ -4938,8 +4938,8 @@ define amdgpu_kernel void @global_system_release_acquire_ret_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s3
; GFX10-CU-NEXT: global_atomic_cmpswap v0, v2, v[0:1], s[0:1] offset:16 glc
; GFX10-CU-NEXT: s_waitcnt vmcnt(0)
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: global_store_dword v2, v0, s[0:1]
; GFX10-CU-NEXT: s_endpgm
;
@@ -5020,8 +5020,8 @@ define amdgpu_kernel void @global_system_release_acquire_ret_cmpxchg(
; GFX11-WGP-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
; GFX11-WGP-NEXT: global_atomic_cmpswap_b32 v0, v2, v[0:1], s[0:1] offset:16 glc
; GFX11-WGP-NEXT: s_waitcnt vmcnt(0)
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: global_store_b32 v2, v0, s[0:1]
; GFX11-WGP-NEXT: s_nop 0
; GFX11-WGP-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -5035,8 +5035,8 @@ define amdgpu_kernel void @global_system_release_acquire_ret_cmpxchg(
; GFX11-CU-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
; GFX11-CU-NEXT: global_atomic_cmpswap_b32 v0, v2, v[0:1], s[0:1] offset:16 glc
; GFX11-CU-NEXT: s_waitcnt vmcnt(0)
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: global_store_b32 v2, v0, s[0:1]
; GFX11-CU-NEXT: s_nop 0
; GFX11-CU-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -5094,8 +5094,8 @@ define amdgpu_kernel void @global_system_acq_rel_acquire_ret_cmpxchg(
; GFX10-WGP-NEXT: v_mov_b32_e32 v1, s3
; GFX10-WGP-NEXT: global_atomic_cmpswap v0, v2, v[0:1], s[0:1] offset:16 glc
; GFX10-WGP-NEXT: s_waitcnt vmcnt(0)
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: global_store_dword v2, v0, s[0:1]
; GFX10-WGP-NEXT: s_endpgm
;
@@ -5108,8 +5108,8 @@ define amdgpu_kernel void @global_system_acq_rel_acquire_ret_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s3
; GFX10-CU-NEXT: global_atomic_cmpswap v0, v2, v[0:1], s[0:1] offset:16 glc
; GFX10-CU-NEXT: s_waitcnt vmcnt(0)
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: global_store_dword v2, v0, s[0:1]
; GFX10-CU-NEXT: s_endpgm
;
@@ -5190,8 +5190,8 @@ define amdgpu_kernel void @global_system_acq_rel_acquire_ret_cmpxchg(
; GFX11-WGP-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
; GFX11-WGP-NEXT: global_atomic_cmpswap_b32 v0, v2, v[0:1], s[0:1] offset:16 glc
; GFX11-WGP-NEXT: s_waitcnt vmcnt(0)
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: global_store_b32 v2, v0, s[0:1]
; GFX11-WGP-NEXT: s_nop 0
; GFX11-WGP-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -5205,8 +5205,8 @@ define amdgpu_kernel void @global_system_acq_rel_acquire_ret_cmpxchg(
; GFX11-CU-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
; GFX11-CU-NEXT: global_atomic_cmpswap_b32 v0, v2, v[0:1], s[0:1] offset:16 glc
; GFX11-CU-NEXT: s_waitcnt vmcnt(0)
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: global_store_b32 v2, v0, s[0:1]
; GFX11-CU-NEXT: s_nop 0
; GFX11-CU-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -5264,8 +5264,8 @@ define amdgpu_kernel void @global_system_seq_cst_acquire_ret_cmpxchg(
; GFX10-WGP-NEXT: v_mov_b32_e32 v1, s3
; GFX10-WGP-NEXT: global_atomic_cmpswap v0, v2, v[0:1], s[0:1] offset:16 glc
; GFX10-WGP-NEXT: s_waitcnt vmcnt(0)
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: global_store_dword v2, v0, s[0:1]
; GFX10-WGP-NEXT: s_endpgm
;
@@ -5278,8 +5278,8 @@ define amdgpu_kernel void @global_system_seq_cst_acquire_ret_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s3
; GFX10-CU-NEXT: global_atomic_cmpswap v0, v2, v[0:1], s[0:1] offset:16 glc
; GFX10-CU-NEXT: s_waitcnt vmcnt(0)
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: global_store_dword v2, v0, s[0:1]
; GFX10-CU-NEXT: s_endpgm
;
@@ -5360,8 +5360,8 @@ define amdgpu_kernel void @global_system_seq_cst_acquire_ret_cmpxchg(
; GFX11-WGP-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
; GFX11-WGP-NEXT: global_atomic_cmpswap_b32 v0, v2, v[0:1], s[0:1] offset:16 glc
; GFX11-WGP-NEXT: s_waitcnt vmcnt(0)
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: global_store_b32 v2, v0, s[0:1]
; GFX11-WGP-NEXT: s_nop 0
; GFX11-WGP-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -5375,8 +5375,8 @@ define amdgpu_kernel void @global_system_seq_cst_acquire_ret_cmpxchg(
; GFX11-CU-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
; GFX11-CU-NEXT: global_atomic_cmpswap_b32 v0, v2, v[0:1], s[0:1] offset:16 glc
; GFX11-CU-NEXT: s_waitcnt vmcnt(0)
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: global_store_b32 v2, v0, s[0:1]
; GFX11-CU-NEXT: s_nop 0
; GFX11-CU-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -5434,8 +5434,8 @@ define amdgpu_kernel void @global_system_monotonic_seq_cst_ret_cmpxchg(
; GFX10-WGP-NEXT: v_mov_b32_e32 v1, s3
; GFX10-WGP-NEXT: global_atomic_cmpswap v0, v2, v[0:1], s[0:1] offset:16 glc
; GFX10-WGP-NEXT: s_waitcnt vmcnt(0)
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: global_store_dword v2, v0, s[0:1]
; GFX10-WGP-NEXT: s_endpgm
;
@@ -5448,8 +5448,8 @@ define amdgpu_kernel void @global_system_monotonic_seq_cst_ret_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s3
; GFX10-CU-NEXT: global_atomic_cmpswap v0, v2, v[0:1], s[0:1] offset:16 glc
; GFX10-CU-NEXT: s_waitcnt vmcnt(0)
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: global_store_dword v2, v0, s[0:1]
; GFX10-CU-NEXT: s_endpgm
;
@@ -5530,8 +5530,8 @@ define amdgpu_kernel void @global_system_monotonic_seq_cst_ret_cmpxchg(
; GFX11-WGP-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
; GFX11-WGP-NEXT: global_atomic_cmpswap_b32 v0, v2, v[0:1], s[0:1] offset:16 glc
; GFX11-WGP-NEXT: s_waitcnt vmcnt(0)
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: global_store_b32 v2, v0, s[0:1]
; GFX11-WGP-NEXT: s_nop 0
; GFX11-WGP-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -5545,8 +5545,8 @@ define amdgpu_kernel void @global_system_monotonic_seq_cst_ret_cmpxchg(
; GFX11-CU-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
; GFX11-CU-NEXT: global_atomic_cmpswap_b32 v0, v2, v[0:1], s[0:1] offset:16 glc
; GFX11-CU-NEXT: s_waitcnt vmcnt(0)
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: global_store_b32 v2, v0, s[0:1]
; GFX11-CU-NEXT: s_nop 0
; GFX11-CU-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -5604,8 +5604,8 @@ define amdgpu_kernel void @global_system_acquire_seq_cst_ret_cmpxchg(
; GFX10-WGP-NEXT: v_mov_b32_e32 v1, s3
; GFX10-WGP-NEXT: global_atomic_cmpswap v0, v2, v[0:1], s[0:1] offset:16 glc
; GFX10-WGP-NEXT: s_waitcnt vmcnt(0)
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: global_store_dword v2, v0, s[0:1]
; GFX10-WGP-NEXT: s_endpgm
;
@@ -5618,8 +5618,8 @@ define amdgpu_kernel void @global_system_acquire_seq_cst_ret_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s3
; GFX10-CU-NEXT: global_atomic_cmpswap v0, v2, v[0:1], s[0:1] offset:16 glc
; GFX10-CU-NEXT: s_waitcnt vmcnt(0)
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: global_store_dword v2, v0, s[0:1]
; GFX10-CU-NEXT: s_endpgm
;
@@ -5700,8 +5700,8 @@ define amdgpu_kernel void @global_system_acquire_seq_cst_ret_cmpxchg(
; GFX11-WGP-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
; GFX11-WGP-NEXT: global_atomic_cmpswap_b32 v0, v2, v[0:1], s[0:1] offset:16 glc
; GFX11-WGP-NEXT: s_waitcnt vmcnt(0)
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: global_store_b32 v2, v0, s[0:1]
; GFX11-WGP-NEXT: s_nop 0
; GFX11-WGP-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -5715,8 +5715,8 @@ define amdgpu_kernel void @global_system_acquire_seq_cst_ret_cmpxchg(
; GFX11-CU-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
; GFX11-CU-NEXT: global_atomic_cmpswap_b32 v0, v2, v[0:1], s[0:1] offset:16 glc
; GFX11-CU-NEXT: s_waitcnt vmcnt(0)
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: global_store_b32 v2, v0, s[0:1]
; GFX11-CU-NEXT: s_nop 0
; GFX11-CU-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -5774,8 +5774,8 @@ define amdgpu_kernel void @global_system_relese_seq_cst_ret_cmpxchg(
; GFX10-WGP-NEXT: v_mov_b32_e32 v1, s3
; GFX10-WGP-NEXT: global_atomic_cmpswap v0, v2, v[0:1], s[0:1] offset:16 glc
; GFX10-WGP-NEXT: s_waitcnt vmcnt(0)
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: global_store_dword v2, v0, s[0:1]
; GFX10-WGP-NEXT: s_endpgm
;
@@ -5788,8 +5788,8 @@ define amdgpu_kernel void @global_system_relese_seq_cst_ret_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s3
; GFX10-CU-NEXT: global_atomic_cmpswap v0, v2, v[0:1], s[0:1] offset:16 glc
; GFX10-CU-NEXT: s_waitcnt vmcnt(0)
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: global_store_dword v2, v0, s[0:1]
; GFX10-CU-NEXT: s_endpgm
;
@@ -5870,8 +5870,8 @@ define amdgpu_kernel void @global_system_relese_seq_cst_ret_cmpxchg(
; GFX11-WGP-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
; GFX11-WGP-NEXT: global_atomic_cmpswap_b32 v0, v2, v[0:1], s[0:1] offset:16 glc
; GFX11-WGP-NEXT: s_waitcnt vmcnt(0)
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: global_store_b32 v2, v0, s[0:1]
; GFX11-WGP-NEXT: s_nop 0
; GFX11-WGP-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -5885,8 +5885,8 @@ define amdgpu_kernel void @global_system_relese_seq_cst_ret_cmpxchg(
; GFX11-CU-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
; GFX11-CU-NEXT: global_atomic_cmpswap_b32 v0, v2, v[0:1], s[0:1] offset:16 glc
; GFX11-CU-NEXT: s_waitcnt vmcnt(0)
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: global_store_b32 v2, v0, s[0:1]
; GFX11-CU-NEXT: s_nop 0
; GFX11-CU-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -5944,8 +5944,8 @@ define amdgpu_kernel void @global_system_acq_rel_seq_cst_ret_cmpxchg(
; GFX10-WGP-NEXT: v_mov_b32_e32 v1, s3
; GFX10-WGP-NEXT: global_atomic_cmpswap v0, v2, v[0:1], s[0:1] offset:16 glc
; GFX10-WGP-NEXT: s_waitcnt vmcnt(0)
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: global_store_dword v2, v0, s[0:1]
; GFX10-WGP-NEXT: s_endpgm
;
@@ -5958,8 +5958,8 @@ define amdgpu_kernel void @global_system_acq_rel_seq_cst_ret_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s3
; GFX10-CU-NEXT: global_atomic_cmpswap v0, v2, v[0:1], s[0:1] offset:16 glc
; GFX10-CU-NEXT: s_waitcnt vmcnt(0)
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: global_store_dword v2, v0, s[0:1]
; GFX10-CU-NEXT: s_endpgm
;
@@ -6040,8 +6040,8 @@ define amdgpu_kernel void @global_system_acq_rel_seq_cst_ret_cmpxchg(
; GFX11-WGP-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
; GFX11-WGP-NEXT: global_atomic_cmpswap_b32 v0, v2, v[0:1], s[0:1] offset:16 glc
; GFX11-WGP-NEXT: s_waitcnt vmcnt(0)
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: global_store_b32 v2, v0, s[0:1]
; GFX11-WGP-NEXT: s_nop 0
; GFX11-WGP-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -6055,8 +6055,8 @@ define amdgpu_kernel void @global_system_acq_rel_seq_cst_ret_cmpxchg(
; GFX11-CU-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
; GFX11-CU-NEXT: global_atomic_cmpswap_b32 v0, v2, v[0:1], s[0:1] offset:16 glc
; GFX11-CU-NEXT: s_waitcnt vmcnt(0)
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: global_store_b32 v2, v0, s[0:1]
; GFX11-CU-NEXT: s_nop 0
; GFX11-CU-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -6114,8 +6114,8 @@ define amdgpu_kernel void @global_system_seq_cst_seq_cst_ret_cmpxchg(
; GFX10-WGP-NEXT: v_mov_b32_e32 v1, s3
; GFX10-WGP-NEXT: global_atomic_cmpswap v0, v2, v[0:1], s[0:1] offset:16 glc
; GFX10-WGP-NEXT: s_waitcnt vmcnt(0)
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: global_store_dword v2, v0, s[0:1]
; GFX10-WGP-NEXT: s_endpgm
;
@@ -6128,8 +6128,8 @@ define amdgpu_kernel void @global_system_seq_cst_seq_cst_ret_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s3
; GFX10-CU-NEXT: global_atomic_cmpswap v0, v2, v[0:1], s[0:1] offset:16 glc
; GFX10-CU-NEXT: s_waitcnt vmcnt(0)
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: global_store_dword v2, v0, s[0:1]
; GFX10-CU-NEXT: s_endpgm
;
@@ -6210,8 +6210,8 @@ define amdgpu_kernel void @global_system_seq_cst_seq_cst_ret_cmpxchg(
; GFX11-WGP-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
; GFX11-WGP-NEXT: global_atomic_cmpswap_b32 v0, v2, v[0:1], s[0:1] offset:16 glc
; GFX11-WGP-NEXT: s_waitcnt vmcnt(0)
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: global_store_b32 v2, v0, s[0:1]
; GFX11-WGP-NEXT: s_nop 0
; GFX11-WGP-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -6225,8 +6225,8 @@ define amdgpu_kernel void @global_system_seq_cst_seq_cst_ret_cmpxchg(
; GFX11-CU-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
; GFX11-CU-NEXT: global_atomic_cmpswap_b32 v0, v2, v[0:1], s[0:1] offset:16 glc
; GFX11-CU-NEXT: s_waitcnt vmcnt(0)
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: global_store_b32 v2, v0, s[0:1]
; GFX11-CU-NEXT: s_nop 0
; GFX11-CU-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -6546,8 +6546,8 @@ define amdgpu_kernel void @global_system_one_as_acquire_load(
; GFX10-WGP-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-WGP-NEXT: global_load_dword v1, v0, s[0:1] glc dlc
; GFX10-WGP-NEXT: s_waitcnt vmcnt(0)
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: global_store_dword v0, v1, s[2:3]
; GFX10-WGP-NEXT: s_endpgm
;
@@ -6558,8 +6558,8 @@ define amdgpu_kernel void @global_system_one_as_acquire_load(
; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-CU-NEXT: global_load_dword v1, v0, s[0:1] glc dlc
; GFX10-CU-NEXT: s_waitcnt vmcnt(0)
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: global_store_dword v0, v1, s[2:3]
; GFX10-CU-NEXT: s_endpgm
;
@@ -6631,8 +6631,8 @@ define amdgpu_kernel void @global_system_one_as_acquire_load(
; GFX11-WGP-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-WGP-NEXT: global_load_b32 v1, v0, s[0:1] glc
; GFX11-WGP-NEXT: s_waitcnt vmcnt(0)
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: global_store_b32 v0, v1, s[2:3]
; GFX11-WGP-NEXT: s_nop 0
; GFX11-WGP-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -6645,8 +6645,8 @@ define amdgpu_kernel void @global_system_one_as_acquire_load(
; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-CU-NEXT: global_load_b32 v1, v0, s[0:1] glc
; GFX11-CU-NEXT: s_waitcnt vmcnt(0)
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: global_store_b32 v0, v1, s[2:3]
; GFX11-CU-NEXT: s_nop 0
; GFX11-CU-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -6696,8 +6696,8 @@ define amdgpu_kernel void @global_system_one_as_seq_cst_load(
; GFX10-WGP-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-WGP-NEXT: global_load_dword v1, v0, s[0:1] glc dlc
; GFX10-WGP-NEXT: s_waitcnt vmcnt(0)
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: global_store_dword v0, v1, s[2:3]
; GFX10-WGP-NEXT: s_endpgm
;
@@ -6708,8 +6708,8 @@ define amdgpu_kernel void @global_system_one_as_seq_cst_load(
; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-CU-NEXT: global_load_dword v1, v0, s[0:1] glc dlc
; GFX10-CU-NEXT: s_waitcnt vmcnt(0)
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: global_store_dword v0, v1, s[2:3]
; GFX10-CU-NEXT: s_endpgm
;
@@ -6781,8 +6781,8 @@ define amdgpu_kernel void @global_system_one_as_seq_cst_load(
; GFX11-WGP-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-WGP-NEXT: global_load_b32 v1, v0, s[0:1] glc
; GFX11-WGP-NEXT: s_waitcnt vmcnt(0)
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: global_store_b32 v0, v1, s[2:3]
; GFX11-WGP-NEXT: s_nop 0
; GFX11-WGP-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -6795,8 +6795,8 @@ define amdgpu_kernel void @global_system_one_as_seq_cst_load(
; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-CU-NEXT: global_load_b32 v1, v0, s[0:1] glc
; GFX11-CU-NEXT: s_waitcnt vmcnt(0)
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: global_store_b32 v0, v1, s[2:3]
; GFX11-CU-NEXT: s_nop 0
; GFX11-CU-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -7478,8 +7478,8 @@ define amdgpu_kernel void @global_system_one_as_acquire_atomicrmw(
; GFX10-WGP-NEXT: v_mov_b32_e32 v1, s2
; GFX10-WGP-NEXT: global_atomic_swap v0, v1, s[0:1]
; GFX10-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: s_endpgm
;
; GFX10-CU-LABEL: global_system_one_as_acquire_atomicrmw:
@@ -7492,8 +7492,8 @@ define amdgpu_kernel void @global_system_one_as_acquire_atomicrmw(
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s2
; GFX10-CU-NEXT: global_atomic_swap v0, v1, s[0:1]
; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: s_endpgm
;
; SKIP-CACHE-INV-LABEL: global_system_one_as_acquire_atomicrmw:
@@ -7567,8 +7567,8 @@ define amdgpu_kernel void @global_system_one_as_acquire_atomicrmw(
; GFX11-WGP-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, s2
; GFX11-WGP-NEXT: global_atomic_swap_b32 v0, v1, s[0:1]
; GFX11-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: s_endpgm
;
; GFX11-CU-LABEL: global_system_one_as_acquire_atomicrmw:
@@ -7580,8 +7580,8 @@ define amdgpu_kernel void @global_system_one_as_acquire_atomicrmw(
; GFX11-CU-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, s2
; GFX11-CU-NEXT: global_atomic_swap_b32 v0, v1, s[0:1]
; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: s_endpgm
ptr addrspace(1) %out, i32 %in) {
entry:
@@ -7755,8 +7755,8 @@ define amdgpu_kernel void @global_system_one_as_acq_rel_atomicrmw(
; GFX10-WGP-NEXT: v_mov_b32_e32 v1, s2
; GFX10-WGP-NEXT: global_atomic_swap v0, v1, s[0:1]
; GFX10-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: s_endpgm
;
; GFX10-CU-LABEL: global_system_one_as_acq_rel_atomicrmw:
@@ -7769,8 +7769,8 @@ define amdgpu_kernel void @global_system_one_as_acq_rel_atomicrmw(
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s2
; GFX10-CU-NEXT: global_atomic_swap v0, v1, s[0:1]
; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: s_endpgm
;
; SKIP-CACHE-INV-LABEL: global_system_one_as_acq_rel_atomicrmw:
@@ -7848,8 +7848,8 @@ define amdgpu_kernel void @global_system_one_as_acq_rel_atomicrmw(
; GFX11-WGP-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, s2
; GFX11-WGP-NEXT: global_atomic_swap_b32 v0, v1, s[0:1]
; GFX11-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: s_endpgm
;
; GFX11-CU-LABEL: global_system_one_as_acq_rel_atomicrmw:
@@ -7861,8 +7861,8 @@ define amdgpu_kernel void @global_system_one_as_acq_rel_atomicrmw(
; GFX11-CU-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, s2
; GFX11-CU-NEXT: global_atomic_swap_b32 v0, v1, s[0:1]
; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: s_endpgm
ptr addrspace(1) %out, i32 %in) {
entry:
@@ -7907,8 +7907,8 @@ define amdgpu_kernel void @global_system_one_as_seq_cst_atomicrmw(
; GFX10-WGP-NEXT: v_mov_b32_e32 v1, s2
; GFX10-WGP-NEXT: global_atomic_swap v0, v1, s[0:1]
; GFX10-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: s_endpgm
;
; GFX10-CU-LABEL: global_system_one_as_seq_cst_atomicrmw:
@@ -7921,8 +7921,8 @@ define amdgpu_kernel void @global_system_one_as_seq_cst_atomicrmw(
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s2
; GFX10-CU-NEXT: global_atomic_swap v0, v1, s[0:1]
; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: s_endpgm
;
; SKIP-CACHE-INV-LABEL: global_system_one_as_seq_cst_atomicrmw:
@@ -8000,8 +8000,8 @@ define amdgpu_kernel void @global_system_one_as_seq_cst_atomicrmw(
; GFX11-WGP-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, s2
; GFX11-WGP-NEXT: global_atomic_swap_b32 v0, v1, s[0:1]
; GFX11-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: s_endpgm
;
; GFX11-CU-LABEL: global_system_one_as_seq_cst_atomicrmw:
@@ -8013,8 +8013,8 @@ define amdgpu_kernel void @global_system_one_as_seq_cst_atomicrmw(
; GFX11-CU-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, s2
; GFX11-CU-NEXT: global_atomic_swap_b32 v0, v1, s[0:1]
; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: s_endpgm
ptr addrspace(1) %out, i32 %in) {
entry:
@@ -8061,8 +8061,8 @@ define amdgpu_kernel void @global_system_one_as_acquire_ret_atomicrmw(
; GFX10-WGP-NEXT: v_mov_b32_e32 v1, s2
; GFX10-WGP-NEXT: global_atomic_swap v1, v0, v1, s[0:1] glc
; GFX10-WGP-NEXT: s_waitcnt vmcnt(0)
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: global_store_dword v0, v1, s[0:1]
; GFX10-WGP-NEXT: s_endpgm
;
@@ -8076,8 +8076,8 @@ define amdgpu_kernel void @global_system_one_as_acquire_ret_atomicrmw(
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s2
; GFX10-CU-NEXT: global_atomic_swap v1, v0, v1, s[0:1] glc
; GFX10-CU-NEXT: s_waitcnt vmcnt(0)
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: global_store_dword v0, v1, s[0:1]
; GFX10-CU-NEXT: s_endpgm
;
@@ -8157,8 +8157,8 @@ define amdgpu_kernel void @global_system_one_as_acquire_ret_atomicrmw(
; GFX11-WGP-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, s2
; GFX11-WGP-NEXT: global_atomic_swap_b32 v1, v0, v1, s[0:1] glc
; GFX11-WGP-NEXT: s_waitcnt vmcnt(0)
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: global_store_b32 v0, v1, s[0:1]
; GFX11-WGP-NEXT: s_nop 0
; GFX11-WGP-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -8173,8 +8173,8 @@ define amdgpu_kernel void @global_system_one_as_acquire_ret_atomicrmw(
; GFX11-CU-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, s2
; GFX11-CU-NEXT: global_atomic_swap_b32 v1, v0, v1, s[0:1] glc
; GFX11-CU-NEXT: s_waitcnt vmcnt(0)
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: global_store_b32 v0, v1, s[0:1]
; GFX11-CU-NEXT: s_nop 0
; GFX11-CU-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -8225,8 +8225,8 @@ define amdgpu_kernel void @global_system_one_as_acq_rel_ret_atomicrmw(
; GFX10-WGP-NEXT: v_mov_b32_e32 v1, s2
; GFX10-WGP-NEXT: global_atomic_swap v1, v0, v1, s[0:1] glc
; GFX10-WGP-NEXT: s_waitcnt vmcnt(0)
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: global_store_dword v0, v1, s[0:1]
; GFX10-WGP-NEXT: s_endpgm
;
@@ -8240,8 +8240,8 @@ define amdgpu_kernel void @global_system_one_as_acq_rel_ret_atomicrmw(
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s2
; GFX10-CU-NEXT: global_atomic_swap v1, v0, v1, s[0:1] glc
; GFX10-CU-NEXT: s_waitcnt vmcnt(0)
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: global_store_dword v0, v1, s[0:1]
; GFX10-CU-NEXT: s_endpgm
;
@@ -8325,8 +8325,8 @@ define amdgpu_kernel void @global_system_one_as_acq_rel_ret_atomicrmw(
; GFX11-WGP-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, s2
; GFX11-WGP-NEXT: global_atomic_swap_b32 v1, v0, v1, s[0:1] glc
; GFX11-WGP-NEXT: s_waitcnt vmcnt(0)
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: global_store_b32 v0, v1, s[0:1]
; GFX11-WGP-NEXT: s_nop 0
; GFX11-WGP-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -8341,8 +8341,8 @@ define amdgpu_kernel void @global_system_one_as_acq_rel_ret_atomicrmw(
; GFX11-CU-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, s2
; GFX11-CU-NEXT: global_atomic_swap_b32 v1, v0, v1, s[0:1] glc
; GFX11-CU-NEXT: s_waitcnt vmcnt(0)
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: global_store_b32 v0, v1, s[0:1]
; GFX11-CU-NEXT: s_nop 0
; GFX11-CU-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -8393,8 +8393,8 @@ define amdgpu_kernel void @global_system_one_as_seq_cst_ret_atomicrmw(
; GFX10-WGP-NEXT: v_mov_b32_e32 v1, s2
; GFX10-WGP-NEXT: global_atomic_swap v1, v0, v1, s[0:1] glc
; GFX10-WGP-NEXT: s_waitcnt vmcnt(0)
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: global_store_dword v0, v1, s[0:1]
; GFX10-WGP-NEXT: s_endpgm
;
@@ -8408,8 +8408,8 @@ define amdgpu_kernel void @global_system_one_as_seq_cst_ret_atomicrmw(
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s2
; GFX10-CU-NEXT: global_atomic_swap v1, v0, v1, s[0:1] glc
; GFX10-CU-NEXT: s_waitcnt vmcnt(0)
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: global_store_dword v0, v1, s[0:1]
; GFX10-CU-NEXT: s_endpgm
;
@@ -8493,8 +8493,8 @@ define amdgpu_kernel void @global_system_one_as_seq_cst_ret_atomicrmw(
; GFX11-WGP-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, s2
; GFX11-WGP-NEXT: global_atomic_swap_b32 v1, v0, v1, s[0:1] glc
; GFX11-WGP-NEXT: s_waitcnt vmcnt(0)
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: global_store_b32 v0, v1, s[0:1]
; GFX11-WGP-NEXT: s_nop 0
; GFX11-WGP-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -8509,8 +8509,8 @@ define amdgpu_kernel void @global_system_one_as_seq_cst_ret_atomicrmw(
; GFX11-CU-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, s2
; GFX11-CU-NEXT: global_atomic_swap_b32 v1, v0, v1, s[0:1] glc
; GFX11-CU-NEXT: s_waitcnt vmcnt(0)
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: global_store_b32 v0, v1, s[0:1]
; GFX11-CU-NEXT: s_nop 0
; GFX11-CU-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -8686,8 +8686,8 @@ define amdgpu_kernel void @global_system_one_as_acquire_monotonic_cmpxchg(
; GFX10-WGP-NEXT: v_mov_b32_e32 v1, s3
; GFX10-WGP-NEXT: global_atomic_cmpswap v2, v[0:1], s[0:1] offset:16
; GFX10-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: s_endpgm
;
; GFX10-CU-LABEL: global_system_one_as_acquire_monotonic_cmpxchg:
@@ -8699,8 +8699,8 @@ define amdgpu_kernel void @global_system_one_as_acquire_monotonic_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s3
; GFX10-CU-NEXT: global_atomic_cmpswap v2, v[0:1], s[0:1] offset:16
; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: s_endpgm
;
; SKIP-CACHE-INV-LABEL: global_system_one_as_acquire_monotonic_cmpxchg:
@@ -8771,8 +8771,8 @@ define amdgpu_kernel void @global_system_one_as_acquire_monotonic_cmpxchg(
; GFX11-WGP-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
; GFX11-WGP-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], s[0:1] offset:16
; GFX11-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: s_endpgm
;
; GFX11-CU-LABEL: global_system_one_as_acquire_monotonic_cmpxchg:
@@ -8783,8 +8783,8 @@ define amdgpu_kernel void @global_system_one_as_acquire_monotonic_cmpxchg(
; GFX11-CU-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
; GFX11-CU-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], s[0:1] offset:16
; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: s_endpgm
ptr addrspace(1) %out, i32 %in, i32 %old) {
entry:
@@ -8961,8 +8961,8 @@ define amdgpu_kernel void @global_system_one_as_acq_rel_monotonic_cmpxchg(
; GFX10-WGP-NEXT: v_mov_b32_e32 v1, s3
; GFX10-WGP-NEXT: global_atomic_cmpswap v2, v[0:1], s[0:1] offset:16
; GFX10-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: s_endpgm
;
; GFX10-CU-LABEL: global_system_one_as_acq_rel_monotonic_cmpxchg:
@@ -8974,8 +8974,8 @@ define amdgpu_kernel void @global_system_one_as_acq_rel_monotonic_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s3
; GFX10-CU-NEXT: global_atomic_cmpswap v2, v[0:1], s[0:1] offset:16
; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: s_endpgm
;
; SKIP-CACHE-INV-LABEL: global_system_one_as_acq_rel_monotonic_cmpxchg:
@@ -9050,8 +9050,8 @@ define amdgpu_kernel void @global_system_one_as_acq_rel_monotonic_cmpxchg(
; GFX11-WGP-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
; GFX11-WGP-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], s[0:1] offset:16
; GFX11-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: s_endpgm
;
; GFX11-CU-LABEL: global_system_one_as_acq_rel_monotonic_cmpxchg:
@@ -9062,8 +9062,8 @@ define amdgpu_kernel void @global_system_one_as_acq_rel_monotonic_cmpxchg(
; GFX11-CU-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
; GFX11-CU-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], s[0:1] offset:16
; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: s_endpgm
ptr addrspace(1) %out, i32 %in, i32 %old) {
entry:
@@ -9112,8 +9112,8 @@ define amdgpu_kernel void @global_system_one_as_seq_cst_monotonic_cmpxchg(
; GFX10-WGP-NEXT: v_mov_b32_e32 v1, s3
; GFX10-WGP-NEXT: global_atomic_cmpswap v2, v[0:1], s[0:1] offset:16
; GFX10-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: s_endpgm
;
; GFX10-CU-LABEL: global_system_one_as_seq_cst_monotonic_cmpxchg:
@@ -9125,8 +9125,8 @@ define amdgpu_kernel void @global_system_one_as_seq_cst_monotonic_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s3
; GFX10-CU-NEXT: global_atomic_cmpswap v2, v[0:1], s[0:1] offset:16
; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: s_endpgm
;
; SKIP-CACHE-INV-LABEL: global_system_one_as_seq_cst_monotonic_cmpxchg:
@@ -9201,8 +9201,8 @@ define amdgpu_kernel void @global_system_one_as_seq_cst_monotonic_cmpxchg(
; GFX11-WGP-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
; GFX11-WGP-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], s[0:1] offset:16
; GFX11-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: s_endpgm
;
; GFX11-CU-LABEL: global_system_one_as_seq_cst_monotonic_cmpxchg:
@@ -9213,8 +9213,8 @@ define amdgpu_kernel void @global_system_one_as_seq_cst_monotonic_cmpxchg(
; GFX11-CU-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
; GFX11-CU-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], s[0:1] offset:16
; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: s_endpgm
ptr addrspace(1) %out, i32 %in, i32 %old) {
entry:
@@ -9263,8 +9263,8 @@ define amdgpu_kernel void @global_system_one_as_monotonic_acquire_cmpxchg(
; GFX10-WGP-NEXT: v_mov_b32_e32 v1, s3
; GFX10-WGP-NEXT: global_atomic_cmpswap v2, v[0:1], s[0:1] offset:16
; GFX10-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: s_endpgm
;
; GFX10-CU-LABEL: global_system_one_as_monotonic_acquire_cmpxchg:
@@ -9276,8 +9276,8 @@ define amdgpu_kernel void @global_system_one_as_monotonic_acquire_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s3
; GFX10-CU-NEXT: global_atomic_cmpswap v2, v[0:1], s[0:1] offset:16
; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: s_endpgm
;
; SKIP-CACHE-INV-LABEL: global_system_one_as_monotonic_acquire_cmpxchg:
@@ -9348,8 +9348,8 @@ define amdgpu_kernel void @global_system_one_as_monotonic_acquire_cmpxchg(
; GFX11-WGP-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
; GFX11-WGP-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], s[0:1] offset:16
; GFX11-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: s_endpgm
;
; GFX11-CU-LABEL: global_system_one_as_monotonic_acquire_cmpxchg:
@@ -9360,8 +9360,8 @@ define amdgpu_kernel void @global_system_one_as_monotonic_acquire_cmpxchg(
; GFX11-CU-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
; GFX11-CU-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], s[0:1] offset:16
; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: s_endpgm
ptr addrspace(1) %out, i32 %in, i32 %old) {
entry:
@@ -9410,8 +9410,8 @@ define amdgpu_kernel void @global_system_one_as_acquire_acquire_cmpxchg(
; GFX10-WGP-NEXT: v_mov_b32_e32 v1, s3
; GFX10-WGP-NEXT: global_atomic_cmpswap v2, v[0:1], s[0:1] offset:16
; GFX10-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: s_endpgm
;
; GFX10-CU-LABEL: global_system_one_as_acquire_acquire_cmpxchg:
@@ -9423,8 +9423,8 @@ define amdgpu_kernel void @global_system_one_as_acquire_acquire_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s3
; GFX10-CU-NEXT: global_atomic_cmpswap v2, v[0:1], s[0:1] offset:16
; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: s_endpgm
;
; SKIP-CACHE-INV-LABEL: global_system_one_as_acquire_acquire_cmpxchg:
@@ -9495,8 +9495,8 @@ define amdgpu_kernel void @global_system_one_as_acquire_acquire_cmpxchg(
; GFX11-WGP-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
; GFX11-WGP-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], s[0:1] offset:16
; GFX11-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: s_endpgm
;
; GFX11-CU-LABEL: global_system_one_as_acquire_acquire_cmpxchg:
@@ -9507,8 +9507,8 @@ define amdgpu_kernel void @global_system_one_as_acquire_acquire_cmpxchg(
; GFX11-CU-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
; GFX11-CU-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], s[0:1] offset:16
; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: s_endpgm
ptr addrspace(1) %out, i32 %in, i32 %old) {
entry:
@@ -9557,8 +9557,8 @@ define amdgpu_kernel void @global_system_one_as_release_acquire_cmpxchg(
; GFX10-WGP-NEXT: v_mov_b32_e32 v1, s3
; GFX10-WGP-NEXT: global_atomic_cmpswap v2, v[0:1], s[0:1] offset:16
; GFX10-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: s_endpgm
;
; GFX10-CU-LABEL: global_system_one_as_release_acquire_cmpxchg:
@@ -9570,8 +9570,8 @@ define amdgpu_kernel void @global_system_one_as_release_acquire_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s3
; GFX10-CU-NEXT: global_atomic_cmpswap v2, v[0:1], s[0:1] offset:16
; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: s_endpgm
;
; SKIP-CACHE-INV-LABEL: global_system_one_as_release_acquire_cmpxchg:
@@ -9646,8 +9646,8 @@ define amdgpu_kernel void @global_system_one_as_release_acquire_cmpxchg(
; GFX11-WGP-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
; GFX11-WGP-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], s[0:1] offset:16
; GFX11-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: s_endpgm
;
; GFX11-CU-LABEL: global_system_one_as_release_acquire_cmpxchg:
@@ -9658,8 +9658,8 @@ define amdgpu_kernel void @global_system_one_as_release_acquire_cmpxchg(
; GFX11-CU-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
; GFX11-CU-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], s[0:1] offset:16
; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: s_endpgm
ptr addrspace(1) %out, i32 %in, i32 %old) {
entry:
@@ -9708,8 +9708,8 @@ define amdgpu_kernel void @global_system_one_as_acq_rel_acquire_cmpxchg(
; GFX10-WGP-NEXT: v_mov_b32_e32 v1, s3
; GFX10-WGP-NEXT: global_atomic_cmpswap v2, v[0:1], s[0:1] offset:16
; GFX10-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: s_endpgm
;
; GFX10-CU-LABEL: global_system_one_as_acq_rel_acquire_cmpxchg:
@@ -9721,8 +9721,8 @@ define amdgpu_kernel void @global_system_one_as_acq_rel_acquire_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s3
; GFX10-CU-NEXT: global_atomic_cmpswap v2, v[0:1], s[0:1] offset:16
; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: s_endpgm
;
; SKIP-CACHE-INV-LABEL: global_system_one_as_acq_rel_acquire_cmpxchg:
@@ -9797,8 +9797,8 @@ define amdgpu_kernel void @global_system_one_as_acq_rel_acquire_cmpxchg(
; GFX11-WGP-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
; GFX11-WGP-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], s[0:1] offset:16
; GFX11-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: s_endpgm
;
; GFX11-CU-LABEL: global_system_one_as_acq_rel_acquire_cmpxchg:
@@ -9809,8 +9809,8 @@ define amdgpu_kernel void @global_system_one_as_acq_rel_acquire_cmpxchg(
; GFX11-CU-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
; GFX11-CU-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], s[0:1] offset:16
; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: s_endpgm
ptr addrspace(1) %out, i32 %in, i32 %old) {
entry:
@@ -9859,8 +9859,8 @@ define amdgpu_kernel void @global_system_one_as_seq_cst_acquire_cmpxchg(
; GFX10-WGP-NEXT: v_mov_b32_e32 v1, s3
; GFX10-WGP-NEXT: global_atomic_cmpswap v2, v[0:1], s[0:1] offset:16
; GFX10-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: s_endpgm
;
; GFX10-CU-LABEL: global_system_one_as_seq_cst_acquire_cmpxchg:
@@ -9872,8 +9872,8 @@ define amdgpu_kernel void @global_system_one_as_seq_cst_acquire_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s3
; GFX10-CU-NEXT: global_atomic_cmpswap v2, v[0:1], s[0:1] offset:16
; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: s_endpgm
;
; SKIP-CACHE-INV-LABEL: global_system_one_as_seq_cst_acquire_cmpxchg:
@@ -9948,8 +9948,8 @@ define amdgpu_kernel void @global_system_one_as_seq_cst_acquire_cmpxchg(
; GFX11-WGP-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
; GFX11-WGP-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], s[0:1] offset:16
; GFX11-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: s_endpgm
;
; GFX11-CU-LABEL: global_system_one_as_seq_cst_acquire_cmpxchg:
@@ -9960,8 +9960,8 @@ define amdgpu_kernel void @global_system_one_as_seq_cst_acquire_cmpxchg(
; GFX11-CU-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
; GFX11-CU-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], s[0:1] offset:16
; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: s_endpgm
ptr addrspace(1) %out, i32 %in, i32 %old) {
entry:
@@ -10010,8 +10010,8 @@ define amdgpu_kernel void @global_system_one_as_monotonic_seq_cst_cmpxchg(
; GFX10-WGP-NEXT: v_mov_b32_e32 v1, s3
; GFX10-WGP-NEXT: global_atomic_cmpswap v2, v[0:1], s[0:1] offset:16
; GFX10-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: s_endpgm
;
; GFX10-CU-LABEL: global_system_one_as_monotonic_seq_cst_cmpxchg:
@@ -10023,8 +10023,8 @@ define amdgpu_kernel void @global_system_one_as_monotonic_seq_cst_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s3
; GFX10-CU-NEXT: global_atomic_cmpswap v2, v[0:1], s[0:1] offset:16
; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: s_endpgm
;
; SKIP-CACHE-INV-LABEL: global_system_one_as_monotonic_seq_cst_cmpxchg:
@@ -10099,8 +10099,8 @@ define amdgpu_kernel void @global_system_one_as_monotonic_seq_cst_cmpxchg(
; GFX11-WGP-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
; GFX11-WGP-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], s[0:1] offset:16
; GFX11-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: s_endpgm
;
; GFX11-CU-LABEL: global_system_one_as_monotonic_seq_cst_cmpxchg:
@@ -10111,8 +10111,8 @@ define amdgpu_kernel void @global_system_one_as_monotonic_seq_cst_cmpxchg(
; GFX11-CU-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
; GFX11-CU-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], s[0:1] offset:16
; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: s_endpgm
ptr addrspace(1) %out, i32 %in, i32 %old) {
entry:
@@ -10161,8 +10161,8 @@ define amdgpu_kernel void @global_system_one_as_acquire_seq_cst_cmpxchg(
; GFX10-WGP-NEXT: v_mov_b32_e32 v1, s3
; GFX10-WGP-NEXT: global_atomic_cmpswap v2, v[0:1], s[0:1] offset:16
; GFX10-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: s_endpgm
;
; GFX10-CU-LABEL: global_system_one_as_acquire_seq_cst_cmpxchg:
@@ -10174,8 +10174,8 @@ define amdgpu_kernel void @global_system_one_as_acquire_seq_cst_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s3
; GFX10-CU-NEXT: global_atomic_cmpswap v2, v[0:1], s[0:1] offset:16
; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: s_endpgm
;
; SKIP-CACHE-INV-LABEL: global_system_one_as_acquire_seq_cst_cmpxchg:
@@ -10250,8 +10250,8 @@ define amdgpu_kernel void @global_system_one_as_acquire_seq_cst_cmpxchg(
; GFX11-WGP-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
; GFX11-WGP-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], s[0:1] offset:16
; GFX11-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: s_endpgm
;
; GFX11-CU-LABEL: global_system_one_as_acquire_seq_cst_cmpxchg:
@@ -10262,8 +10262,8 @@ define amdgpu_kernel void @global_system_one_as_acquire_seq_cst_cmpxchg(
; GFX11-CU-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
; GFX11-CU-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], s[0:1] offset:16
; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: s_endpgm
ptr addrspace(1) %out, i32 %in, i32 %old) {
entry:
@@ -10312,8 +10312,8 @@ define amdgpu_kernel void @global_system_one_as_release_seq_cst_cmpxchg(
; GFX10-WGP-NEXT: v_mov_b32_e32 v1, s3
; GFX10-WGP-NEXT: global_atomic_cmpswap v2, v[0:1], s[0:1] offset:16
; GFX10-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: s_endpgm
;
; GFX10-CU-LABEL: global_system_one_as_release_seq_cst_cmpxchg:
@@ -10325,8 +10325,8 @@ define amdgpu_kernel void @global_system_one_as_release_seq_cst_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s3
; GFX10-CU-NEXT: global_atomic_cmpswap v2, v[0:1], s[0:1] offset:16
; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: s_endpgm
;
; SKIP-CACHE-INV-LABEL: global_system_one_as_release_seq_cst_cmpxchg:
@@ -10401,8 +10401,8 @@ define amdgpu_kernel void @global_system_one_as_release_seq_cst_cmpxchg(
; GFX11-WGP-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
; GFX11-WGP-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], s[0:1] offset:16
; GFX11-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: s_endpgm
;
; GFX11-CU-LABEL: global_system_one_as_release_seq_cst_cmpxchg:
@@ -10413,8 +10413,8 @@ define amdgpu_kernel void @global_system_one_as_release_seq_cst_cmpxchg(
; GFX11-CU-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
; GFX11-CU-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], s[0:1] offset:16
; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: s_endpgm
ptr addrspace(1) %out, i32 %in, i32 %old) {
entry:
@@ -10463,8 +10463,8 @@ define amdgpu_kernel void @global_system_one_as_acq_rel_seq_cst_cmpxchg(
; GFX10-WGP-NEXT: v_mov_b32_e32 v1, s3
; GFX10-WGP-NEXT: global_atomic_cmpswap v2, v[0:1], s[0:1] offset:16
; GFX10-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: s_endpgm
;
; GFX10-CU-LABEL: global_system_one_as_acq_rel_seq_cst_cmpxchg:
@@ -10476,8 +10476,8 @@ define amdgpu_kernel void @global_system_one_as_acq_rel_seq_cst_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s3
; GFX10-CU-NEXT: global_atomic_cmpswap v2, v[0:1], s[0:1] offset:16
; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: s_endpgm
;
; SKIP-CACHE-INV-LABEL: global_system_one_as_acq_rel_seq_cst_cmpxchg:
@@ -10552,8 +10552,8 @@ define amdgpu_kernel void @global_system_one_as_acq_rel_seq_cst_cmpxchg(
; GFX11-WGP-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
; GFX11-WGP-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], s[0:1] offset:16
; GFX11-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: s_endpgm
;
; GFX11-CU-LABEL: global_system_one_as_acq_rel_seq_cst_cmpxchg:
@@ -10564,8 +10564,8 @@ define amdgpu_kernel void @global_system_one_as_acq_rel_seq_cst_cmpxchg(
; GFX11-CU-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
; GFX11-CU-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], s[0:1] offset:16
; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: s_endpgm
ptr addrspace(1) %out, i32 %in, i32 %old) {
entry:
@@ -10614,8 +10614,8 @@ define amdgpu_kernel void @global_system_one_as_seq_cst_seq_cst_cmpxchg(
; GFX10-WGP-NEXT: v_mov_b32_e32 v1, s3
; GFX10-WGP-NEXT: global_atomic_cmpswap v2, v[0:1], s[0:1] offset:16
; GFX10-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: s_endpgm
;
; GFX10-CU-LABEL: global_system_one_as_seq_cst_seq_cst_cmpxchg:
@@ -10627,8 +10627,8 @@ define amdgpu_kernel void @global_system_one_as_seq_cst_seq_cst_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s3
; GFX10-CU-NEXT: global_atomic_cmpswap v2, v[0:1], s[0:1] offset:16
; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: s_endpgm
;
; SKIP-CACHE-INV-LABEL: global_system_one_as_seq_cst_seq_cst_cmpxchg:
@@ -10703,8 +10703,8 @@ define amdgpu_kernel void @global_system_one_as_seq_cst_seq_cst_cmpxchg(
; GFX11-WGP-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
; GFX11-WGP-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], s[0:1] offset:16
; GFX11-WGP-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: s_endpgm
;
; GFX11-CU-LABEL: global_system_one_as_seq_cst_seq_cst_cmpxchg:
@@ -10715,8 +10715,8 @@ define amdgpu_kernel void @global_system_one_as_seq_cst_seq_cst_cmpxchg(
; GFX11-CU-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
; GFX11-CU-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], s[0:1] offset:16
; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: s_endpgm
ptr addrspace(1) %out, i32 %in, i32 %old) {
entry:
@@ -10919,8 +10919,8 @@ define amdgpu_kernel void @global_system_one_as_acquire_monotonic_ret_cmpxchg(
; GFX10-WGP-NEXT: v_mov_b32_e32 v1, s3
; GFX10-WGP-NEXT: global_atomic_cmpswap v0, v2, v[0:1], s[0:1] offset:16 glc
; GFX10-WGP-NEXT: s_waitcnt vmcnt(0)
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: global_store_dword v2, v0, s[0:1]
; GFX10-WGP-NEXT: s_endpgm
;
@@ -10933,8 +10933,8 @@ define amdgpu_kernel void @global_system_one_as_acquire_monotonic_ret_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s3
; GFX10-CU-NEXT: global_atomic_cmpswap v0, v2, v[0:1], s[0:1] offset:16 glc
; GFX10-CU-NEXT: s_waitcnt vmcnt(0)
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: global_store_dword v2, v0, s[0:1]
; GFX10-CU-NEXT: s_endpgm
;
@@ -11011,8 +11011,8 @@ define amdgpu_kernel void @global_system_one_as_acquire_monotonic_ret_cmpxchg(
; GFX11-WGP-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
; GFX11-WGP-NEXT: global_atomic_cmpswap_b32 v0, v2, v[0:1], s[0:1] offset:16 glc
; GFX11-WGP-NEXT: s_waitcnt vmcnt(0)
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: global_store_b32 v2, v0, s[0:1]
; GFX11-WGP-NEXT: s_nop 0
; GFX11-WGP-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -11026,8 +11026,8 @@ define amdgpu_kernel void @global_system_one_as_acquire_monotonic_ret_cmpxchg(
; GFX11-CU-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
; GFX11-CU-NEXT: global_atomic_cmpswap_b32 v0, v2, v[0:1], s[0:1] offset:16 glc
; GFX11-CU-NEXT: s_waitcnt vmcnt(0)
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: global_store_b32 v2, v0, s[0:1]
; GFX11-CU-NEXT: s_nop 0
; GFX11-CU-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -11239,8 +11239,8 @@ define amdgpu_kernel void @global_system_one_as_acq_rel_monotonic_ret_cmpxchg(
; GFX10-WGP-NEXT: v_mov_b32_e32 v1, s3
; GFX10-WGP-NEXT: global_atomic_cmpswap v0, v2, v[0:1], s[0:1] offset:16 glc
; GFX10-WGP-NEXT: s_waitcnt vmcnt(0)
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: global_store_dword v2, v0, s[0:1]
; GFX10-WGP-NEXT: s_endpgm
;
@@ -11253,8 +11253,8 @@ define amdgpu_kernel void @global_system_one_as_acq_rel_monotonic_ret_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s3
; GFX10-CU-NEXT: global_atomic_cmpswap v0, v2, v[0:1], s[0:1] offset:16 glc
; GFX10-CU-NEXT: s_waitcnt vmcnt(0)
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: global_store_dword v2, v0, s[0:1]
; GFX10-CU-NEXT: s_endpgm
;
@@ -11335,8 +11335,8 @@ define amdgpu_kernel void @global_system_one_as_acq_rel_monotonic_ret_cmpxchg(
; GFX11-WGP-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
; GFX11-WGP-NEXT: global_atomic_cmpswap_b32 v0, v2, v[0:1], s[0:1] offset:16 glc
; GFX11-WGP-NEXT: s_waitcnt vmcnt(0)
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: global_store_b32 v2, v0, s[0:1]
; GFX11-WGP-NEXT: s_nop 0
; GFX11-WGP-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -11350,8 +11350,8 @@ define amdgpu_kernel void @global_system_one_as_acq_rel_monotonic_ret_cmpxchg(
; GFX11-CU-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
; GFX11-CU-NEXT: global_atomic_cmpswap_b32 v0, v2, v[0:1], s[0:1] offset:16 glc
; GFX11-CU-NEXT: s_waitcnt vmcnt(0)
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: global_store_b32 v2, v0, s[0:1]
; GFX11-CU-NEXT: s_nop 0
; GFX11-CU-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -11409,8 +11409,8 @@ define amdgpu_kernel void @global_system_one_as_seq_cst_monotonic_ret_cmpxchg(
; GFX10-WGP-NEXT: v_mov_b32_e32 v1, s3
; GFX10-WGP-NEXT: global_atomic_cmpswap v0, v2, v[0:1], s[0:1] offset:16 glc
; GFX10-WGP-NEXT: s_waitcnt vmcnt(0)
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: global_store_dword v2, v0, s[0:1]
; GFX10-WGP-NEXT: s_endpgm
;
@@ -11423,8 +11423,8 @@ define amdgpu_kernel void @global_system_one_as_seq_cst_monotonic_ret_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s3
; GFX10-CU-NEXT: global_atomic_cmpswap v0, v2, v[0:1], s[0:1] offset:16 glc
; GFX10-CU-NEXT: s_waitcnt vmcnt(0)
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: global_store_dword v2, v0, s[0:1]
; GFX10-CU-NEXT: s_endpgm
;
@@ -11505,8 +11505,8 @@ define amdgpu_kernel void @global_system_one_as_seq_cst_monotonic_ret_cmpxchg(
; GFX11-WGP-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
; GFX11-WGP-NEXT: global_atomic_cmpswap_b32 v0, v2, v[0:1], s[0:1] offset:16 glc
; GFX11-WGP-NEXT: s_waitcnt vmcnt(0)
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: global_store_b32 v2, v0, s[0:1]
; GFX11-WGP-NEXT: s_nop 0
; GFX11-WGP-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -11520,8 +11520,8 @@ define amdgpu_kernel void @global_system_one_as_seq_cst_monotonic_ret_cmpxchg(
; GFX11-CU-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
; GFX11-CU-NEXT: global_atomic_cmpswap_b32 v0, v2, v[0:1], s[0:1] offset:16 glc
; GFX11-CU-NEXT: s_waitcnt vmcnt(0)
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: global_store_b32 v2, v0, s[0:1]
; GFX11-CU-NEXT: s_nop 0
; GFX11-CU-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -11579,8 +11579,8 @@ define amdgpu_kernel void @global_system_one_as_monotonic_acquire_ret_cmpxchg(
; GFX10-WGP-NEXT: v_mov_b32_e32 v1, s3
; GFX10-WGP-NEXT: global_atomic_cmpswap v0, v2, v[0:1], s[0:1] offset:16 glc
; GFX10-WGP-NEXT: s_waitcnt vmcnt(0)
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: global_store_dword v2, v0, s[0:1]
; GFX10-WGP-NEXT: s_endpgm
;
@@ -11593,8 +11593,8 @@ define amdgpu_kernel void @global_system_one_as_monotonic_acquire_ret_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s3
; GFX10-CU-NEXT: global_atomic_cmpswap v0, v2, v[0:1], s[0:1] offset:16 glc
; GFX10-CU-NEXT: s_waitcnt vmcnt(0)
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: global_store_dword v2, v0, s[0:1]
; GFX10-CU-NEXT: s_endpgm
;
@@ -11671,8 +11671,8 @@ define amdgpu_kernel void @global_system_one_as_monotonic_acquire_ret_cmpxchg(
; GFX11-WGP-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
; GFX11-WGP-NEXT: global_atomic_cmpswap_b32 v0, v2, v[0:1], s[0:1] offset:16 glc
; GFX11-WGP-NEXT: s_waitcnt vmcnt(0)
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: global_store_b32 v2, v0, s[0:1]
; GFX11-WGP-NEXT: s_nop 0
; GFX11-WGP-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -11686,8 +11686,8 @@ define amdgpu_kernel void @global_system_one_as_monotonic_acquire_ret_cmpxchg(
; GFX11-CU-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
; GFX11-CU-NEXT: global_atomic_cmpswap_b32 v0, v2, v[0:1], s[0:1] offset:16 glc
; GFX11-CU-NEXT: s_waitcnt vmcnt(0)
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: global_store_b32 v2, v0, s[0:1]
; GFX11-CU-NEXT: s_nop 0
; GFX11-CU-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -11745,8 +11745,8 @@ define amdgpu_kernel void @global_system_one_as_acquire_acquire_ret_cmpxchg(
; GFX10-WGP-NEXT: v_mov_b32_e32 v1, s3
; GFX10-WGP-NEXT: global_atomic_cmpswap v0, v2, v[0:1], s[0:1] offset:16 glc
; GFX10-WGP-NEXT: s_waitcnt vmcnt(0)
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: global_store_dword v2, v0, s[0:1]
; GFX10-WGP-NEXT: s_endpgm
;
@@ -11759,8 +11759,8 @@ define amdgpu_kernel void @global_system_one_as_acquire_acquire_ret_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s3
; GFX10-CU-NEXT: global_atomic_cmpswap v0, v2, v[0:1], s[0:1] offset:16 glc
; GFX10-CU-NEXT: s_waitcnt vmcnt(0)
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: global_store_dword v2, v0, s[0:1]
; GFX10-CU-NEXT: s_endpgm
;
@@ -11837,8 +11837,8 @@ define amdgpu_kernel void @global_system_one_as_acquire_acquire_ret_cmpxchg(
; GFX11-WGP-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
; GFX11-WGP-NEXT: global_atomic_cmpswap_b32 v0, v2, v[0:1], s[0:1] offset:16 glc
; GFX11-WGP-NEXT: s_waitcnt vmcnt(0)
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: global_store_b32 v2, v0, s[0:1]
; GFX11-WGP-NEXT: s_nop 0
; GFX11-WGP-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -11852,8 +11852,8 @@ define amdgpu_kernel void @global_system_one_as_acquire_acquire_ret_cmpxchg(
; GFX11-CU-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
; GFX11-CU-NEXT: global_atomic_cmpswap_b32 v0, v2, v[0:1], s[0:1] offset:16 glc
; GFX11-CU-NEXT: s_waitcnt vmcnt(0)
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: global_store_b32 v2, v0, s[0:1]
; GFX11-CU-NEXT: s_nop 0
; GFX11-CU-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -11911,8 +11911,8 @@ define amdgpu_kernel void @global_system_one_as_release_acquire_ret_cmpxchg(
; GFX10-WGP-NEXT: v_mov_b32_e32 v1, s3
; GFX10-WGP-NEXT: global_atomic_cmpswap v0, v2, v[0:1], s[0:1] offset:16 glc
; GFX10-WGP-NEXT: s_waitcnt vmcnt(0)
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: global_store_dword v2, v0, s[0:1]
; GFX10-WGP-NEXT: s_endpgm
;
@@ -11925,8 +11925,8 @@ define amdgpu_kernel void @global_system_one_as_release_acquire_ret_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s3
; GFX10-CU-NEXT: global_atomic_cmpswap v0, v2, v[0:1], s[0:1] offset:16 glc
; GFX10-CU-NEXT: s_waitcnt vmcnt(0)
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: global_store_dword v2, v0, s[0:1]
; GFX10-CU-NEXT: s_endpgm
;
@@ -12007,8 +12007,8 @@ define amdgpu_kernel void @global_system_one_as_release_acquire_ret_cmpxchg(
; GFX11-WGP-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
; GFX11-WGP-NEXT: global_atomic_cmpswap_b32 v0, v2, v[0:1], s[0:1] offset:16 glc
; GFX11-WGP-NEXT: s_waitcnt vmcnt(0)
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: global_store_b32 v2, v0, s[0:1]
; GFX11-WGP-NEXT: s_nop 0
; GFX11-WGP-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -12022,8 +12022,8 @@ define amdgpu_kernel void @global_system_one_as_release_acquire_ret_cmpxchg(
; GFX11-CU-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
; GFX11-CU-NEXT: global_atomic_cmpswap_b32 v0, v2, v[0:1], s[0:1] offset:16 glc
; GFX11-CU-NEXT: s_waitcnt vmcnt(0)
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: global_store_b32 v2, v0, s[0:1]
; GFX11-CU-NEXT: s_nop 0
; GFX11-CU-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -12081,8 +12081,8 @@ define amdgpu_kernel void @global_system_one_as_acq_rel_acquire_ret_cmpxchg(
; GFX10-WGP-NEXT: v_mov_b32_e32 v1, s3
; GFX10-WGP-NEXT: global_atomic_cmpswap v0, v2, v[0:1], s[0:1] offset:16 glc
; GFX10-WGP-NEXT: s_waitcnt vmcnt(0)
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: global_store_dword v2, v0, s[0:1]
; GFX10-WGP-NEXT: s_endpgm
;
@@ -12095,8 +12095,8 @@ define amdgpu_kernel void @global_system_one_as_acq_rel_acquire_ret_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s3
; GFX10-CU-NEXT: global_atomic_cmpswap v0, v2, v[0:1], s[0:1] offset:16 glc
; GFX10-CU-NEXT: s_waitcnt vmcnt(0)
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: global_store_dword v2, v0, s[0:1]
; GFX10-CU-NEXT: s_endpgm
;
@@ -12177,8 +12177,8 @@ define amdgpu_kernel void @global_system_one_as_acq_rel_acquire_ret_cmpxchg(
; GFX11-WGP-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
; GFX11-WGP-NEXT: global_atomic_cmpswap_b32 v0, v2, v[0:1], s[0:1] offset:16 glc
; GFX11-WGP-NEXT: s_waitcnt vmcnt(0)
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: global_store_b32 v2, v0, s[0:1]
; GFX11-WGP-NEXT: s_nop 0
; GFX11-WGP-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -12192,8 +12192,8 @@ define amdgpu_kernel void @global_system_one_as_acq_rel_acquire_ret_cmpxchg(
; GFX11-CU-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
; GFX11-CU-NEXT: global_atomic_cmpswap_b32 v0, v2, v[0:1], s[0:1] offset:16 glc
; GFX11-CU-NEXT: s_waitcnt vmcnt(0)
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: global_store_b32 v2, v0, s[0:1]
; GFX11-CU-NEXT: s_nop 0
; GFX11-CU-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -12251,8 +12251,8 @@ define amdgpu_kernel void @global_system_one_as_seq_cst_acquire_ret_cmpxchg(
; GFX10-WGP-NEXT: v_mov_b32_e32 v1, s3
; GFX10-WGP-NEXT: global_atomic_cmpswap v0, v2, v[0:1], s[0:1] offset:16 glc
; GFX10-WGP-NEXT: s_waitcnt vmcnt(0)
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: global_store_dword v2, v0, s[0:1]
; GFX10-WGP-NEXT: s_endpgm
;
@@ -12265,8 +12265,8 @@ define amdgpu_kernel void @global_system_one_as_seq_cst_acquire_ret_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s3
; GFX10-CU-NEXT: global_atomic_cmpswap v0, v2, v[0:1], s[0:1] offset:16 glc
; GFX10-CU-NEXT: s_waitcnt vmcnt(0)
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: global_store_dword v2, v0, s[0:1]
; GFX10-CU-NEXT: s_endpgm
;
@@ -12347,8 +12347,8 @@ define amdgpu_kernel void @global_system_one_as_seq_cst_acquire_ret_cmpxchg(
; GFX11-WGP-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
; GFX11-WGP-NEXT: global_atomic_cmpswap_b32 v0, v2, v[0:1], s[0:1] offset:16 glc
; GFX11-WGP-NEXT: s_waitcnt vmcnt(0)
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: global_store_b32 v2, v0, s[0:1]
; GFX11-WGP-NEXT: s_nop 0
; GFX11-WGP-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -12362,8 +12362,8 @@ define amdgpu_kernel void @global_system_one_as_seq_cst_acquire_ret_cmpxchg(
; GFX11-CU-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
; GFX11-CU-NEXT: global_atomic_cmpswap_b32 v0, v2, v[0:1], s[0:1] offset:16 glc
; GFX11-CU-NEXT: s_waitcnt vmcnt(0)
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: global_store_b32 v2, v0, s[0:1]
; GFX11-CU-NEXT: s_nop 0
; GFX11-CU-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -12421,8 +12421,8 @@ define amdgpu_kernel void @global_system_one_as_monotonic_seq_cst_ret_cmpxchg(
; GFX10-WGP-NEXT: v_mov_b32_e32 v1, s3
; GFX10-WGP-NEXT: global_atomic_cmpswap v0, v2, v[0:1], s[0:1] offset:16 glc
; GFX10-WGP-NEXT: s_waitcnt vmcnt(0)
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: global_store_dword v2, v0, s[0:1]
; GFX10-WGP-NEXT: s_endpgm
;
@@ -12435,8 +12435,8 @@ define amdgpu_kernel void @global_system_one_as_monotonic_seq_cst_ret_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s3
; GFX10-CU-NEXT: global_atomic_cmpswap v0, v2, v[0:1], s[0:1] offset:16 glc
; GFX10-CU-NEXT: s_waitcnt vmcnt(0)
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: global_store_dword v2, v0, s[0:1]
; GFX10-CU-NEXT: s_endpgm
;
@@ -12517,8 +12517,8 @@ define amdgpu_kernel void @global_system_one_as_monotonic_seq_cst_ret_cmpxchg(
; GFX11-WGP-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
; GFX11-WGP-NEXT: global_atomic_cmpswap_b32 v0, v2, v[0:1], s[0:1] offset:16 glc
; GFX11-WGP-NEXT: s_waitcnt vmcnt(0)
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: global_store_b32 v2, v0, s[0:1]
; GFX11-WGP-NEXT: s_nop 0
; GFX11-WGP-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -12532,8 +12532,8 @@ define amdgpu_kernel void @global_system_one_as_monotonic_seq_cst_ret_cmpxchg(
; GFX11-CU-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
; GFX11-CU-NEXT: global_atomic_cmpswap_b32 v0, v2, v[0:1], s[0:1] offset:16 glc
; GFX11-CU-NEXT: s_waitcnt vmcnt(0)
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: global_store_b32 v2, v0, s[0:1]
; GFX11-CU-NEXT: s_nop 0
; GFX11-CU-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -12591,8 +12591,8 @@ define amdgpu_kernel void @global_system_one_as_acquire_seq_cst_ret_cmpxchg(
; GFX10-WGP-NEXT: v_mov_b32_e32 v1, s3
; GFX10-WGP-NEXT: global_atomic_cmpswap v0, v2, v[0:1], s[0:1] offset:16 glc
; GFX10-WGP-NEXT: s_waitcnt vmcnt(0)
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: global_store_dword v2, v0, s[0:1]
; GFX10-WGP-NEXT: s_endpgm
;
@@ -12605,8 +12605,8 @@ define amdgpu_kernel void @global_system_one_as_acquire_seq_cst_ret_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s3
; GFX10-CU-NEXT: global_atomic_cmpswap v0, v2, v[0:1], s[0:1] offset:16 glc
; GFX10-CU-NEXT: s_waitcnt vmcnt(0)
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: global_store_dword v2, v0, s[0:1]
; GFX10-CU-NEXT: s_endpgm
;
@@ -12687,8 +12687,8 @@ define amdgpu_kernel void @global_system_one_as_acquire_seq_cst_ret_cmpxchg(
; GFX11-WGP-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
; GFX11-WGP-NEXT: global_atomic_cmpswap_b32 v0, v2, v[0:1], s[0:1] offset:16 glc
; GFX11-WGP-NEXT: s_waitcnt vmcnt(0)
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: global_store_b32 v2, v0, s[0:1]
; GFX11-WGP-NEXT: s_nop 0
; GFX11-WGP-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -12702,8 +12702,8 @@ define amdgpu_kernel void @global_system_one_as_acquire_seq_cst_ret_cmpxchg(
; GFX11-CU-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
; GFX11-CU-NEXT: global_atomic_cmpswap_b32 v0, v2, v[0:1], s[0:1] offset:16 glc
; GFX11-CU-NEXT: s_waitcnt vmcnt(0)
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: global_store_b32 v2, v0, s[0:1]
; GFX11-CU-NEXT: s_nop 0
; GFX11-CU-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -12761,8 +12761,8 @@ define amdgpu_kernel void @global_system_one_as_release_seq_cst_ret_cmpxchg(
; GFX10-WGP-NEXT: v_mov_b32_e32 v1, s3
; GFX10-WGP-NEXT: global_atomic_cmpswap v0, v2, v[0:1], s[0:1] offset:16 glc
; GFX10-WGP-NEXT: s_waitcnt vmcnt(0)
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: global_store_dword v2, v0, s[0:1]
; GFX10-WGP-NEXT: s_endpgm
;
@@ -12775,8 +12775,8 @@ define amdgpu_kernel void @global_system_one_as_release_seq_cst_ret_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s3
; GFX10-CU-NEXT: global_atomic_cmpswap v0, v2, v[0:1], s[0:1] offset:16 glc
; GFX10-CU-NEXT: s_waitcnt vmcnt(0)
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: global_store_dword v2, v0, s[0:1]
; GFX10-CU-NEXT: s_endpgm
;
@@ -12857,8 +12857,8 @@ define amdgpu_kernel void @global_system_one_as_release_seq_cst_ret_cmpxchg(
; GFX11-WGP-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
; GFX11-WGP-NEXT: global_atomic_cmpswap_b32 v0, v2, v[0:1], s[0:1] offset:16 glc
; GFX11-WGP-NEXT: s_waitcnt vmcnt(0)
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: global_store_b32 v2, v0, s[0:1]
; GFX11-WGP-NEXT: s_nop 0
; GFX11-WGP-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -12872,8 +12872,8 @@ define amdgpu_kernel void @global_system_one_as_release_seq_cst_ret_cmpxchg(
; GFX11-CU-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
; GFX11-CU-NEXT: global_atomic_cmpswap_b32 v0, v2, v[0:1], s[0:1] offset:16 glc
; GFX11-CU-NEXT: s_waitcnt vmcnt(0)
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: global_store_b32 v2, v0, s[0:1]
; GFX11-CU-NEXT: s_nop 0
; GFX11-CU-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -12931,8 +12931,8 @@ define amdgpu_kernel void @global_system_one_as_acq_rel_seq_cst_ret_cmpxchg(
; GFX10-WGP-NEXT: v_mov_b32_e32 v1, s3
; GFX10-WGP-NEXT: global_atomic_cmpswap v0, v2, v[0:1], s[0:1] offset:16 glc
; GFX10-WGP-NEXT: s_waitcnt vmcnt(0)
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: global_store_dword v2, v0, s[0:1]
; GFX10-WGP-NEXT: s_endpgm
;
@@ -12945,8 +12945,8 @@ define amdgpu_kernel void @global_system_one_as_acq_rel_seq_cst_ret_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s3
; GFX10-CU-NEXT: global_atomic_cmpswap v0, v2, v[0:1], s[0:1] offset:16 glc
; GFX10-CU-NEXT: s_waitcnt vmcnt(0)
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: global_store_dword v2, v0, s[0:1]
; GFX10-CU-NEXT: s_endpgm
;
@@ -13027,8 +13027,8 @@ define amdgpu_kernel void @global_system_one_as_acq_rel_seq_cst_ret_cmpxchg(
; GFX11-WGP-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
; GFX11-WGP-NEXT: global_atomic_cmpswap_b32 v0, v2, v[0:1], s[0:1] offset:16 glc
; GFX11-WGP-NEXT: s_waitcnt vmcnt(0)
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: global_store_b32 v2, v0, s[0:1]
; GFX11-WGP-NEXT: s_nop 0
; GFX11-WGP-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -13042,8 +13042,8 @@ define amdgpu_kernel void @global_system_one_as_acq_rel_seq_cst_ret_cmpxchg(
; GFX11-CU-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
; GFX11-CU-NEXT: global_atomic_cmpswap_b32 v0, v2, v[0:1], s[0:1] offset:16 glc
; GFX11-CU-NEXT: s_waitcnt vmcnt(0)
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: global_store_b32 v2, v0, s[0:1]
; GFX11-CU-NEXT: s_nop 0
; GFX11-CU-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -13101,8 +13101,8 @@ define amdgpu_kernel void @global_system_one_as_seq_cst_seq_cst_ret_cmpxchg(
; GFX10-WGP-NEXT: v_mov_b32_e32 v1, s3
; GFX10-WGP-NEXT: global_atomic_cmpswap v0, v2, v[0:1], s[0:1] offset:16 glc
; GFX10-WGP-NEXT: s_waitcnt vmcnt(0)
-; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
; GFX10-WGP-NEXT: global_store_dword v2, v0, s[0:1]
; GFX10-WGP-NEXT: s_endpgm
;
@@ -13115,8 +13115,8 @@ define amdgpu_kernel void @global_system_one_as_seq_cst_seq_cst_ret_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s3
; GFX10-CU-NEXT: global_atomic_cmpswap v0, v2, v[0:1], s[0:1] offset:16 glc
; GFX10-CU-NEXT: s_waitcnt vmcnt(0)
-; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
; GFX10-CU-NEXT: global_store_dword v2, v0, s[0:1]
; GFX10-CU-NEXT: s_endpgm
;
@@ -13197,8 +13197,8 @@ define amdgpu_kernel void @global_system_one_as_seq_cst_seq_cst_ret_cmpxchg(
; GFX11-WGP-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
; GFX11-WGP-NEXT: global_atomic_cmpswap_b32 v0, v2, v[0:1], s[0:1] offset:16 glc
; GFX11-WGP-NEXT: s_waitcnt vmcnt(0)
-; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
; GFX11-WGP-NEXT: global_store_b32 v2, v0, s[0:1]
; GFX11-WGP-NEXT: s_nop 0
; GFX11-WGP-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -13212,8 +13212,8 @@ define amdgpu_kernel void @global_system_one_as_seq_cst_seq_cst_ret_cmpxchg(
; GFX11-CU-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
; GFX11-CU-NEXT: global_atomic_cmpswap_b32 v0, v2, v[0:1], s[0:1] offset:16 glc
; GFX11-CU-NEXT: s_waitcnt vmcnt(0)
-; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
; GFX11-CU-NEXT: global_store_b32 v2, v0, s[0:1]
; GFX11-CU-NEXT: s_nop 0
; GFX11-CU-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
diff --git a/llvm/test/CodeGen/AMDGPU/preload-kernarg-header.ll b/llvm/test/CodeGen/AMDGPU/preload-kernarg-header.ll
index 75feac3..a70488a 100644
--- a/llvm/test/CodeGen/AMDGPU/preload-kernarg-header.ll
+++ b/llvm/test/CodeGen/AMDGPU/preload-kernarg-header.ll
@@ -1,8 +1,11 @@
-; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx940 -amdgpu-kernarg-preload-count=1 -asm-verbose=0 < %s | FileCheck -check-prefixes=GCN %s
-; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx940 -amdgpu-kernarg-preload-count=1 -filetype=obj < %s | llvm-objdump --arch=amdgcn --mcpu=gfx940 --disassemble - | FileCheck -check-prefixes=GCN %s
+; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx940 -amdgpu-kernarg-preload-count=1 -asm-verbose=0 < %s | FileCheck -check-prefixes=GCN,HSA %s
+; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx940 -amdgpu-kernarg-preload-count=1 -filetype=obj < %s | llvm-objdump --arch=amdgcn --mcpu=gfx940 --disassemble - | FileCheck -check-prefixes=GCN,HSA %s
+; RUN: llc -mtriple=amdgcn -mcpu=gfx940 -amdgpu-kernarg-preload-count=1 -filetype=obj < %s | llvm-objdump --arch=amdgcn --mcpu=gfx940 --disassemble - | FileCheck -check-prefixes=GCN,NON-HSA %s
; GCN: preload_kernarg_header
-; GCN-COUNT-64: s_nop 0
+; HSA: s_trap 2
+; NON-HSA: s_endpgm
+; GCN-COUNT-63: s_nop 0
define amdgpu_kernel void @preload_kernarg_header(ptr %arg) {
store ptr %arg, ptr %arg
ret void
diff --git a/llvm/test/CodeGen/AMDGPU/preload-kernargs.ll b/llvm/test/CodeGen/AMDGPU/preload-kernargs.ll
index 5798021..d20c3a4 100644
--- a/llvm/test/CodeGen/AMDGPU/preload-kernargs.ll
+++ b/llvm/test/CodeGen/AMDGPU/preload-kernargs.ll
@@ -1,1856 +1,3681 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2
-; RUN: llc -mtriple=amdgcn--amdhsa -mcpu=gfx940 -verify-machineinstrs < %s | FileCheck -check-prefixes=NO-PRELOAD %s
-; RUN: llc -mtriple=amdgcn--amdhsa -mcpu=gfx940 -amdgpu-kernarg-preload-count=1 -verify-machineinstrs < %s | FileCheck -check-prefixes=PRELOAD-1 %s
-; RUN: llc -mtriple=amdgcn--amdhsa -mcpu=gfx940 -amdgpu-kernarg-preload-count=2 -verify-machineinstrs < %s | FileCheck -check-prefixes=PRELOAD-2 %s
-; RUN: llc -mtriple=amdgcn--amdhsa -mcpu=gfx940 -amdgpu-kernarg-preload-count=4 -verify-machineinstrs < %s | FileCheck -check-prefixes=PRELOAD-4 %s
-; RUN: llc -mtriple=amdgcn--amdhsa -mcpu=gfx940 -amdgpu-kernarg-preload-count=8 -verify-machineinstrs < %s | FileCheck -check-prefixes=PRELOAD-8 %s
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
+; RUN: llc -mtriple=amdgcn--amdhsa -mcpu=gfx940 -verify-machineinstrs < %s | FileCheck -check-prefixes=GFX940-NO-PRELOAD %s
+; RUN: llc -mtriple=amdgcn--amdhsa -mcpu=gfx940 -amdgpu-kernarg-preload-count=1 -verify-machineinstrs < %s | FileCheck -check-prefixes=GFX940-PRELOAD-1 %s
+; RUN: llc -mtriple=amdgcn--amdhsa -mcpu=gfx940 -amdgpu-kernarg-preload-count=2 -verify-machineinstrs < %s | FileCheck -check-prefixes=GFX940-PRELOAD-2 %s
+; RUN: llc -mtriple=amdgcn--amdhsa -mcpu=gfx940 -amdgpu-kernarg-preload-count=4 -verify-machineinstrs < %s | FileCheck -check-prefixes=GFX940-PRELOAD-4 %s
+; RUN: llc -mtriple=amdgcn--amdhsa -mcpu=gfx940 -amdgpu-kernarg-preload-count=8 -verify-machineinstrs < %s | FileCheck -check-prefixes=GFX940-PRELOAD-8 %s
+
+; RUN: llc -mtriple=amdgcn--amdhsa -mcpu=gfx90a -verify-machineinstrs < %s | FileCheck -check-prefixes=GFX90a-NO-PRELOAD %s
+; RUN: llc -mtriple=amdgcn--amdhsa -mcpu=gfx90a -amdgpu-kernarg-preload-count=1 -verify-machineinstrs < %s | FileCheck -check-prefixes=GFX90a-PRELOAD-1 %s
+; RUN: llc -mtriple=amdgcn--amdhsa -mcpu=gfx90a -amdgpu-kernarg-preload-count=2 -verify-machineinstrs < %s | FileCheck -check-prefixes=GFX90a-PRELOAD-2 %s
+; RUN: llc -mtriple=amdgcn--amdhsa -mcpu=gfx90a -amdgpu-kernarg-preload-count=4 -verify-machineinstrs < %s | FileCheck -check-prefixes=GFX90a-PRELOAD-4 %s
+; RUN: llc -mtriple=amdgcn--amdhsa -mcpu=gfx90a -amdgpu-kernarg-preload-count=8 -verify-machineinstrs < %s | FileCheck -check-prefixes=GFX90a-PRELOAD-8 %s
define amdgpu_kernel void @ptr1_i8(ptr addrspace(1) %out, i8 %arg0) {
-; NO-PRELOAD-LABEL: ptr1_i8:
-; NO-PRELOAD: ; %bb.0:
-; NO-PRELOAD-NEXT: s_load_dword s4, s[0:1], 0x8
-; NO-PRELOAD-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x0
-; NO-PRELOAD-NEXT: v_mov_b32_e32 v0, 0
-; NO-PRELOAD-NEXT: s_waitcnt lgkmcnt(0)
-; NO-PRELOAD-NEXT: s_and_b32 s0, s4, 0xff
-; NO-PRELOAD-NEXT: v_mov_b32_e32 v1, s0
-; NO-PRELOAD-NEXT: global_store_dword v0, v1, s[2:3] sc0 sc1
-; NO-PRELOAD-NEXT: s_endpgm
-;
-; PRELOAD-1-LABEL: ptr1_i8:
-; PRELOAD-1: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: ; %bb.0:
-; PRELOAD-1-NEXT: s_load_dword s0, s[0:1], 0x8
-; PRELOAD-1-NEXT: v_mov_b32_e32 v0, 0
-; PRELOAD-1-NEXT: s_waitcnt lgkmcnt(0)
-; PRELOAD-1-NEXT: s_and_b32 s0, s0, 0xff
-; PRELOAD-1-NEXT: v_mov_b32_e32 v1, s0
-; PRELOAD-1-NEXT: global_store_dword v0, v1, s[2:3] sc0 sc1
-; PRELOAD-1-NEXT: s_endpgm
-;
-; PRELOAD-2-LABEL: ptr1_i8:
-; PRELOAD-2: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: ; %bb.0:
-; PRELOAD-2-NEXT: s_and_b32 s0, s4, 0xff
-; PRELOAD-2-NEXT: v_mov_b32_e32 v0, 0
-; PRELOAD-2-NEXT: v_mov_b32_e32 v1, s0
-; PRELOAD-2-NEXT: global_store_dword v0, v1, s[2:3] sc0 sc1
-; PRELOAD-2-NEXT: s_endpgm
-;
-; PRELOAD-4-LABEL: ptr1_i8:
-; PRELOAD-4: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: ; %bb.0:
-; PRELOAD-4-NEXT: s_and_b32 s0, s4, 0xff
-; PRELOAD-4-NEXT: v_mov_b32_e32 v0, 0
-; PRELOAD-4-NEXT: v_mov_b32_e32 v1, s0
-; PRELOAD-4-NEXT: global_store_dword v0, v1, s[2:3] sc0 sc1
-; PRELOAD-4-NEXT: s_endpgm
-;
-; PRELOAD-8-LABEL: ptr1_i8:
-; PRELOAD-8: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: ; %bb.0:
-; PRELOAD-8-NEXT: s_and_b32 s0, s4, 0xff
-; PRELOAD-8-NEXT: v_mov_b32_e32 v0, 0
-; PRELOAD-8-NEXT: v_mov_b32_e32 v1, s0
-; PRELOAD-8-NEXT: global_store_dword v0, v1, s[2:3] sc0 sc1
-; PRELOAD-8-NEXT: s_endpgm
+; GFX940-NO-PRELOAD-LABEL: ptr1_i8:
+; GFX940-NO-PRELOAD: ; %bb.0:
+; GFX940-NO-PRELOAD-NEXT: s_load_dword s4, s[0:1], 0x8
+; GFX940-NO-PRELOAD-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x0
+; GFX940-NO-PRELOAD-NEXT: v_mov_b32_e32 v0, 0
+; GFX940-NO-PRELOAD-NEXT: s_waitcnt lgkmcnt(0)
+; GFX940-NO-PRELOAD-NEXT: s_and_b32 s0, s4, 0xff
+; GFX940-NO-PRELOAD-NEXT: v_mov_b32_e32 v1, s0
+; GFX940-NO-PRELOAD-NEXT: global_store_dword v0, v1, s[2:3] sc0 sc1
+; GFX940-NO-PRELOAD-NEXT: s_endpgm
+;
+; GFX940-PRELOAD-1-LABEL: ptr1_i8:
+; GFX940-PRELOAD-1: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: ; %bb.0:
+; GFX940-PRELOAD-1-NEXT: s_load_dword s0, s[0:1], 0x8
+; GFX940-PRELOAD-1-NEXT: v_mov_b32_e32 v0, 0
+; GFX940-PRELOAD-1-NEXT: s_waitcnt lgkmcnt(0)
+; GFX940-PRELOAD-1-NEXT: s_and_b32 s0, s0, 0xff
+; GFX940-PRELOAD-1-NEXT: v_mov_b32_e32 v1, s0
+; GFX940-PRELOAD-1-NEXT: global_store_dword v0, v1, s[2:3] sc0 sc1
+; GFX940-PRELOAD-1-NEXT: s_endpgm
+;
+; GFX940-PRELOAD-2-LABEL: ptr1_i8:
+; GFX940-PRELOAD-2: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: ; %bb.0:
+; GFX940-PRELOAD-2-NEXT: s_and_b32 s0, s4, 0xff
+; GFX940-PRELOAD-2-NEXT: v_mov_b32_e32 v0, 0
+; GFX940-PRELOAD-2-NEXT: v_mov_b32_e32 v1, s0
+; GFX940-PRELOAD-2-NEXT: global_store_dword v0, v1, s[2:3] sc0 sc1
+; GFX940-PRELOAD-2-NEXT: s_endpgm
+;
+; GFX940-PRELOAD-4-LABEL: ptr1_i8:
+; GFX940-PRELOAD-4: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: ; %bb.0:
+; GFX940-PRELOAD-4-NEXT: s_and_b32 s0, s4, 0xff
+; GFX940-PRELOAD-4-NEXT: v_mov_b32_e32 v0, 0
+; GFX940-PRELOAD-4-NEXT: v_mov_b32_e32 v1, s0
+; GFX940-PRELOAD-4-NEXT: global_store_dword v0, v1, s[2:3] sc0 sc1
+; GFX940-PRELOAD-4-NEXT: s_endpgm
+;
+; GFX940-PRELOAD-8-LABEL: ptr1_i8:
+; GFX940-PRELOAD-8: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: ; %bb.0:
+; GFX940-PRELOAD-8-NEXT: s_and_b32 s0, s4, 0xff
+; GFX940-PRELOAD-8-NEXT: v_mov_b32_e32 v0, 0
+; GFX940-PRELOAD-8-NEXT: v_mov_b32_e32 v1, s0
+; GFX940-PRELOAD-8-NEXT: global_store_dword v0, v1, s[2:3] sc0 sc1
+; GFX940-PRELOAD-8-NEXT: s_endpgm
+;
+; GFX90a-NO-PRELOAD-LABEL: ptr1_i8:
+; GFX90a-NO-PRELOAD: ; %bb.0:
+; GFX90a-NO-PRELOAD-NEXT: s_load_dword s2, s[4:5], 0x8
+; GFX90a-NO-PRELOAD-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x0
+; GFX90a-NO-PRELOAD-NEXT: v_mov_b32_e32 v0, 0
+; GFX90a-NO-PRELOAD-NEXT: s_waitcnt lgkmcnt(0)
+; GFX90a-NO-PRELOAD-NEXT: s_and_b32 s2, s2, 0xff
+; GFX90a-NO-PRELOAD-NEXT: v_mov_b32_e32 v1, s2
+; GFX90a-NO-PRELOAD-NEXT: global_store_dword v0, v1, s[0:1]
+; GFX90a-NO-PRELOAD-NEXT: s_endpgm
+;
+; GFX90a-PRELOAD-1-LABEL: ptr1_i8:
+; GFX90a-PRELOAD-1: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: ; %bb.0:
+; GFX90a-PRELOAD-1-NEXT: s_load_dword s0, s[4:5], 0x8
+; GFX90a-PRELOAD-1-NEXT: v_mov_b32_e32 v0, 0
+; GFX90a-PRELOAD-1-NEXT: s_waitcnt lgkmcnt(0)
+; GFX90a-PRELOAD-1-NEXT: s_and_b32 s0, s0, 0xff
+; GFX90a-PRELOAD-1-NEXT: v_mov_b32_e32 v1, s0
+; GFX90a-PRELOAD-1-NEXT: global_store_dword v0, v1, s[6:7]
+; GFX90a-PRELOAD-1-NEXT: s_endpgm
+;
+; GFX90a-PRELOAD-2-LABEL: ptr1_i8:
+; GFX90a-PRELOAD-2: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: ; %bb.0:
+; GFX90a-PRELOAD-2-NEXT: s_and_b32 s0, s8, 0xff
+; GFX90a-PRELOAD-2-NEXT: v_mov_b32_e32 v0, 0
+; GFX90a-PRELOAD-2-NEXT: v_mov_b32_e32 v1, s0
+; GFX90a-PRELOAD-2-NEXT: global_store_dword v0, v1, s[6:7]
+; GFX90a-PRELOAD-2-NEXT: s_endpgm
+;
+; GFX90a-PRELOAD-4-LABEL: ptr1_i8:
+; GFX90a-PRELOAD-4: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: ; %bb.0:
+; GFX90a-PRELOAD-4-NEXT: s_and_b32 s0, s8, 0xff
+; GFX90a-PRELOAD-4-NEXT: v_mov_b32_e32 v0, 0
+; GFX90a-PRELOAD-4-NEXT: v_mov_b32_e32 v1, s0
+; GFX90a-PRELOAD-4-NEXT: global_store_dword v0, v1, s[6:7]
+; GFX90a-PRELOAD-4-NEXT: s_endpgm
+;
+; GFX90a-PRELOAD-8-LABEL: ptr1_i8:
+; GFX90a-PRELOAD-8: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: ; %bb.0:
+; GFX90a-PRELOAD-8-NEXT: s_and_b32 s0, s8, 0xff
+; GFX90a-PRELOAD-8-NEXT: v_mov_b32_e32 v0, 0
+; GFX90a-PRELOAD-8-NEXT: v_mov_b32_e32 v1, s0
+; GFX90a-PRELOAD-8-NEXT: global_store_dword v0, v1, s[6:7]
+; GFX90a-PRELOAD-8-NEXT: s_endpgm
%ext = zext i8 %arg0 to i32
store i32 %ext, ptr addrspace(1) %out
ret void
}
define amdgpu_kernel void @ptr1_i8_zext_arg(ptr addrspace(1) %out, i8 zeroext %arg0) {
-; NO-PRELOAD-LABEL: ptr1_i8_zext_arg:
-; NO-PRELOAD: ; %bb.0:
-; NO-PRELOAD-NEXT: s_load_dword s4, s[0:1], 0x8
-; NO-PRELOAD-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x0
-; NO-PRELOAD-NEXT: v_mov_b32_e32 v0, 0
-; NO-PRELOAD-NEXT: s_waitcnt lgkmcnt(0)
-; NO-PRELOAD-NEXT: s_and_b32 s0, s4, 0xff
-; NO-PRELOAD-NEXT: v_mov_b32_e32 v1, s0
-; NO-PRELOAD-NEXT: global_store_dword v0, v1, s[2:3] sc0 sc1
-; NO-PRELOAD-NEXT: s_endpgm
-;
-; PRELOAD-1-LABEL: ptr1_i8_zext_arg:
-; PRELOAD-1: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: ; %bb.0:
-; PRELOAD-1-NEXT: s_load_dword s0, s[0:1], 0x8
-; PRELOAD-1-NEXT: v_mov_b32_e32 v0, 0
-; PRELOAD-1-NEXT: s_waitcnt lgkmcnt(0)
-; PRELOAD-1-NEXT: s_and_b32 s0, s0, 0xff
-; PRELOAD-1-NEXT: v_mov_b32_e32 v1, s0
-; PRELOAD-1-NEXT: global_store_dword v0, v1, s[2:3] sc0 sc1
-; PRELOAD-1-NEXT: s_endpgm
-;
-; PRELOAD-2-LABEL: ptr1_i8_zext_arg:
-; PRELOAD-2: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: ; %bb.0:
-; PRELOAD-2-NEXT: s_mov_b32 s0, 0xffff
-; PRELOAD-2-NEXT: v_mov_b32_e32 v1, s4
-; PRELOAD-2-NEXT: v_mov_b32_e32 v0, 0
-; PRELOAD-2-NEXT: v_and_b32_sdwa v1, s0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; PRELOAD-2-NEXT: global_store_dword v0, v1, s[2:3] sc0 sc1
-; PRELOAD-2-NEXT: s_endpgm
-;
-; PRELOAD-4-LABEL: ptr1_i8_zext_arg:
-; PRELOAD-4: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: ; %bb.0:
-; PRELOAD-4-NEXT: s_mov_b32 s0, 0xffff
-; PRELOAD-4-NEXT: v_mov_b32_e32 v1, s4
-; PRELOAD-4-NEXT: v_mov_b32_e32 v0, 0
-; PRELOAD-4-NEXT: v_and_b32_sdwa v1, s0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; PRELOAD-4-NEXT: global_store_dword v0, v1, s[2:3] sc0 sc1
-; PRELOAD-4-NEXT: s_endpgm
-;
-; PRELOAD-8-LABEL: ptr1_i8_zext_arg:
-; PRELOAD-8: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: ; %bb.0:
-; PRELOAD-8-NEXT: s_mov_b32 s0, 0xffff
-; PRELOAD-8-NEXT: v_mov_b32_e32 v1, s4
-; PRELOAD-8-NEXT: v_mov_b32_e32 v0, 0
-; PRELOAD-8-NEXT: v_and_b32_sdwa v1, s0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; PRELOAD-8-NEXT: global_store_dword v0, v1, s[2:3] sc0 sc1
-; PRELOAD-8-NEXT: s_endpgm
+; GFX940-NO-PRELOAD-LABEL: ptr1_i8_zext_arg:
+; GFX940-NO-PRELOAD: ; %bb.0:
+; GFX940-NO-PRELOAD-NEXT: s_load_dword s4, s[0:1], 0x8
+; GFX940-NO-PRELOAD-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x0
+; GFX940-NO-PRELOAD-NEXT: v_mov_b32_e32 v0, 0
+; GFX940-NO-PRELOAD-NEXT: s_waitcnt lgkmcnt(0)
+; GFX940-NO-PRELOAD-NEXT: s_and_b32 s0, s4, 0xff
+; GFX940-NO-PRELOAD-NEXT: v_mov_b32_e32 v1, s0
+; GFX940-NO-PRELOAD-NEXT: global_store_dword v0, v1, s[2:3] sc0 sc1
+; GFX940-NO-PRELOAD-NEXT: s_endpgm
+;
+; GFX940-PRELOAD-1-LABEL: ptr1_i8_zext_arg:
+; GFX940-PRELOAD-1: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: ; %bb.0:
+; GFX940-PRELOAD-1-NEXT: s_load_dword s0, s[0:1], 0x8
+; GFX940-PRELOAD-1-NEXT: v_mov_b32_e32 v0, 0
+; GFX940-PRELOAD-1-NEXT: s_waitcnt lgkmcnt(0)
+; GFX940-PRELOAD-1-NEXT: s_and_b32 s0, s0, 0xff
+; GFX940-PRELOAD-1-NEXT: v_mov_b32_e32 v1, s0
+; GFX940-PRELOAD-1-NEXT: global_store_dword v0, v1, s[2:3] sc0 sc1
+; GFX940-PRELOAD-1-NEXT: s_endpgm
+;
+; GFX940-PRELOAD-2-LABEL: ptr1_i8_zext_arg:
+; GFX940-PRELOAD-2: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: ; %bb.0:
+; GFX940-PRELOAD-2-NEXT: s_mov_b32 s0, 0xffff
+; GFX940-PRELOAD-2-NEXT: v_mov_b32_e32 v1, s4
+; GFX940-PRELOAD-2-NEXT: v_mov_b32_e32 v0, 0
+; GFX940-PRELOAD-2-NEXT: v_and_b32_sdwa v1, s0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX940-PRELOAD-2-NEXT: global_store_dword v0, v1, s[2:3] sc0 sc1
+; GFX940-PRELOAD-2-NEXT: s_endpgm
+;
+; GFX940-PRELOAD-4-LABEL: ptr1_i8_zext_arg:
+; GFX940-PRELOAD-4: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: ; %bb.0:
+; GFX940-PRELOAD-4-NEXT: s_mov_b32 s0, 0xffff
+; GFX940-PRELOAD-4-NEXT: v_mov_b32_e32 v1, s4
+; GFX940-PRELOAD-4-NEXT: v_mov_b32_e32 v0, 0
+; GFX940-PRELOAD-4-NEXT: v_and_b32_sdwa v1, s0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX940-PRELOAD-4-NEXT: global_store_dword v0, v1, s[2:3] sc0 sc1
+; GFX940-PRELOAD-4-NEXT: s_endpgm
+;
+; GFX940-PRELOAD-8-LABEL: ptr1_i8_zext_arg:
+; GFX940-PRELOAD-8: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: ; %bb.0:
+; GFX940-PRELOAD-8-NEXT: s_mov_b32 s0, 0xffff
+; GFX940-PRELOAD-8-NEXT: v_mov_b32_e32 v1, s4
+; GFX940-PRELOAD-8-NEXT: v_mov_b32_e32 v0, 0
+; GFX940-PRELOAD-8-NEXT: v_and_b32_sdwa v1, s0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX940-PRELOAD-8-NEXT: global_store_dword v0, v1, s[2:3] sc0 sc1
+; GFX940-PRELOAD-8-NEXT: s_endpgm
+;
+; GFX90a-NO-PRELOAD-LABEL: ptr1_i8_zext_arg:
+; GFX90a-NO-PRELOAD: ; %bb.0:
+; GFX90a-NO-PRELOAD-NEXT: s_load_dword s2, s[4:5], 0x8
+; GFX90a-NO-PRELOAD-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x0
+; GFX90a-NO-PRELOAD-NEXT: v_mov_b32_e32 v0, 0
+; GFX90a-NO-PRELOAD-NEXT: s_waitcnt lgkmcnt(0)
+; GFX90a-NO-PRELOAD-NEXT: s_and_b32 s2, s2, 0xff
+; GFX90a-NO-PRELOAD-NEXT: v_mov_b32_e32 v1, s2
+; GFX90a-NO-PRELOAD-NEXT: global_store_dword v0, v1, s[0:1]
+; GFX90a-NO-PRELOAD-NEXT: s_endpgm
+;
+; GFX90a-PRELOAD-1-LABEL: ptr1_i8_zext_arg:
+; GFX90a-PRELOAD-1: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: ; %bb.0:
+; GFX90a-PRELOAD-1-NEXT: s_load_dword s0, s[4:5], 0x8
+; GFX90a-PRELOAD-1-NEXT: v_mov_b32_e32 v0, 0
+; GFX90a-PRELOAD-1-NEXT: s_waitcnt lgkmcnt(0)
+; GFX90a-PRELOAD-1-NEXT: s_and_b32 s0, s0, 0xff
+; GFX90a-PRELOAD-1-NEXT: v_mov_b32_e32 v1, s0
+; GFX90a-PRELOAD-1-NEXT: global_store_dword v0, v1, s[6:7]
+; GFX90a-PRELOAD-1-NEXT: s_endpgm
+;
+; GFX90a-PRELOAD-2-LABEL: ptr1_i8_zext_arg:
+; GFX90a-PRELOAD-2: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: ; %bb.0:
+; GFX90a-PRELOAD-2-NEXT: s_mov_b32 s0, 0xffff
+; GFX90a-PRELOAD-2-NEXT: v_mov_b32_e32 v1, s8
+; GFX90a-PRELOAD-2-NEXT: v_mov_b32_e32 v0, 0
+; GFX90a-PRELOAD-2-NEXT: v_and_b32_sdwa v1, s0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX90a-PRELOAD-2-NEXT: global_store_dword v0, v1, s[6:7]
+; GFX90a-PRELOAD-2-NEXT: s_endpgm
+;
+; GFX90a-PRELOAD-4-LABEL: ptr1_i8_zext_arg:
+; GFX90a-PRELOAD-4: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: ; %bb.0:
+; GFX90a-PRELOAD-4-NEXT: s_mov_b32 s0, 0xffff
+; GFX90a-PRELOAD-4-NEXT: v_mov_b32_e32 v1, s8
+; GFX90a-PRELOAD-4-NEXT: v_mov_b32_e32 v0, 0
+; GFX90a-PRELOAD-4-NEXT: v_and_b32_sdwa v1, s0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX90a-PRELOAD-4-NEXT: global_store_dword v0, v1, s[6:7]
+; GFX90a-PRELOAD-4-NEXT: s_endpgm
+;
+; GFX90a-PRELOAD-8-LABEL: ptr1_i8_zext_arg:
+; GFX90a-PRELOAD-8: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: ; %bb.0:
+; GFX90a-PRELOAD-8-NEXT: s_mov_b32 s0, 0xffff
+; GFX90a-PRELOAD-8-NEXT: v_mov_b32_e32 v1, s8
+; GFX90a-PRELOAD-8-NEXT: v_mov_b32_e32 v0, 0
+; GFX90a-PRELOAD-8-NEXT: v_and_b32_sdwa v1, s0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX90a-PRELOAD-8-NEXT: global_store_dword v0, v1, s[6:7]
+; GFX90a-PRELOAD-8-NEXT: s_endpgm
%ext = zext i8 %arg0 to i32
store i32 %ext, ptr addrspace(1) %out, align 4
ret void
}
define amdgpu_kernel void @ptr1_i16_preload_arg(ptr addrspace(1) %out, i16 %arg0) {
-; NO-PRELOAD-LABEL: ptr1_i16_preload_arg:
-; NO-PRELOAD: ; %bb.0:
-; NO-PRELOAD-NEXT: s_load_dword s4, s[0:1], 0x8
-; NO-PRELOAD-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x0
-; NO-PRELOAD-NEXT: v_mov_b32_e32 v0, 0
-; NO-PRELOAD-NEXT: s_waitcnt lgkmcnt(0)
-; NO-PRELOAD-NEXT: s_and_b32 s0, s4, 0xffff
-; NO-PRELOAD-NEXT: v_mov_b32_e32 v1, s0
-; NO-PRELOAD-NEXT: global_store_dword v0, v1, s[2:3] sc0 sc1
-; NO-PRELOAD-NEXT: s_endpgm
-;
-; PRELOAD-1-LABEL: ptr1_i16_preload_arg:
-; PRELOAD-1: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: ; %bb.0:
-; PRELOAD-1-NEXT: s_load_dword s0, s[0:1], 0x8
-; PRELOAD-1-NEXT: v_mov_b32_e32 v0, 0
-; PRELOAD-1-NEXT: s_waitcnt lgkmcnt(0)
-; PRELOAD-1-NEXT: s_and_b32 s0, s0, 0xffff
-; PRELOAD-1-NEXT: v_mov_b32_e32 v1, s0
-; PRELOAD-1-NEXT: global_store_dword v0, v1, s[2:3] sc0 sc1
-; PRELOAD-1-NEXT: s_endpgm
-;
-; PRELOAD-2-LABEL: ptr1_i16_preload_arg:
-; PRELOAD-2: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: ; %bb.0:
-; PRELOAD-2-NEXT: s_and_b32 s0, s4, 0xffff
-; PRELOAD-2-NEXT: v_mov_b32_e32 v0, 0
-; PRELOAD-2-NEXT: v_mov_b32_e32 v1, s0
-; PRELOAD-2-NEXT: global_store_dword v0, v1, s[2:3] sc0 sc1
-; PRELOAD-2-NEXT: s_endpgm
-;
-; PRELOAD-4-LABEL: ptr1_i16_preload_arg:
-; PRELOAD-4: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: ; %bb.0:
-; PRELOAD-4-NEXT: s_and_b32 s0, s4, 0xffff
-; PRELOAD-4-NEXT: v_mov_b32_e32 v0, 0
-; PRELOAD-4-NEXT: v_mov_b32_e32 v1, s0
-; PRELOAD-4-NEXT: global_store_dword v0, v1, s[2:3] sc0 sc1
-; PRELOAD-4-NEXT: s_endpgm
-;
-; PRELOAD-8-LABEL: ptr1_i16_preload_arg:
-; PRELOAD-8: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: ; %bb.0:
-; PRELOAD-8-NEXT: s_and_b32 s0, s4, 0xffff
-; PRELOAD-8-NEXT: v_mov_b32_e32 v0, 0
-; PRELOAD-8-NEXT: v_mov_b32_e32 v1, s0
-; PRELOAD-8-NEXT: global_store_dword v0, v1, s[2:3] sc0 sc1
-; PRELOAD-8-NEXT: s_endpgm
+; GFX940-NO-PRELOAD-LABEL: ptr1_i16_preload_arg:
+; GFX940-NO-PRELOAD: ; %bb.0:
+; GFX940-NO-PRELOAD-NEXT: s_load_dword s4, s[0:1], 0x8
+; GFX940-NO-PRELOAD-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x0
+; GFX940-NO-PRELOAD-NEXT: v_mov_b32_e32 v0, 0
+; GFX940-NO-PRELOAD-NEXT: s_waitcnt lgkmcnt(0)
+; GFX940-NO-PRELOAD-NEXT: s_and_b32 s0, s4, 0xffff
+; GFX940-NO-PRELOAD-NEXT: v_mov_b32_e32 v1, s0
+; GFX940-NO-PRELOAD-NEXT: global_store_dword v0, v1, s[2:3] sc0 sc1
+; GFX940-NO-PRELOAD-NEXT: s_endpgm
+;
+; GFX940-PRELOAD-1-LABEL: ptr1_i16_preload_arg:
+; GFX940-PRELOAD-1: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: ; %bb.0:
+; GFX940-PRELOAD-1-NEXT: s_load_dword s0, s[0:1], 0x8
+; GFX940-PRELOAD-1-NEXT: v_mov_b32_e32 v0, 0
+; GFX940-PRELOAD-1-NEXT: s_waitcnt lgkmcnt(0)
+; GFX940-PRELOAD-1-NEXT: s_and_b32 s0, s0, 0xffff
+; GFX940-PRELOAD-1-NEXT: v_mov_b32_e32 v1, s0
+; GFX940-PRELOAD-1-NEXT: global_store_dword v0, v1, s[2:3] sc0 sc1
+; GFX940-PRELOAD-1-NEXT: s_endpgm
+;
+; GFX940-PRELOAD-2-LABEL: ptr1_i16_preload_arg:
+; GFX940-PRELOAD-2: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: ; %bb.0:
+; GFX940-PRELOAD-2-NEXT: s_and_b32 s0, s4, 0xffff
+; GFX940-PRELOAD-2-NEXT: v_mov_b32_e32 v0, 0
+; GFX940-PRELOAD-2-NEXT: v_mov_b32_e32 v1, s0
+; GFX940-PRELOAD-2-NEXT: global_store_dword v0, v1, s[2:3] sc0 sc1
+; GFX940-PRELOAD-2-NEXT: s_endpgm
+;
+; GFX940-PRELOAD-4-LABEL: ptr1_i16_preload_arg:
+; GFX940-PRELOAD-4: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: ; %bb.0:
+; GFX940-PRELOAD-4-NEXT: s_and_b32 s0, s4, 0xffff
+; GFX940-PRELOAD-4-NEXT: v_mov_b32_e32 v0, 0
+; GFX940-PRELOAD-4-NEXT: v_mov_b32_e32 v1, s0
+; GFX940-PRELOAD-4-NEXT: global_store_dword v0, v1, s[2:3] sc0 sc1
+; GFX940-PRELOAD-4-NEXT: s_endpgm
+;
+; GFX940-PRELOAD-8-LABEL: ptr1_i16_preload_arg:
+; GFX940-PRELOAD-8: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: ; %bb.0:
+; GFX940-PRELOAD-8-NEXT: s_and_b32 s0, s4, 0xffff
+; GFX940-PRELOAD-8-NEXT: v_mov_b32_e32 v0, 0
+; GFX940-PRELOAD-8-NEXT: v_mov_b32_e32 v1, s0
+; GFX940-PRELOAD-8-NEXT: global_store_dword v0, v1, s[2:3] sc0 sc1
+; GFX940-PRELOAD-8-NEXT: s_endpgm
+;
+; GFX90a-NO-PRELOAD-LABEL: ptr1_i16_preload_arg:
+; GFX90a-NO-PRELOAD: ; %bb.0:
+; GFX90a-NO-PRELOAD-NEXT: s_load_dword s2, s[4:5], 0x8
+; GFX90a-NO-PRELOAD-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x0
+; GFX90a-NO-PRELOAD-NEXT: v_mov_b32_e32 v0, 0
+; GFX90a-NO-PRELOAD-NEXT: s_waitcnt lgkmcnt(0)
+; GFX90a-NO-PRELOAD-NEXT: s_and_b32 s2, s2, 0xffff
+; GFX90a-NO-PRELOAD-NEXT: v_mov_b32_e32 v1, s2
+; GFX90a-NO-PRELOAD-NEXT: global_store_dword v0, v1, s[0:1]
+; GFX90a-NO-PRELOAD-NEXT: s_endpgm
+;
+; GFX90a-PRELOAD-1-LABEL: ptr1_i16_preload_arg:
+; GFX90a-PRELOAD-1: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: ; %bb.0:
+; GFX90a-PRELOAD-1-NEXT: s_load_dword s0, s[4:5], 0x8
+; GFX90a-PRELOAD-1-NEXT: v_mov_b32_e32 v0, 0
+; GFX90a-PRELOAD-1-NEXT: s_waitcnt lgkmcnt(0)
+; GFX90a-PRELOAD-1-NEXT: s_and_b32 s0, s0, 0xffff
+; GFX90a-PRELOAD-1-NEXT: v_mov_b32_e32 v1, s0
+; GFX90a-PRELOAD-1-NEXT: global_store_dword v0, v1, s[6:7]
+; GFX90a-PRELOAD-1-NEXT: s_endpgm
+;
+; GFX90a-PRELOAD-2-LABEL: ptr1_i16_preload_arg:
+; GFX90a-PRELOAD-2: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: ; %bb.0:
+; GFX90a-PRELOAD-2-NEXT: s_and_b32 s0, s8, 0xffff
+; GFX90a-PRELOAD-2-NEXT: v_mov_b32_e32 v0, 0
+; GFX90a-PRELOAD-2-NEXT: v_mov_b32_e32 v1, s0
+; GFX90a-PRELOAD-2-NEXT: global_store_dword v0, v1, s[6:7]
+; GFX90a-PRELOAD-2-NEXT: s_endpgm
+;
+; GFX90a-PRELOAD-4-LABEL: ptr1_i16_preload_arg:
+; GFX90a-PRELOAD-4: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: ; %bb.0:
+; GFX90a-PRELOAD-4-NEXT: s_and_b32 s0, s8, 0xffff
+; GFX90a-PRELOAD-4-NEXT: v_mov_b32_e32 v0, 0
+; GFX90a-PRELOAD-4-NEXT: v_mov_b32_e32 v1, s0
+; GFX90a-PRELOAD-4-NEXT: global_store_dword v0, v1, s[6:7]
+; GFX90a-PRELOAD-4-NEXT: s_endpgm
+;
+; GFX90a-PRELOAD-8-LABEL: ptr1_i16_preload_arg:
+; GFX90a-PRELOAD-8: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: ; %bb.0:
+; GFX90a-PRELOAD-8-NEXT: s_and_b32 s0, s8, 0xffff
+; GFX90a-PRELOAD-8-NEXT: v_mov_b32_e32 v0, 0
+; GFX90a-PRELOAD-8-NEXT: v_mov_b32_e32 v1, s0
+; GFX90a-PRELOAD-8-NEXT: global_store_dword v0, v1, s[6:7]
+; GFX90a-PRELOAD-8-NEXT: s_endpgm
%ext = zext i16 %arg0 to i32
store i32 %ext, ptr addrspace(1) %out, align 4
ret void
}
define amdgpu_kernel void @ptr1_i32_preload_arg(ptr addrspace(1) %out, i32 %arg0) {
-; NO-PRELOAD-LABEL: ptr1_i32_preload_arg:
-; NO-PRELOAD: ; %bb.0:
-; NO-PRELOAD-NEXT: s_load_dword s4, s[0:1], 0x8
-; NO-PRELOAD-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x0
-; NO-PRELOAD-NEXT: v_mov_b32_e32 v0, 0
-; NO-PRELOAD-NEXT: s_waitcnt lgkmcnt(0)
-; NO-PRELOAD-NEXT: v_mov_b32_e32 v1, s4
-; NO-PRELOAD-NEXT: global_store_dword v0, v1, s[2:3] sc0 sc1
-; NO-PRELOAD-NEXT: s_endpgm
-;
-; PRELOAD-1-LABEL: ptr1_i32_preload_arg:
-; PRELOAD-1: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: ; %bb.0:
-; PRELOAD-1-NEXT: s_load_dword s0, s[0:1], 0x8
-; PRELOAD-1-NEXT: v_mov_b32_e32 v0, 0
-; PRELOAD-1-NEXT: s_waitcnt lgkmcnt(0)
-; PRELOAD-1-NEXT: v_mov_b32_e32 v1, s0
-; PRELOAD-1-NEXT: global_store_dword v0, v1, s[2:3] sc0 sc1
-; PRELOAD-1-NEXT: s_endpgm
-;
-; PRELOAD-2-LABEL: ptr1_i32_preload_arg:
-; PRELOAD-2: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: ; %bb.0:
-; PRELOAD-2-NEXT: v_mov_b32_e32 v0, 0
-; PRELOAD-2-NEXT: v_mov_b32_e32 v1, s4
-; PRELOAD-2-NEXT: global_store_dword v0, v1, s[2:3] sc0 sc1
-; PRELOAD-2-NEXT: s_endpgm
-;
-; PRELOAD-4-LABEL: ptr1_i32_preload_arg:
-; PRELOAD-4: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: ; %bb.0:
-; PRELOAD-4-NEXT: v_mov_b32_e32 v0, 0
-; PRELOAD-4-NEXT: v_mov_b32_e32 v1, s4
-; PRELOAD-4-NEXT: global_store_dword v0, v1, s[2:3] sc0 sc1
-; PRELOAD-4-NEXT: s_endpgm
-;
-; PRELOAD-8-LABEL: ptr1_i32_preload_arg:
-; PRELOAD-8: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: ; %bb.0:
-; PRELOAD-8-NEXT: v_mov_b32_e32 v0, 0
-; PRELOAD-8-NEXT: v_mov_b32_e32 v1, s4
-; PRELOAD-8-NEXT: global_store_dword v0, v1, s[2:3] sc0 sc1
-; PRELOAD-8-NEXT: s_endpgm
+; GFX940-NO-PRELOAD-LABEL: ptr1_i32_preload_arg:
+; GFX940-NO-PRELOAD: ; %bb.0:
+; GFX940-NO-PRELOAD-NEXT: s_load_dword s4, s[0:1], 0x8
+; GFX940-NO-PRELOAD-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x0
+; GFX940-NO-PRELOAD-NEXT: v_mov_b32_e32 v0, 0
+; GFX940-NO-PRELOAD-NEXT: s_waitcnt lgkmcnt(0)
+; GFX940-NO-PRELOAD-NEXT: v_mov_b32_e32 v1, s4
+; GFX940-NO-PRELOAD-NEXT: global_store_dword v0, v1, s[2:3] sc0 sc1
+; GFX940-NO-PRELOAD-NEXT: s_endpgm
+;
+; GFX940-PRELOAD-1-LABEL: ptr1_i32_preload_arg:
+; GFX940-PRELOAD-1: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: ; %bb.0:
+; GFX940-PRELOAD-1-NEXT: s_load_dword s0, s[0:1], 0x8
+; GFX940-PRELOAD-1-NEXT: v_mov_b32_e32 v0, 0
+; GFX940-PRELOAD-1-NEXT: s_waitcnt lgkmcnt(0)
+; GFX940-PRELOAD-1-NEXT: v_mov_b32_e32 v1, s0
+; GFX940-PRELOAD-1-NEXT: global_store_dword v0, v1, s[2:3] sc0 sc1
+; GFX940-PRELOAD-1-NEXT: s_endpgm
+;
+; GFX940-PRELOAD-2-LABEL: ptr1_i32_preload_arg:
+; GFX940-PRELOAD-2: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: ; %bb.0:
+; GFX940-PRELOAD-2-NEXT: v_mov_b32_e32 v0, 0
+; GFX940-PRELOAD-2-NEXT: v_mov_b32_e32 v1, s4
+; GFX940-PRELOAD-2-NEXT: global_store_dword v0, v1, s[2:3] sc0 sc1
+; GFX940-PRELOAD-2-NEXT: s_endpgm
+;
+; GFX940-PRELOAD-4-LABEL: ptr1_i32_preload_arg:
+; GFX940-PRELOAD-4: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: ; %bb.0:
+; GFX940-PRELOAD-4-NEXT: v_mov_b32_e32 v0, 0
+; GFX940-PRELOAD-4-NEXT: v_mov_b32_e32 v1, s4
+; GFX940-PRELOAD-4-NEXT: global_store_dword v0, v1, s[2:3] sc0 sc1
+; GFX940-PRELOAD-4-NEXT: s_endpgm
+;
+; GFX940-PRELOAD-8-LABEL: ptr1_i32_preload_arg:
+; GFX940-PRELOAD-8: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: ; %bb.0:
+; GFX940-PRELOAD-8-NEXT: v_mov_b32_e32 v0, 0
+; GFX940-PRELOAD-8-NEXT: v_mov_b32_e32 v1, s4
+; GFX940-PRELOAD-8-NEXT: global_store_dword v0, v1, s[2:3] sc0 sc1
+; GFX940-PRELOAD-8-NEXT: s_endpgm
+;
+; GFX90a-NO-PRELOAD-LABEL: ptr1_i32_preload_arg:
+; GFX90a-NO-PRELOAD: ; %bb.0:
+; GFX90a-NO-PRELOAD-NEXT: s_load_dword s2, s[4:5], 0x8
+; GFX90a-NO-PRELOAD-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x0
+; GFX90a-NO-PRELOAD-NEXT: v_mov_b32_e32 v0, 0
+; GFX90a-NO-PRELOAD-NEXT: s_waitcnt lgkmcnt(0)
+; GFX90a-NO-PRELOAD-NEXT: v_mov_b32_e32 v1, s2
+; GFX90a-NO-PRELOAD-NEXT: global_store_dword v0, v1, s[0:1]
+; GFX90a-NO-PRELOAD-NEXT: s_endpgm
+;
+; GFX90a-PRELOAD-1-LABEL: ptr1_i32_preload_arg:
+; GFX90a-PRELOAD-1: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: ; %bb.0:
+; GFX90a-PRELOAD-1-NEXT: s_load_dword s0, s[4:5], 0x8
+; GFX90a-PRELOAD-1-NEXT: v_mov_b32_e32 v0, 0
+; GFX90a-PRELOAD-1-NEXT: s_waitcnt lgkmcnt(0)
+; GFX90a-PRELOAD-1-NEXT: v_mov_b32_e32 v1, s0
+; GFX90a-PRELOAD-1-NEXT: global_store_dword v0, v1, s[6:7]
+; GFX90a-PRELOAD-1-NEXT: s_endpgm
+;
+; GFX90a-PRELOAD-2-LABEL: ptr1_i32_preload_arg:
+; GFX90a-PRELOAD-2: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: ; %bb.0:
+; GFX90a-PRELOAD-2-NEXT: v_mov_b32_e32 v0, 0
+; GFX90a-PRELOAD-2-NEXT: v_mov_b32_e32 v1, s8
+; GFX90a-PRELOAD-2-NEXT: global_store_dword v0, v1, s[6:7]
+; GFX90a-PRELOAD-2-NEXT: s_endpgm
+;
+; GFX90a-PRELOAD-4-LABEL: ptr1_i32_preload_arg:
+; GFX90a-PRELOAD-4: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: ; %bb.0:
+; GFX90a-PRELOAD-4-NEXT: v_mov_b32_e32 v0, 0
+; GFX90a-PRELOAD-4-NEXT: v_mov_b32_e32 v1, s8
+; GFX90a-PRELOAD-4-NEXT: global_store_dword v0, v1, s[6:7]
+; GFX90a-PRELOAD-4-NEXT: s_endpgm
+;
+; GFX90a-PRELOAD-8-LABEL: ptr1_i32_preload_arg:
+; GFX90a-PRELOAD-8: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: ; %bb.0:
+; GFX90a-PRELOAD-8-NEXT: v_mov_b32_e32 v0, 0
+; GFX90a-PRELOAD-8-NEXT: v_mov_b32_e32 v1, s8
+; GFX90a-PRELOAD-8-NEXT: global_store_dword v0, v1, s[6:7]
+; GFX90a-PRELOAD-8-NEXT: s_endpgm
store i32 %arg0, ptr addrspace(1) %out
ret void
}
-; Check alignment on the second preloaded arg.
define amdgpu_kernel void @i32_ptr1_i32_preload_arg(i32 %arg0, ptr addrspace(1) %out, i32 %arg1) {
-; NO-PRELOAD-LABEL: i32_ptr1_i32_preload_arg:
-; NO-PRELOAD: ; %bb.0:
-; NO-PRELOAD-NEXT: s_load_dword s4, s[0:1], 0x10
-; NO-PRELOAD-NEXT: s_load_dword s5, s[0:1], 0x0
-; NO-PRELOAD-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x8
-; NO-PRELOAD-NEXT: v_mov_b32_e32 v0, 0
-; NO-PRELOAD-NEXT: s_waitcnt lgkmcnt(0)
-; NO-PRELOAD-NEXT: s_add_i32 s0, s5, s4
-; NO-PRELOAD-NEXT: v_mov_b32_e32 v1, s0
-; NO-PRELOAD-NEXT: global_store_dword v0, v1, s[2:3] sc0 sc1
-; NO-PRELOAD-NEXT: s_endpgm
-;
-; PRELOAD-1-LABEL: i32_ptr1_i32_preload_arg:
-; PRELOAD-1: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: ; %bb.0:
-; PRELOAD-1-NEXT: s_load_dword s3, s[0:1], 0x10
-; PRELOAD-1-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x8
-; PRELOAD-1-NEXT: v_mov_b32_e32 v0, 0
-; PRELOAD-1-NEXT: s_waitcnt lgkmcnt(0)
-; PRELOAD-1-NEXT: s_add_i32 s0, s2, s3
-; PRELOAD-1-NEXT: v_mov_b32_e32 v1, s0
-; PRELOAD-1-NEXT: global_store_dword v0, v1, s[4:5] sc0 sc1
-; PRELOAD-1-NEXT: s_endpgm
-;
-; PRELOAD-2-LABEL: i32_ptr1_i32_preload_arg:
-; PRELOAD-2: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: ; %bb.0:
-; PRELOAD-2-NEXT: s_load_dword s0, s[0:1], 0x10
-; PRELOAD-2-NEXT: v_mov_b32_e32 v0, 0
-; PRELOAD-2-NEXT: s_waitcnt lgkmcnt(0)
-; PRELOAD-2-NEXT: s_add_i32 s0, s2, s0
-; PRELOAD-2-NEXT: v_mov_b32_e32 v1, s0
-; PRELOAD-2-NEXT: global_store_dword v0, v1, s[4:5] sc0 sc1
-; PRELOAD-2-NEXT: s_endpgm
-;
-; PRELOAD-4-LABEL: i32_ptr1_i32_preload_arg:
-; PRELOAD-4: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: ; %bb.0:
-; PRELOAD-4-NEXT: s_add_i32 s0, s2, s6
-; PRELOAD-4-NEXT: v_mov_b32_e32 v0, 0
-; PRELOAD-4-NEXT: v_mov_b32_e32 v1, s0
-; PRELOAD-4-NEXT: global_store_dword v0, v1, s[4:5] sc0 sc1
-; PRELOAD-4-NEXT: s_endpgm
-;
-; PRELOAD-8-LABEL: i32_ptr1_i32_preload_arg:
-; PRELOAD-8: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: ; %bb.0:
-; PRELOAD-8-NEXT: s_add_i32 s0, s2, s6
-; PRELOAD-8-NEXT: v_mov_b32_e32 v0, 0
-; PRELOAD-8-NEXT: v_mov_b32_e32 v1, s0
-; PRELOAD-8-NEXT: global_store_dword v0, v1, s[4:5] sc0 sc1
-; PRELOAD-8-NEXT: s_endpgm
+; GFX940-NO-PRELOAD-LABEL: i32_ptr1_i32_preload_arg:
+; GFX940-NO-PRELOAD: ; %bb.0:
+; GFX940-NO-PRELOAD-NEXT: s_load_dword s4, s[0:1], 0x10
+; GFX940-NO-PRELOAD-NEXT: s_load_dword s5, s[0:1], 0x0
+; GFX940-NO-PRELOAD-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x8
+; GFX940-NO-PRELOAD-NEXT: v_mov_b32_e32 v0, 0
+; GFX940-NO-PRELOAD-NEXT: s_waitcnt lgkmcnt(0)
+; GFX940-NO-PRELOAD-NEXT: s_add_i32 s0, s5, s4
+; GFX940-NO-PRELOAD-NEXT: v_mov_b32_e32 v1, s0
+; GFX940-NO-PRELOAD-NEXT: global_store_dword v0, v1, s[2:3] sc0 sc1
+; GFX940-NO-PRELOAD-NEXT: s_endpgm
+;
+; GFX940-PRELOAD-1-LABEL: i32_ptr1_i32_preload_arg:
+; GFX940-PRELOAD-1: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: ; %bb.0:
+; GFX940-PRELOAD-1-NEXT: s_load_dword s3, s[0:1], 0x10
+; GFX940-PRELOAD-1-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x8
+; GFX940-PRELOAD-1-NEXT: v_mov_b32_e32 v0, 0
+; GFX940-PRELOAD-1-NEXT: s_waitcnt lgkmcnt(0)
+; GFX940-PRELOAD-1-NEXT: s_add_i32 s0, s2, s3
+; GFX940-PRELOAD-1-NEXT: v_mov_b32_e32 v1, s0
+; GFX940-PRELOAD-1-NEXT: global_store_dword v0, v1, s[4:5] sc0 sc1
+; GFX940-PRELOAD-1-NEXT: s_endpgm
+;
+; GFX940-PRELOAD-2-LABEL: i32_ptr1_i32_preload_arg:
+; GFX940-PRELOAD-2: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: ; %bb.0:
+; GFX940-PRELOAD-2-NEXT: s_load_dword s0, s[0:1], 0x10
+; GFX940-PRELOAD-2-NEXT: v_mov_b32_e32 v0, 0
+; GFX940-PRELOAD-2-NEXT: s_waitcnt lgkmcnt(0)
+; GFX940-PRELOAD-2-NEXT: s_add_i32 s0, s2, s0
+; GFX940-PRELOAD-2-NEXT: v_mov_b32_e32 v1, s0
+; GFX940-PRELOAD-2-NEXT: global_store_dword v0, v1, s[4:5] sc0 sc1
+; GFX940-PRELOAD-2-NEXT: s_endpgm
+;
+; GFX940-PRELOAD-4-LABEL: i32_ptr1_i32_preload_arg:
+; GFX940-PRELOAD-4: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: ; %bb.0:
+; GFX940-PRELOAD-4-NEXT: s_add_i32 s0, s2, s6
+; GFX940-PRELOAD-4-NEXT: v_mov_b32_e32 v0, 0
+; GFX940-PRELOAD-4-NEXT: v_mov_b32_e32 v1, s0
+; GFX940-PRELOAD-4-NEXT: global_store_dword v0, v1, s[4:5] sc0 sc1
+; GFX940-PRELOAD-4-NEXT: s_endpgm
+;
+; GFX940-PRELOAD-8-LABEL: i32_ptr1_i32_preload_arg:
+; GFX940-PRELOAD-8: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: ; %bb.0:
+; GFX940-PRELOAD-8-NEXT: s_add_i32 s0, s2, s6
+; GFX940-PRELOAD-8-NEXT: v_mov_b32_e32 v0, 0
+; GFX940-PRELOAD-8-NEXT: v_mov_b32_e32 v1, s0
+; GFX940-PRELOAD-8-NEXT: global_store_dword v0, v1, s[4:5] sc0 sc1
+; GFX940-PRELOAD-8-NEXT: s_endpgm
+;
+; GFX90a-NO-PRELOAD-LABEL: i32_ptr1_i32_preload_arg:
+; GFX90a-NO-PRELOAD: ; %bb.0:
+; GFX90a-NO-PRELOAD-NEXT: s_load_dword s2, s[4:5], 0x10
+; GFX90a-NO-PRELOAD-NEXT: s_load_dword s3, s[4:5], 0x0
+; GFX90a-NO-PRELOAD-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x8
+; GFX90a-NO-PRELOAD-NEXT: v_mov_b32_e32 v0, 0
+; GFX90a-NO-PRELOAD-NEXT: s_waitcnt lgkmcnt(0)
+; GFX90a-NO-PRELOAD-NEXT: s_add_i32 s2, s3, s2
+; GFX90a-NO-PRELOAD-NEXT: v_mov_b32_e32 v1, s2
+; GFX90a-NO-PRELOAD-NEXT: global_store_dword v0, v1, s[0:1]
+; GFX90a-NO-PRELOAD-NEXT: s_endpgm
+;
+; GFX90a-PRELOAD-1-LABEL: i32_ptr1_i32_preload_arg:
+; GFX90a-PRELOAD-1: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: ; %bb.0:
+; GFX90a-PRELOAD-1-NEXT: s_load_dword s2, s[4:5], 0x10
+; GFX90a-PRELOAD-1-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x8
+; GFX90a-PRELOAD-1-NEXT: v_mov_b32_e32 v0, 0
+; GFX90a-PRELOAD-1-NEXT: s_waitcnt lgkmcnt(0)
+; GFX90a-PRELOAD-1-NEXT: s_add_i32 s2, s6, s2
+; GFX90a-PRELOAD-1-NEXT: v_mov_b32_e32 v1, s2
+; GFX90a-PRELOAD-1-NEXT: global_store_dword v0, v1, s[0:1]
+; GFX90a-PRELOAD-1-NEXT: s_endpgm
+;
+; GFX90a-PRELOAD-2-LABEL: i32_ptr1_i32_preload_arg:
+; GFX90a-PRELOAD-2: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: ; %bb.0:
+; GFX90a-PRELOAD-2-NEXT: s_load_dword s0, s[4:5], 0x10
+; GFX90a-PRELOAD-2-NEXT: v_mov_b32_e32 v0, 0
+; GFX90a-PRELOAD-2-NEXT: s_waitcnt lgkmcnt(0)
+; GFX90a-PRELOAD-2-NEXT: s_add_i32 s0, s6, s0
+; GFX90a-PRELOAD-2-NEXT: v_mov_b32_e32 v1, s0
+; GFX90a-PRELOAD-2-NEXT: global_store_dword v0, v1, s[8:9]
+; GFX90a-PRELOAD-2-NEXT: s_endpgm
+;
+; GFX90a-PRELOAD-4-LABEL: i32_ptr1_i32_preload_arg:
+; GFX90a-PRELOAD-4: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: ; %bb.0:
+; GFX90a-PRELOAD-4-NEXT: s_add_i32 s0, s6, s10
+; GFX90a-PRELOAD-4-NEXT: v_mov_b32_e32 v0, 0
+; GFX90a-PRELOAD-4-NEXT: v_mov_b32_e32 v1, s0
+; GFX90a-PRELOAD-4-NEXT: global_store_dword v0, v1, s[8:9]
+; GFX90a-PRELOAD-4-NEXT: s_endpgm
+;
+; GFX90a-PRELOAD-8-LABEL: i32_ptr1_i32_preload_arg:
+; GFX90a-PRELOAD-8: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: ; %bb.0:
+; GFX90a-PRELOAD-8-NEXT: s_add_i32 s0, s6, s10
+; GFX90a-PRELOAD-8-NEXT: v_mov_b32_e32 v0, 0
+; GFX90a-PRELOAD-8-NEXT: v_mov_b32_e32 v1, s0
+; GFX90a-PRELOAD-8-NEXT: global_store_dword v0, v1, s[8:9]
+; GFX90a-PRELOAD-8-NEXT: s_endpgm
%add = add i32 %arg0, %arg1
store i32 %add, ptr addrspace(1) %out
ret void
}
define amdgpu_kernel void @ptr1_i16_i16_preload_arg(ptr addrspace(1) %out, i16 %arg0, i16 %arg1) {
-; NO-PRELOAD-LABEL: ptr1_i16_i16_preload_arg:
-; NO-PRELOAD: ; %bb.0:
-; NO-PRELOAD-NEXT: s_load_dword s4, s[0:1], 0x8
-; NO-PRELOAD-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x0
-; NO-PRELOAD-NEXT: v_mov_b32_e32 v0, 0
-; NO-PRELOAD-NEXT: s_waitcnt lgkmcnt(0)
-; NO-PRELOAD-NEXT: s_lshr_b32 s0, s4, 16
-; NO-PRELOAD-NEXT: s_and_b32 s1, s4, 0xffff
-; NO-PRELOAD-NEXT: s_add_i32 s0, s1, s0
-; NO-PRELOAD-NEXT: v_mov_b32_e32 v1, s0
-; NO-PRELOAD-NEXT: global_store_dword v0, v1, s[2:3] sc0 sc1
-; NO-PRELOAD-NEXT: s_endpgm
-;
-; PRELOAD-1-LABEL: ptr1_i16_i16_preload_arg:
-; PRELOAD-1: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: ; %bb.0:
-; PRELOAD-1-NEXT: s_load_dword s0, s[0:1], 0x8
-; PRELOAD-1-NEXT: v_mov_b32_e32 v0, 0
-; PRELOAD-1-NEXT: s_waitcnt lgkmcnt(0)
-; PRELOAD-1-NEXT: s_lshr_b32 s1, s0, 16
-; PRELOAD-1-NEXT: s_and_b32 s0, s0, 0xffff
-; PRELOAD-1-NEXT: s_add_i32 s0, s0, s1
-; PRELOAD-1-NEXT: v_mov_b32_e32 v1, s0
-; PRELOAD-1-NEXT: global_store_dword v0, v1, s[2:3] sc0 sc1
-; PRELOAD-1-NEXT: s_endpgm
-;
-; PRELOAD-2-LABEL: ptr1_i16_i16_preload_arg:
-; PRELOAD-2: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: ; %bb.0:
-; PRELOAD-2-NEXT: s_load_dword s0, s[0:1], 0x8
-; PRELOAD-2-NEXT: s_and_b32 s1, s4, 0xffff
-; PRELOAD-2-NEXT: v_mov_b32_e32 v0, 0
-; PRELOAD-2-NEXT: s_waitcnt lgkmcnt(0)
-; PRELOAD-2-NEXT: s_lshr_b32 s0, s0, 16
-; PRELOAD-2-NEXT: s_add_i32 s0, s1, s0
-; PRELOAD-2-NEXT: v_mov_b32_e32 v1, s0
-; PRELOAD-2-NEXT: global_store_dword v0, v1, s[2:3] sc0 sc1
-; PRELOAD-2-NEXT: s_endpgm
-;
-; PRELOAD-4-LABEL: ptr1_i16_i16_preload_arg:
-; PRELOAD-4: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: ; %bb.0:
-; PRELOAD-4-NEXT: s_lshr_b32 s0, s4, 16
-; PRELOAD-4-NEXT: s_and_b32 s1, s4, 0xffff
-; PRELOAD-4-NEXT: s_add_i32 s0, s1, s0
-; PRELOAD-4-NEXT: v_mov_b32_e32 v0, 0
-; PRELOAD-4-NEXT: v_mov_b32_e32 v1, s0
-; PRELOAD-4-NEXT: global_store_dword v0, v1, s[2:3] sc0 sc1
-; PRELOAD-4-NEXT: s_endpgm
-;
-; PRELOAD-8-LABEL: ptr1_i16_i16_preload_arg:
-; PRELOAD-8: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: ; %bb.0:
-; PRELOAD-8-NEXT: s_lshr_b32 s0, s4, 16
-; PRELOAD-8-NEXT: s_and_b32 s1, s4, 0xffff
-; PRELOAD-8-NEXT: s_add_i32 s0, s1, s0
-; PRELOAD-8-NEXT: v_mov_b32_e32 v0, 0
-; PRELOAD-8-NEXT: v_mov_b32_e32 v1, s0
-; PRELOAD-8-NEXT: global_store_dword v0, v1, s[2:3] sc0 sc1
-; PRELOAD-8-NEXT: s_endpgm
+; GFX940-NO-PRELOAD-LABEL: ptr1_i16_i16_preload_arg:
+; GFX940-NO-PRELOAD: ; %bb.0:
+; GFX940-NO-PRELOAD-NEXT: s_load_dword s4, s[0:1], 0x8
+; GFX940-NO-PRELOAD-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x0
+; GFX940-NO-PRELOAD-NEXT: v_mov_b32_e32 v0, 0
+; GFX940-NO-PRELOAD-NEXT: s_waitcnt lgkmcnt(0)
+; GFX940-NO-PRELOAD-NEXT: s_lshr_b32 s0, s4, 16
+; GFX940-NO-PRELOAD-NEXT: s_and_b32 s1, s4, 0xffff
+; GFX940-NO-PRELOAD-NEXT: s_add_i32 s0, s1, s0
+; GFX940-NO-PRELOAD-NEXT: v_mov_b32_e32 v1, s0
+; GFX940-NO-PRELOAD-NEXT: global_store_dword v0, v1, s[2:3] sc0 sc1
+; GFX940-NO-PRELOAD-NEXT: s_endpgm
+;
+; GFX940-PRELOAD-1-LABEL: ptr1_i16_i16_preload_arg:
+; GFX940-PRELOAD-1: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: ; %bb.0:
+; GFX940-PRELOAD-1-NEXT: s_load_dword s0, s[0:1], 0x8
+; GFX940-PRELOAD-1-NEXT: v_mov_b32_e32 v0, 0
+; GFX940-PRELOAD-1-NEXT: s_waitcnt lgkmcnt(0)
+; GFX940-PRELOAD-1-NEXT: s_lshr_b32 s1, s0, 16
+; GFX940-PRELOAD-1-NEXT: s_and_b32 s0, s0, 0xffff
+; GFX940-PRELOAD-1-NEXT: s_add_i32 s0, s0, s1
+; GFX940-PRELOAD-1-NEXT: v_mov_b32_e32 v1, s0
+; GFX940-PRELOAD-1-NEXT: global_store_dword v0, v1, s[2:3] sc0 sc1
+; GFX940-PRELOAD-1-NEXT: s_endpgm
+;
+; GFX940-PRELOAD-2-LABEL: ptr1_i16_i16_preload_arg:
+; GFX940-PRELOAD-2: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: ; %bb.0:
+; GFX940-PRELOAD-2-NEXT: s_load_dword s0, s[0:1], 0x8
+; GFX940-PRELOAD-2-NEXT: s_and_b32 s1, s4, 0xffff
+; GFX940-PRELOAD-2-NEXT: v_mov_b32_e32 v0, 0
+; GFX940-PRELOAD-2-NEXT: s_waitcnt lgkmcnt(0)
+; GFX940-PRELOAD-2-NEXT: s_lshr_b32 s0, s0, 16
+; GFX940-PRELOAD-2-NEXT: s_add_i32 s0, s1, s0
+; GFX940-PRELOAD-2-NEXT: v_mov_b32_e32 v1, s0
+; GFX940-PRELOAD-2-NEXT: global_store_dword v0, v1, s[2:3] sc0 sc1
+; GFX940-PRELOAD-2-NEXT: s_endpgm
+;
+; GFX940-PRELOAD-4-LABEL: ptr1_i16_i16_preload_arg:
+; GFX940-PRELOAD-4: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: ; %bb.0:
+; GFX940-PRELOAD-4-NEXT: s_lshr_b32 s0, s4, 16
+; GFX940-PRELOAD-4-NEXT: s_and_b32 s1, s4, 0xffff
+; GFX940-PRELOAD-4-NEXT: s_add_i32 s0, s1, s0
+; GFX940-PRELOAD-4-NEXT: v_mov_b32_e32 v0, 0
+; GFX940-PRELOAD-4-NEXT: v_mov_b32_e32 v1, s0
+; GFX940-PRELOAD-4-NEXT: global_store_dword v0, v1, s[2:3] sc0 sc1
+; GFX940-PRELOAD-4-NEXT: s_endpgm
+;
+; GFX940-PRELOAD-8-LABEL: ptr1_i16_i16_preload_arg:
+; GFX940-PRELOAD-8: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: ; %bb.0:
+; GFX940-PRELOAD-8-NEXT: s_lshr_b32 s0, s4, 16
+; GFX940-PRELOAD-8-NEXT: s_and_b32 s1, s4, 0xffff
+; GFX940-PRELOAD-8-NEXT: s_add_i32 s0, s1, s0
+; GFX940-PRELOAD-8-NEXT: v_mov_b32_e32 v0, 0
+; GFX940-PRELOAD-8-NEXT: v_mov_b32_e32 v1, s0
+; GFX940-PRELOAD-8-NEXT: global_store_dword v0, v1, s[2:3] sc0 sc1
+; GFX940-PRELOAD-8-NEXT: s_endpgm
+;
+; GFX90a-NO-PRELOAD-LABEL: ptr1_i16_i16_preload_arg:
+; GFX90a-NO-PRELOAD: ; %bb.0:
+; GFX90a-NO-PRELOAD-NEXT: s_load_dword s2, s[4:5], 0x8
+; GFX90a-NO-PRELOAD-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x0
+; GFX90a-NO-PRELOAD-NEXT: v_mov_b32_e32 v0, 0
+; GFX90a-NO-PRELOAD-NEXT: s_waitcnt lgkmcnt(0)
+; GFX90a-NO-PRELOAD-NEXT: s_lshr_b32 s3, s2, 16
+; GFX90a-NO-PRELOAD-NEXT: s_and_b32 s2, s2, 0xffff
+; GFX90a-NO-PRELOAD-NEXT: s_add_i32 s2, s2, s3
+; GFX90a-NO-PRELOAD-NEXT: v_mov_b32_e32 v1, s2
+; GFX90a-NO-PRELOAD-NEXT: global_store_dword v0, v1, s[0:1]
+; GFX90a-NO-PRELOAD-NEXT: s_endpgm
+;
+; GFX90a-PRELOAD-1-LABEL: ptr1_i16_i16_preload_arg:
+; GFX90a-PRELOAD-1: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: ; %bb.0:
+; GFX90a-PRELOAD-1-NEXT: s_load_dword s0, s[4:5], 0x8
+; GFX90a-PRELOAD-1-NEXT: v_mov_b32_e32 v0, 0
+; GFX90a-PRELOAD-1-NEXT: s_waitcnt lgkmcnt(0)
+; GFX90a-PRELOAD-1-NEXT: s_lshr_b32 s1, s0, 16
+; GFX90a-PRELOAD-1-NEXT: s_and_b32 s0, s0, 0xffff
+; GFX90a-PRELOAD-1-NEXT: s_add_i32 s0, s0, s1
+; GFX90a-PRELOAD-1-NEXT: v_mov_b32_e32 v1, s0
+; GFX90a-PRELOAD-1-NEXT: global_store_dword v0, v1, s[6:7]
+; GFX90a-PRELOAD-1-NEXT: s_endpgm
+;
+; GFX90a-PRELOAD-2-LABEL: ptr1_i16_i16_preload_arg:
+; GFX90a-PRELOAD-2: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: ; %bb.0:
+; GFX90a-PRELOAD-2-NEXT: s_load_dword s0, s[4:5], 0x8
+; GFX90a-PRELOAD-2-NEXT: s_and_b32 s1, s8, 0xffff
+; GFX90a-PRELOAD-2-NEXT: v_mov_b32_e32 v0, 0
+; GFX90a-PRELOAD-2-NEXT: s_waitcnt lgkmcnt(0)
+; GFX90a-PRELOAD-2-NEXT: s_lshr_b32 s0, s0, 16
+; GFX90a-PRELOAD-2-NEXT: s_add_i32 s0, s1, s0
+; GFX90a-PRELOAD-2-NEXT: v_mov_b32_e32 v1, s0
+; GFX90a-PRELOAD-2-NEXT: global_store_dword v0, v1, s[6:7]
+; GFX90a-PRELOAD-2-NEXT: s_endpgm
+;
+; GFX90a-PRELOAD-4-LABEL: ptr1_i16_i16_preload_arg:
+; GFX90a-PRELOAD-4: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: ; %bb.0:
+; GFX90a-PRELOAD-4-NEXT: s_lshr_b32 s0, s8, 16
+; GFX90a-PRELOAD-4-NEXT: s_and_b32 s1, s8, 0xffff
+; GFX90a-PRELOAD-4-NEXT: s_add_i32 s0, s1, s0
+; GFX90a-PRELOAD-4-NEXT: v_mov_b32_e32 v0, 0
+; GFX90a-PRELOAD-4-NEXT: v_mov_b32_e32 v1, s0
+; GFX90a-PRELOAD-4-NEXT: global_store_dword v0, v1, s[6:7]
+; GFX90a-PRELOAD-4-NEXT: s_endpgm
+;
+; GFX90a-PRELOAD-8-LABEL: ptr1_i16_i16_preload_arg:
+; GFX90a-PRELOAD-8: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: ; %bb.0:
+; GFX90a-PRELOAD-8-NEXT: s_lshr_b32 s0, s8, 16
+; GFX90a-PRELOAD-8-NEXT: s_and_b32 s1, s8, 0xffff
+; GFX90a-PRELOAD-8-NEXT: s_add_i32 s0, s1, s0
+; GFX90a-PRELOAD-8-NEXT: v_mov_b32_e32 v0, 0
+; GFX90a-PRELOAD-8-NEXT: v_mov_b32_e32 v1, s0
+; GFX90a-PRELOAD-8-NEXT: global_store_dword v0, v1, s[6:7]
+; GFX90a-PRELOAD-8-NEXT: s_endpgm
%ext = zext i16 %arg0 to i32
%ext1 = zext i16 %arg1 to i32
%add = add i32 %ext, %ext1
@@ -1859,3563 +3684,7068 @@ define amdgpu_kernel void @ptr1_i16_i16_preload_arg(ptr addrspace(1) %out, i16 %
}
define amdgpu_kernel void @ptr1_v2i8_preload_arg(ptr addrspace(1) %out, <2 x i8> %in) {
-; NO-PRELOAD-LABEL: ptr1_v2i8_preload_arg:
-; NO-PRELOAD: ; %bb.0:
-; NO-PRELOAD-NEXT: s_load_dword s4, s[0:1], 0x8
-; NO-PRELOAD-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x0
-; NO-PRELOAD-NEXT: v_mov_b32_e32 v0, 0
-; NO-PRELOAD-NEXT: s_waitcnt lgkmcnt(0)
-; NO-PRELOAD-NEXT: v_mov_b32_e32 v1, s4
-; NO-PRELOAD-NEXT: global_store_short v0, v1, s[2:3] sc0 sc1
-; NO-PRELOAD-NEXT: s_endpgm
-;
-; PRELOAD-1-LABEL: ptr1_v2i8_preload_arg:
-; PRELOAD-1: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: ; %bb.0:
-; PRELOAD-1-NEXT: s_load_dword s0, s[0:1], 0x8
-; PRELOAD-1-NEXT: v_mov_b32_e32 v0, 0
-; PRELOAD-1-NEXT: s_waitcnt lgkmcnt(0)
-; PRELOAD-1-NEXT: v_mov_b32_e32 v1, s0
-; PRELOAD-1-NEXT: global_store_short v0, v1, s[2:3] sc0 sc1
-; PRELOAD-1-NEXT: s_endpgm
-;
-; PRELOAD-2-LABEL: ptr1_v2i8_preload_arg:
-; PRELOAD-2: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: ; %bb.0:
-; PRELOAD-2-NEXT: s_lshr_b32 s0, s4, 8
-; PRELOAD-2-NEXT: v_lshlrev_b16_e64 v0, 8, s0
-; PRELOAD-2-NEXT: v_or_b32_sdwa v0, s4, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; PRELOAD-2-NEXT: v_mov_b32_e32 v1, 0
-; PRELOAD-2-NEXT: global_store_short v1, v0, s[2:3] sc0 sc1
-; PRELOAD-2-NEXT: s_endpgm
-;
-; PRELOAD-4-LABEL: ptr1_v2i8_preload_arg:
-; PRELOAD-4: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: ; %bb.0:
-; PRELOAD-4-NEXT: s_lshr_b32 s0, s4, 8
-; PRELOAD-4-NEXT: v_lshlrev_b16_e64 v0, 8, s0
-; PRELOAD-4-NEXT: v_or_b32_sdwa v0, s4, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; PRELOAD-4-NEXT: v_mov_b32_e32 v1, 0
-; PRELOAD-4-NEXT: global_store_short v1, v0, s[2:3] sc0 sc1
-; PRELOAD-4-NEXT: s_endpgm
-;
-; PRELOAD-8-LABEL: ptr1_v2i8_preload_arg:
-; PRELOAD-8: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: ; %bb.0:
-; PRELOAD-8-NEXT: s_lshr_b32 s0, s4, 8
-; PRELOAD-8-NEXT: v_lshlrev_b16_e64 v0, 8, s0
-; PRELOAD-8-NEXT: v_or_b32_sdwa v0, s4, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; PRELOAD-8-NEXT: v_mov_b32_e32 v1, 0
-; PRELOAD-8-NEXT: global_store_short v1, v0, s[2:3] sc0 sc1
-; PRELOAD-8-NEXT: s_endpgm
+; GFX940-NO-PRELOAD-LABEL: ptr1_v2i8_preload_arg:
+; GFX940-NO-PRELOAD: ; %bb.0:
+; GFX940-NO-PRELOAD-NEXT: s_load_dword s4, s[0:1], 0x8
+; GFX940-NO-PRELOAD-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x0
+; GFX940-NO-PRELOAD-NEXT: v_mov_b32_e32 v0, 0
+; GFX940-NO-PRELOAD-NEXT: s_waitcnt lgkmcnt(0)
+; GFX940-NO-PRELOAD-NEXT: v_mov_b32_e32 v1, s4
+; GFX940-NO-PRELOAD-NEXT: global_store_short v0, v1, s[2:3] sc0 sc1
+; GFX940-NO-PRELOAD-NEXT: s_endpgm
+;
+; GFX940-PRELOAD-1-LABEL: ptr1_v2i8_preload_arg:
+; GFX940-PRELOAD-1: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: ; %bb.0:
+; GFX940-PRELOAD-1-NEXT: s_load_dword s0, s[0:1], 0x8
+; GFX940-PRELOAD-1-NEXT: v_mov_b32_e32 v0, 0
+; GFX940-PRELOAD-1-NEXT: s_waitcnt lgkmcnt(0)
+; GFX940-PRELOAD-1-NEXT: v_mov_b32_e32 v1, s0
+; GFX940-PRELOAD-1-NEXT: global_store_short v0, v1, s[2:3] sc0 sc1
+; GFX940-PRELOAD-1-NEXT: s_endpgm
+;
+; GFX940-PRELOAD-2-LABEL: ptr1_v2i8_preload_arg:
+; GFX940-PRELOAD-2: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: ; %bb.0:
+; GFX940-PRELOAD-2-NEXT: s_lshr_b32 s0, s4, 8
+; GFX940-PRELOAD-2-NEXT: v_lshlrev_b16_e64 v0, 8, s0
+; GFX940-PRELOAD-2-NEXT: v_or_b32_sdwa v0, s4, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX940-PRELOAD-2-NEXT: v_mov_b32_e32 v1, 0
+; GFX940-PRELOAD-2-NEXT: global_store_short v1, v0, s[2:3] sc0 sc1
+; GFX940-PRELOAD-2-NEXT: s_endpgm
+;
+; GFX940-PRELOAD-4-LABEL: ptr1_v2i8_preload_arg:
+; GFX940-PRELOAD-4: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: ; %bb.0:
+; GFX940-PRELOAD-4-NEXT: s_lshr_b32 s0, s4, 8
+; GFX940-PRELOAD-4-NEXT: v_lshlrev_b16_e64 v0, 8, s0
+; GFX940-PRELOAD-4-NEXT: v_or_b32_sdwa v0, s4, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX940-PRELOAD-4-NEXT: v_mov_b32_e32 v1, 0
+; GFX940-PRELOAD-4-NEXT: global_store_short v1, v0, s[2:3] sc0 sc1
+; GFX940-PRELOAD-4-NEXT: s_endpgm
+;
+; GFX940-PRELOAD-8-LABEL: ptr1_v2i8_preload_arg:
+; GFX940-PRELOAD-8: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: ; %bb.0:
+; GFX940-PRELOAD-8-NEXT: s_lshr_b32 s0, s4, 8
+; GFX940-PRELOAD-8-NEXT: v_lshlrev_b16_e64 v0, 8, s0
+; GFX940-PRELOAD-8-NEXT: v_or_b32_sdwa v0, s4, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX940-PRELOAD-8-NEXT: v_mov_b32_e32 v1, 0
+; GFX940-PRELOAD-8-NEXT: global_store_short v1, v0, s[2:3] sc0 sc1
+; GFX940-PRELOAD-8-NEXT: s_endpgm
+;
+; GFX90a-NO-PRELOAD-LABEL: ptr1_v2i8_preload_arg:
+; GFX90a-NO-PRELOAD: ; %bb.0:
+; GFX90a-NO-PRELOAD-NEXT: s_load_dword s2, s[4:5], 0x8
+; GFX90a-NO-PRELOAD-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x0
+; GFX90a-NO-PRELOAD-NEXT: v_mov_b32_e32 v0, 0
+; GFX90a-NO-PRELOAD-NEXT: s_waitcnt lgkmcnt(0)
+; GFX90a-NO-PRELOAD-NEXT: v_mov_b32_e32 v1, s2
+; GFX90a-NO-PRELOAD-NEXT: global_store_short v0, v1, s[0:1]
+; GFX90a-NO-PRELOAD-NEXT: s_endpgm
+;
+; GFX90a-PRELOAD-1-LABEL: ptr1_v2i8_preload_arg:
+; GFX90a-PRELOAD-1: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: ; %bb.0:
+; GFX90a-PRELOAD-1-NEXT: s_load_dword s0, s[4:5], 0x8
+; GFX90a-PRELOAD-1-NEXT: v_mov_b32_e32 v0, 0
+; GFX90a-PRELOAD-1-NEXT: s_waitcnt lgkmcnt(0)
+; GFX90a-PRELOAD-1-NEXT: v_mov_b32_e32 v1, s0
+; GFX90a-PRELOAD-1-NEXT: global_store_short v0, v1, s[6:7]
+; GFX90a-PRELOAD-1-NEXT: s_endpgm
+;
+; GFX90a-PRELOAD-2-LABEL: ptr1_v2i8_preload_arg:
+; GFX90a-PRELOAD-2: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: ; %bb.0:
+; GFX90a-PRELOAD-2-NEXT: s_lshr_b32 s0, s8, 8
+; GFX90a-PRELOAD-2-NEXT: v_lshlrev_b16_e64 v0, 8, s0
+; GFX90a-PRELOAD-2-NEXT: v_or_b32_sdwa v0, s8, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX90a-PRELOAD-2-NEXT: v_mov_b32_e32 v1, 0
+; GFX90a-PRELOAD-2-NEXT: global_store_short v1, v0, s[6:7]
+; GFX90a-PRELOAD-2-NEXT: s_endpgm
+;
+; GFX90a-PRELOAD-4-LABEL: ptr1_v2i8_preload_arg:
+; GFX90a-PRELOAD-4: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: ; %bb.0:
+; GFX90a-PRELOAD-4-NEXT: s_lshr_b32 s0, s8, 8
+; GFX90a-PRELOAD-4-NEXT: v_lshlrev_b16_e64 v0, 8, s0
+; GFX90a-PRELOAD-4-NEXT: v_or_b32_sdwa v0, s8, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX90a-PRELOAD-4-NEXT: v_mov_b32_e32 v1, 0
+; GFX90a-PRELOAD-4-NEXT: global_store_short v1, v0, s[6:7]
+; GFX90a-PRELOAD-4-NEXT: s_endpgm
+;
+; GFX90a-PRELOAD-8-LABEL: ptr1_v2i8_preload_arg:
+; GFX90a-PRELOAD-8: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: ; %bb.0:
+; GFX90a-PRELOAD-8-NEXT: s_lshr_b32 s0, s8, 8
+; GFX90a-PRELOAD-8-NEXT: v_lshlrev_b16_e64 v0, 8, s0
+; GFX90a-PRELOAD-8-NEXT: v_or_b32_sdwa v0, s8, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX90a-PRELOAD-8-NEXT: v_mov_b32_e32 v1, 0
+; GFX90a-PRELOAD-8-NEXT: global_store_short v1, v0, s[6:7]
+; GFX90a-PRELOAD-8-NEXT: s_endpgm
store <2 x i8> %in, ptr addrspace(1) %out
ret void
}
-; Don't try to preload byref args.
define amdgpu_kernel void @byref_preload_arg(ptr addrspace(1) %out, ptr addrspace(4) byref(i32) align(256) %in.byref, i32 %after.offset) {
-; NO-PRELOAD-LABEL: byref_preload_arg:
-; NO-PRELOAD: ; %bb.0:
-; NO-PRELOAD-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x100
-; NO-PRELOAD-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
-; NO-PRELOAD-NEXT: v_mov_b32_e32 v0, 0
-; NO-PRELOAD-NEXT: s_waitcnt lgkmcnt(0)
-; NO-PRELOAD-NEXT: v_mov_b32_e32 v1, s2
-; NO-PRELOAD-NEXT: v_mov_b32_e32 v2, s3
-; NO-PRELOAD-NEXT: global_store_dword v0, v1, s[4:5] sc0 sc1
-; NO-PRELOAD-NEXT: s_waitcnt vmcnt(0)
-; NO-PRELOAD-NEXT: global_store_dword v0, v2, s[4:5] sc0 sc1
-; NO-PRELOAD-NEXT: s_waitcnt vmcnt(0)
-; NO-PRELOAD-NEXT: s_endpgm
-;
-; PRELOAD-1-LABEL: byref_preload_arg:
-; PRELOAD-1: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: ; %bb.0:
-; PRELOAD-1-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x100
-; PRELOAD-1-NEXT: v_mov_b32_e32 v0, 0
-; PRELOAD-1-NEXT: s_waitcnt lgkmcnt(0)
-; PRELOAD-1-NEXT: v_mov_b32_e32 v1, s0
-; PRELOAD-1-NEXT: v_mov_b32_e32 v2, s1
-; PRELOAD-1-NEXT: global_store_dword v0, v1, s[2:3] sc0 sc1
-; PRELOAD-1-NEXT: s_waitcnt vmcnt(0)
-; PRELOAD-1-NEXT: global_store_dword v0, v2, s[2:3] sc0 sc1
-; PRELOAD-1-NEXT: s_waitcnt vmcnt(0)
-; PRELOAD-1-NEXT: s_endpgm
-;
-; PRELOAD-2-LABEL: byref_preload_arg:
-; PRELOAD-2: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: ; %bb.0:
-; PRELOAD-2-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x100
-; PRELOAD-2-NEXT: v_mov_b32_e32 v0, 0
-; PRELOAD-2-NEXT: s_waitcnt lgkmcnt(0)
-; PRELOAD-2-NEXT: v_mov_b32_e32 v1, s0
-; PRELOAD-2-NEXT: v_mov_b32_e32 v2, s1
-; PRELOAD-2-NEXT: global_store_dword v0, v1, s[2:3] sc0 sc1
-; PRELOAD-2-NEXT: s_waitcnt vmcnt(0)
-; PRELOAD-2-NEXT: global_store_dword v0, v2, s[2:3] sc0 sc1
-; PRELOAD-2-NEXT: s_waitcnt vmcnt(0)
-; PRELOAD-2-NEXT: s_endpgm
-;
-; PRELOAD-4-LABEL: byref_preload_arg:
-; PRELOAD-4: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: ; %bb.0:
-; PRELOAD-4-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x100
-; PRELOAD-4-NEXT: v_mov_b32_e32 v0, 0
-; PRELOAD-4-NEXT: s_waitcnt lgkmcnt(0)
-; PRELOAD-4-NEXT: v_mov_b32_e32 v1, s0
-; PRELOAD-4-NEXT: v_mov_b32_e32 v2, s1
-; PRELOAD-4-NEXT: global_store_dword v0, v1, s[2:3] sc0 sc1
-; PRELOAD-4-NEXT: s_waitcnt vmcnt(0)
-; PRELOAD-4-NEXT: global_store_dword v0, v2, s[2:3] sc0 sc1
-; PRELOAD-4-NEXT: s_waitcnt vmcnt(0)
-; PRELOAD-4-NEXT: s_endpgm
-;
-; PRELOAD-8-LABEL: byref_preload_arg:
-; PRELOAD-8: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: ; %bb.0:
-; PRELOAD-8-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x100
-; PRELOAD-8-NEXT: v_mov_b32_e32 v0, 0
-; PRELOAD-8-NEXT: s_waitcnt lgkmcnt(0)
-; PRELOAD-8-NEXT: v_mov_b32_e32 v1, s0
-; PRELOAD-8-NEXT: v_mov_b32_e32 v2, s1
-; PRELOAD-8-NEXT: global_store_dword v0, v1, s[2:3] sc0 sc1
-; PRELOAD-8-NEXT: s_waitcnt vmcnt(0)
-; PRELOAD-8-NEXT: global_store_dword v0, v2, s[2:3] sc0 sc1
-; PRELOAD-8-NEXT: s_waitcnt vmcnt(0)
-; PRELOAD-8-NEXT: s_endpgm
+; GFX940-NO-PRELOAD-LABEL: byref_preload_arg:
+; GFX940-NO-PRELOAD: ; %bb.0:
+; GFX940-NO-PRELOAD-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x100
+; GFX940-NO-PRELOAD-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX940-NO-PRELOAD-NEXT: v_mov_b32_e32 v0, 0
+; GFX940-NO-PRELOAD-NEXT: s_waitcnt lgkmcnt(0)
+; GFX940-NO-PRELOAD-NEXT: v_mov_b32_e32 v1, s2
+; GFX940-NO-PRELOAD-NEXT: v_mov_b32_e32 v2, s3
+; GFX940-NO-PRELOAD-NEXT: global_store_dword v0, v1, s[4:5] sc0 sc1
+; GFX940-NO-PRELOAD-NEXT: s_waitcnt vmcnt(0)
+; GFX940-NO-PRELOAD-NEXT: global_store_dword v0, v2, s[4:5] sc0 sc1
+; GFX940-NO-PRELOAD-NEXT: s_waitcnt vmcnt(0)
+; GFX940-NO-PRELOAD-NEXT: s_endpgm
+;
+; GFX940-PRELOAD-1-LABEL: byref_preload_arg:
+; GFX940-PRELOAD-1: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: ; %bb.0:
+; GFX940-PRELOAD-1-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x100
+; GFX940-PRELOAD-1-NEXT: v_mov_b32_e32 v0, 0
+; GFX940-PRELOAD-1-NEXT: s_waitcnt lgkmcnt(0)
+; GFX940-PRELOAD-1-NEXT: v_mov_b32_e32 v1, s0
+; GFX940-PRELOAD-1-NEXT: v_mov_b32_e32 v2, s1
+; GFX940-PRELOAD-1-NEXT: global_store_dword v0, v1, s[2:3] sc0 sc1
+; GFX940-PRELOAD-1-NEXT: s_waitcnt vmcnt(0)
+; GFX940-PRELOAD-1-NEXT: global_store_dword v0, v2, s[2:3] sc0 sc1
+; GFX940-PRELOAD-1-NEXT: s_waitcnt vmcnt(0)
+; GFX940-PRELOAD-1-NEXT: s_endpgm
+;
+; GFX940-PRELOAD-2-LABEL: byref_preload_arg:
+; GFX940-PRELOAD-2: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: ; %bb.0:
+; GFX940-PRELOAD-2-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x100
+; GFX940-PRELOAD-2-NEXT: v_mov_b32_e32 v0, 0
+; GFX940-PRELOAD-2-NEXT: s_waitcnt lgkmcnt(0)
+; GFX940-PRELOAD-2-NEXT: v_mov_b32_e32 v1, s0
+; GFX940-PRELOAD-2-NEXT: v_mov_b32_e32 v2, s1
+; GFX940-PRELOAD-2-NEXT: global_store_dword v0, v1, s[2:3] sc0 sc1
+; GFX940-PRELOAD-2-NEXT: s_waitcnt vmcnt(0)
+; GFX940-PRELOAD-2-NEXT: global_store_dword v0, v2, s[2:3] sc0 sc1
+; GFX940-PRELOAD-2-NEXT: s_waitcnt vmcnt(0)
+; GFX940-PRELOAD-2-NEXT: s_endpgm
+;
+; GFX940-PRELOAD-4-LABEL: byref_preload_arg:
+; GFX940-PRELOAD-4: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: ; %bb.0:
+; GFX940-PRELOAD-4-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x100
+; GFX940-PRELOAD-4-NEXT: v_mov_b32_e32 v0, 0
+; GFX940-PRELOAD-4-NEXT: s_waitcnt lgkmcnt(0)
+; GFX940-PRELOAD-4-NEXT: v_mov_b32_e32 v1, s0
+; GFX940-PRELOAD-4-NEXT: v_mov_b32_e32 v2, s1
+; GFX940-PRELOAD-4-NEXT: global_store_dword v0, v1, s[2:3] sc0 sc1
+; GFX940-PRELOAD-4-NEXT: s_waitcnt vmcnt(0)
+; GFX940-PRELOAD-4-NEXT: global_store_dword v0, v2, s[2:3] sc0 sc1
+; GFX940-PRELOAD-4-NEXT: s_waitcnt vmcnt(0)
+; GFX940-PRELOAD-4-NEXT: s_endpgm
+;
+; GFX940-PRELOAD-8-LABEL: byref_preload_arg:
+; GFX940-PRELOAD-8: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: ; %bb.0:
+; GFX940-PRELOAD-8-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x100
+; GFX940-PRELOAD-8-NEXT: v_mov_b32_e32 v0, 0
+; GFX940-PRELOAD-8-NEXT: s_waitcnt lgkmcnt(0)
+; GFX940-PRELOAD-8-NEXT: v_mov_b32_e32 v1, s0
+; GFX940-PRELOAD-8-NEXT: v_mov_b32_e32 v2, s1
+; GFX940-PRELOAD-8-NEXT: global_store_dword v0, v1, s[2:3] sc0 sc1
+; GFX940-PRELOAD-8-NEXT: s_waitcnt vmcnt(0)
+; GFX940-PRELOAD-8-NEXT: global_store_dword v0, v2, s[2:3] sc0 sc1
+; GFX940-PRELOAD-8-NEXT: s_waitcnt vmcnt(0)
+; GFX940-PRELOAD-8-NEXT: s_endpgm
+;
+; GFX90a-NO-PRELOAD-LABEL: byref_preload_arg:
+; GFX90a-NO-PRELOAD: ; %bb.0:
+; GFX90a-NO-PRELOAD-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x100
+; GFX90a-NO-PRELOAD-NEXT: s_load_dwordx2 s[2:3], s[4:5], 0x0
+; GFX90a-NO-PRELOAD-NEXT: v_mov_b32_e32 v0, 0
+; GFX90a-NO-PRELOAD-NEXT: s_waitcnt lgkmcnt(0)
+; GFX90a-NO-PRELOAD-NEXT: v_mov_b32_e32 v1, s0
+; GFX90a-NO-PRELOAD-NEXT: v_mov_b32_e32 v2, s1
+; GFX90a-NO-PRELOAD-NEXT: global_store_dword v0, v1, s[2:3]
+; GFX90a-NO-PRELOAD-NEXT: s_waitcnt vmcnt(0)
+; GFX90a-NO-PRELOAD-NEXT: global_store_dword v0, v2, s[2:3]
+; GFX90a-NO-PRELOAD-NEXT: s_waitcnt vmcnt(0)
+; GFX90a-NO-PRELOAD-NEXT: s_endpgm
+;
+; GFX90a-PRELOAD-1-LABEL: byref_preload_arg:
+; GFX90a-PRELOAD-1: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: ; %bb.0:
+; GFX90a-PRELOAD-1-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x100
+; GFX90a-PRELOAD-1-NEXT: v_mov_b32_e32 v0, 0
+; GFX90a-PRELOAD-1-NEXT: s_waitcnt lgkmcnt(0)
+; GFX90a-PRELOAD-1-NEXT: v_mov_b32_e32 v1, s0
+; GFX90a-PRELOAD-1-NEXT: v_mov_b32_e32 v2, s1
+; GFX90a-PRELOAD-1-NEXT: global_store_dword v0, v1, s[6:7]
+; GFX90a-PRELOAD-1-NEXT: s_waitcnt vmcnt(0)
+; GFX90a-PRELOAD-1-NEXT: global_store_dword v0, v2, s[6:7]
+; GFX90a-PRELOAD-1-NEXT: s_waitcnt vmcnt(0)
+; GFX90a-PRELOAD-1-NEXT: s_endpgm
+;
+; GFX90a-PRELOAD-2-LABEL: byref_preload_arg:
+; GFX90a-PRELOAD-2: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: ; %bb.0:
+; GFX90a-PRELOAD-2-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x100
+; GFX90a-PRELOAD-2-NEXT: v_mov_b32_e32 v0, 0
+; GFX90a-PRELOAD-2-NEXT: s_waitcnt lgkmcnt(0)
+; GFX90a-PRELOAD-2-NEXT: v_mov_b32_e32 v1, s0
+; GFX90a-PRELOAD-2-NEXT: v_mov_b32_e32 v2, s1
+; GFX90a-PRELOAD-2-NEXT: global_store_dword v0, v1, s[6:7]
+; GFX90a-PRELOAD-2-NEXT: s_waitcnt vmcnt(0)
+; GFX90a-PRELOAD-2-NEXT: global_store_dword v0, v2, s[6:7]
+; GFX90a-PRELOAD-2-NEXT: s_waitcnt vmcnt(0)
+; GFX90a-PRELOAD-2-NEXT: s_endpgm
+;
+; GFX90a-PRELOAD-4-LABEL: byref_preload_arg:
+; GFX90a-PRELOAD-4: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: ; %bb.0:
+; GFX90a-PRELOAD-4-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x100
+; GFX90a-PRELOAD-4-NEXT: v_mov_b32_e32 v0, 0
+; GFX90a-PRELOAD-4-NEXT: s_waitcnt lgkmcnt(0)
+; GFX90a-PRELOAD-4-NEXT: v_mov_b32_e32 v1, s0
+; GFX90a-PRELOAD-4-NEXT: v_mov_b32_e32 v2, s1
+; GFX90a-PRELOAD-4-NEXT: global_store_dword v0, v1, s[6:7]
+; GFX90a-PRELOAD-4-NEXT: s_waitcnt vmcnt(0)
+; GFX90a-PRELOAD-4-NEXT: global_store_dword v0, v2, s[6:7]
+; GFX90a-PRELOAD-4-NEXT: s_waitcnt vmcnt(0)
+; GFX90a-PRELOAD-4-NEXT: s_endpgm
+;
+; GFX90a-PRELOAD-8-LABEL: byref_preload_arg:
+; GFX90a-PRELOAD-8: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: ; %bb.0:
+; GFX90a-PRELOAD-8-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x100
+; GFX90a-PRELOAD-8-NEXT: v_mov_b32_e32 v0, 0
+; GFX90a-PRELOAD-8-NEXT: s_waitcnt lgkmcnt(0)
+; GFX90a-PRELOAD-8-NEXT: v_mov_b32_e32 v1, s0
+; GFX90a-PRELOAD-8-NEXT: v_mov_b32_e32 v2, s1
+; GFX90a-PRELOAD-8-NEXT: global_store_dword v0, v1, s[6:7]
+; GFX90a-PRELOAD-8-NEXT: s_waitcnt vmcnt(0)
+; GFX90a-PRELOAD-8-NEXT: global_store_dword v0, v2, s[6:7]
+; GFX90a-PRELOAD-8-NEXT: s_waitcnt vmcnt(0)
+; GFX90a-PRELOAD-8-NEXT: s_endpgm
%in = load i32, ptr addrspace(4) %in.byref
store volatile i32 %in, ptr addrspace(1) %out, align 4
store volatile i32 %after.offset, ptr addrspace(1) %out, align 4
ret void
}
-; TODO: Should do partial preload in cases like these where only part of the arg
-; can be preloaded.
define amdgpu_kernel void @v8i32_arg(ptr addrspace(1) nocapture %out, <8 x i32> %in) nounwind {
-; NO-PRELOAD-LABEL: v8i32_arg:
-; NO-PRELOAD: ; %bb.0:
-; NO-PRELOAD-NEXT: s_load_dwordx8 s[4:11], s[0:1], 0x20
-; NO-PRELOAD-NEXT: v_mov_b32_e32 v4, 0
-; NO-PRELOAD-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x0
-; NO-PRELOAD-NEXT: s_waitcnt lgkmcnt(0)
-; NO-PRELOAD-NEXT: v_mov_b32_e32 v0, s8
-; NO-PRELOAD-NEXT: v_mov_b32_e32 v1, s9
-; NO-PRELOAD-NEXT: v_mov_b32_e32 v2, s10
-; NO-PRELOAD-NEXT: v_mov_b32_e32 v3, s11
-; NO-PRELOAD-NEXT: global_store_dwordx4 v4, v[0:3], s[0:1] offset:16 sc0 sc1
-; NO-PRELOAD-NEXT: s_nop 1
-; NO-PRELOAD-NEXT: v_mov_b32_e32 v0, s4
-; NO-PRELOAD-NEXT: v_mov_b32_e32 v1, s5
-; NO-PRELOAD-NEXT: v_mov_b32_e32 v2, s6
-; NO-PRELOAD-NEXT: v_mov_b32_e32 v3, s7
-; NO-PRELOAD-NEXT: global_store_dwordx4 v4, v[0:3], s[0:1] sc0 sc1
-; NO-PRELOAD-NEXT: s_endpgm
-;
-; PRELOAD-1-LABEL: v8i32_arg:
-; PRELOAD-1: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: ; %bb.0:
-; PRELOAD-1-NEXT: s_load_dwordx8 s[4:11], s[0:1], 0x20
-; PRELOAD-1-NEXT: v_mov_b32_e32 v4, 0
-; PRELOAD-1-NEXT: s_waitcnt lgkmcnt(0)
-; PRELOAD-1-NEXT: v_mov_b32_e32 v0, s8
-; PRELOAD-1-NEXT: v_mov_b32_e32 v1, s9
-; PRELOAD-1-NEXT: v_mov_b32_e32 v2, s10
-; PRELOAD-1-NEXT: v_mov_b32_e32 v3, s11
-; PRELOAD-1-NEXT: global_store_dwordx4 v4, v[0:3], s[2:3] offset:16 sc0 sc1
-; PRELOAD-1-NEXT: s_nop 1
-; PRELOAD-1-NEXT: v_mov_b32_e32 v0, s4
-; PRELOAD-1-NEXT: v_mov_b32_e32 v1, s5
-; PRELOAD-1-NEXT: v_mov_b32_e32 v2, s6
-; PRELOAD-1-NEXT: v_mov_b32_e32 v3, s7
-; PRELOAD-1-NEXT: global_store_dwordx4 v4, v[0:3], s[2:3] sc0 sc1
-; PRELOAD-1-NEXT: s_endpgm
-;
-; PRELOAD-2-LABEL: v8i32_arg:
-; PRELOAD-2: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: ; %bb.0:
-; PRELOAD-2-NEXT: s_load_dwordx8 s[4:11], s[0:1], 0x20
-; PRELOAD-2-NEXT: v_mov_b32_e32 v4, 0
-; PRELOAD-2-NEXT: s_waitcnt lgkmcnt(0)
-; PRELOAD-2-NEXT: v_mov_b32_e32 v0, s8
-; PRELOAD-2-NEXT: v_mov_b32_e32 v1, s9
-; PRELOAD-2-NEXT: v_mov_b32_e32 v2, s10
-; PRELOAD-2-NEXT: v_mov_b32_e32 v3, s11
-; PRELOAD-2-NEXT: global_store_dwordx4 v4, v[0:3], s[2:3] offset:16 sc0 sc1
-; PRELOAD-2-NEXT: s_nop 1
-; PRELOAD-2-NEXT: v_mov_b32_e32 v0, s4
-; PRELOAD-2-NEXT: v_mov_b32_e32 v1, s5
-; PRELOAD-2-NEXT: v_mov_b32_e32 v2, s6
-; PRELOAD-2-NEXT: v_mov_b32_e32 v3, s7
-; PRELOAD-2-NEXT: global_store_dwordx4 v4, v[0:3], s[2:3] sc0 sc1
-; PRELOAD-2-NEXT: s_endpgm
-;
-; PRELOAD-4-LABEL: v8i32_arg:
-; PRELOAD-4: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: ; %bb.0:
-; PRELOAD-4-NEXT: s_load_dwordx8 s[4:11], s[0:1], 0x20
-; PRELOAD-4-NEXT: v_mov_b32_e32 v4, 0
-; PRELOAD-4-NEXT: s_waitcnt lgkmcnt(0)
-; PRELOAD-4-NEXT: v_mov_b32_e32 v0, s8
-; PRELOAD-4-NEXT: v_mov_b32_e32 v1, s9
-; PRELOAD-4-NEXT: v_mov_b32_e32 v2, s10
-; PRELOAD-4-NEXT: v_mov_b32_e32 v3, s11
-; PRELOAD-4-NEXT: global_store_dwordx4 v4, v[0:3], s[2:3] offset:16 sc0 sc1
-; PRELOAD-4-NEXT: s_nop 1
-; PRELOAD-4-NEXT: v_mov_b32_e32 v0, s4
-; PRELOAD-4-NEXT: v_mov_b32_e32 v1, s5
-; PRELOAD-4-NEXT: v_mov_b32_e32 v2, s6
-; PRELOAD-4-NEXT: v_mov_b32_e32 v3, s7
-; PRELOAD-4-NEXT: global_store_dwordx4 v4, v[0:3], s[2:3] sc0 sc1
-; PRELOAD-4-NEXT: s_endpgm
-;
-; PRELOAD-8-LABEL: v8i32_arg:
-; PRELOAD-8: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: ; %bb.0:
-; PRELOAD-8-NEXT: s_load_dwordx8 s[4:11], s[0:1], 0x20
-; PRELOAD-8-NEXT: v_mov_b32_e32 v4, 0
-; PRELOAD-8-NEXT: s_waitcnt lgkmcnt(0)
-; PRELOAD-8-NEXT: v_mov_b32_e32 v0, s8
-; PRELOAD-8-NEXT: v_mov_b32_e32 v1, s9
-; PRELOAD-8-NEXT: v_mov_b32_e32 v2, s10
-; PRELOAD-8-NEXT: v_mov_b32_e32 v3, s11
-; PRELOAD-8-NEXT: global_store_dwordx4 v4, v[0:3], s[2:3] offset:16 sc0 sc1
-; PRELOAD-8-NEXT: s_nop 1
-; PRELOAD-8-NEXT: v_mov_b32_e32 v0, s4
-; PRELOAD-8-NEXT: v_mov_b32_e32 v1, s5
-; PRELOAD-8-NEXT: v_mov_b32_e32 v2, s6
-; PRELOAD-8-NEXT: v_mov_b32_e32 v3, s7
-; PRELOAD-8-NEXT: global_store_dwordx4 v4, v[0:3], s[2:3] sc0 sc1
-; PRELOAD-8-NEXT: s_endpgm
+; GFX940-NO-PRELOAD-LABEL: v8i32_arg:
+; GFX940-NO-PRELOAD: ; %bb.0:
+; GFX940-NO-PRELOAD-NEXT: s_load_dwordx8 s[4:11], s[0:1], 0x20
+; GFX940-NO-PRELOAD-NEXT: v_mov_b32_e32 v4, 0
+; GFX940-NO-PRELOAD-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x0
+; GFX940-NO-PRELOAD-NEXT: s_waitcnt lgkmcnt(0)
+; GFX940-NO-PRELOAD-NEXT: v_mov_b32_e32 v0, s8
+; GFX940-NO-PRELOAD-NEXT: v_mov_b32_e32 v1, s9
+; GFX940-NO-PRELOAD-NEXT: v_mov_b32_e32 v2, s10
+; GFX940-NO-PRELOAD-NEXT: v_mov_b32_e32 v3, s11
+; GFX940-NO-PRELOAD-NEXT: global_store_dwordx4 v4, v[0:3], s[0:1] offset:16 sc0 sc1
+; GFX940-NO-PRELOAD-NEXT: s_nop 1
+; GFX940-NO-PRELOAD-NEXT: v_mov_b32_e32 v0, s4
+; GFX940-NO-PRELOAD-NEXT: v_mov_b32_e32 v1, s5
+; GFX940-NO-PRELOAD-NEXT: v_mov_b32_e32 v2, s6
+; GFX940-NO-PRELOAD-NEXT: v_mov_b32_e32 v3, s7
+; GFX940-NO-PRELOAD-NEXT: global_store_dwordx4 v4, v[0:3], s[0:1] sc0 sc1
+; GFX940-NO-PRELOAD-NEXT: s_endpgm
+;
+; GFX940-PRELOAD-1-LABEL: v8i32_arg:
+; GFX940-PRELOAD-1: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: ; %bb.0:
+; GFX940-PRELOAD-1-NEXT: s_load_dwordx8 s[4:11], s[0:1], 0x20
+; GFX940-PRELOAD-1-NEXT: v_mov_b32_e32 v4, 0
+; GFX940-PRELOAD-1-NEXT: s_waitcnt lgkmcnt(0)
+; GFX940-PRELOAD-1-NEXT: v_mov_b32_e32 v0, s8
+; GFX940-PRELOAD-1-NEXT: v_mov_b32_e32 v1, s9
+; GFX940-PRELOAD-1-NEXT: v_mov_b32_e32 v2, s10
+; GFX940-PRELOAD-1-NEXT: v_mov_b32_e32 v3, s11
+; GFX940-PRELOAD-1-NEXT: global_store_dwordx4 v4, v[0:3], s[2:3] offset:16 sc0 sc1
+; GFX940-PRELOAD-1-NEXT: s_nop 1
+; GFX940-PRELOAD-1-NEXT: v_mov_b32_e32 v0, s4
+; GFX940-PRELOAD-1-NEXT: v_mov_b32_e32 v1, s5
+; GFX940-PRELOAD-1-NEXT: v_mov_b32_e32 v2, s6
+; GFX940-PRELOAD-1-NEXT: v_mov_b32_e32 v3, s7
+; GFX940-PRELOAD-1-NEXT: global_store_dwordx4 v4, v[0:3], s[2:3] sc0 sc1
+; GFX940-PRELOAD-1-NEXT: s_endpgm
+;
+; GFX940-PRELOAD-2-LABEL: v8i32_arg:
+; GFX940-PRELOAD-2: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: ; %bb.0:
+; GFX940-PRELOAD-2-NEXT: s_load_dwordx8 s[4:11], s[0:1], 0x20
+; GFX940-PRELOAD-2-NEXT: v_mov_b32_e32 v4, 0
+; GFX940-PRELOAD-2-NEXT: s_waitcnt lgkmcnt(0)
+; GFX940-PRELOAD-2-NEXT: v_mov_b32_e32 v0, s8
+; GFX940-PRELOAD-2-NEXT: v_mov_b32_e32 v1, s9
+; GFX940-PRELOAD-2-NEXT: v_mov_b32_e32 v2, s10
+; GFX940-PRELOAD-2-NEXT: v_mov_b32_e32 v3, s11
+; GFX940-PRELOAD-2-NEXT: global_store_dwordx4 v4, v[0:3], s[2:3] offset:16 sc0 sc1
+; GFX940-PRELOAD-2-NEXT: s_nop 1
+; GFX940-PRELOAD-2-NEXT: v_mov_b32_e32 v0, s4
+; GFX940-PRELOAD-2-NEXT: v_mov_b32_e32 v1, s5
+; GFX940-PRELOAD-2-NEXT: v_mov_b32_e32 v2, s6
+; GFX940-PRELOAD-2-NEXT: v_mov_b32_e32 v3, s7
+; GFX940-PRELOAD-2-NEXT: global_store_dwordx4 v4, v[0:3], s[2:3] sc0 sc1
+; GFX940-PRELOAD-2-NEXT: s_endpgm
+;
+; GFX940-PRELOAD-4-LABEL: v8i32_arg:
+; GFX940-PRELOAD-4: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: ; %bb.0:
+; GFX940-PRELOAD-4-NEXT: s_load_dwordx8 s[4:11], s[0:1], 0x20
+; GFX940-PRELOAD-4-NEXT: v_mov_b32_e32 v4, 0
+; GFX940-PRELOAD-4-NEXT: s_waitcnt lgkmcnt(0)
+; GFX940-PRELOAD-4-NEXT: v_mov_b32_e32 v0, s8
+; GFX940-PRELOAD-4-NEXT: v_mov_b32_e32 v1, s9
+; GFX940-PRELOAD-4-NEXT: v_mov_b32_e32 v2, s10
+; GFX940-PRELOAD-4-NEXT: v_mov_b32_e32 v3, s11
+; GFX940-PRELOAD-4-NEXT: global_store_dwordx4 v4, v[0:3], s[2:3] offset:16 sc0 sc1
+; GFX940-PRELOAD-4-NEXT: s_nop 1
+; GFX940-PRELOAD-4-NEXT: v_mov_b32_e32 v0, s4
+; GFX940-PRELOAD-4-NEXT: v_mov_b32_e32 v1, s5
+; GFX940-PRELOAD-4-NEXT: v_mov_b32_e32 v2, s6
+; GFX940-PRELOAD-4-NEXT: v_mov_b32_e32 v3, s7
+; GFX940-PRELOAD-4-NEXT: global_store_dwordx4 v4, v[0:3], s[2:3] sc0 sc1
+; GFX940-PRELOAD-4-NEXT: s_endpgm
+;
+; GFX940-PRELOAD-8-LABEL: v8i32_arg:
+; GFX940-PRELOAD-8: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: ; %bb.0:
+; GFX940-PRELOAD-8-NEXT: s_load_dwordx8 s[4:11], s[0:1], 0x20
+; GFX940-PRELOAD-8-NEXT: v_mov_b32_e32 v4, 0
+; GFX940-PRELOAD-8-NEXT: s_waitcnt lgkmcnt(0)
+; GFX940-PRELOAD-8-NEXT: v_mov_b32_e32 v0, s8
+; GFX940-PRELOAD-8-NEXT: v_mov_b32_e32 v1, s9
+; GFX940-PRELOAD-8-NEXT: v_mov_b32_e32 v2, s10
+; GFX940-PRELOAD-8-NEXT: v_mov_b32_e32 v3, s11
+; GFX940-PRELOAD-8-NEXT: global_store_dwordx4 v4, v[0:3], s[2:3] offset:16 sc0 sc1
+; GFX940-PRELOAD-8-NEXT: s_nop 1
+; GFX940-PRELOAD-8-NEXT: v_mov_b32_e32 v0, s4
+; GFX940-PRELOAD-8-NEXT: v_mov_b32_e32 v1, s5
+; GFX940-PRELOAD-8-NEXT: v_mov_b32_e32 v2, s6
+; GFX940-PRELOAD-8-NEXT: v_mov_b32_e32 v3, s7
+; GFX940-PRELOAD-8-NEXT: global_store_dwordx4 v4, v[0:3], s[2:3] sc0 sc1
+; GFX940-PRELOAD-8-NEXT: s_endpgm
+;
+; GFX90a-NO-PRELOAD-LABEL: v8i32_arg:
+; GFX90a-NO-PRELOAD: ; %bb.0:
+; GFX90a-NO-PRELOAD-NEXT: s_load_dwordx8 s[8:15], s[4:5], 0x20
+; GFX90a-NO-PRELOAD-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x0
+; GFX90a-NO-PRELOAD-NEXT: v_mov_b32_e32 v4, 0
+; GFX90a-NO-PRELOAD-NEXT: s_waitcnt lgkmcnt(0)
+; GFX90a-NO-PRELOAD-NEXT: v_mov_b32_e32 v0, s12
+; GFX90a-NO-PRELOAD-NEXT: v_mov_b32_e32 v1, s13
+; GFX90a-NO-PRELOAD-NEXT: v_mov_b32_e32 v2, s14
+; GFX90a-NO-PRELOAD-NEXT: v_mov_b32_e32 v3, s15
+; GFX90a-NO-PRELOAD-NEXT: global_store_dwordx4 v4, v[0:3], s[0:1] offset:16
+; GFX90a-NO-PRELOAD-NEXT: s_nop 0
+; GFX90a-NO-PRELOAD-NEXT: v_mov_b32_e32 v0, s8
+; GFX90a-NO-PRELOAD-NEXT: v_mov_b32_e32 v1, s9
+; GFX90a-NO-PRELOAD-NEXT: v_mov_b32_e32 v2, s10
+; GFX90a-NO-PRELOAD-NEXT: v_mov_b32_e32 v3, s11
+; GFX90a-NO-PRELOAD-NEXT: global_store_dwordx4 v4, v[0:3], s[0:1]
+; GFX90a-NO-PRELOAD-NEXT: s_endpgm
+;
+; GFX90a-PRELOAD-1-LABEL: v8i32_arg:
+; GFX90a-PRELOAD-1: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: ; %bb.0:
+; GFX90a-PRELOAD-1-NEXT: s_load_dwordx8 s[8:15], s[4:5], 0x20
+; GFX90a-PRELOAD-1-NEXT: v_mov_b32_e32 v4, 0
+; GFX90a-PRELOAD-1-NEXT: s_waitcnt lgkmcnt(0)
+; GFX90a-PRELOAD-1-NEXT: v_mov_b32_e32 v0, s12
+; GFX90a-PRELOAD-1-NEXT: v_mov_b32_e32 v1, s13
+; GFX90a-PRELOAD-1-NEXT: v_mov_b32_e32 v2, s14
+; GFX90a-PRELOAD-1-NEXT: v_mov_b32_e32 v3, s15
+; GFX90a-PRELOAD-1-NEXT: global_store_dwordx4 v4, v[0:3], s[6:7] offset:16
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: v_mov_b32_e32 v0, s8
+; GFX90a-PRELOAD-1-NEXT: v_mov_b32_e32 v1, s9
+; GFX90a-PRELOAD-1-NEXT: v_mov_b32_e32 v2, s10
+; GFX90a-PRELOAD-1-NEXT: v_mov_b32_e32 v3, s11
+; GFX90a-PRELOAD-1-NEXT: global_store_dwordx4 v4, v[0:3], s[6:7]
+; GFX90a-PRELOAD-1-NEXT: s_endpgm
+;
+; GFX90a-PRELOAD-2-LABEL: v8i32_arg:
+; GFX90a-PRELOAD-2: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: ; %bb.0:
+; GFX90a-PRELOAD-2-NEXT: s_load_dwordx8 s[8:15], s[4:5], 0x20
+; GFX90a-PRELOAD-2-NEXT: v_mov_b32_e32 v4, 0
+; GFX90a-PRELOAD-2-NEXT: s_waitcnt lgkmcnt(0)
+; GFX90a-PRELOAD-2-NEXT: v_mov_b32_e32 v0, s12
+; GFX90a-PRELOAD-2-NEXT: v_mov_b32_e32 v1, s13
+; GFX90a-PRELOAD-2-NEXT: v_mov_b32_e32 v2, s14
+; GFX90a-PRELOAD-2-NEXT: v_mov_b32_e32 v3, s15
+; GFX90a-PRELOAD-2-NEXT: global_store_dwordx4 v4, v[0:3], s[6:7] offset:16
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: v_mov_b32_e32 v0, s8
+; GFX90a-PRELOAD-2-NEXT: v_mov_b32_e32 v1, s9
+; GFX90a-PRELOAD-2-NEXT: v_mov_b32_e32 v2, s10
+; GFX90a-PRELOAD-2-NEXT: v_mov_b32_e32 v3, s11
+; GFX90a-PRELOAD-2-NEXT: global_store_dwordx4 v4, v[0:3], s[6:7]
+; GFX90a-PRELOAD-2-NEXT: s_endpgm
+;
+; GFX90a-PRELOAD-4-LABEL: v8i32_arg:
+; GFX90a-PRELOAD-4: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: ; %bb.0:
+; GFX90a-PRELOAD-4-NEXT: s_load_dwordx8 s[8:15], s[4:5], 0x20
+; GFX90a-PRELOAD-4-NEXT: v_mov_b32_e32 v4, 0
+; GFX90a-PRELOAD-4-NEXT: s_waitcnt lgkmcnt(0)
+; GFX90a-PRELOAD-4-NEXT: v_mov_b32_e32 v0, s12
+; GFX90a-PRELOAD-4-NEXT: v_mov_b32_e32 v1, s13
+; GFX90a-PRELOAD-4-NEXT: v_mov_b32_e32 v2, s14
+; GFX90a-PRELOAD-4-NEXT: v_mov_b32_e32 v3, s15
+; GFX90a-PRELOAD-4-NEXT: global_store_dwordx4 v4, v[0:3], s[6:7] offset:16
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: v_mov_b32_e32 v0, s8
+; GFX90a-PRELOAD-4-NEXT: v_mov_b32_e32 v1, s9
+; GFX90a-PRELOAD-4-NEXT: v_mov_b32_e32 v2, s10
+; GFX90a-PRELOAD-4-NEXT: v_mov_b32_e32 v3, s11
+; GFX90a-PRELOAD-4-NEXT: global_store_dwordx4 v4, v[0:3], s[6:7]
+; GFX90a-PRELOAD-4-NEXT: s_endpgm
+;
+; GFX90a-PRELOAD-8-LABEL: v8i32_arg:
+; GFX90a-PRELOAD-8: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: ; %bb.0:
+; GFX90a-PRELOAD-8-NEXT: s_load_dwordx8 s[8:15], s[4:5], 0x20
+; GFX90a-PRELOAD-8-NEXT: v_mov_b32_e32 v4, 0
+; GFX90a-PRELOAD-8-NEXT: s_waitcnt lgkmcnt(0)
+; GFX90a-PRELOAD-8-NEXT: v_mov_b32_e32 v0, s12
+; GFX90a-PRELOAD-8-NEXT: v_mov_b32_e32 v1, s13
+; GFX90a-PRELOAD-8-NEXT: v_mov_b32_e32 v2, s14
+; GFX90a-PRELOAD-8-NEXT: v_mov_b32_e32 v3, s15
+; GFX90a-PRELOAD-8-NEXT: global_store_dwordx4 v4, v[0:3], s[6:7] offset:16
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: v_mov_b32_e32 v0, s8
+; GFX90a-PRELOAD-8-NEXT: v_mov_b32_e32 v1, s9
+; GFX90a-PRELOAD-8-NEXT: v_mov_b32_e32 v2, s10
+; GFX90a-PRELOAD-8-NEXT: v_mov_b32_e32 v3, s11
+; GFX90a-PRELOAD-8-NEXT: global_store_dwordx4 v4, v[0:3], s[6:7]
+; GFX90a-PRELOAD-8-NEXT: s_endpgm
store <8 x i32> %in, ptr addrspace(1) %out, align 4
ret void
}
define amdgpu_kernel void @v3i16_preload_arg(ptr addrspace(1) nocapture %out, <3 x i16> %in) nounwind {
-; NO-PRELOAD-LABEL: v3i16_preload_arg:
-; NO-PRELOAD: ; %bb.0:
-; NO-PRELOAD-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x0
-; NO-PRELOAD-NEXT: v_mov_b32_e32 v0, 0
-; NO-PRELOAD-NEXT: s_waitcnt lgkmcnt(0)
-; NO-PRELOAD-NEXT: v_mov_b32_e32 v1, s3
-; NO-PRELOAD-NEXT: v_mov_b32_e32 v2, s2
-; NO-PRELOAD-NEXT: global_store_short v0, v1, s[0:1] offset:4 sc0 sc1
-; NO-PRELOAD-NEXT: global_store_dword v0, v2, s[0:1] sc0 sc1
-; NO-PRELOAD-NEXT: s_endpgm
-;
-; PRELOAD-1-LABEL: v3i16_preload_arg:
-; PRELOAD-1: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: ; %bb.0:
-; PRELOAD-1-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x8
-; PRELOAD-1-NEXT: v_mov_b32_e32 v0, 0
-; PRELOAD-1-NEXT: s_waitcnt lgkmcnt(0)
-; PRELOAD-1-NEXT: v_mov_b32_e32 v1, s1
-; PRELOAD-1-NEXT: v_mov_b32_e32 v2, s0
-; PRELOAD-1-NEXT: global_store_short v0, v1, s[2:3] offset:4 sc0 sc1
-; PRELOAD-1-NEXT: global_store_dword v0, v2, s[2:3] sc0 sc1
-; PRELOAD-1-NEXT: s_endpgm
-;
-; PRELOAD-2-LABEL: v3i16_preload_arg:
-; PRELOAD-2: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: ; %bb.0:
-; PRELOAD-2-NEXT: v_mov_b32_e32 v0, 0
-; PRELOAD-2-NEXT: v_mov_b32_e32 v1, s5
-; PRELOAD-2-NEXT: global_store_short v0, v1, s[2:3] offset:4 sc0 sc1
-; PRELOAD-2-NEXT: v_mov_b32_e32 v1, s4
-; PRELOAD-2-NEXT: global_store_dword v0, v1, s[2:3] sc0 sc1
-; PRELOAD-2-NEXT: s_endpgm
-;
-; PRELOAD-4-LABEL: v3i16_preload_arg:
-; PRELOAD-4: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: ; %bb.0:
-; PRELOAD-4-NEXT: v_mov_b32_e32 v0, 0
-; PRELOAD-4-NEXT: v_mov_b32_e32 v1, s5
-; PRELOAD-4-NEXT: global_store_short v0, v1, s[2:3] offset:4 sc0 sc1
-; PRELOAD-4-NEXT: v_mov_b32_e32 v1, s4
-; PRELOAD-4-NEXT: global_store_dword v0, v1, s[2:3] sc0 sc1
-; PRELOAD-4-NEXT: s_endpgm
-;
-; PRELOAD-8-LABEL: v3i16_preload_arg:
-; PRELOAD-8: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: ; %bb.0:
-; PRELOAD-8-NEXT: v_mov_b32_e32 v0, 0
-; PRELOAD-8-NEXT: v_mov_b32_e32 v1, s5
-; PRELOAD-8-NEXT: global_store_short v0, v1, s[2:3] offset:4 sc0 sc1
-; PRELOAD-8-NEXT: v_mov_b32_e32 v1, s4
-; PRELOAD-8-NEXT: global_store_dword v0, v1, s[2:3] sc0 sc1
-; PRELOAD-8-NEXT: s_endpgm
+; GFX940-NO-PRELOAD-LABEL: v3i16_preload_arg:
+; GFX940-NO-PRELOAD: ; %bb.0:
+; GFX940-NO-PRELOAD-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x0
+; GFX940-NO-PRELOAD-NEXT: v_mov_b32_e32 v0, 0
+; GFX940-NO-PRELOAD-NEXT: s_waitcnt lgkmcnt(0)
+; GFX940-NO-PRELOAD-NEXT: v_mov_b32_e32 v1, s3
+; GFX940-NO-PRELOAD-NEXT: v_mov_b32_e32 v2, s2
+; GFX940-NO-PRELOAD-NEXT: global_store_short v0, v1, s[0:1] offset:4 sc0 sc1
+; GFX940-NO-PRELOAD-NEXT: global_store_dword v0, v2, s[0:1] sc0 sc1
+; GFX940-NO-PRELOAD-NEXT: s_endpgm
+;
+; GFX940-PRELOAD-1-LABEL: v3i16_preload_arg:
+; GFX940-PRELOAD-1: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: ; %bb.0:
+; GFX940-PRELOAD-1-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x8
+; GFX940-PRELOAD-1-NEXT: v_mov_b32_e32 v0, 0
+; GFX940-PRELOAD-1-NEXT: s_waitcnt lgkmcnt(0)
+; GFX940-PRELOAD-1-NEXT: v_mov_b32_e32 v1, s1
+; GFX940-PRELOAD-1-NEXT: v_mov_b32_e32 v2, s0
+; GFX940-PRELOAD-1-NEXT: global_store_short v0, v1, s[2:3] offset:4 sc0 sc1
+; GFX940-PRELOAD-1-NEXT: global_store_dword v0, v2, s[2:3] sc0 sc1
+; GFX940-PRELOAD-1-NEXT: s_endpgm
+;
+; GFX940-PRELOAD-2-LABEL: v3i16_preload_arg:
+; GFX940-PRELOAD-2: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: ; %bb.0:
+; GFX940-PRELOAD-2-NEXT: v_mov_b32_e32 v0, 0
+; GFX940-PRELOAD-2-NEXT: v_mov_b32_e32 v1, s5
+; GFX940-PRELOAD-2-NEXT: global_store_short v0, v1, s[2:3] offset:4 sc0 sc1
+; GFX940-PRELOAD-2-NEXT: v_mov_b32_e32 v1, s4
+; GFX940-PRELOAD-2-NEXT: global_store_dword v0, v1, s[2:3] sc0 sc1
+; GFX940-PRELOAD-2-NEXT: s_endpgm
+;
+; GFX940-PRELOAD-4-LABEL: v3i16_preload_arg:
+; GFX940-PRELOAD-4: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: ; %bb.0:
+; GFX940-PRELOAD-4-NEXT: v_mov_b32_e32 v0, 0
+; GFX940-PRELOAD-4-NEXT: v_mov_b32_e32 v1, s5
+; GFX940-PRELOAD-4-NEXT: global_store_short v0, v1, s[2:3] offset:4 sc0 sc1
+; GFX940-PRELOAD-4-NEXT: v_mov_b32_e32 v1, s4
+; GFX940-PRELOAD-4-NEXT: global_store_dword v0, v1, s[2:3] sc0 sc1
+; GFX940-PRELOAD-4-NEXT: s_endpgm
+;
+; GFX940-PRELOAD-8-LABEL: v3i16_preload_arg:
+; GFX940-PRELOAD-8: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: ; %bb.0:
+; GFX940-PRELOAD-8-NEXT: v_mov_b32_e32 v0, 0
+; GFX940-PRELOAD-8-NEXT: v_mov_b32_e32 v1, s5
+; GFX940-PRELOAD-8-NEXT: global_store_short v0, v1, s[2:3] offset:4 sc0 sc1
+; GFX940-PRELOAD-8-NEXT: v_mov_b32_e32 v1, s4
+; GFX940-PRELOAD-8-NEXT: global_store_dword v0, v1, s[2:3] sc0 sc1
+; GFX940-PRELOAD-8-NEXT: s_endpgm
+;
+; GFX90a-NO-PRELOAD-LABEL: v3i16_preload_arg:
+; GFX90a-NO-PRELOAD: ; %bb.0:
+; GFX90a-NO-PRELOAD-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x0
+; GFX90a-NO-PRELOAD-NEXT: v_mov_b32_e32 v0, 0
+; GFX90a-NO-PRELOAD-NEXT: s_waitcnt lgkmcnt(0)
+; GFX90a-NO-PRELOAD-NEXT: v_mov_b32_e32 v1, s3
+; GFX90a-NO-PRELOAD-NEXT: v_mov_b32_e32 v2, s2
+; GFX90a-NO-PRELOAD-NEXT: global_store_short v0, v1, s[0:1] offset:4
+; GFX90a-NO-PRELOAD-NEXT: global_store_dword v0, v2, s[0:1]
+; GFX90a-NO-PRELOAD-NEXT: s_endpgm
+;
+; GFX90a-PRELOAD-1-LABEL: v3i16_preload_arg:
+; GFX90a-PRELOAD-1: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: ; %bb.0:
+; GFX90a-PRELOAD-1-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x8
+; GFX90a-PRELOAD-1-NEXT: v_mov_b32_e32 v0, 0
+; GFX90a-PRELOAD-1-NEXT: s_waitcnt lgkmcnt(0)
+; GFX90a-PRELOAD-1-NEXT: v_mov_b32_e32 v1, s1
+; GFX90a-PRELOAD-1-NEXT: v_mov_b32_e32 v2, s0
+; GFX90a-PRELOAD-1-NEXT: global_store_short v0, v1, s[6:7] offset:4
+; GFX90a-PRELOAD-1-NEXT: global_store_dword v0, v2, s[6:7]
+; GFX90a-PRELOAD-1-NEXT: s_endpgm
+;
+; GFX90a-PRELOAD-2-LABEL: v3i16_preload_arg:
+; GFX90a-PRELOAD-2: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: ; %bb.0:
+; GFX90a-PRELOAD-2-NEXT: v_mov_b32_e32 v0, 0
+; GFX90a-PRELOAD-2-NEXT: v_mov_b32_e32 v1, s9
+; GFX90a-PRELOAD-2-NEXT: global_store_short v0, v1, s[6:7] offset:4
+; GFX90a-PRELOAD-2-NEXT: v_mov_b32_e32 v1, s8
+; GFX90a-PRELOAD-2-NEXT: global_store_dword v0, v1, s[6:7]
+; GFX90a-PRELOAD-2-NEXT: s_endpgm
+;
+; GFX90a-PRELOAD-4-LABEL: v3i16_preload_arg:
+; GFX90a-PRELOAD-4: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: ; %bb.0:
+; GFX90a-PRELOAD-4-NEXT: v_mov_b32_e32 v0, 0
+; GFX90a-PRELOAD-4-NEXT: v_mov_b32_e32 v1, s9
+; GFX90a-PRELOAD-4-NEXT: global_store_short v0, v1, s[6:7] offset:4
+; GFX90a-PRELOAD-4-NEXT: v_mov_b32_e32 v1, s8
+; GFX90a-PRELOAD-4-NEXT: global_store_dword v0, v1, s[6:7]
+; GFX90a-PRELOAD-4-NEXT: s_endpgm
+;
+; GFX90a-PRELOAD-8-LABEL: v3i16_preload_arg:
+; GFX90a-PRELOAD-8: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: ; %bb.0:
+; GFX90a-PRELOAD-8-NEXT: v_mov_b32_e32 v0, 0
+; GFX90a-PRELOAD-8-NEXT: v_mov_b32_e32 v1, s9
+; GFX90a-PRELOAD-8-NEXT: global_store_short v0, v1, s[6:7] offset:4
+; GFX90a-PRELOAD-8-NEXT: v_mov_b32_e32 v1, s8
+; GFX90a-PRELOAD-8-NEXT: global_store_dword v0, v1, s[6:7]
+; GFX90a-PRELOAD-8-NEXT: s_endpgm
store <3 x i16> %in, ptr addrspace(1) %out, align 4
ret void
}
define amdgpu_kernel void @v3i32_preload_arg(ptr addrspace(1) nocapture %out, <3 x i32> %in) nounwind {
-; NO-PRELOAD-LABEL: v3i32_preload_arg:
-; NO-PRELOAD: ; %bb.0:
-; NO-PRELOAD-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x10
-; NO-PRELOAD-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x0
-; NO-PRELOAD-NEXT: v_mov_b32_e32 v3, 0
-; NO-PRELOAD-NEXT: s_waitcnt lgkmcnt(0)
-; NO-PRELOAD-NEXT: v_mov_b32_e32 v0, s4
-; NO-PRELOAD-NEXT: v_mov_b32_e32 v1, s5
-; NO-PRELOAD-NEXT: v_mov_b32_e32 v2, s6
-; NO-PRELOAD-NEXT: global_store_dwordx3 v3, v[0:2], s[2:3] sc0 sc1
-; NO-PRELOAD-NEXT: s_endpgm
-;
-; PRELOAD-1-LABEL: v3i32_preload_arg:
-; PRELOAD-1: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: ; %bb.0:
-; PRELOAD-1-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x10
-; PRELOAD-1-NEXT: v_mov_b32_e32 v3, 0
-; PRELOAD-1-NEXT: s_waitcnt lgkmcnt(0)
-; PRELOAD-1-NEXT: v_mov_b32_e32 v0, s4
-; PRELOAD-1-NEXT: v_mov_b32_e32 v1, s5
-; PRELOAD-1-NEXT: v_mov_b32_e32 v2, s6
-; PRELOAD-1-NEXT: global_store_dwordx3 v3, v[0:2], s[2:3] sc0 sc1
-; PRELOAD-1-NEXT: s_endpgm
-;
-; PRELOAD-2-LABEL: v3i32_preload_arg:
-; PRELOAD-2: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: ; %bb.0:
-; PRELOAD-2-NEXT: v_mov_b32_e32 v0, s6
-; PRELOAD-2-NEXT: v_mov_b32_e32 v1, s7
-; PRELOAD-2-NEXT: v_mov_b32_e32 v2, s8
-; PRELOAD-2-NEXT: v_mov_b32_e32 v3, 0
-; PRELOAD-2-NEXT: global_store_dwordx3 v3, v[0:2], s[2:3] sc0 sc1
-; PRELOAD-2-NEXT: s_endpgm
-;
-; PRELOAD-4-LABEL: v3i32_preload_arg:
-; PRELOAD-4: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: ; %bb.0:
-; PRELOAD-4-NEXT: v_mov_b32_e32 v0, s6
-; PRELOAD-4-NEXT: v_mov_b32_e32 v1, s7
-; PRELOAD-4-NEXT: v_mov_b32_e32 v2, s8
-; PRELOAD-4-NEXT: v_mov_b32_e32 v3, 0
-; PRELOAD-4-NEXT: global_store_dwordx3 v3, v[0:2], s[2:3] sc0 sc1
-; PRELOAD-4-NEXT: s_endpgm
-;
-; PRELOAD-8-LABEL: v3i32_preload_arg:
-; PRELOAD-8: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: ; %bb.0:
-; PRELOAD-8-NEXT: v_mov_b32_e32 v0, s6
-; PRELOAD-8-NEXT: v_mov_b32_e32 v1, s7
-; PRELOAD-8-NEXT: v_mov_b32_e32 v2, s8
-; PRELOAD-8-NEXT: v_mov_b32_e32 v3, 0
-; PRELOAD-8-NEXT: global_store_dwordx3 v3, v[0:2], s[2:3] sc0 sc1
-; PRELOAD-8-NEXT: s_endpgm
+; GFX940-NO-PRELOAD-LABEL: v3i32_preload_arg:
+; GFX940-NO-PRELOAD: ; %bb.0:
+; GFX940-NO-PRELOAD-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x10
+; GFX940-NO-PRELOAD-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x0
+; GFX940-NO-PRELOAD-NEXT: v_mov_b32_e32 v3, 0
+; GFX940-NO-PRELOAD-NEXT: s_waitcnt lgkmcnt(0)
+; GFX940-NO-PRELOAD-NEXT: v_mov_b32_e32 v0, s4
+; GFX940-NO-PRELOAD-NEXT: v_mov_b32_e32 v1, s5
+; GFX940-NO-PRELOAD-NEXT: v_mov_b32_e32 v2, s6
+; GFX940-NO-PRELOAD-NEXT: global_store_dwordx3 v3, v[0:2], s[2:3] sc0 sc1
+; GFX940-NO-PRELOAD-NEXT: s_endpgm
+;
+; GFX940-PRELOAD-1-LABEL: v3i32_preload_arg:
+; GFX940-PRELOAD-1: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: ; %bb.0:
+; GFX940-PRELOAD-1-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x10
+; GFX940-PRELOAD-1-NEXT: v_mov_b32_e32 v3, 0
+; GFX940-PRELOAD-1-NEXT: s_waitcnt lgkmcnt(0)
+; GFX940-PRELOAD-1-NEXT: v_mov_b32_e32 v0, s4
+; GFX940-PRELOAD-1-NEXT: v_mov_b32_e32 v1, s5
+; GFX940-PRELOAD-1-NEXT: v_mov_b32_e32 v2, s6
+; GFX940-PRELOAD-1-NEXT: global_store_dwordx3 v3, v[0:2], s[2:3] sc0 sc1
+; GFX940-PRELOAD-1-NEXT: s_endpgm
+;
+; GFX940-PRELOAD-2-LABEL: v3i32_preload_arg:
+; GFX940-PRELOAD-2: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: ; %bb.0:
+; GFX940-PRELOAD-2-NEXT: v_mov_b32_e32 v0, s6
+; GFX940-PRELOAD-2-NEXT: v_mov_b32_e32 v1, s7
+; GFX940-PRELOAD-2-NEXT: v_mov_b32_e32 v2, s8
+; GFX940-PRELOAD-2-NEXT: v_mov_b32_e32 v3, 0
+; GFX940-PRELOAD-2-NEXT: global_store_dwordx3 v3, v[0:2], s[2:3] sc0 sc1
+; GFX940-PRELOAD-2-NEXT: s_endpgm
+;
+; GFX940-PRELOAD-4-LABEL: v3i32_preload_arg:
+; GFX940-PRELOAD-4: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: ; %bb.0:
+; GFX940-PRELOAD-4-NEXT: v_mov_b32_e32 v0, s6
+; GFX940-PRELOAD-4-NEXT: v_mov_b32_e32 v1, s7
+; GFX940-PRELOAD-4-NEXT: v_mov_b32_e32 v2, s8
+; GFX940-PRELOAD-4-NEXT: v_mov_b32_e32 v3, 0
+; GFX940-PRELOAD-4-NEXT: global_store_dwordx3 v3, v[0:2], s[2:3] sc0 sc1
+; GFX940-PRELOAD-4-NEXT: s_endpgm
+;
+; GFX940-PRELOAD-8-LABEL: v3i32_preload_arg:
+; GFX940-PRELOAD-8: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: ; %bb.0:
+; GFX940-PRELOAD-8-NEXT: v_mov_b32_e32 v0, s6
+; GFX940-PRELOAD-8-NEXT: v_mov_b32_e32 v1, s7
+; GFX940-PRELOAD-8-NEXT: v_mov_b32_e32 v2, s8
+; GFX940-PRELOAD-8-NEXT: v_mov_b32_e32 v3, 0
+; GFX940-PRELOAD-8-NEXT: global_store_dwordx3 v3, v[0:2], s[2:3] sc0 sc1
+; GFX940-PRELOAD-8-NEXT: s_endpgm
+;
+; GFX90a-NO-PRELOAD-LABEL: v3i32_preload_arg:
+; GFX90a-NO-PRELOAD: ; %bb.0:
+; GFX90a-NO-PRELOAD-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x10
+; GFX90a-NO-PRELOAD-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0x0
+; GFX90a-NO-PRELOAD-NEXT: v_mov_b32_e32 v3, 0
+; GFX90a-NO-PRELOAD-NEXT: s_waitcnt lgkmcnt(0)
+; GFX90a-NO-PRELOAD-NEXT: v_mov_b32_e32 v0, s0
+; GFX90a-NO-PRELOAD-NEXT: v_mov_b32_e32 v1, s1
+; GFX90a-NO-PRELOAD-NEXT: v_mov_b32_e32 v2, s2
+; GFX90a-NO-PRELOAD-NEXT: global_store_dwordx3 v3, v[0:2], s[6:7]
+; GFX90a-NO-PRELOAD-NEXT: s_endpgm
+;
+; GFX90a-PRELOAD-1-LABEL: v3i32_preload_arg:
+; GFX90a-PRELOAD-1: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: ; %bb.0:
+; GFX90a-PRELOAD-1-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x10
+; GFX90a-PRELOAD-1-NEXT: v_mov_b32_e32 v3, 0
+; GFX90a-PRELOAD-1-NEXT: s_waitcnt lgkmcnt(0)
+; GFX90a-PRELOAD-1-NEXT: v_mov_b32_e32 v0, s0
+; GFX90a-PRELOAD-1-NEXT: v_mov_b32_e32 v1, s1
+; GFX90a-PRELOAD-1-NEXT: v_mov_b32_e32 v2, s2
+; GFX90a-PRELOAD-1-NEXT: global_store_dwordx3 v3, v[0:2], s[6:7]
+; GFX90a-PRELOAD-1-NEXT: s_endpgm
+;
+; GFX90a-PRELOAD-2-LABEL: v3i32_preload_arg:
+; GFX90a-PRELOAD-2: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: ; %bb.0:
+; GFX90a-PRELOAD-2-NEXT: v_mov_b32_e32 v0, s10
+; GFX90a-PRELOAD-2-NEXT: v_mov_b32_e32 v1, s11
+; GFX90a-PRELOAD-2-NEXT: v_mov_b32_e32 v2, s12
+; GFX90a-PRELOAD-2-NEXT: v_mov_b32_e32 v3, 0
+; GFX90a-PRELOAD-2-NEXT: global_store_dwordx3 v3, v[0:2], s[6:7]
+; GFX90a-PRELOAD-2-NEXT: s_endpgm
+;
+; GFX90a-PRELOAD-4-LABEL: v3i32_preload_arg:
+; GFX90a-PRELOAD-4: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: ; %bb.0:
+; GFX90a-PRELOAD-4-NEXT: v_mov_b32_e32 v0, s10
+; GFX90a-PRELOAD-4-NEXT: v_mov_b32_e32 v1, s11
+; GFX90a-PRELOAD-4-NEXT: v_mov_b32_e32 v2, s12
+; GFX90a-PRELOAD-4-NEXT: v_mov_b32_e32 v3, 0
+; GFX90a-PRELOAD-4-NEXT: global_store_dwordx3 v3, v[0:2], s[6:7]
+; GFX90a-PRELOAD-4-NEXT: s_endpgm
+;
+; GFX90a-PRELOAD-8-LABEL: v3i32_preload_arg:
+; GFX90a-PRELOAD-8: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: ; %bb.0:
+; GFX90a-PRELOAD-8-NEXT: v_mov_b32_e32 v0, s10
+; GFX90a-PRELOAD-8-NEXT: v_mov_b32_e32 v1, s11
+; GFX90a-PRELOAD-8-NEXT: v_mov_b32_e32 v2, s12
+; GFX90a-PRELOAD-8-NEXT: v_mov_b32_e32 v3, 0
+; GFX90a-PRELOAD-8-NEXT: global_store_dwordx3 v3, v[0:2], s[6:7]
+; GFX90a-PRELOAD-8-NEXT: s_endpgm
store <3 x i32> %in, ptr addrspace(1) %out, align 4
ret void
}
define amdgpu_kernel void @v3f32_preload_arg(ptr addrspace(1) nocapture %out, <3 x float> %in) nounwind {
-; NO-PRELOAD-LABEL: v3f32_preload_arg:
-; NO-PRELOAD: ; %bb.0:
-; NO-PRELOAD-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x10
-; NO-PRELOAD-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x0
-; NO-PRELOAD-NEXT: v_mov_b32_e32 v3, 0
-; NO-PRELOAD-NEXT: s_waitcnt lgkmcnt(0)
-; NO-PRELOAD-NEXT: v_mov_b32_e32 v0, s4
-; NO-PRELOAD-NEXT: v_mov_b32_e32 v1, s5
-; NO-PRELOAD-NEXT: v_mov_b32_e32 v2, s6
-; NO-PRELOAD-NEXT: global_store_dwordx3 v3, v[0:2], s[2:3] sc0 sc1
-; NO-PRELOAD-NEXT: s_endpgm
-;
-; PRELOAD-1-LABEL: v3f32_preload_arg:
-; PRELOAD-1: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: ; %bb.0:
-; PRELOAD-1-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x10
-; PRELOAD-1-NEXT: v_mov_b32_e32 v3, 0
-; PRELOAD-1-NEXT: s_waitcnt lgkmcnt(0)
-; PRELOAD-1-NEXT: v_mov_b32_e32 v0, s4
-; PRELOAD-1-NEXT: v_mov_b32_e32 v1, s5
-; PRELOAD-1-NEXT: v_mov_b32_e32 v2, s6
-; PRELOAD-1-NEXT: global_store_dwordx3 v3, v[0:2], s[2:3] sc0 sc1
-; PRELOAD-1-NEXT: s_endpgm
-;
-; PRELOAD-2-LABEL: v3f32_preload_arg:
-; PRELOAD-2: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: ; %bb.0:
-; PRELOAD-2-NEXT: v_mov_b32_e32 v3, 0
-; PRELOAD-2-NEXT: v_mov_b32_e32 v0, s6
-; PRELOAD-2-NEXT: v_mov_b32_e32 v1, s7
-; PRELOAD-2-NEXT: v_mov_b32_e32 v2, s8
-; PRELOAD-2-NEXT: global_store_dwordx3 v3, v[0:2], s[2:3] sc0 sc1
-; PRELOAD-2-NEXT: s_endpgm
-;
-; PRELOAD-4-LABEL: v3f32_preload_arg:
-; PRELOAD-4: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: ; %bb.0:
-; PRELOAD-4-NEXT: v_mov_b32_e32 v3, 0
-; PRELOAD-4-NEXT: v_mov_b32_e32 v0, s6
-; PRELOAD-4-NEXT: v_mov_b32_e32 v1, s7
-; PRELOAD-4-NEXT: v_mov_b32_e32 v2, s8
-; PRELOAD-4-NEXT: global_store_dwordx3 v3, v[0:2], s[2:3] sc0 sc1
-; PRELOAD-4-NEXT: s_endpgm
-;
-; PRELOAD-8-LABEL: v3f32_preload_arg:
-; PRELOAD-8: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: ; %bb.0:
-; PRELOAD-8-NEXT: v_mov_b32_e32 v3, 0
-; PRELOAD-8-NEXT: v_mov_b32_e32 v0, s6
-; PRELOAD-8-NEXT: v_mov_b32_e32 v1, s7
-; PRELOAD-8-NEXT: v_mov_b32_e32 v2, s8
-; PRELOAD-8-NEXT: global_store_dwordx3 v3, v[0:2], s[2:3] sc0 sc1
-; PRELOAD-8-NEXT: s_endpgm
+; GFX940-NO-PRELOAD-LABEL: v3f32_preload_arg:
+; GFX940-NO-PRELOAD: ; %bb.0:
+; GFX940-NO-PRELOAD-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x10
+; GFX940-NO-PRELOAD-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x0
+; GFX940-NO-PRELOAD-NEXT: v_mov_b32_e32 v3, 0
+; GFX940-NO-PRELOAD-NEXT: s_waitcnt lgkmcnt(0)
+; GFX940-NO-PRELOAD-NEXT: v_mov_b32_e32 v0, s4
+; GFX940-NO-PRELOAD-NEXT: v_mov_b32_e32 v1, s5
+; GFX940-NO-PRELOAD-NEXT: v_mov_b32_e32 v2, s6
+; GFX940-NO-PRELOAD-NEXT: global_store_dwordx3 v3, v[0:2], s[2:3] sc0 sc1
+; GFX940-NO-PRELOAD-NEXT: s_endpgm
+;
+; GFX940-PRELOAD-1-LABEL: v3f32_preload_arg:
+; GFX940-PRELOAD-1: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: ; %bb.0:
+; GFX940-PRELOAD-1-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x10
+; GFX940-PRELOAD-1-NEXT: v_mov_b32_e32 v3, 0
+; GFX940-PRELOAD-1-NEXT: s_waitcnt lgkmcnt(0)
+; GFX940-PRELOAD-1-NEXT: v_mov_b32_e32 v0, s4
+; GFX940-PRELOAD-1-NEXT: v_mov_b32_e32 v1, s5
+; GFX940-PRELOAD-1-NEXT: v_mov_b32_e32 v2, s6
+; GFX940-PRELOAD-1-NEXT: global_store_dwordx3 v3, v[0:2], s[2:3] sc0 sc1
+; GFX940-PRELOAD-1-NEXT: s_endpgm
+;
+; GFX940-PRELOAD-2-LABEL: v3f32_preload_arg:
+; GFX940-PRELOAD-2: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: ; %bb.0:
+; GFX940-PRELOAD-2-NEXT: v_mov_b32_e32 v3, 0
+; GFX940-PRELOAD-2-NEXT: v_mov_b32_e32 v0, s6
+; GFX940-PRELOAD-2-NEXT: v_mov_b32_e32 v1, s7
+; GFX940-PRELOAD-2-NEXT: v_mov_b32_e32 v2, s8
+; GFX940-PRELOAD-2-NEXT: global_store_dwordx3 v3, v[0:2], s[2:3] sc0 sc1
+; GFX940-PRELOAD-2-NEXT: s_endpgm
+;
+; GFX940-PRELOAD-4-LABEL: v3f32_preload_arg:
+; GFX940-PRELOAD-4: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: ; %bb.0:
+; GFX940-PRELOAD-4-NEXT: v_mov_b32_e32 v3, 0
+; GFX940-PRELOAD-4-NEXT: v_mov_b32_e32 v0, s6
+; GFX940-PRELOAD-4-NEXT: v_mov_b32_e32 v1, s7
+; GFX940-PRELOAD-4-NEXT: v_mov_b32_e32 v2, s8
+; GFX940-PRELOAD-4-NEXT: global_store_dwordx3 v3, v[0:2], s[2:3] sc0 sc1
+; GFX940-PRELOAD-4-NEXT: s_endpgm
+;
+; GFX940-PRELOAD-8-LABEL: v3f32_preload_arg:
+; GFX940-PRELOAD-8: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: ; %bb.0:
+; GFX940-PRELOAD-8-NEXT: v_mov_b32_e32 v3, 0
+; GFX940-PRELOAD-8-NEXT: v_mov_b32_e32 v0, s6
+; GFX940-PRELOAD-8-NEXT: v_mov_b32_e32 v1, s7
+; GFX940-PRELOAD-8-NEXT: v_mov_b32_e32 v2, s8
+; GFX940-PRELOAD-8-NEXT: global_store_dwordx3 v3, v[0:2], s[2:3] sc0 sc1
+; GFX940-PRELOAD-8-NEXT: s_endpgm
+;
+; GFX90a-NO-PRELOAD-LABEL: v3f32_preload_arg:
+; GFX90a-NO-PRELOAD: ; %bb.0:
+; GFX90a-NO-PRELOAD-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x10
+; GFX90a-NO-PRELOAD-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0x0
+; GFX90a-NO-PRELOAD-NEXT: v_mov_b32_e32 v3, 0
+; GFX90a-NO-PRELOAD-NEXT: s_waitcnt lgkmcnt(0)
+; GFX90a-NO-PRELOAD-NEXT: v_mov_b32_e32 v0, s0
+; GFX90a-NO-PRELOAD-NEXT: v_mov_b32_e32 v1, s1
+; GFX90a-NO-PRELOAD-NEXT: v_mov_b32_e32 v2, s2
+; GFX90a-NO-PRELOAD-NEXT: global_store_dwordx3 v3, v[0:2], s[6:7]
+; GFX90a-NO-PRELOAD-NEXT: s_endpgm
+;
+; GFX90a-PRELOAD-1-LABEL: v3f32_preload_arg:
+; GFX90a-PRELOAD-1: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: ; %bb.0:
+; GFX90a-PRELOAD-1-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x10
+; GFX90a-PRELOAD-1-NEXT: v_mov_b32_e32 v3, 0
+; GFX90a-PRELOAD-1-NEXT: s_waitcnt lgkmcnt(0)
+; GFX90a-PRELOAD-1-NEXT: v_mov_b32_e32 v0, s0
+; GFX90a-PRELOAD-1-NEXT: v_mov_b32_e32 v1, s1
+; GFX90a-PRELOAD-1-NEXT: v_mov_b32_e32 v2, s2
+; GFX90a-PRELOAD-1-NEXT: global_store_dwordx3 v3, v[0:2], s[6:7]
+; GFX90a-PRELOAD-1-NEXT: s_endpgm
+;
+; GFX90a-PRELOAD-2-LABEL: v3f32_preload_arg:
+; GFX90a-PRELOAD-2: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: ; %bb.0:
+; GFX90a-PRELOAD-2-NEXT: v_mov_b32_e32 v3, 0
+; GFX90a-PRELOAD-2-NEXT: v_mov_b32_e32 v0, s10
+; GFX90a-PRELOAD-2-NEXT: v_mov_b32_e32 v1, s11
+; GFX90a-PRELOAD-2-NEXT: v_mov_b32_e32 v2, s12
+; GFX90a-PRELOAD-2-NEXT: global_store_dwordx3 v3, v[0:2], s[6:7]
+; GFX90a-PRELOAD-2-NEXT: s_endpgm
+;
+; GFX90a-PRELOAD-4-LABEL: v3f32_preload_arg:
+; GFX90a-PRELOAD-4: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: ; %bb.0:
+; GFX90a-PRELOAD-4-NEXT: v_mov_b32_e32 v3, 0
+; GFX90a-PRELOAD-4-NEXT: v_mov_b32_e32 v0, s10
+; GFX90a-PRELOAD-4-NEXT: v_mov_b32_e32 v1, s11
+; GFX90a-PRELOAD-4-NEXT: v_mov_b32_e32 v2, s12
+; GFX90a-PRELOAD-4-NEXT: global_store_dwordx3 v3, v[0:2], s[6:7]
+; GFX90a-PRELOAD-4-NEXT: s_endpgm
+;
+; GFX90a-PRELOAD-8-LABEL: v3f32_preload_arg:
+; GFX90a-PRELOAD-8: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: ; %bb.0:
+; GFX90a-PRELOAD-8-NEXT: v_mov_b32_e32 v3, 0
+; GFX90a-PRELOAD-8-NEXT: v_mov_b32_e32 v0, s10
+; GFX90a-PRELOAD-8-NEXT: v_mov_b32_e32 v1, s11
+; GFX90a-PRELOAD-8-NEXT: v_mov_b32_e32 v2, s12
+; GFX90a-PRELOAD-8-NEXT: global_store_dwordx3 v3, v[0:2], s[6:7]
+; GFX90a-PRELOAD-8-NEXT: s_endpgm
store <3 x float> %in, ptr addrspace(1) %out, align 4
ret void
}
define amdgpu_kernel void @v5i8_preload_arg(ptr addrspace(1) nocapture %out, <5 x i8> %in) nounwind {
-; NO-PRELOAD-LABEL: v5i8_preload_arg:
-; NO-PRELOAD: ; %bb.0:
-; NO-PRELOAD-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x0
-; NO-PRELOAD-NEXT: v_mov_b32_e32 v0, 0
-; NO-PRELOAD-NEXT: s_waitcnt lgkmcnt(0)
-; NO-PRELOAD-NEXT: v_mov_b32_e32 v1, s3
-; NO-PRELOAD-NEXT: v_mov_b32_e32 v2, s2
-; NO-PRELOAD-NEXT: global_store_byte v0, v1, s[0:1] offset:4 sc0 sc1
-; NO-PRELOAD-NEXT: global_store_dword v0, v2, s[0:1] sc0 sc1
-; NO-PRELOAD-NEXT: s_endpgm
-;
-; PRELOAD-1-LABEL: v5i8_preload_arg:
-; PRELOAD-1: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: ; %bb.0:
-; PRELOAD-1-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x8
-; PRELOAD-1-NEXT: v_mov_b32_e32 v0, 0
-; PRELOAD-1-NEXT: s_waitcnt lgkmcnt(0)
-; PRELOAD-1-NEXT: v_mov_b32_e32 v1, s1
-; PRELOAD-1-NEXT: v_mov_b32_e32 v2, s0
-; PRELOAD-1-NEXT: global_store_byte v0, v1, s[2:3] offset:4 sc0 sc1
-; PRELOAD-1-NEXT: global_store_dword v0, v2, s[2:3] sc0 sc1
-; PRELOAD-1-NEXT: s_endpgm
-;
-; PRELOAD-2-LABEL: v5i8_preload_arg:
-; PRELOAD-2: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: ; %bb.0:
-; PRELOAD-2-NEXT: s_lshr_b32 s0, s4, 8
-; PRELOAD-2-NEXT: v_lshlrev_b16_e64 v0, 8, s0
-; PRELOAD-2-NEXT: s_lshr_b32 s0, s4, 24
-; PRELOAD-2-NEXT: v_lshlrev_b16_e64 v1, 8, s0
-; PRELOAD-2-NEXT: s_lshr_b32 s0, s4, 16
-; PRELOAD-2-NEXT: v_or_b32_sdwa v0, s4, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; PRELOAD-2-NEXT: v_or_b32_sdwa v1, s0, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; PRELOAD-2-NEXT: v_mov_b32_e32 v2, s5
-; PRELOAD-2-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; PRELOAD-2-NEXT: v_mov_b32_e32 v1, 0
-; PRELOAD-2-NEXT: global_store_byte v1, v2, s[2:3] offset:4 sc0 sc1
-; PRELOAD-2-NEXT: global_store_dword v1, v0, s[2:3] sc0 sc1
-; PRELOAD-2-NEXT: s_endpgm
-;
-; PRELOAD-4-LABEL: v5i8_preload_arg:
-; PRELOAD-4: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: ; %bb.0:
-; PRELOAD-4-NEXT: s_lshr_b32 s0, s4, 8
-; PRELOAD-4-NEXT: v_lshlrev_b16_e64 v0, 8, s0
-; PRELOAD-4-NEXT: s_lshr_b32 s0, s4, 24
-; PRELOAD-4-NEXT: v_lshlrev_b16_e64 v1, 8, s0
-; PRELOAD-4-NEXT: s_lshr_b32 s0, s4, 16
-; PRELOAD-4-NEXT: v_or_b32_sdwa v0, s4, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; PRELOAD-4-NEXT: v_or_b32_sdwa v1, s0, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; PRELOAD-4-NEXT: v_mov_b32_e32 v2, s5
-; PRELOAD-4-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; PRELOAD-4-NEXT: v_mov_b32_e32 v1, 0
-; PRELOAD-4-NEXT: global_store_byte v1, v2, s[2:3] offset:4 sc0 sc1
-; PRELOAD-4-NEXT: global_store_dword v1, v0, s[2:3] sc0 sc1
-; PRELOAD-4-NEXT: s_endpgm
-;
-; PRELOAD-8-LABEL: v5i8_preload_arg:
-; PRELOAD-8: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: ; %bb.0:
-; PRELOAD-8-NEXT: s_lshr_b32 s0, s4, 8
-; PRELOAD-8-NEXT: v_lshlrev_b16_e64 v0, 8, s0
-; PRELOAD-8-NEXT: s_lshr_b32 s0, s4, 24
-; PRELOAD-8-NEXT: v_lshlrev_b16_e64 v1, 8, s0
-; PRELOAD-8-NEXT: s_lshr_b32 s0, s4, 16
-; PRELOAD-8-NEXT: v_or_b32_sdwa v0, s4, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; PRELOAD-8-NEXT: v_or_b32_sdwa v1, s0, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; PRELOAD-8-NEXT: v_mov_b32_e32 v2, s5
-; PRELOAD-8-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; PRELOAD-8-NEXT: v_mov_b32_e32 v1, 0
-; PRELOAD-8-NEXT: global_store_byte v1, v2, s[2:3] offset:4 sc0 sc1
-; PRELOAD-8-NEXT: global_store_dword v1, v0, s[2:3] sc0 sc1
-; PRELOAD-8-NEXT: s_endpgm
+; GFX940-NO-PRELOAD-LABEL: v5i8_preload_arg:
+; GFX940-NO-PRELOAD: ; %bb.0:
+; GFX940-NO-PRELOAD-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x0
+; GFX940-NO-PRELOAD-NEXT: v_mov_b32_e32 v0, 0
+; GFX940-NO-PRELOAD-NEXT: s_waitcnt lgkmcnt(0)
+; GFX940-NO-PRELOAD-NEXT: v_mov_b32_e32 v1, s3
+; GFX940-NO-PRELOAD-NEXT: v_mov_b32_e32 v2, s2
+; GFX940-NO-PRELOAD-NEXT: global_store_byte v0, v1, s[0:1] offset:4 sc0 sc1
+; GFX940-NO-PRELOAD-NEXT: global_store_dword v0, v2, s[0:1] sc0 sc1
+; GFX940-NO-PRELOAD-NEXT: s_endpgm
+;
+; GFX940-PRELOAD-1-LABEL: v5i8_preload_arg:
+; GFX940-PRELOAD-1: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: ; %bb.0:
+; GFX940-PRELOAD-1-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x8
+; GFX940-PRELOAD-1-NEXT: v_mov_b32_e32 v0, 0
+; GFX940-PRELOAD-1-NEXT: s_waitcnt lgkmcnt(0)
+; GFX940-PRELOAD-1-NEXT: v_mov_b32_e32 v1, s1
+; GFX940-PRELOAD-1-NEXT: v_mov_b32_e32 v2, s0
+; GFX940-PRELOAD-1-NEXT: global_store_byte v0, v1, s[2:3] offset:4 sc0 sc1
+; GFX940-PRELOAD-1-NEXT: global_store_dword v0, v2, s[2:3] sc0 sc1
+; GFX940-PRELOAD-1-NEXT: s_endpgm
+;
+; GFX940-PRELOAD-2-LABEL: v5i8_preload_arg:
+; GFX940-PRELOAD-2: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: ; %bb.0:
+; GFX940-PRELOAD-2-NEXT: s_lshr_b32 s0, s4, 8
+; GFX940-PRELOAD-2-NEXT: v_lshlrev_b16_e64 v0, 8, s0
+; GFX940-PRELOAD-2-NEXT: s_lshr_b32 s0, s4, 24
+; GFX940-PRELOAD-2-NEXT: v_lshlrev_b16_e64 v1, 8, s0
+; GFX940-PRELOAD-2-NEXT: s_lshr_b32 s0, s4, 16
+; GFX940-PRELOAD-2-NEXT: v_or_b32_sdwa v0, s4, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX940-PRELOAD-2-NEXT: v_or_b32_sdwa v1, s0, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX940-PRELOAD-2-NEXT: v_mov_b32_e32 v2, s5
+; GFX940-PRELOAD-2-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; GFX940-PRELOAD-2-NEXT: v_mov_b32_e32 v1, 0
+; GFX940-PRELOAD-2-NEXT: global_store_byte v1, v2, s[2:3] offset:4 sc0 sc1
+; GFX940-PRELOAD-2-NEXT: global_store_dword v1, v0, s[2:3] sc0 sc1
+; GFX940-PRELOAD-2-NEXT: s_endpgm
+;
+; GFX940-PRELOAD-4-LABEL: v5i8_preload_arg:
+; GFX940-PRELOAD-4: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: ; %bb.0:
+; GFX940-PRELOAD-4-NEXT: s_lshr_b32 s0, s4, 8
+; GFX940-PRELOAD-4-NEXT: v_lshlrev_b16_e64 v0, 8, s0
+; GFX940-PRELOAD-4-NEXT: s_lshr_b32 s0, s4, 24
+; GFX940-PRELOAD-4-NEXT: v_lshlrev_b16_e64 v1, 8, s0
+; GFX940-PRELOAD-4-NEXT: s_lshr_b32 s0, s4, 16
+; GFX940-PRELOAD-4-NEXT: v_or_b32_sdwa v0, s4, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX940-PRELOAD-4-NEXT: v_or_b32_sdwa v1, s0, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX940-PRELOAD-4-NEXT: v_mov_b32_e32 v2, s5
+; GFX940-PRELOAD-4-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; GFX940-PRELOAD-4-NEXT: v_mov_b32_e32 v1, 0
+; GFX940-PRELOAD-4-NEXT: global_store_byte v1, v2, s[2:3] offset:4 sc0 sc1
+; GFX940-PRELOAD-4-NEXT: global_store_dword v1, v0, s[2:3] sc0 sc1
+; GFX940-PRELOAD-4-NEXT: s_endpgm
+;
+; GFX940-PRELOAD-8-LABEL: v5i8_preload_arg:
+; GFX940-PRELOAD-8: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: ; %bb.0:
+; GFX940-PRELOAD-8-NEXT: s_lshr_b32 s0, s4, 8
+; GFX940-PRELOAD-8-NEXT: v_lshlrev_b16_e64 v0, 8, s0
+; GFX940-PRELOAD-8-NEXT: s_lshr_b32 s0, s4, 24
+; GFX940-PRELOAD-8-NEXT: v_lshlrev_b16_e64 v1, 8, s0
+; GFX940-PRELOAD-8-NEXT: s_lshr_b32 s0, s4, 16
+; GFX940-PRELOAD-8-NEXT: v_or_b32_sdwa v0, s4, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX940-PRELOAD-8-NEXT: v_or_b32_sdwa v1, s0, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX940-PRELOAD-8-NEXT: v_mov_b32_e32 v2, s5
+; GFX940-PRELOAD-8-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; GFX940-PRELOAD-8-NEXT: v_mov_b32_e32 v1, 0
+; GFX940-PRELOAD-8-NEXT: global_store_byte v1, v2, s[2:3] offset:4 sc0 sc1
+; GFX940-PRELOAD-8-NEXT: global_store_dword v1, v0, s[2:3] sc0 sc1
+; GFX940-PRELOAD-8-NEXT: s_endpgm
+;
+; GFX90a-NO-PRELOAD-LABEL: v5i8_preload_arg:
+; GFX90a-NO-PRELOAD: ; %bb.0:
+; GFX90a-NO-PRELOAD-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x0
+; GFX90a-NO-PRELOAD-NEXT: v_mov_b32_e32 v0, 0
+; GFX90a-NO-PRELOAD-NEXT: s_waitcnt lgkmcnt(0)
+; GFX90a-NO-PRELOAD-NEXT: v_mov_b32_e32 v1, s3
+; GFX90a-NO-PRELOAD-NEXT: v_mov_b32_e32 v2, s2
+; GFX90a-NO-PRELOAD-NEXT: global_store_byte v0, v1, s[0:1] offset:4
+; GFX90a-NO-PRELOAD-NEXT: global_store_dword v0, v2, s[0:1]
+; GFX90a-NO-PRELOAD-NEXT: s_endpgm
+;
+; GFX90a-PRELOAD-1-LABEL: v5i8_preload_arg:
+; GFX90a-PRELOAD-1: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: ; %bb.0:
+; GFX90a-PRELOAD-1-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x8
+; GFX90a-PRELOAD-1-NEXT: v_mov_b32_e32 v0, 0
+; GFX90a-PRELOAD-1-NEXT: s_waitcnt lgkmcnt(0)
+; GFX90a-PRELOAD-1-NEXT: v_mov_b32_e32 v1, s1
+; GFX90a-PRELOAD-1-NEXT: v_mov_b32_e32 v2, s0
+; GFX90a-PRELOAD-1-NEXT: global_store_byte v0, v1, s[6:7] offset:4
+; GFX90a-PRELOAD-1-NEXT: global_store_dword v0, v2, s[6:7]
+; GFX90a-PRELOAD-1-NEXT: s_endpgm
+;
+; GFX90a-PRELOAD-2-LABEL: v5i8_preload_arg:
+; GFX90a-PRELOAD-2: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: ; %bb.0:
+; GFX90a-PRELOAD-2-NEXT: s_lshr_b32 s0, s8, 8
+; GFX90a-PRELOAD-2-NEXT: v_lshlrev_b16_e64 v0, 8, s0
+; GFX90a-PRELOAD-2-NEXT: s_lshr_b32 s0, s8, 24
+; GFX90a-PRELOAD-2-NEXT: v_lshlrev_b16_e64 v1, 8, s0
+; GFX90a-PRELOAD-2-NEXT: s_lshr_b32 s0, s8, 16
+; GFX90a-PRELOAD-2-NEXT: v_or_b32_sdwa v0, s8, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX90a-PRELOAD-2-NEXT: v_or_b32_sdwa v1, s0, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX90a-PRELOAD-2-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; GFX90a-PRELOAD-2-NEXT: v_mov_b32_e32 v1, 0
+; GFX90a-PRELOAD-2-NEXT: v_mov_b32_e32 v2, s9
+; GFX90a-PRELOAD-2-NEXT: global_store_byte v1, v2, s[6:7] offset:4
+; GFX90a-PRELOAD-2-NEXT: global_store_dword v1, v0, s[6:7]
+; GFX90a-PRELOAD-2-NEXT: s_endpgm
+;
+; GFX90a-PRELOAD-4-LABEL: v5i8_preload_arg:
+; GFX90a-PRELOAD-4: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: ; %bb.0:
+; GFX90a-PRELOAD-4-NEXT: s_lshr_b32 s0, s8, 8
+; GFX90a-PRELOAD-4-NEXT: v_lshlrev_b16_e64 v0, 8, s0
+; GFX90a-PRELOAD-4-NEXT: s_lshr_b32 s0, s8, 24
+; GFX90a-PRELOAD-4-NEXT: v_lshlrev_b16_e64 v1, 8, s0
+; GFX90a-PRELOAD-4-NEXT: s_lshr_b32 s0, s8, 16
+; GFX90a-PRELOAD-4-NEXT: v_or_b32_sdwa v0, s8, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX90a-PRELOAD-4-NEXT: v_or_b32_sdwa v1, s0, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX90a-PRELOAD-4-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; GFX90a-PRELOAD-4-NEXT: v_mov_b32_e32 v1, 0
+; GFX90a-PRELOAD-4-NEXT: v_mov_b32_e32 v2, s9
+; GFX90a-PRELOAD-4-NEXT: global_store_byte v1, v2, s[6:7] offset:4
+; GFX90a-PRELOAD-4-NEXT: global_store_dword v1, v0, s[6:7]
+; GFX90a-PRELOAD-4-NEXT: s_endpgm
+;
+; GFX90a-PRELOAD-8-LABEL: v5i8_preload_arg:
+; GFX90a-PRELOAD-8: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: ; %bb.0:
+; GFX90a-PRELOAD-8-NEXT: s_lshr_b32 s0, s8, 8
+; GFX90a-PRELOAD-8-NEXT: v_lshlrev_b16_e64 v0, 8, s0
+; GFX90a-PRELOAD-8-NEXT: s_lshr_b32 s0, s8, 24
+; GFX90a-PRELOAD-8-NEXT: v_lshlrev_b16_e64 v1, 8, s0
+; GFX90a-PRELOAD-8-NEXT: s_lshr_b32 s0, s8, 16
+; GFX90a-PRELOAD-8-NEXT: v_or_b32_sdwa v0, s8, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX90a-PRELOAD-8-NEXT: v_or_b32_sdwa v1, s0, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX90a-PRELOAD-8-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; GFX90a-PRELOAD-8-NEXT: v_mov_b32_e32 v1, 0
+; GFX90a-PRELOAD-8-NEXT: v_mov_b32_e32 v2, s9
+; GFX90a-PRELOAD-8-NEXT: global_store_byte v1, v2, s[6:7] offset:4
+; GFX90a-PRELOAD-8-NEXT: global_store_dword v1, v0, s[6:7]
+; GFX90a-PRELOAD-8-NEXT: s_endpgm
store <5 x i8> %in, ptr addrspace(1) %out, align 4
ret void
}
define amdgpu_kernel void @v5f64_arg(ptr addrspace(1) nocapture %out, <5 x double> %in) nounwind {
-; NO-PRELOAD-LABEL: v5f64_arg:
-; NO-PRELOAD: ; %bb.0:
-; NO-PRELOAD-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x60
-; NO-PRELOAD-NEXT: s_load_dwordx8 s[4:11], s[0:1], 0x40
-; NO-PRELOAD-NEXT: s_load_dwordx2 s[12:13], s[0:1], 0x0
-; NO-PRELOAD-NEXT: v_mov_b32_e32 v4, 0
-; NO-PRELOAD-NEXT: s_waitcnt lgkmcnt(0)
-; NO-PRELOAD-NEXT: v_mov_b64_e32 v[2:3], s[2:3]
-; NO-PRELOAD-NEXT: v_mov_b32_e32 v0, s8
-; NO-PRELOAD-NEXT: global_store_dwordx2 v4, v[2:3], s[12:13] offset:32 sc0 sc1
-; NO-PRELOAD-NEXT: v_mov_b32_e32 v1, s9
-; NO-PRELOAD-NEXT: v_mov_b32_e32 v2, s10
-; NO-PRELOAD-NEXT: v_mov_b32_e32 v3, s11
-; NO-PRELOAD-NEXT: global_store_dwordx4 v4, v[0:3], s[12:13] offset:16 sc0 sc1
-; NO-PRELOAD-NEXT: s_nop 1
-; NO-PRELOAD-NEXT: v_mov_b32_e32 v0, s4
-; NO-PRELOAD-NEXT: v_mov_b32_e32 v1, s5
-; NO-PRELOAD-NEXT: v_mov_b32_e32 v2, s6
-; NO-PRELOAD-NEXT: v_mov_b32_e32 v3, s7
-; NO-PRELOAD-NEXT: global_store_dwordx4 v4, v[0:3], s[12:13] sc0 sc1
-; NO-PRELOAD-NEXT: s_endpgm
-;
-; PRELOAD-1-LABEL: v5f64_arg:
-; PRELOAD-1: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: ; %bb.0:
-; PRELOAD-1-NEXT: s_load_dwordx2 s[12:13], s[0:1], 0x60
-; PRELOAD-1-NEXT: s_load_dwordx8 s[4:11], s[0:1], 0x40
-; PRELOAD-1-NEXT: v_mov_b32_e32 v4, 0
-; PRELOAD-1-NEXT: s_waitcnt lgkmcnt(0)
-; PRELOAD-1-NEXT: v_mov_b64_e32 v[2:3], s[12:13]
-; PRELOAD-1-NEXT: v_mov_b32_e32 v0, s8
-; PRELOAD-1-NEXT: global_store_dwordx2 v4, v[2:3], s[2:3] offset:32 sc0 sc1
-; PRELOAD-1-NEXT: v_mov_b32_e32 v1, s9
-; PRELOAD-1-NEXT: v_mov_b32_e32 v2, s10
-; PRELOAD-1-NEXT: v_mov_b32_e32 v3, s11
-; PRELOAD-1-NEXT: global_store_dwordx4 v4, v[0:3], s[2:3] offset:16 sc0 sc1
-; PRELOAD-1-NEXT: s_nop 1
-; PRELOAD-1-NEXT: v_mov_b32_e32 v0, s4
-; PRELOAD-1-NEXT: v_mov_b32_e32 v1, s5
-; PRELOAD-1-NEXT: v_mov_b32_e32 v2, s6
-; PRELOAD-1-NEXT: v_mov_b32_e32 v3, s7
-; PRELOAD-1-NEXT: global_store_dwordx4 v4, v[0:3], s[2:3] sc0 sc1
-; PRELOAD-1-NEXT: s_endpgm
-;
-; PRELOAD-2-LABEL: v5f64_arg:
-; PRELOAD-2: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: ; %bb.0:
-; PRELOAD-2-NEXT: s_load_dwordx2 s[12:13], s[0:1], 0x60
-; PRELOAD-2-NEXT: s_load_dwordx8 s[4:11], s[0:1], 0x40
-; PRELOAD-2-NEXT: v_mov_b32_e32 v4, 0
-; PRELOAD-2-NEXT: s_waitcnt lgkmcnt(0)
-; PRELOAD-2-NEXT: v_mov_b64_e32 v[2:3], s[12:13]
-; PRELOAD-2-NEXT: v_mov_b32_e32 v0, s8
-; PRELOAD-2-NEXT: global_store_dwordx2 v4, v[2:3], s[2:3] offset:32 sc0 sc1
-; PRELOAD-2-NEXT: v_mov_b32_e32 v1, s9
-; PRELOAD-2-NEXT: v_mov_b32_e32 v2, s10
-; PRELOAD-2-NEXT: v_mov_b32_e32 v3, s11
-; PRELOAD-2-NEXT: global_store_dwordx4 v4, v[0:3], s[2:3] offset:16 sc0 sc1
-; PRELOAD-2-NEXT: s_nop 1
-; PRELOAD-2-NEXT: v_mov_b32_e32 v0, s4
-; PRELOAD-2-NEXT: v_mov_b32_e32 v1, s5
-; PRELOAD-2-NEXT: v_mov_b32_e32 v2, s6
-; PRELOAD-2-NEXT: v_mov_b32_e32 v3, s7
-; PRELOAD-2-NEXT: global_store_dwordx4 v4, v[0:3], s[2:3] sc0 sc1
-; PRELOAD-2-NEXT: s_endpgm
-;
-; PRELOAD-4-LABEL: v5f64_arg:
-; PRELOAD-4: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: ; %bb.0:
-; PRELOAD-4-NEXT: s_load_dwordx2 s[12:13], s[0:1], 0x60
-; PRELOAD-4-NEXT: s_load_dwordx8 s[4:11], s[0:1], 0x40
-; PRELOAD-4-NEXT: v_mov_b32_e32 v4, 0
-; PRELOAD-4-NEXT: s_waitcnt lgkmcnt(0)
-; PRELOAD-4-NEXT: v_mov_b64_e32 v[2:3], s[12:13]
-; PRELOAD-4-NEXT: v_mov_b32_e32 v0, s8
-; PRELOAD-4-NEXT: global_store_dwordx2 v4, v[2:3], s[2:3] offset:32 sc0 sc1
-; PRELOAD-4-NEXT: v_mov_b32_e32 v1, s9
-; PRELOAD-4-NEXT: v_mov_b32_e32 v2, s10
-; PRELOAD-4-NEXT: v_mov_b32_e32 v3, s11
-; PRELOAD-4-NEXT: global_store_dwordx4 v4, v[0:3], s[2:3] offset:16 sc0 sc1
-; PRELOAD-4-NEXT: s_nop 1
-; PRELOAD-4-NEXT: v_mov_b32_e32 v0, s4
-; PRELOAD-4-NEXT: v_mov_b32_e32 v1, s5
-; PRELOAD-4-NEXT: v_mov_b32_e32 v2, s6
-; PRELOAD-4-NEXT: v_mov_b32_e32 v3, s7
-; PRELOAD-4-NEXT: global_store_dwordx4 v4, v[0:3], s[2:3] sc0 sc1
-; PRELOAD-4-NEXT: s_endpgm
-;
-; PRELOAD-8-LABEL: v5f64_arg:
-; PRELOAD-8: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: ; %bb.0:
-; PRELOAD-8-NEXT: s_load_dwordx2 s[12:13], s[0:1], 0x60
-; PRELOAD-8-NEXT: s_load_dwordx8 s[4:11], s[0:1], 0x40
-; PRELOAD-8-NEXT: v_mov_b32_e32 v4, 0
-; PRELOAD-8-NEXT: s_waitcnt lgkmcnt(0)
-; PRELOAD-8-NEXT: v_mov_b64_e32 v[2:3], s[12:13]
-; PRELOAD-8-NEXT: v_mov_b32_e32 v0, s8
-; PRELOAD-8-NEXT: global_store_dwordx2 v4, v[2:3], s[2:3] offset:32 sc0 sc1
-; PRELOAD-8-NEXT: v_mov_b32_e32 v1, s9
-; PRELOAD-8-NEXT: v_mov_b32_e32 v2, s10
-; PRELOAD-8-NEXT: v_mov_b32_e32 v3, s11
-; PRELOAD-8-NEXT: global_store_dwordx4 v4, v[0:3], s[2:3] offset:16 sc0 sc1
-; PRELOAD-8-NEXT: s_nop 1
-; PRELOAD-8-NEXT: v_mov_b32_e32 v0, s4
-; PRELOAD-8-NEXT: v_mov_b32_e32 v1, s5
-; PRELOAD-8-NEXT: v_mov_b32_e32 v2, s6
-; PRELOAD-8-NEXT: v_mov_b32_e32 v3, s7
-; PRELOAD-8-NEXT: global_store_dwordx4 v4, v[0:3], s[2:3] sc0 sc1
-; PRELOAD-8-NEXT: s_endpgm
+; GFX940-NO-PRELOAD-LABEL: v5f64_arg:
+; GFX940-NO-PRELOAD: ; %bb.0:
+; GFX940-NO-PRELOAD-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x60
+; GFX940-NO-PRELOAD-NEXT: s_load_dwordx8 s[4:11], s[0:1], 0x40
+; GFX940-NO-PRELOAD-NEXT: s_load_dwordx2 s[12:13], s[0:1], 0x0
+; GFX940-NO-PRELOAD-NEXT: v_mov_b32_e32 v4, 0
+; GFX940-NO-PRELOAD-NEXT: s_waitcnt lgkmcnt(0)
+; GFX940-NO-PRELOAD-NEXT: v_mov_b64_e32 v[2:3], s[2:3]
+; GFX940-NO-PRELOAD-NEXT: v_mov_b32_e32 v0, s8
+; GFX940-NO-PRELOAD-NEXT: global_store_dwordx2 v4, v[2:3], s[12:13] offset:32 sc0 sc1
+; GFX940-NO-PRELOAD-NEXT: v_mov_b32_e32 v1, s9
+; GFX940-NO-PRELOAD-NEXT: v_mov_b32_e32 v2, s10
+; GFX940-NO-PRELOAD-NEXT: v_mov_b32_e32 v3, s11
+; GFX940-NO-PRELOAD-NEXT: global_store_dwordx4 v4, v[0:3], s[12:13] offset:16 sc0 sc1
+; GFX940-NO-PRELOAD-NEXT: s_nop 1
+; GFX940-NO-PRELOAD-NEXT: v_mov_b32_e32 v0, s4
+; GFX940-NO-PRELOAD-NEXT: v_mov_b32_e32 v1, s5
+; GFX940-NO-PRELOAD-NEXT: v_mov_b32_e32 v2, s6
+; GFX940-NO-PRELOAD-NEXT: v_mov_b32_e32 v3, s7
+; GFX940-NO-PRELOAD-NEXT: global_store_dwordx4 v4, v[0:3], s[12:13] sc0 sc1
+; GFX940-NO-PRELOAD-NEXT: s_endpgm
+;
+; GFX940-PRELOAD-1-LABEL: v5f64_arg:
+; GFX940-PRELOAD-1: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: ; %bb.0:
+; GFX940-PRELOAD-1-NEXT: s_load_dwordx2 s[12:13], s[0:1], 0x60
+; GFX940-PRELOAD-1-NEXT: s_load_dwordx8 s[4:11], s[0:1], 0x40
+; GFX940-PRELOAD-1-NEXT: v_mov_b32_e32 v4, 0
+; GFX940-PRELOAD-1-NEXT: s_waitcnt lgkmcnt(0)
+; GFX940-PRELOAD-1-NEXT: v_mov_b64_e32 v[2:3], s[12:13]
+; GFX940-PRELOAD-1-NEXT: v_mov_b32_e32 v0, s8
+; GFX940-PRELOAD-1-NEXT: global_store_dwordx2 v4, v[2:3], s[2:3] offset:32 sc0 sc1
+; GFX940-PRELOAD-1-NEXT: v_mov_b32_e32 v1, s9
+; GFX940-PRELOAD-1-NEXT: v_mov_b32_e32 v2, s10
+; GFX940-PRELOAD-1-NEXT: v_mov_b32_e32 v3, s11
+; GFX940-PRELOAD-1-NEXT: global_store_dwordx4 v4, v[0:3], s[2:3] offset:16 sc0 sc1
+; GFX940-PRELOAD-1-NEXT: s_nop 1
+; GFX940-PRELOAD-1-NEXT: v_mov_b32_e32 v0, s4
+; GFX940-PRELOAD-1-NEXT: v_mov_b32_e32 v1, s5
+; GFX940-PRELOAD-1-NEXT: v_mov_b32_e32 v2, s6
+; GFX940-PRELOAD-1-NEXT: v_mov_b32_e32 v3, s7
+; GFX940-PRELOAD-1-NEXT: global_store_dwordx4 v4, v[0:3], s[2:3] sc0 sc1
+; GFX940-PRELOAD-1-NEXT: s_endpgm
+;
+; GFX940-PRELOAD-2-LABEL: v5f64_arg:
+; GFX940-PRELOAD-2: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: ; %bb.0:
+; GFX940-PRELOAD-2-NEXT: s_load_dwordx2 s[12:13], s[0:1], 0x60
+; GFX940-PRELOAD-2-NEXT: s_load_dwordx8 s[4:11], s[0:1], 0x40
+; GFX940-PRELOAD-2-NEXT: v_mov_b32_e32 v4, 0
+; GFX940-PRELOAD-2-NEXT: s_waitcnt lgkmcnt(0)
+; GFX940-PRELOAD-2-NEXT: v_mov_b64_e32 v[2:3], s[12:13]
+; GFX940-PRELOAD-2-NEXT: v_mov_b32_e32 v0, s8
+; GFX940-PRELOAD-2-NEXT: global_store_dwordx2 v4, v[2:3], s[2:3] offset:32 sc0 sc1
+; GFX940-PRELOAD-2-NEXT: v_mov_b32_e32 v1, s9
+; GFX940-PRELOAD-2-NEXT: v_mov_b32_e32 v2, s10
+; GFX940-PRELOAD-2-NEXT: v_mov_b32_e32 v3, s11
+; GFX940-PRELOAD-2-NEXT: global_store_dwordx4 v4, v[0:3], s[2:3] offset:16 sc0 sc1
+; GFX940-PRELOAD-2-NEXT: s_nop 1
+; GFX940-PRELOAD-2-NEXT: v_mov_b32_e32 v0, s4
+; GFX940-PRELOAD-2-NEXT: v_mov_b32_e32 v1, s5
+; GFX940-PRELOAD-2-NEXT: v_mov_b32_e32 v2, s6
+; GFX940-PRELOAD-2-NEXT: v_mov_b32_e32 v3, s7
+; GFX940-PRELOAD-2-NEXT: global_store_dwordx4 v4, v[0:3], s[2:3] sc0 sc1
+; GFX940-PRELOAD-2-NEXT: s_endpgm
+;
+; GFX940-PRELOAD-4-LABEL: v5f64_arg:
+; GFX940-PRELOAD-4: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: ; %bb.0:
+; GFX940-PRELOAD-4-NEXT: s_load_dwordx2 s[12:13], s[0:1], 0x60
+; GFX940-PRELOAD-4-NEXT: s_load_dwordx8 s[4:11], s[0:1], 0x40
+; GFX940-PRELOAD-4-NEXT: v_mov_b32_e32 v4, 0
+; GFX940-PRELOAD-4-NEXT: s_waitcnt lgkmcnt(0)
+; GFX940-PRELOAD-4-NEXT: v_mov_b64_e32 v[2:3], s[12:13]
+; GFX940-PRELOAD-4-NEXT: v_mov_b32_e32 v0, s8
+; GFX940-PRELOAD-4-NEXT: global_store_dwordx2 v4, v[2:3], s[2:3] offset:32 sc0 sc1
+; GFX940-PRELOAD-4-NEXT: v_mov_b32_e32 v1, s9
+; GFX940-PRELOAD-4-NEXT: v_mov_b32_e32 v2, s10
+; GFX940-PRELOAD-4-NEXT: v_mov_b32_e32 v3, s11
+; GFX940-PRELOAD-4-NEXT: global_store_dwordx4 v4, v[0:3], s[2:3] offset:16 sc0 sc1
+; GFX940-PRELOAD-4-NEXT: s_nop 1
+; GFX940-PRELOAD-4-NEXT: v_mov_b32_e32 v0, s4
+; GFX940-PRELOAD-4-NEXT: v_mov_b32_e32 v1, s5
+; GFX940-PRELOAD-4-NEXT: v_mov_b32_e32 v2, s6
+; GFX940-PRELOAD-4-NEXT: v_mov_b32_e32 v3, s7
+; GFX940-PRELOAD-4-NEXT: global_store_dwordx4 v4, v[0:3], s[2:3] sc0 sc1
+; GFX940-PRELOAD-4-NEXT: s_endpgm
+;
+; GFX940-PRELOAD-8-LABEL: v5f64_arg:
+; GFX940-PRELOAD-8: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: ; %bb.0:
+; GFX940-PRELOAD-8-NEXT: s_load_dwordx2 s[12:13], s[0:1], 0x60
+; GFX940-PRELOAD-8-NEXT: s_load_dwordx8 s[4:11], s[0:1], 0x40
+; GFX940-PRELOAD-8-NEXT: v_mov_b32_e32 v4, 0
+; GFX940-PRELOAD-8-NEXT: s_waitcnt lgkmcnt(0)
+; GFX940-PRELOAD-8-NEXT: v_mov_b64_e32 v[2:3], s[12:13]
+; GFX940-PRELOAD-8-NEXT: v_mov_b32_e32 v0, s8
+; GFX940-PRELOAD-8-NEXT: global_store_dwordx2 v4, v[2:3], s[2:3] offset:32 sc0 sc1
+; GFX940-PRELOAD-8-NEXT: v_mov_b32_e32 v1, s9
+; GFX940-PRELOAD-8-NEXT: v_mov_b32_e32 v2, s10
+; GFX940-PRELOAD-8-NEXT: v_mov_b32_e32 v3, s11
+; GFX940-PRELOAD-8-NEXT: global_store_dwordx4 v4, v[0:3], s[2:3] offset:16 sc0 sc1
+; GFX940-PRELOAD-8-NEXT: s_nop 1
+; GFX940-PRELOAD-8-NEXT: v_mov_b32_e32 v0, s4
+; GFX940-PRELOAD-8-NEXT: v_mov_b32_e32 v1, s5
+; GFX940-PRELOAD-8-NEXT: v_mov_b32_e32 v2, s6
+; GFX940-PRELOAD-8-NEXT: v_mov_b32_e32 v3, s7
+; GFX940-PRELOAD-8-NEXT: global_store_dwordx4 v4, v[0:3], s[2:3] sc0 sc1
+; GFX940-PRELOAD-8-NEXT: s_endpgm
+;
+; GFX90a-NO-PRELOAD-LABEL: v5f64_arg:
+; GFX90a-NO-PRELOAD: ; %bb.0:
+; GFX90a-NO-PRELOAD-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x60
+; GFX90a-NO-PRELOAD-NEXT: s_load_dwordx8 s[8:15], s[4:5], 0x40
+; GFX90a-NO-PRELOAD-NEXT: s_load_dwordx2 s[2:3], s[4:5], 0x0
+; GFX90a-NO-PRELOAD-NEXT: v_mov_b32_e32 v4, 0
+; GFX90a-NO-PRELOAD-NEXT: s_waitcnt lgkmcnt(0)
+; GFX90a-NO-PRELOAD-NEXT: v_pk_mov_b32 v[2:3], s[0:1], s[0:1] op_sel:[0,1]
+; GFX90a-NO-PRELOAD-NEXT: v_mov_b32_e32 v0, s12
+; GFX90a-NO-PRELOAD-NEXT: global_store_dwordx2 v4, v[2:3], s[2:3] offset:32
+; GFX90a-NO-PRELOAD-NEXT: v_mov_b32_e32 v1, s13
+; GFX90a-NO-PRELOAD-NEXT: v_mov_b32_e32 v2, s14
+; GFX90a-NO-PRELOAD-NEXT: v_mov_b32_e32 v3, s15
+; GFX90a-NO-PRELOAD-NEXT: global_store_dwordx4 v4, v[0:3], s[2:3] offset:16
+; GFX90a-NO-PRELOAD-NEXT: s_nop 0
+; GFX90a-NO-PRELOAD-NEXT: v_mov_b32_e32 v0, s8
+; GFX90a-NO-PRELOAD-NEXT: v_mov_b32_e32 v1, s9
+; GFX90a-NO-PRELOAD-NEXT: v_mov_b32_e32 v2, s10
+; GFX90a-NO-PRELOAD-NEXT: v_mov_b32_e32 v3, s11
+; GFX90a-NO-PRELOAD-NEXT: global_store_dwordx4 v4, v[0:3], s[2:3]
+; GFX90a-NO-PRELOAD-NEXT: s_endpgm
+;
+; GFX90a-PRELOAD-1-LABEL: v5f64_arg:
+; GFX90a-PRELOAD-1: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: ; %bb.0:
+; GFX90a-PRELOAD-1-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x60
+; GFX90a-PRELOAD-1-NEXT: s_load_dwordx8 s[8:15], s[4:5], 0x40
+; GFX90a-PRELOAD-1-NEXT: v_mov_b32_e32 v4, 0
+; GFX90a-PRELOAD-1-NEXT: s_waitcnt lgkmcnt(0)
+; GFX90a-PRELOAD-1-NEXT: v_pk_mov_b32 v[2:3], s[0:1], s[0:1] op_sel:[0,1]
+; GFX90a-PRELOAD-1-NEXT: v_mov_b32_e32 v0, s12
+; GFX90a-PRELOAD-1-NEXT: global_store_dwordx2 v4, v[2:3], s[6:7] offset:32
+; GFX90a-PRELOAD-1-NEXT: v_mov_b32_e32 v1, s13
+; GFX90a-PRELOAD-1-NEXT: v_mov_b32_e32 v2, s14
+; GFX90a-PRELOAD-1-NEXT: v_mov_b32_e32 v3, s15
+; GFX90a-PRELOAD-1-NEXT: global_store_dwordx4 v4, v[0:3], s[6:7] offset:16
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: v_mov_b32_e32 v0, s8
+; GFX90a-PRELOAD-1-NEXT: v_mov_b32_e32 v1, s9
+; GFX90a-PRELOAD-1-NEXT: v_mov_b32_e32 v2, s10
+; GFX90a-PRELOAD-1-NEXT: v_mov_b32_e32 v3, s11
+; GFX90a-PRELOAD-1-NEXT: global_store_dwordx4 v4, v[0:3], s[6:7]
+; GFX90a-PRELOAD-1-NEXT: s_endpgm
+;
+; GFX90a-PRELOAD-2-LABEL: v5f64_arg:
+; GFX90a-PRELOAD-2: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: ; %bb.0:
+; GFX90a-PRELOAD-2-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x60
+; GFX90a-PRELOAD-2-NEXT: s_load_dwordx8 s[8:15], s[4:5], 0x40
+; GFX90a-PRELOAD-2-NEXT: v_mov_b32_e32 v4, 0
+; GFX90a-PRELOAD-2-NEXT: s_waitcnt lgkmcnt(0)
+; GFX90a-PRELOAD-2-NEXT: v_pk_mov_b32 v[2:3], s[0:1], s[0:1] op_sel:[0,1]
+; GFX90a-PRELOAD-2-NEXT: v_mov_b32_e32 v0, s12
+; GFX90a-PRELOAD-2-NEXT: global_store_dwordx2 v4, v[2:3], s[6:7] offset:32
+; GFX90a-PRELOAD-2-NEXT: v_mov_b32_e32 v1, s13
+; GFX90a-PRELOAD-2-NEXT: v_mov_b32_e32 v2, s14
+; GFX90a-PRELOAD-2-NEXT: v_mov_b32_e32 v3, s15
+; GFX90a-PRELOAD-2-NEXT: global_store_dwordx4 v4, v[0:3], s[6:7] offset:16
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: v_mov_b32_e32 v0, s8
+; GFX90a-PRELOAD-2-NEXT: v_mov_b32_e32 v1, s9
+; GFX90a-PRELOAD-2-NEXT: v_mov_b32_e32 v2, s10
+; GFX90a-PRELOAD-2-NEXT: v_mov_b32_e32 v3, s11
+; GFX90a-PRELOAD-2-NEXT: global_store_dwordx4 v4, v[0:3], s[6:7]
+; GFX90a-PRELOAD-2-NEXT: s_endpgm
+;
+; GFX90a-PRELOAD-4-LABEL: v5f64_arg:
+; GFX90a-PRELOAD-4: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: ; %bb.0:
+; GFX90a-PRELOAD-4-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x60
+; GFX90a-PRELOAD-4-NEXT: s_load_dwordx8 s[8:15], s[4:5], 0x40
+; GFX90a-PRELOAD-4-NEXT: v_mov_b32_e32 v4, 0
+; GFX90a-PRELOAD-4-NEXT: s_waitcnt lgkmcnt(0)
+; GFX90a-PRELOAD-4-NEXT: v_pk_mov_b32 v[2:3], s[0:1], s[0:1] op_sel:[0,1]
+; GFX90a-PRELOAD-4-NEXT: v_mov_b32_e32 v0, s12
+; GFX90a-PRELOAD-4-NEXT: global_store_dwordx2 v4, v[2:3], s[6:7] offset:32
+; GFX90a-PRELOAD-4-NEXT: v_mov_b32_e32 v1, s13
+; GFX90a-PRELOAD-4-NEXT: v_mov_b32_e32 v2, s14
+; GFX90a-PRELOAD-4-NEXT: v_mov_b32_e32 v3, s15
+; GFX90a-PRELOAD-4-NEXT: global_store_dwordx4 v4, v[0:3], s[6:7] offset:16
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: v_mov_b32_e32 v0, s8
+; GFX90a-PRELOAD-4-NEXT: v_mov_b32_e32 v1, s9
+; GFX90a-PRELOAD-4-NEXT: v_mov_b32_e32 v2, s10
+; GFX90a-PRELOAD-4-NEXT: v_mov_b32_e32 v3, s11
+; GFX90a-PRELOAD-4-NEXT: global_store_dwordx4 v4, v[0:3], s[6:7]
+; GFX90a-PRELOAD-4-NEXT: s_endpgm
+;
+; GFX90a-PRELOAD-8-LABEL: v5f64_arg:
+; GFX90a-PRELOAD-8: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: ; %bb.0:
+; GFX90a-PRELOAD-8-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x60
+; GFX90a-PRELOAD-8-NEXT: s_load_dwordx8 s[8:15], s[4:5], 0x40
+; GFX90a-PRELOAD-8-NEXT: v_mov_b32_e32 v4, 0
+; GFX90a-PRELOAD-8-NEXT: s_waitcnt lgkmcnt(0)
+; GFX90a-PRELOAD-8-NEXT: v_pk_mov_b32 v[2:3], s[0:1], s[0:1] op_sel:[0,1]
+; GFX90a-PRELOAD-8-NEXT: v_mov_b32_e32 v0, s12
+; GFX90a-PRELOAD-8-NEXT: global_store_dwordx2 v4, v[2:3], s[6:7] offset:32
+; GFX90a-PRELOAD-8-NEXT: v_mov_b32_e32 v1, s13
+; GFX90a-PRELOAD-8-NEXT: v_mov_b32_e32 v2, s14
+; GFX90a-PRELOAD-8-NEXT: v_mov_b32_e32 v3, s15
+; GFX90a-PRELOAD-8-NEXT: global_store_dwordx4 v4, v[0:3], s[6:7] offset:16
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: v_mov_b32_e32 v0, s8
+; GFX90a-PRELOAD-8-NEXT: v_mov_b32_e32 v1, s9
+; GFX90a-PRELOAD-8-NEXT: v_mov_b32_e32 v2, s10
+; GFX90a-PRELOAD-8-NEXT: v_mov_b32_e32 v3, s11
+; GFX90a-PRELOAD-8-NEXT: global_store_dwordx4 v4, v[0:3], s[6:7]
+; GFX90a-PRELOAD-8-NEXT: s_endpgm
store <5 x double> %in, ptr addrspace(1) %out, align 8
ret void
}
define amdgpu_kernel void @v8i8_preload_arg(ptr addrspace(1) %out, <8 x i8> %in) {
-; NO-PRELOAD-LABEL: v8i8_preload_arg:
-; NO-PRELOAD: ; %bb.0:
-; NO-PRELOAD-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x0
-; NO-PRELOAD-NEXT: v_mov_b32_e32 v2, 0
-; NO-PRELOAD-NEXT: s_waitcnt lgkmcnt(0)
-; NO-PRELOAD-NEXT: v_mov_b64_e32 v[0:1], s[2:3]
-; NO-PRELOAD-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1] sc0 sc1
-; NO-PRELOAD-NEXT: s_endpgm
-;
-; PRELOAD-1-LABEL: v8i8_preload_arg:
-; PRELOAD-1: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: ; %bb.0:
-; PRELOAD-1-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x8
-; PRELOAD-1-NEXT: v_mov_b32_e32 v2, 0
-; PRELOAD-1-NEXT: s_waitcnt lgkmcnt(0)
-; PRELOAD-1-NEXT: v_mov_b64_e32 v[0:1], s[0:1]
-; PRELOAD-1-NEXT: global_store_dwordx2 v2, v[0:1], s[2:3] sc0 sc1
-; PRELOAD-1-NEXT: s_endpgm
-;
-; PRELOAD-2-LABEL: v8i8_preload_arg:
-; PRELOAD-2: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: ; %bb.0:
-; PRELOAD-2-NEXT: s_lshr_b32 s0, s5, 8
-; PRELOAD-2-NEXT: v_lshlrev_b16_e64 v0, 8, s0
-; PRELOAD-2-NEXT: s_lshr_b32 s0, s5, 24
-; PRELOAD-2-NEXT: v_lshlrev_b16_e64 v1, 8, s0
-; PRELOAD-2-NEXT: s_lshr_b32 s0, s5, 16
-; PRELOAD-2-NEXT: v_or_b32_sdwa v0, s5, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; PRELOAD-2-NEXT: v_or_b32_sdwa v1, s0, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; PRELOAD-2-NEXT: s_lshr_b32 s0, s4, 8
-; PRELOAD-2-NEXT: v_or_b32_sdwa v1, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; PRELOAD-2-NEXT: v_lshlrev_b16_e64 v0, 8, s0
-; PRELOAD-2-NEXT: s_lshr_b32 s0, s4, 24
-; PRELOAD-2-NEXT: v_lshlrev_b16_e64 v2, 8, s0
-; PRELOAD-2-NEXT: s_lshr_b32 s0, s4, 16
-; PRELOAD-2-NEXT: v_or_b32_sdwa v0, s4, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; PRELOAD-2-NEXT: v_or_b32_sdwa v2, s0, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: v_or_b32_sdwa v0, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; PRELOAD-2-NEXT: v_mov_b32_e32 v2, 0
-; PRELOAD-2-NEXT: global_store_dwordx2 v2, v[0:1], s[2:3] sc0 sc1
-; PRELOAD-2-NEXT: s_endpgm
-;
-; PRELOAD-4-LABEL: v8i8_preload_arg:
-; PRELOAD-4: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: ; %bb.0:
-; PRELOAD-4-NEXT: s_lshr_b32 s0, s5, 8
-; PRELOAD-4-NEXT: v_lshlrev_b16_e64 v0, 8, s0
-; PRELOAD-4-NEXT: s_lshr_b32 s0, s5, 24
-; PRELOAD-4-NEXT: v_lshlrev_b16_e64 v1, 8, s0
-; PRELOAD-4-NEXT: s_lshr_b32 s0, s5, 16
-; PRELOAD-4-NEXT: v_or_b32_sdwa v0, s5, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; PRELOAD-4-NEXT: v_or_b32_sdwa v1, s0, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; PRELOAD-4-NEXT: s_lshr_b32 s0, s4, 8
-; PRELOAD-4-NEXT: v_or_b32_sdwa v1, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; PRELOAD-4-NEXT: v_lshlrev_b16_e64 v0, 8, s0
-; PRELOAD-4-NEXT: s_lshr_b32 s0, s4, 24
-; PRELOAD-4-NEXT: v_lshlrev_b16_e64 v2, 8, s0
-; PRELOAD-4-NEXT: s_lshr_b32 s0, s4, 16
-; PRELOAD-4-NEXT: v_or_b32_sdwa v0, s4, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; PRELOAD-4-NEXT: v_or_b32_sdwa v2, s0, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: v_or_b32_sdwa v0, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; PRELOAD-4-NEXT: v_mov_b32_e32 v2, 0
-; PRELOAD-4-NEXT: global_store_dwordx2 v2, v[0:1], s[2:3] sc0 sc1
-; PRELOAD-4-NEXT: s_endpgm
-;
-; PRELOAD-8-LABEL: v8i8_preload_arg:
-; PRELOAD-8: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: ; %bb.0:
-; PRELOAD-8-NEXT: s_lshr_b32 s0, s5, 8
-; PRELOAD-8-NEXT: v_lshlrev_b16_e64 v0, 8, s0
-; PRELOAD-8-NEXT: s_lshr_b32 s0, s5, 24
-; PRELOAD-8-NEXT: v_lshlrev_b16_e64 v1, 8, s0
-; PRELOAD-8-NEXT: s_lshr_b32 s0, s5, 16
-; PRELOAD-8-NEXT: v_or_b32_sdwa v0, s5, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; PRELOAD-8-NEXT: v_or_b32_sdwa v1, s0, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; PRELOAD-8-NEXT: s_lshr_b32 s0, s4, 8
-; PRELOAD-8-NEXT: v_or_b32_sdwa v1, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; PRELOAD-8-NEXT: v_lshlrev_b16_e64 v0, 8, s0
-; PRELOAD-8-NEXT: s_lshr_b32 s0, s4, 24
-; PRELOAD-8-NEXT: v_lshlrev_b16_e64 v2, 8, s0
-; PRELOAD-8-NEXT: s_lshr_b32 s0, s4, 16
-; PRELOAD-8-NEXT: v_or_b32_sdwa v0, s4, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; PRELOAD-8-NEXT: v_or_b32_sdwa v2, s0, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: v_or_b32_sdwa v0, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; PRELOAD-8-NEXT: v_mov_b32_e32 v2, 0
-; PRELOAD-8-NEXT: global_store_dwordx2 v2, v[0:1], s[2:3] sc0 sc1
-; PRELOAD-8-NEXT: s_endpgm
+; GFX940-NO-PRELOAD-LABEL: v8i8_preload_arg:
+; GFX940-NO-PRELOAD: ; %bb.0:
+; GFX940-NO-PRELOAD-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x0
+; GFX940-NO-PRELOAD-NEXT: v_mov_b32_e32 v2, 0
+; GFX940-NO-PRELOAD-NEXT: s_waitcnt lgkmcnt(0)
+; GFX940-NO-PRELOAD-NEXT: v_mov_b64_e32 v[0:1], s[2:3]
+; GFX940-NO-PRELOAD-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1] sc0 sc1
+; GFX940-NO-PRELOAD-NEXT: s_endpgm
+;
+; GFX940-PRELOAD-1-LABEL: v8i8_preload_arg:
+; GFX940-PRELOAD-1: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: ; %bb.0:
+; GFX940-PRELOAD-1-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x8
+; GFX940-PRELOAD-1-NEXT: v_mov_b32_e32 v2, 0
+; GFX940-PRELOAD-1-NEXT: s_waitcnt lgkmcnt(0)
+; GFX940-PRELOAD-1-NEXT: v_mov_b64_e32 v[0:1], s[0:1]
+; GFX940-PRELOAD-1-NEXT: global_store_dwordx2 v2, v[0:1], s[2:3] sc0 sc1
+; GFX940-PRELOAD-1-NEXT: s_endpgm
+;
+; GFX940-PRELOAD-2-LABEL: v8i8_preload_arg:
+; GFX940-PRELOAD-2: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: ; %bb.0:
+; GFX940-PRELOAD-2-NEXT: s_lshr_b32 s0, s5, 8
+; GFX940-PRELOAD-2-NEXT: v_lshlrev_b16_e64 v0, 8, s0
+; GFX940-PRELOAD-2-NEXT: s_lshr_b32 s0, s5, 24
+; GFX940-PRELOAD-2-NEXT: v_lshlrev_b16_e64 v1, 8, s0
+; GFX940-PRELOAD-2-NEXT: s_lshr_b32 s0, s5, 16
+; GFX940-PRELOAD-2-NEXT: v_or_b32_sdwa v0, s5, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX940-PRELOAD-2-NEXT: v_or_b32_sdwa v1, s0, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX940-PRELOAD-2-NEXT: s_lshr_b32 s0, s4, 8
+; GFX940-PRELOAD-2-NEXT: v_or_b32_sdwa v1, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; GFX940-PRELOAD-2-NEXT: v_lshlrev_b16_e64 v0, 8, s0
+; GFX940-PRELOAD-2-NEXT: s_lshr_b32 s0, s4, 24
+; GFX940-PRELOAD-2-NEXT: v_lshlrev_b16_e64 v2, 8, s0
+; GFX940-PRELOAD-2-NEXT: s_lshr_b32 s0, s4, 16
+; GFX940-PRELOAD-2-NEXT: v_or_b32_sdwa v0, s4, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX940-PRELOAD-2-NEXT: v_or_b32_sdwa v2, s0, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: v_or_b32_sdwa v0, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; GFX940-PRELOAD-2-NEXT: v_mov_b32_e32 v2, 0
+; GFX940-PRELOAD-2-NEXT: global_store_dwordx2 v2, v[0:1], s[2:3] sc0 sc1
+; GFX940-PRELOAD-2-NEXT: s_endpgm
+;
+; GFX940-PRELOAD-4-LABEL: v8i8_preload_arg:
+; GFX940-PRELOAD-4: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: ; %bb.0:
+; GFX940-PRELOAD-4-NEXT: s_lshr_b32 s0, s5, 8
+; GFX940-PRELOAD-4-NEXT: v_lshlrev_b16_e64 v0, 8, s0
+; GFX940-PRELOAD-4-NEXT: s_lshr_b32 s0, s5, 24
+; GFX940-PRELOAD-4-NEXT: v_lshlrev_b16_e64 v1, 8, s0
+; GFX940-PRELOAD-4-NEXT: s_lshr_b32 s0, s5, 16
+; GFX940-PRELOAD-4-NEXT: v_or_b32_sdwa v0, s5, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX940-PRELOAD-4-NEXT: v_or_b32_sdwa v1, s0, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX940-PRELOAD-4-NEXT: s_lshr_b32 s0, s4, 8
+; GFX940-PRELOAD-4-NEXT: v_or_b32_sdwa v1, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; GFX940-PRELOAD-4-NEXT: v_lshlrev_b16_e64 v0, 8, s0
+; GFX940-PRELOAD-4-NEXT: s_lshr_b32 s0, s4, 24
+; GFX940-PRELOAD-4-NEXT: v_lshlrev_b16_e64 v2, 8, s0
+; GFX940-PRELOAD-4-NEXT: s_lshr_b32 s0, s4, 16
+; GFX940-PRELOAD-4-NEXT: v_or_b32_sdwa v0, s4, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX940-PRELOAD-4-NEXT: v_or_b32_sdwa v2, s0, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: v_or_b32_sdwa v0, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; GFX940-PRELOAD-4-NEXT: v_mov_b32_e32 v2, 0
+; GFX940-PRELOAD-4-NEXT: global_store_dwordx2 v2, v[0:1], s[2:3] sc0 sc1
+; GFX940-PRELOAD-4-NEXT: s_endpgm
+;
+; GFX940-PRELOAD-8-LABEL: v8i8_preload_arg:
+; GFX940-PRELOAD-8: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: ; %bb.0:
+; GFX940-PRELOAD-8-NEXT: s_lshr_b32 s0, s5, 8
+; GFX940-PRELOAD-8-NEXT: v_lshlrev_b16_e64 v0, 8, s0
+; GFX940-PRELOAD-8-NEXT: s_lshr_b32 s0, s5, 24
+; GFX940-PRELOAD-8-NEXT: v_lshlrev_b16_e64 v1, 8, s0
+; GFX940-PRELOAD-8-NEXT: s_lshr_b32 s0, s5, 16
+; GFX940-PRELOAD-8-NEXT: v_or_b32_sdwa v0, s5, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX940-PRELOAD-8-NEXT: v_or_b32_sdwa v1, s0, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX940-PRELOAD-8-NEXT: s_lshr_b32 s0, s4, 8
+; GFX940-PRELOAD-8-NEXT: v_or_b32_sdwa v1, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; GFX940-PRELOAD-8-NEXT: v_lshlrev_b16_e64 v0, 8, s0
+; GFX940-PRELOAD-8-NEXT: s_lshr_b32 s0, s4, 24
+; GFX940-PRELOAD-8-NEXT: v_lshlrev_b16_e64 v2, 8, s0
+; GFX940-PRELOAD-8-NEXT: s_lshr_b32 s0, s4, 16
+; GFX940-PRELOAD-8-NEXT: v_or_b32_sdwa v0, s4, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX940-PRELOAD-8-NEXT: v_or_b32_sdwa v2, s0, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: v_or_b32_sdwa v0, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; GFX940-PRELOAD-8-NEXT: v_mov_b32_e32 v2, 0
+; GFX940-PRELOAD-8-NEXT: global_store_dwordx2 v2, v[0:1], s[2:3] sc0 sc1
+; GFX940-PRELOAD-8-NEXT: s_endpgm
+;
+; GFX90a-NO-PRELOAD-LABEL: v8i8_preload_arg:
+; GFX90a-NO-PRELOAD: ; %bb.0:
+; GFX90a-NO-PRELOAD-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x0
+; GFX90a-NO-PRELOAD-NEXT: v_mov_b32_e32 v2, 0
+; GFX90a-NO-PRELOAD-NEXT: s_waitcnt lgkmcnt(0)
+; GFX90a-NO-PRELOAD-NEXT: v_pk_mov_b32 v[0:1], s[2:3], s[2:3] op_sel:[0,1]
+; GFX90a-NO-PRELOAD-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX90a-NO-PRELOAD-NEXT: s_endpgm
+;
+; GFX90a-PRELOAD-1-LABEL: v8i8_preload_arg:
+; GFX90a-PRELOAD-1: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: ; %bb.0:
+; GFX90a-PRELOAD-1-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x8
+; GFX90a-PRELOAD-1-NEXT: v_mov_b32_e32 v2, 0
+; GFX90a-PRELOAD-1-NEXT: s_waitcnt lgkmcnt(0)
+; GFX90a-PRELOAD-1-NEXT: v_pk_mov_b32 v[0:1], s[0:1], s[0:1] op_sel:[0,1]
+; GFX90a-PRELOAD-1-NEXT: global_store_dwordx2 v2, v[0:1], s[6:7]
+; GFX90a-PRELOAD-1-NEXT: s_endpgm
+;
+; GFX90a-PRELOAD-2-LABEL: v8i8_preload_arg:
+; GFX90a-PRELOAD-2: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: ; %bb.0:
+; GFX90a-PRELOAD-2-NEXT: s_lshr_b32 s0, s9, 8
+; GFX90a-PRELOAD-2-NEXT: v_lshlrev_b16_e64 v0, 8, s0
+; GFX90a-PRELOAD-2-NEXT: s_lshr_b32 s0, s9, 24
+; GFX90a-PRELOAD-2-NEXT: v_lshlrev_b16_e64 v1, 8, s0
+; GFX90a-PRELOAD-2-NEXT: s_lshr_b32 s0, s9, 16
+; GFX90a-PRELOAD-2-NEXT: v_or_b32_sdwa v0, s9, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX90a-PRELOAD-2-NEXT: v_or_b32_sdwa v1, s0, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX90a-PRELOAD-2-NEXT: s_lshr_b32 s0, s8, 8
+; GFX90a-PRELOAD-2-NEXT: v_or_b32_sdwa v1, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; GFX90a-PRELOAD-2-NEXT: v_lshlrev_b16_e64 v0, 8, s0
+; GFX90a-PRELOAD-2-NEXT: s_lshr_b32 s0, s8, 24
+; GFX90a-PRELOAD-2-NEXT: v_lshlrev_b16_e64 v2, 8, s0
+; GFX90a-PRELOAD-2-NEXT: s_lshr_b32 s0, s8, 16
+; GFX90a-PRELOAD-2-NEXT: v_or_b32_sdwa v0, s8, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX90a-PRELOAD-2-NEXT: v_or_b32_sdwa v2, s0, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX90a-PRELOAD-2-NEXT: v_or_b32_sdwa v0, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; GFX90a-PRELOAD-2-NEXT: v_mov_b32_e32 v2, 0
+; GFX90a-PRELOAD-2-NEXT: global_store_dwordx2 v2, v[0:1], s[6:7]
+; GFX90a-PRELOAD-2-NEXT: s_endpgm
+;
+; GFX90a-PRELOAD-4-LABEL: v8i8_preload_arg:
+; GFX90a-PRELOAD-4: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: ; %bb.0:
+; GFX90a-PRELOAD-4-NEXT: s_lshr_b32 s0, s9, 8
+; GFX90a-PRELOAD-4-NEXT: v_lshlrev_b16_e64 v0, 8, s0
+; GFX90a-PRELOAD-4-NEXT: s_lshr_b32 s0, s9, 24
+; GFX90a-PRELOAD-4-NEXT: v_lshlrev_b16_e64 v1, 8, s0
+; GFX90a-PRELOAD-4-NEXT: s_lshr_b32 s0, s9, 16
+; GFX90a-PRELOAD-4-NEXT: v_or_b32_sdwa v0, s9, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX90a-PRELOAD-4-NEXT: v_or_b32_sdwa v1, s0, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX90a-PRELOAD-4-NEXT: s_lshr_b32 s0, s8, 8
+; GFX90a-PRELOAD-4-NEXT: v_or_b32_sdwa v1, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; GFX90a-PRELOAD-4-NEXT: v_lshlrev_b16_e64 v0, 8, s0
+; GFX90a-PRELOAD-4-NEXT: s_lshr_b32 s0, s8, 24
+; GFX90a-PRELOAD-4-NEXT: v_lshlrev_b16_e64 v2, 8, s0
+; GFX90a-PRELOAD-4-NEXT: s_lshr_b32 s0, s8, 16
+; GFX90a-PRELOAD-4-NEXT: v_or_b32_sdwa v0, s8, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX90a-PRELOAD-4-NEXT: v_or_b32_sdwa v2, s0, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX90a-PRELOAD-4-NEXT: v_or_b32_sdwa v0, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; GFX90a-PRELOAD-4-NEXT: v_mov_b32_e32 v2, 0
+; GFX90a-PRELOAD-4-NEXT: global_store_dwordx2 v2, v[0:1], s[6:7]
+; GFX90a-PRELOAD-4-NEXT: s_endpgm
+;
+; GFX90a-PRELOAD-8-LABEL: v8i8_preload_arg:
+; GFX90a-PRELOAD-8: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: ; %bb.0:
+; GFX90a-PRELOAD-8-NEXT: s_lshr_b32 s0, s9, 8
+; GFX90a-PRELOAD-8-NEXT: v_lshlrev_b16_e64 v0, 8, s0
+; GFX90a-PRELOAD-8-NEXT: s_lshr_b32 s0, s9, 24
+; GFX90a-PRELOAD-8-NEXT: v_lshlrev_b16_e64 v1, 8, s0
+; GFX90a-PRELOAD-8-NEXT: s_lshr_b32 s0, s9, 16
+; GFX90a-PRELOAD-8-NEXT: v_or_b32_sdwa v0, s9, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX90a-PRELOAD-8-NEXT: v_or_b32_sdwa v1, s0, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX90a-PRELOAD-8-NEXT: s_lshr_b32 s0, s8, 8
+; GFX90a-PRELOAD-8-NEXT: v_or_b32_sdwa v1, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; GFX90a-PRELOAD-8-NEXT: v_lshlrev_b16_e64 v0, 8, s0
+; GFX90a-PRELOAD-8-NEXT: s_lshr_b32 s0, s8, 24
+; GFX90a-PRELOAD-8-NEXT: v_lshlrev_b16_e64 v2, 8, s0
+; GFX90a-PRELOAD-8-NEXT: s_lshr_b32 s0, s8, 16
+; GFX90a-PRELOAD-8-NEXT: v_or_b32_sdwa v0, s8, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX90a-PRELOAD-8-NEXT: v_or_b32_sdwa v2, s0, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX90a-PRELOAD-8-NEXT: v_or_b32_sdwa v0, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; GFX90a-PRELOAD-8-NEXT: v_mov_b32_e32 v2, 0
+; GFX90a-PRELOAD-8-NEXT: global_store_dwordx2 v2, v[0:1], s[6:7]
+; GFX90a-PRELOAD-8-NEXT: s_endpgm
store <8 x i8> %in, ptr addrspace(1) %out
ret void
}
define amdgpu_kernel void @i64_kernel_preload_arg(ptr addrspace(1) %out, i64 %a) {
-; NO-PRELOAD-LABEL: i64_kernel_preload_arg:
-; NO-PRELOAD: ; %bb.0:
-; NO-PRELOAD-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x0
-; NO-PRELOAD-NEXT: v_mov_b32_e32 v2, 0
-; NO-PRELOAD-NEXT: s_waitcnt lgkmcnt(0)
-; NO-PRELOAD-NEXT: v_mov_b32_e32 v0, s2
-; NO-PRELOAD-NEXT: v_mov_b32_e32 v1, s3
-; NO-PRELOAD-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1] sc0 sc1
-; NO-PRELOAD-NEXT: s_endpgm
-;
-; PRELOAD-1-LABEL: i64_kernel_preload_arg:
-; PRELOAD-1: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: ; %bb.0:
-; PRELOAD-1-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x8
-; PRELOAD-1-NEXT: v_mov_b32_e32 v2, 0
-; PRELOAD-1-NEXT: s_waitcnt lgkmcnt(0)
-; PRELOAD-1-NEXT: v_mov_b64_e32 v[0:1], s[0:1]
-; PRELOAD-1-NEXT: global_store_dwordx2 v2, v[0:1], s[2:3] sc0 sc1
-; PRELOAD-1-NEXT: s_endpgm
-;
-; PRELOAD-2-LABEL: i64_kernel_preload_arg:
-; PRELOAD-2: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: ; %bb.0:
-; PRELOAD-2-NEXT: v_mov_b32_e32 v2, 0
-; PRELOAD-2-NEXT: v_mov_b64_e32 v[0:1], s[4:5]
-; PRELOAD-2-NEXT: global_store_dwordx2 v2, v[0:1], s[2:3] sc0 sc1
-; PRELOAD-2-NEXT: s_endpgm
-;
-; PRELOAD-4-LABEL: i64_kernel_preload_arg:
-; PRELOAD-4: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: ; %bb.0:
-; PRELOAD-4-NEXT: v_mov_b32_e32 v2, 0
-; PRELOAD-4-NEXT: v_mov_b64_e32 v[0:1], s[4:5]
-; PRELOAD-4-NEXT: global_store_dwordx2 v2, v[0:1], s[2:3] sc0 sc1
-; PRELOAD-4-NEXT: s_endpgm
-;
-; PRELOAD-8-LABEL: i64_kernel_preload_arg:
-; PRELOAD-8: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: ; %bb.0:
-; PRELOAD-8-NEXT: v_mov_b32_e32 v2, 0
-; PRELOAD-8-NEXT: v_mov_b64_e32 v[0:1], s[4:5]
-; PRELOAD-8-NEXT: global_store_dwordx2 v2, v[0:1], s[2:3] sc0 sc1
-; PRELOAD-8-NEXT: s_endpgm
+; GFX940-NO-PRELOAD-LABEL: i64_kernel_preload_arg:
+; GFX940-NO-PRELOAD: ; %bb.0:
+; GFX940-NO-PRELOAD-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x0
+; GFX940-NO-PRELOAD-NEXT: v_mov_b32_e32 v2, 0
+; GFX940-NO-PRELOAD-NEXT: s_waitcnt lgkmcnt(0)
+; GFX940-NO-PRELOAD-NEXT: v_mov_b32_e32 v0, s2
+; GFX940-NO-PRELOAD-NEXT: v_mov_b32_e32 v1, s3
+; GFX940-NO-PRELOAD-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1] sc0 sc1
+; GFX940-NO-PRELOAD-NEXT: s_endpgm
+;
+; GFX940-PRELOAD-1-LABEL: i64_kernel_preload_arg:
+; GFX940-PRELOAD-1: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: ; %bb.0:
+; GFX940-PRELOAD-1-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x8
+; GFX940-PRELOAD-1-NEXT: v_mov_b32_e32 v2, 0
+; GFX940-PRELOAD-1-NEXT: s_waitcnt lgkmcnt(0)
+; GFX940-PRELOAD-1-NEXT: v_mov_b64_e32 v[0:1], s[0:1]
+; GFX940-PRELOAD-1-NEXT: global_store_dwordx2 v2, v[0:1], s[2:3] sc0 sc1
+; GFX940-PRELOAD-1-NEXT: s_endpgm
+;
+; GFX940-PRELOAD-2-LABEL: i64_kernel_preload_arg:
+; GFX940-PRELOAD-2: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: ; %bb.0:
+; GFX940-PRELOAD-2-NEXT: v_mov_b32_e32 v2, 0
+; GFX940-PRELOAD-2-NEXT: v_mov_b64_e32 v[0:1], s[4:5]
+; GFX940-PRELOAD-2-NEXT: global_store_dwordx2 v2, v[0:1], s[2:3] sc0 sc1
+; GFX940-PRELOAD-2-NEXT: s_endpgm
+;
+; GFX940-PRELOAD-4-LABEL: i64_kernel_preload_arg:
+; GFX940-PRELOAD-4: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: ; %bb.0:
+; GFX940-PRELOAD-4-NEXT: v_mov_b32_e32 v2, 0
+; GFX940-PRELOAD-4-NEXT: v_mov_b64_e32 v[0:1], s[4:5]
+; GFX940-PRELOAD-4-NEXT: global_store_dwordx2 v2, v[0:1], s[2:3] sc0 sc1
+; GFX940-PRELOAD-4-NEXT: s_endpgm
+;
+; GFX940-PRELOAD-8-LABEL: i64_kernel_preload_arg:
+; GFX940-PRELOAD-8: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: ; %bb.0:
+; GFX940-PRELOAD-8-NEXT: v_mov_b32_e32 v2, 0
+; GFX940-PRELOAD-8-NEXT: v_mov_b64_e32 v[0:1], s[4:5]
+; GFX940-PRELOAD-8-NEXT: global_store_dwordx2 v2, v[0:1], s[2:3] sc0 sc1
+; GFX940-PRELOAD-8-NEXT: s_endpgm
+;
+; GFX90a-NO-PRELOAD-LABEL: i64_kernel_preload_arg:
+; GFX90a-NO-PRELOAD: ; %bb.0:
+; GFX90a-NO-PRELOAD-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x0
+; GFX90a-NO-PRELOAD-NEXT: v_mov_b32_e32 v2, 0
+; GFX90a-NO-PRELOAD-NEXT: s_waitcnt lgkmcnt(0)
+; GFX90a-NO-PRELOAD-NEXT: v_mov_b32_e32 v0, s2
+; GFX90a-NO-PRELOAD-NEXT: v_mov_b32_e32 v1, s3
+; GFX90a-NO-PRELOAD-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX90a-NO-PRELOAD-NEXT: s_endpgm
+;
+; GFX90a-PRELOAD-1-LABEL: i64_kernel_preload_arg:
+; GFX90a-PRELOAD-1: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: ; %bb.0:
+; GFX90a-PRELOAD-1-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x8
+; GFX90a-PRELOAD-1-NEXT: v_mov_b32_e32 v2, 0
+; GFX90a-PRELOAD-1-NEXT: s_waitcnt lgkmcnt(0)
+; GFX90a-PRELOAD-1-NEXT: v_pk_mov_b32 v[0:1], s[0:1], s[0:1] op_sel:[0,1]
+; GFX90a-PRELOAD-1-NEXT: global_store_dwordx2 v2, v[0:1], s[6:7]
+; GFX90a-PRELOAD-1-NEXT: s_endpgm
+;
+; GFX90a-PRELOAD-2-LABEL: i64_kernel_preload_arg:
+; GFX90a-PRELOAD-2: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: ; %bb.0:
+; GFX90a-PRELOAD-2-NEXT: v_mov_b32_e32 v2, 0
+; GFX90a-PRELOAD-2-NEXT: v_pk_mov_b32 v[0:1], s[8:9], s[8:9] op_sel:[0,1]
+; GFX90a-PRELOAD-2-NEXT: global_store_dwordx2 v2, v[0:1], s[6:7]
+; GFX90a-PRELOAD-2-NEXT: s_endpgm
+;
+; GFX90a-PRELOAD-4-LABEL: i64_kernel_preload_arg:
+; GFX90a-PRELOAD-4: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: ; %bb.0:
+; GFX90a-PRELOAD-4-NEXT: v_mov_b32_e32 v2, 0
+; GFX90a-PRELOAD-4-NEXT: v_pk_mov_b32 v[0:1], s[8:9], s[8:9] op_sel:[0,1]
+; GFX90a-PRELOAD-4-NEXT: global_store_dwordx2 v2, v[0:1], s[6:7]
+; GFX90a-PRELOAD-4-NEXT: s_endpgm
+;
+; GFX90a-PRELOAD-8-LABEL: i64_kernel_preload_arg:
+; GFX90a-PRELOAD-8: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: ; %bb.0:
+; GFX90a-PRELOAD-8-NEXT: v_mov_b32_e32 v2, 0
+; GFX90a-PRELOAD-8-NEXT: v_pk_mov_b32 v[0:1], s[8:9], s[8:9] op_sel:[0,1]
+; GFX90a-PRELOAD-8-NEXT: global_store_dwordx2 v2, v[0:1], s[6:7]
+; GFX90a-PRELOAD-8-NEXT: s_endpgm
store i64 %a, ptr addrspace(1) %out, align 8
ret void
}
define amdgpu_kernel void @f64_kernel_preload_arg(ptr addrspace(1) %out, double %in) {
-; NO-PRELOAD-LABEL: f64_kernel_preload_arg:
-; NO-PRELOAD: ; %bb.0:
-; NO-PRELOAD-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x0
-; NO-PRELOAD-NEXT: v_mov_b32_e32 v2, 0
-; NO-PRELOAD-NEXT: s_waitcnt lgkmcnt(0)
-; NO-PRELOAD-NEXT: v_mov_b32_e32 v0, s2
-; NO-PRELOAD-NEXT: v_mov_b32_e32 v1, s3
-; NO-PRELOAD-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1] sc0 sc1
-; NO-PRELOAD-NEXT: s_endpgm
-;
-; PRELOAD-1-LABEL: f64_kernel_preload_arg:
-; PRELOAD-1: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: s_nop 0
-; PRELOAD-1-NEXT: ; %bb.0:
-; PRELOAD-1-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x8
-; PRELOAD-1-NEXT: v_mov_b32_e32 v2, 0
-; PRELOAD-1-NEXT: s_waitcnt lgkmcnt(0)
-; PRELOAD-1-NEXT: v_mov_b64_e32 v[0:1], s[0:1]
-; PRELOAD-1-NEXT: global_store_dwordx2 v2, v[0:1], s[2:3] sc0 sc1
-; PRELOAD-1-NEXT: s_endpgm
-;
-; PRELOAD-2-LABEL: f64_kernel_preload_arg:
-; PRELOAD-2: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: s_nop 0
-; PRELOAD-2-NEXT: ; %bb.0:
-; PRELOAD-2-NEXT: v_mov_b32_e32 v2, 0
-; PRELOAD-2-NEXT: v_mov_b64_e32 v[0:1], s[4:5]
-; PRELOAD-2-NEXT: global_store_dwordx2 v2, v[0:1], s[2:3] sc0 sc1
-; PRELOAD-2-NEXT: s_endpgm
-;
-; PRELOAD-4-LABEL: f64_kernel_preload_arg:
-; PRELOAD-4: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: s_nop 0
-; PRELOAD-4-NEXT: ; %bb.0:
-; PRELOAD-4-NEXT: v_mov_b32_e32 v2, 0
-; PRELOAD-4-NEXT: v_mov_b64_e32 v[0:1], s[4:5]
-; PRELOAD-4-NEXT: global_store_dwordx2 v2, v[0:1], s[2:3] sc0 sc1
-; PRELOAD-4-NEXT: s_endpgm
-;
-; PRELOAD-8-LABEL: f64_kernel_preload_arg:
-; PRELOAD-8: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: s_nop 0
-; PRELOAD-8-NEXT: ; %bb.0:
-; PRELOAD-8-NEXT: v_mov_b32_e32 v2, 0
-; PRELOAD-8-NEXT: v_mov_b64_e32 v[0:1], s[4:5]
-; PRELOAD-8-NEXT: global_store_dwordx2 v2, v[0:1], s[2:3] sc0 sc1
-; PRELOAD-8-NEXT: s_endpgm
+; GFX940-NO-PRELOAD-LABEL: f64_kernel_preload_arg:
+; GFX940-NO-PRELOAD: ; %bb.0:
+; GFX940-NO-PRELOAD-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x0
+; GFX940-NO-PRELOAD-NEXT: v_mov_b32_e32 v2, 0
+; GFX940-NO-PRELOAD-NEXT: s_waitcnt lgkmcnt(0)
+; GFX940-NO-PRELOAD-NEXT: v_mov_b32_e32 v0, s2
+; GFX940-NO-PRELOAD-NEXT: v_mov_b32_e32 v1, s3
+; GFX940-NO-PRELOAD-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1] sc0 sc1
+; GFX940-NO-PRELOAD-NEXT: s_endpgm
+;
+; GFX940-PRELOAD-1-LABEL: f64_kernel_preload_arg:
+; GFX940-PRELOAD-1: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1-NEXT: ; %bb.0:
+; GFX940-PRELOAD-1-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x8
+; GFX940-PRELOAD-1-NEXT: v_mov_b32_e32 v2, 0
+; GFX940-PRELOAD-1-NEXT: s_waitcnt lgkmcnt(0)
+; GFX940-PRELOAD-1-NEXT: v_mov_b64_e32 v[0:1], s[0:1]
+; GFX940-PRELOAD-1-NEXT: global_store_dwordx2 v2, v[0:1], s[2:3] sc0 sc1
+; GFX940-PRELOAD-1-NEXT: s_endpgm
+;
+; GFX940-PRELOAD-2-LABEL: f64_kernel_preload_arg:
+; GFX940-PRELOAD-2: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2-NEXT: ; %bb.0:
+; GFX940-PRELOAD-2-NEXT: v_mov_b32_e32 v2, 0
+; GFX940-PRELOAD-2-NEXT: v_mov_b64_e32 v[0:1], s[4:5]
+; GFX940-PRELOAD-2-NEXT: global_store_dwordx2 v2, v[0:1], s[2:3] sc0 sc1
+; GFX940-PRELOAD-2-NEXT: s_endpgm
+;
+; GFX940-PRELOAD-4-LABEL: f64_kernel_preload_arg:
+; GFX940-PRELOAD-4: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4-NEXT: ; %bb.0:
+; GFX940-PRELOAD-4-NEXT: v_mov_b32_e32 v2, 0
+; GFX940-PRELOAD-4-NEXT: v_mov_b64_e32 v[0:1], s[4:5]
+; GFX940-PRELOAD-4-NEXT: global_store_dwordx2 v2, v[0:1], s[2:3] sc0 sc1
+; GFX940-PRELOAD-4-NEXT: s_endpgm
+;
+; GFX940-PRELOAD-8-LABEL: f64_kernel_preload_arg:
+; GFX940-PRELOAD-8: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8-NEXT: ; %bb.0:
+; GFX940-PRELOAD-8-NEXT: v_mov_b32_e32 v2, 0
+; GFX940-PRELOAD-8-NEXT: v_mov_b64_e32 v[0:1], s[4:5]
+; GFX940-PRELOAD-8-NEXT: global_store_dwordx2 v2, v[0:1], s[2:3] sc0 sc1
+; GFX940-PRELOAD-8-NEXT: s_endpgm
+;
+; GFX90a-NO-PRELOAD-LABEL: f64_kernel_preload_arg:
+; GFX90a-NO-PRELOAD: ; %bb.0:
+; GFX90a-NO-PRELOAD-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x0
+; GFX90a-NO-PRELOAD-NEXT: v_mov_b32_e32 v2, 0
+; GFX90a-NO-PRELOAD-NEXT: s_waitcnt lgkmcnt(0)
+; GFX90a-NO-PRELOAD-NEXT: v_mov_b32_e32 v0, s2
+; GFX90a-NO-PRELOAD-NEXT: v_mov_b32_e32 v1, s3
+; GFX90a-NO-PRELOAD-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX90a-NO-PRELOAD-NEXT: s_endpgm
+;
+; GFX90a-PRELOAD-1-LABEL: f64_kernel_preload_arg:
+; GFX90a-PRELOAD-1: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1-NEXT: ; %bb.0:
+; GFX90a-PRELOAD-1-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x8
+; GFX90a-PRELOAD-1-NEXT: v_mov_b32_e32 v2, 0
+; GFX90a-PRELOAD-1-NEXT: s_waitcnt lgkmcnt(0)
+; GFX90a-PRELOAD-1-NEXT: v_pk_mov_b32 v[0:1], s[0:1], s[0:1] op_sel:[0,1]
+; GFX90a-PRELOAD-1-NEXT: global_store_dwordx2 v2, v[0:1], s[6:7]
+; GFX90a-PRELOAD-1-NEXT: s_endpgm
+;
+; GFX90a-PRELOAD-2-LABEL: f64_kernel_preload_arg:
+; GFX90a-PRELOAD-2: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2-NEXT: ; %bb.0:
+; GFX90a-PRELOAD-2-NEXT: v_mov_b32_e32 v2, 0
+; GFX90a-PRELOAD-2-NEXT: v_pk_mov_b32 v[0:1], s[8:9], s[8:9] op_sel:[0,1]
+; GFX90a-PRELOAD-2-NEXT: global_store_dwordx2 v2, v[0:1], s[6:7]
+; GFX90a-PRELOAD-2-NEXT: s_endpgm
+;
+; GFX90a-PRELOAD-4-LABEL: f64_kernel_preload_arg:
+; GFX90a-PRELOAD-4: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4-NEXT: ; %bb.0:
+; GFX90a-PRELOAD-4-NEXT: v_mov_b32_e32 v2, 0
+; GFX90a-PRELOAD-4-NEXT: v_pk_mov_b32 v[0:1], s[8:9], s[8:9] op_sel:[0,1]
+; GFX90a-PRELOAD-4-NEXT: global_store_dwordx2 v2, v[0:1], s[6:7]
+; GFX90a-PRELOAD-4-NEXT: s_endpgm
+;
+; GFX90a-PRELOAD-8-LABEL: f64_kernel_preload_arg:
+; GFX90a-PRELOAD-8: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8-NEXT: ; %bb.0:
+; GFX90a-PRELOAD-8-NEXT: v_mov_b32_e32 v2, 0
+; GFX90a-PRELOAD-8-NEXT: v_pk_mov_b32 v[0:1], s[8:9], s[8:9] op_sel:[0,1]
+; GFX90a-PRELOAD-8-NEXT: global_store_dwordx2 v2, v[0:1], s[6:7]
+; GFX90a-PRELOAD-8-NEXT: s_endpgm
store double %in, ptr addrspace(1) %out
ret void
}
diff --git a/llvm/test/CodeGen/AMDGPU/readsteadycounter.ll b/llvm/test/CodeGen/AMDGPU/readsteadycounter.ll
new file mode 100644
index 0000000..15f664c
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/readsteadycounter.ll
@@ -0,0 +1,24 @@
+; RUN: llc -global-isel=0 -mtriple=amdgcn -mcpu=gfx700 -verify-machineinstrs < %s | FileCheck %s -check-prefixes=GCN,GFX700
+; RUN: llc -global-isel=0 -mtriple=amdgcn -mcpu=gfx900 -verify-machineinstrs < %s | FileCheck %s -check-prefixes=GCN,GFX900
+; RUN: llc -global-isel=1 -mtriple=amdgcn -mcpu=gfx900 -verify-machineinstrs < %s | FileCheck %s -check-prefixes=GCN,GFX900
+; RUN: llc -global-isel=0 -mtriple=amdgcn -mcpu=gfx1100 -verify-machineinstrs < %s | FileCheck %s -check-prefixes=GCN,GFX1100
+; RUN: llc -global-isel=1 -mtriple=amdgcn -mcpu=gfx1100 -verify-machineinstrs < %s | FileCheck %s -check-prefixes=GCN,GFX1100
+
+declare i64 @llvm.readsteadycounter() #0
+
+; GCN-LABEL: {{^}}test_readsteadycounter:
+; GFX700: s_mov_b32 s[[REG:[0-9]+]], 0
+; GFX900: s_memrealtime s[[[LO:[0-9]+]]:[[HI:[0-9]+]]]
+; GFX900: s_memrealtime s[[[LO:[0-9]+]]:[[HI:[0-9]+]]]
+; GFX1100: s_sendmsg_rtn_b64 s[[[LO:[0-9]+]]:[[HI:[0-9]+]]], sendmsg(MSG_RTN_GET_REALTIME)
+; GFX1100: s_sendmsg_rtn_b64 s[[[LO:[0-9]+]]:[[HI:[0-9]+]]], sendmsg(MSG_RTN_GET_REALTIME)
+define amdgpu_kernel void @test_readsteadycounter(ptr addrspace(1) %out) #0 {
+ %cycle0 = call i64 @llvm.readsteadycounter()
+ store volatile i64 %cycle0, ptr addrspace(1) %out
+
+ %cycle1 = call i64 @llvm.readsteadycounter()
+ store volatile i64 %cycle1, ptr addrspace(1) %out
+ ret void
+}
+
+attributes #0 = { nounwind }
diff --git a/llvm/test/CodeGen/AMDGPU/splitkit-getsubrangeformask.ll b/llvm/test/CodeGen/AMDGPU/splitkit-getsubrangeformask.ll
index 8c806e7..b87439a 100644
--- a/llvm/test/CodeGen/AMDGPU/splitkit-getsubrangeformask.ll
+++ b/llvm/test/CodeGen/AMDGPU/splitkit-getsubrangeformask.ll
@@ -31,205 +31,188 @@ define amdgpu_gs void @_amdgpu_gs_main(i32 inreg %primShaderTableAddrLow, <31 x
; CHECK-NEXT: [[COPY13:%[0-9]+]]:sgpr_32 = COPY $sgpr10
; CHECK-NEXT: [[COPY14:%[0-9]+]]:sgpr_32 = COPY $sgpr8
; CHECK-NEXT: undef [[S_LOAD_DWORDX2_IMM:%[0-9]+]].sub0_sub1:sgpr_128 = S_LOAD_DWORDX2_IMM [[COPY]], 232, 0 :: (invariant load (s64) from %ir.39, addrspace 4)
+ ; CHECK-NEXT: [[S_BUFFER_LOAD_DWORD_IMM:%[0-9]+]]:sreg_32_xm0_xexec = S_BUFFER_LOAD_DWORD_IMM undef %125:sgpr_128, 0, 0 :: (dereferenceable invariant load (s32))
+ ; CHECK-NEXT: KILL undef %125:sgpr_128
; CHECK-NEXT: [[S_LSHL_B32_:%[0-9]+]]:sreg_32 = S_LSHL_B32 [[COPY5]], 4, implicit-def dead $scc
; CHECK-NEXT: [[S_LSHL_B32_1:%[0-9]+]]:sreg_32 = S_LSHL_B32 [[COPY4]], 4, implicit-def dead $scc
; CHECK-NEXT: [[S_LSHL_B32_2:%[0-9]+]]:sreg_32 = S_LSHL_B32 [[COPY3]], 4, implicit-def dead $scc
; CHECK-NEXT: [[S_ASHR_I32_:%[0-9]+]]:sreg_32_xm0 = S_ASHR_I32 [[S_LSHL_B32_]], 31, implicit-def dead $scc
; CHECK-NEXT: [[S_ASHR_I32_1:%[0-9]+]]:sreg_32_xm0 = S_ASHR_I32 [[S_LSHL_B32_1]], 31, implicit-def dead $scc
- ; CHECK-NEXT: [[S_ASHR_I32_2:%[0-9]+]]:sreg_32_xm0 = S_ASHR_I32 [[S_LSHL_B32_2]], 31, implicit-def dead $scc
; CHECK-NEXT: [[S_LOAD_DWORDX2_IMM:%[0-9]+]].sub1:sgpr_128 = S_AND_B32 [[S_LOAD_DWORDX2_IMM]].sub1, 65535, implicit-def dead $scc
+ ; CHECK-NEXT: [[S_ASHR_I32_2:%[0-9]+]]:sreg_32_xm0 = S_ASHR_I32 [[S_LSHL_B32_2]], 31, implicit-def dead $scc
+ ; CHECK-NEXT: [[S_SUB_I32_:%[0-9]+]]:sreg_32 = S_SUB_I32 [[S_BUFFER_LOAD_DWORD_IMM]], 29, implicit-def dead $scc
+ ; CHECK-NEXT: [[S_SUB_I32_1:%[0-9]+]]:sreg_32 = S_SUB_I32 [[S_BUFFER_LOAD_DWORD_IMM]], 30, implicit-def dead $scc
; CHECK-NEXT: undef [[S_ADD_U32_:%[0-9]+]].sub0:sreg_64 = S_ADD_U32 [[COPY6]], [[S_LSHL_B32_2]], implicit-def $scc
; CHECK-NEXT: [[S_ADD_U32_:%[0-9]+]].sub1:sreg_64 = S_ADDC_U32 undef %54:sreg_32, [[S_ASHR_I32_2]], implicit-def dead $scc, implicit $scc
; CHECK-NEXT: [[S_LOAD_DWORDX4_IMM:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM [[S_ADD_U32_]], 16, 0 :: (invariant load (s128) from %ir.81, addrspace 4)
; CHECK-NEXT: [[S_LOAD_DWORDX4_IMM1:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM undef %74:sreg_64, 0, 0 :: (invariant load (s128) from `ptr addrspace(4) undef`, addrspace 4)
- ; CHECK-NEXT: [[S_BUFFER_LOAD_DWORD_IMM:%[0-9]+]]:sreg_32_xm0_xexec = S_BUFFER_LOAD_DWORD_IMM undef %132:sgpr_128, 0, 0 :: (dereferenceable invariant load (s32))
; CHECK-NEXT: KILL undef %74:sreg_64
- ; CHECK-NEXT: KILL undef %132:sgpr_128
; CHECK-NEXT: KILL [[S_ADD_U32_]].sub0, [[S_ADD_U32_]].sub1
; CHECK-NEXT: [[S_BUFFER_LOAD_DWORD_IMM1:%[0-9]+]]:sreg_32_xm0_xexec = S_BUFFER_LOAD_DWORD_IMM [[S_LOAD_DWORDX4_IMM]], 0, 0 :: (dereferenceable invariant load (s32))
; CHECK-NEXT: [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
; CHECK-NEXT: undef [[S_MOV_B32_:%[0-9]+]].sub1:sgpr_128 = S_MOV_B32 0
+ ; CHECK-NEXT: [[BUFFER_LOAD_DWORD_OFFSET:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_OFFSET undef %118:sgpr_128, 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8)
; CHECK-NEXT: [[BUFFER_LOAD_FORMAT_X_IDXEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_FORMAT_X_IDXEN [[V_MOV_B32_e32_]], undef %89:sgpr_128, 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8)
; CHECK-NEXT: [[BUFFER_LOAD_FORMAT_X_IDXEN1:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_FORMAT_X_IDXEN [[V_MOV_B32_e32_]], [[S_LOAD_DWORDX4_IMM1]], 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8)
; CHECK-NEXT: KILL undef %89:sgpr_128
- ; CHECK-NEXT: [[S_SUB_I32_:%[0-9]+]]:sreg_32 = S_SUB_I32 [[S_BUFFER_LOAD_DWORD_IMM]], 29, implicit-def dead $scc
- ; CHECK-NEXT: [[S_SUB_I32_1:%[0-9]+]]:sreg_32 = S_SUB_I32 [[S_BUFFER_LOAD_DWORD_IMM]], 30, implicit-def dead $scc
+ ; CHECK-NEXT: KILL undef %118:sgpr_128
; CHECK-NEXT: [[S_SUB_I32_2:%[0-9]+]]:sreg_32 = S_SUB_I32 [[S_BUFFER_LOAD_DWORD_IMM1]], 31, implicit-def dead $scc
- ; CHECK-NEXT: [[S_ADD_U32_1:%[0-9]+]]:sreg_32 = S_ADD_U32 [[COPY6]], 64, implicit-def $scc
- ; CHECK-NEXT: [[S_ADDC_U32_:%[0-9]+]]:sreg_32 = S_ADDC_U32 undef %54:sreg_32, 0, implicit-def dead $scc, implicit $scc
- ; CHECK-NEXT: undef [[S_ADD_U32_2:%[0-9]+]].sub0:sreg_64 = S_ADD_U32 [[S_ADD_U32_1]], [[S_LSHL_B32_]], implicit-def $scc
- ; CHECK-NEXT: [[S_ADD_U32_2:%[0-9]+]].sub1:sreg_64 = S_ADDC_U32 [[S_ADDC_U32_]], [[S_ASHR_I32_]], implicit-def dead $scc, implicit $scc
- ; CHECK-NEXT: undef [[S_ADD_U32_3:%[0-9]+]].sub0:sreg_64 = S_ADD_U32 [[S_ADD_U32_1]], [[S_LSHL_B32_1]], implicit-def $scc
- ; CHECK-NEXT: [[S_LOAD_DWORDX4_IMM2:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM [[S_ADD_U32_2]], 0, 0 :: (invariant load (s128) from %ir.87, addrspace 4)
- ; CHECK-NEXT: [[S_ADD_U32_3:%[0-9]+]].sub1:sreg_64 = S_ADDC_U32 [[S_ADDC_U32_]], [[S_ASHR_I32_1]], implicit-def dead $scc, implicit $scc
- ; CHECK-NEXT: undef [[S_ADD_U32_4:%[0-9]+]].sub0:sreg_64 = S_ADD_U32 [[S_ADD_U32_1]], [[S_LSHL_B32_2]], implicit-def $scc
- ; CHECK-NEXT: [[S_ADD_U32_4:%[0-9]+]].sub1:sreg_64 = S_ADDC_U32 [[S_ADDC_U32_]], [[S_ASHR_I32_2]], implicit-def dead $scc, implicit $scc
- ; CHECK-NEXT: [[S_ASHR_I32_3:%[0-9]+]]:sreg_32_xm0 = S_ASHR_I32 undef %171:sreg_32, 31, implicit-def dead $scc
- ; CHECK-NEXT: undef [[S_ADD_U32_5:%[0-9]+]].sub0:sreg_64 = S_ADD_U32 [[S_ADD_U32_1]], undef %171:sreg_32, implicit-def $scc
- ; CHECK-NEXT: [[S_ADD_U32_5:%[0-9]+]].sub1:sreg_64 = S_ADDC_U32 [[S_ADDC_U32_]], [[S_ASHR_I32_3]], implicit-def dead $scc, implicit $scc
- ; CHECK-NEXT: undef [[S_ADD_U32_6:%[0-9]+]].sub0:sreg_64 = S_ADD_U32 [[COPY7]].sub0, [[S_LSHL_B32_]], implicit-def $scc
- ; CHECK-NEXT: [[S_ADD_U32_6:%[0-9]+]].sub1:sreg_64 = S_ADDC_U32 undef %51:sreg_32, [[S_ASHR_I32_]], implicit-def dead $scc, implicit $scc
- ; CHECK-NEXT: undef [[S_ADD_U32_7:%[0-9]+]].sub0:sreg_64 = S_ADD_U32 [[COPY7]].sub0, [[S_LSHL_B32_1]], implicit-def $scc
- ; CHECK-NEXT: [[S_ADD_U32_7:%[0-9]+]].sub1:sreg_64 = S_ADDC_U32 undef %51:sreg_32, [[S_ASHR_I32_1]], implicit-def dead $scc, implicit $scc
- ; CHECK-NEXT: undef [[S_ADD_U32_8:%[0-9]+]].sub0:sreg_64 = S_ADD_U32 [[COPY7]].sub0, undef %171:sreg_32, implicit-def $scc
- ; CHECK-NEXT: [[S_ADD_U32_8:%[0-9]+]].sub1:sreg_64 = S_ADDC_U32 undef %51:sreg_32, [[S_ASHR_I32_3]], implicit-def dead $scc, implicit $scc
- ; CHECK-NEXT: [[S_ADD_U32_9:%[0-9]+]]:sreg_32 = S_ADD_U32 [[COPY7]].sub0, 224, implicit-def $scc
- ; CHECK-NEXT: [[S_ADDC_U32_1:%[0-9]+]]:sreg_32 = S_ADDC_U32 undef %51:sreg_32, 0, implicit-def dead $scc, implicit $scc
- ; CHECK-NEXT: undef [[S_ADD_U32_10:%[0-9]+]].sub0:sreg_64 = S_ADD_U32 [[S_ADD_U32_9]], [[S_LSHL_B32_]], implicit-def $scc
- ; CHECK-NEXT: [[S_ADD_U32_10:%[0-9]+]].sub1:sreg_64 = S_ADDC_U32 [[S_ADDC_U32_1]], [[S_ASHR_I32_]], implicit-def dead $scc, implicit $scc
- ; CHECK-NEXT: undef [[S_ADD_U32_11:%[0-9]+]].sub0:sreg_64 = S_ADD_U32 [[S_ADD_U32_9]], [[S_LSHL_B32_1]], implicit-def $scc
- ; CHECK-NEXT: [[S_ADD_U32_11:%[0-9]+]].sub1:sreg_64 = S_ADDC_U32 [[S_ADDC_U32_1]], [[S_ASHR_I32_1]], implicit-def dead $scc, implicit $scc
- ; CHECK-NEXT: undef [[S_ADD_U32_12:%[0-9]+]].sub0:sreg_64 = S_ADD_U32 [[S_ADD_U32_9]], [[S_LSHL_B32_2]], implicit-def $scc
- ; CHECK-NEXT: [[S_ADD_U32_12:%[0-9]+]].sub1:sreg_64 = S_ADDC_U32 [[S_ADDC_U32_1]], [[S_ASHR_I32_2]], implicit-def dead $scc, implicit $scc
- ; CHECK-NEXT: [[S_ADD_U32_13:%[0-9]+]]:sreg_32 = S_ADD_U32 [[COPY7]].sub0, 576, implicit-def $scc
- ; CHECK-NEXT: [[S_ADDC_U32_2:%[0-9]+]]:sreg_32 = S_ADDC_U32 undef %51:sreg_32, 0, implicit-def dead $scc, implicit $scc
- ; CHECK-NEXT: undef [[S_ADD_U32_14:%[0-9]+]].sub0:sreg_64 = S_ADD_U32 [[S_ADD_U32_13]], [[S_LSHL_B32_]], implicit-def $scc
- ; CHECK-NEXT: [[S_ADD_U32_14:%[0-9]+]].sub1:sreg_64 = S_ADDC_U32 [[S_ADDC_U32_2]], [[S_ASHR_I32_]], implicit-def dead $scc, implicit $scc
- ; CHECK-NEXT: undef [[S_ADD_U32_15:%[0-9]+]].sub0:sreg_64 = S_ADD_U32 [[S_ADD_U32_13]], [[S_LSHL_B32_2]], implicit-def $scc
- ; CHECK-NEXT: [[S_ADD_U32_15:%[0-9]+]].sub1:sreg_64 = S_ADDC_U32 [[S_ADDC_U32_2]], [[S_ASHR_I32_2]], implicit-def dead $scc, implicit $scc
- ; CHECK-NEXT: undef [[S_ADD_U32_16:%[0-9]+]].sub0:sreg_64 = S_ADD_U32 [[S_ADD_U32_13]], undef %171:sreg_32, implicit-def $scc
- ; CHECK-NEXT: [[S_ADD_U32_16:%[0-9]+]].sub1:sreg_64 = S_ADDC_U32 [[S_ADDC_U32_2]], [[S_ASHR_I32_3]], implicit-def dead $scc, implicit $scc
- ; CHECK-NEXT: undef [[S_ADD_U32_17:%[0-9]+]].sub0:sreg_64 = S_ADD_U32 [[COPY8]], [[S_LSHL_B32_]], implicit-def $scc
- ; CHECK-NEXT: [[S_ADD_U32_17:%[0-9]+]].sub1:sreg_64 = S_ADDC_U32 undef %48:sreg_32, [[S_ASHR_I32_]], implicit-def dead $scc, implicit $scc
- ; CHECK-NEXT: undef [[S_ADD_U32_18:%[0-9]+]].sub0:sreg_64 = S_ADD_U32 [[COPY9]], [[S_LSHL_B32_1]], implicit-def $scc
- ; CHECK-NEXT: [[S_ADD_U32_18:%[0-9]+]].sub1:sreg_64 = S_ADDC_U32 undef %45:sreg_32, [[S_ASHR_I32_1]], implicit-def dead $scc, implicit $scc
- ; CHECK-NEXT: undef [[S_ADD_U32_19:%[0-9]+]].sub0:sreg_64 = S_ADD_U32 [[COPY9]], [[S_LSHL_B32_2]], implicit-def $scc
- ; CHECK-NEXT: [[S_ADD_U32_19:%[0-9]+]].sub1:sreg_64 = S_ADDC_U32 undef %45:sreg_32, [[S_ASHR_I32_2]], implicit-def dead $scc, implicit $scc
+ ; CHECK-NEXT: undef [[S_ADD_U32_1:%[0-9]+]].sub0:sreg_64 = S_ADD_U32 [[COPY6]], [[S_LSHL_B32_]], implicit-def $scc
+ ; CHECK-NEXT: [[S_ADD_U32_1:%[0-9]+]].sub1:sreg_64 = S_ADDC_U32 undef %54:sreg_32, [[S_ASHR_I32_]], implicit-def dead $scc, implicit $scc
+ ; CHECK-NEXT: undef [[S_ADD_U32_2:%[0-9]+]].sub0:sreg_64 = S_ADD_U32 [[COPY6]], [[S_LSHL_B32_1]], implicit-def $scc
+ ; CHECK-NEXT: [[S_ADD_U32_2:%[0-9]+]].sub1:sreg_64 = S_ADDC_U32 undef %54:sreg_32, [[S_ASHR_I32_1]], implicit-def dead $scc, implicit $scc
+ ; CHECK-NEXT: undef [[S_ADD_U32_3:%[0-9]+]].sub0:sreg_64 = S_ADD_U32 [[COPY6]], [[S_LSHL_B32_2]], implicit-def $scc
+ ; CHECK-NEXT: [[S_LOAD_DWORDX4_IMM2:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM [[S_ADD_U32_1]], 64, 0 :: (invariant load (s128) from %ir.87, addrspace 4)
+ ; CHECK-NEXT: [[S_LOAD_DWORDX4_IMM3:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM [[S_ADD_U32_2]], 64, 0 :: (invariant load (s128) from %ir.93, addrspace 4)
+ ; CHECK-NEXT: KILL [[S_ADD_U32_1]].sub0, [[S_ADD_U32_1]].sub1
+ ; CHECK-NEXT: KILL [[S_ADD_U32_2]].sub0, [[S_ADD_U32_2]].sub1
+ ; CHECK-NEXT: [[S_ADD_U32_3:%[0-9]+]].sub1:sreg_64 = S_ADDC_U32 undef %54:sreg_32, [[S_ASHR_I32_2]], implicit-def dead $scc, implicit $scc
+ ; CHECK-NEXT: [[S_ASHR_I32_3:%[0-9]+]]:sreg_32_xm0 = S_ASHR_I32 undef %169:sreg_32, 31, implicit-def dead $scc
+ ; CHECK-NEXT: undef [[S_ADD_U32_4:%[0-9]+]].sub0:sreg_64 = S_ADD_U32 [[COPY6]], undef %169:sreg_32, implicit-def $scc
+ ; CHECK-NEXT: [[S_ADD_U32_4:%[0-9]+]].sub1:sreg_64 = S_ADDC_U32 undef %54:sreg_32, [[S_ASHR_I32_3]], implicit-def dead $scc, implicit $scc
+ ; CHECK-NEXT: undef [[S_ADD_U32_5:%[0-9]+]].sub0:sreg_64 = S_ADD_U32 [[COPY7]].sub0, [[S_LSHL_B32_]], implicit-def $scc
+ ; CHECK-NEXT: [[S_ADD_U32_5:%[0-9]+]].sub1:sreg_64 = S_ADDC_U32 undef %51:sreg_32, [[S_ASHR_I32_]], implicit-def dead $scc, implicit $scc
+ ; CHECK-NEXT: undef [[S_ADD_U32_6:%[0-9]+]].sub0:sreg_64 = S_ADD_U32 [[COPY7]].sub0, [[S_LSHL_B32_1]], implicit-def $scc
+ ; CHECK-NEXT: [[S_ADD_U32_6:%[0-9]+]].sub1:sreg_64 = S_ADDC_U32 undef %51:sreg_32, [[S_ASHR_I32_1]], implicit-def dead $scc, implicit $scc
+ ; CHECK-NEXT: undef [[S_ADD_U32_7:%[0-9]+]].sub0:sreg_64 = S_ADD_U32 [[COPY7]].sub0, undef %169:sreg_32, implicit-def $scc
+ ; CHECK-NEXT: [[S_ADD_U32_7:%[0-9]+]].sub1:sreg_64 = S_ADDC_U32 undef %51:sreg_32, [[S_ASHR_I32_3]], implicit-def dead $scc, implicit $scc
+ ; CHECK-NEXT: undef [[S_ADD_U32_8:%[0-9]+]].sub0:sreg_64 = S_ADD_U32 [[COPY7]].sub0, [[S_LSHL_B32_2]], implicit-def $scc
+ ; CHECK-NEXT: [[S_ADD_U32_8:%[0-9]+]].sub1:sreg_64 = S_ADDC_U32 undef %51:sreg_32, [[S_ASHR_I32_2]], implicit-def dead $scc, implicit $scc
+ ; CHECK-NEXT: undef [[S_ADD_U32_9:%[0-9]+]].sub0:sreg_64 = S_ADD_U32 [[COPY8]], [[S_LSHL_B32_]], implicit-def $scc
+ ; CHECK-NEXT: [[S_ADD_U32_9:%[0-9]+]].sub1:sreg_64 = S_ADDC_U32 undef %48:sreg_32, [[S_ASHR_I32_]], implicit-def dead $scc, implicit $scc
+ ; CHECK-NEXT: undef [[S_ADD_U32_10:%[0-9]+]].sub0:sreg_64 = S_ADD_U32 [[COPY9]], [[S_LSHL_B32_1]], implicit-def $scc
+ ; CHECK-NEXT: [[S_ADD_U32_10:%[0-9]+]].sub1:sreg_64 = S_ADDC_U32 undef %45:sreg_32, [[S_ASHR_I32_1]], implicit-def dead $scc, implicit $scc
+ ; CHECK-NEXT: undef [[S_ADD_U32_11:%[0-9]+]].sub0:sreg_64 = S_ADD_U32 [[COPY9]], [[S_LSHL_B32_2]], implicit-def $scc
+ ; CHECK-NEXT: [[S_ADD_U32_11:%[0-9]+]].sub1:sreg_64 = S_ADDC_U32 undef %45:sreg_32, [[S_ASHR_I32_2]], implicit-def dead $scc, implicit $scc
; CHECK-NEXT: [[S_ADD_I32_:%[0-9]+]]:sreg_32 = S_ADD_I32 [[S_LSHL_B32_]], 16, implicit-def dead $scc
; CHECK-NEXT: [[S_ADD_I32_1:%[0-9]+]]:sreg_32 = S_ADD_I32 [[S_LSHL_B32_2]], 16, implicit-def dead $scc
; CHECK-NEXT: [[S_BUFFER_LOAD_DWORD_SGPR_IMM:%[0-9]+]]:sreg_32_xm0_xexec = S_BUFFER_LOAD_DWORD_SGPR_IMM [[S_MOV_B32_]], [[S_ADD_I32_]], 0, 0 :: (dereferenceable invariant load (s32))
- ; CHECK-NEXT: [[S_BUFFER_LOAD_DWORD_SGPR_IMM1:%[0-9]+]]:sreg_32_xm0_xexec = S_BUFFER_LOAD_DWORD_SGPR_IMM [[S_MOV_B32_]], undef %312:sreg_32, 0, 0 :: (dereferenceable invariant load (s32))
+ ; CHECK-NEXT: [[S_BUFFER_LOAD_DWORD_SGPR_IMM1:%[0-9]+]]:sreg_32_xm0_xexec = S_BUFFER_LOAD_DWORD_SGPR_IMM [[S_MOV_B32_]], undef %301:sreg_32, 0, 0 :: (dereferenceable invariant load (s32))
; CHECK-NEXT: [[S_BUFFER_LOAD_DWORD_SGPR_IMM2:%[0-9]+]]:sreg_32_xm0_xexec = S_BUFFER_LOAD_DWORD_SGPR_IMM [[S_MOV_B32_]], [[S_ADD_I32_1]], 0, 0 :: (dereferenceable invariant load (s32))
; CHECK-NEXT: [[S_BUFFER_LOAD_DWORD_IMM2:%[0-9]+]]:sreg_32_xm0_xexec = S_BUFFER_LOAD_DWORD_IMM [[S_MOV_B32_]], 16, 0 :: (dereferenceable invariant load (s32))
- ; CHECK-NEXT: [[BUFFER_LOAD_DWORD_OFFSET:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_OFFSET undef %118:sgpr_128, 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8)
- ; CHECK-NEXT: [[S_BUFFER_LOAD_DWORD_SGPR_IMM3:%[0-9]+]]:sreg_32_xm0_xexec = S_BUFFER_LOAD_DWORD_SGPR_IMM undef %367:sgpr_128, undef %368:sreg_32, 0, 0 :: (dereferenceable invariant load (s32))
- ; CHECK-NEXT: [[S_BUFFER_LOAD_DWORD_IMM3:%[0-9]+]]:sreg_32_xm0_xexec = S_BUFFER_LOAD_DWORD_IMM undef %378:sgpr_128, 16, 0 :: (dereferenceable invariant load (s32))
- ; CHECK-NEXT: [[S_LOAD_DWORDX4_IMM3:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM [[S_ADD_U32_3]], 0, 0 :: (invariant load (s128) from %ir.92, addrspace 4)
- ; CHECK-NEXT: [[S_LOAD_DWORDX4_IMM4:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM [[S_ADD_U32_4]], 0, 0 :: (invariant load (s128) from %ir.97, addrspace 4)
- ; CHECK-NEXT: [[S_LOAD_DWORDX4_IMM5:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM [[S_ADD_U32_5]], 0, 0 :: (invariant load (s128) from %ir.104, addrspace 4)
- ; CHECK-NEXT: [[S_LOAD_DWORDX4_IMM6:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM [[S_ADD_U32_6]], 0, 0 :: (invariant load (s128) from %ir.109, addrspace 4)
- ; CHECK-NEXT: [[S_LOAD_DWORDX4_IMM7:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM [[S_ADD_U32_7]], 0, 0 :: (invariant load (s128) from %ir.114, addrspace 4)
+ ; CHECK-NEXT: [[S_BUFFER_LOAD_DWORD_SGPR_IMM3:%[0-9]+]]:sreg_32_xm0_xexec = S_BUFFER_LOAD_DWORD_SGPR_IMM undef %356:sgpr_128, undef %357:sreg_32, 0, 0 :: (dereferenceable invariant load (s32))
+ ; CHECK-NEXT: [[S_BUFFER_LOAD_DWORD_IMM3:%[0-9]+]]:sreg_32_xm0_xexec = S_BUFFER_LOAD_DWORD_IMM undef %367:sgpr_128, 16, 0 :: (dereferenceable invariant load (s32))
+ ; CHECK-NEXT: [[S_LOAD_DWORDX4_IMM4:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM [[S_ADD_U32_3]], 64, 0 :: (invariant load (s128) from %ir.99, addrspace 4)
+ ; CHECK-NEXT: [[S_LOAD_DWORDX4_IMM5:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM [[S_ADD_U32_4]], 64, 0 :: (invariant load (s128) from %ir.107, addrspace 4)
+ ; CHECK-NEXT: [[S_LOAD_DWORDX4_IMM6:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM [[S_ADD_U32_6]], 0, 0 :: (invariant load (s128) from %ir.117, addrspace 4)
+ ; CHECK-NEXT: [[S_LOAD_DWORDX4_IMM7:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM [[S_ADD_U32_7]], 0, 0 :: (invariant load (s128) from %ir.124, addrspace 4)
; CHECK-NEXT: [[BUFFER_LOAD_FORMAT_X_IDXEN2:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_FORMAT_X_IDXEN [[V_MOV_B32_e32_]], [[S_LOAD_DWORDX4_IMM2]], 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8)
- ; CHECK-NEXT: [[S_BUFFER_LOAD_DWORD_SGPR_IMM4:%[0-9]+]]:sreg_32_xm0_xexec = S_BUFFER_LOAD_DWORD_SGPR_IMM undef %362:sgpr_128, [[S_ADD_I32_]], 0, 0 :: (dereferenceable invariant load (s32))
- ; CHECK-NEXT: [[S_BUFFER_LOAD_DWORD_SGPR_IMM5:%[0-9]+]]:sreg_32_xm0_xexec = S_BUFFER_LOAD_DWORD_SGPR_IMM undef %373:sgpr_128, [[S_ADD_I32_1]], 0, 0 :: (dereferenceable invariant load (s32))
+ ; CHECK-NEXT: [[S_BUFFER_LOAD_DWORD_SGPR_IMM4:%[0-9]+]]:sreg_32_xm0_xexec = S_BUFFER_LOAD_DWORD_SGPR_IMM undef %351:sgpr_128, [[S_ADD_I32_]], 0, 0 :: (dereferenceable invariant load (s32))
+ ; CHECK-NEXT: [[S_BUFFER_LOAD_DWORD_SGPR_IMM5:%[0-9]+]]:sreg_32_xm0_xexec = S_BUFFER_LOAD_DWORD_SGPR_IMM undef %362:sgpr_128, [[S_ADD_I32_1]], 0, 0 :: (dereferenceable invariant load (s32))
+ ; CHECK-NEXT: [[BUFFER_LOAD_FORMAT_X_IDXEN3:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_FORMAT_X_IDXEN [[V_MOV_B32_e32_]], [[S_LOAD_DWORDX4_IMM3]], 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8)
; CHECK-NEXT: [[S_ADD_I32_2:%[0-9]+]]:sreg_32 = S_ADD_I32 [[S_BUFFER_LOAD_DWORD_SGPR_IMM]], -98, implicit-def dead $scc
; CHECK-NEXT: [[S_ADD_I32_3:%[0-9]+]]:sreg_32 = S_ADD_I32 [[S_BUFFER_LOAD_DWORD_SGPR_IMM1]], -114, implicit-def dead $scc
; CHECK-NEXT: [[S_ADD_I32_4:%[0-9]+]]:sreg_32 = S_ADD_I32 [[S_BUFFER_LOAD_DWORD_SGPR_IMM2]], -130, implicit-def dead $scc
; CHECK-NEXT: [[S_ADD_I32_5:%[0-9]+]]:sreg_32 = S_ADD_I32 [[S_BUFFER_LOAD_DWORD_IMM2]], -178, implicit-def dead $scc
- ; CHECK-NEXT: undef [[S_ADD_U32_20:%[0-9]+]].sub0:sreg_64 = S_ADD_U32 [[COPY10]], [[S_LSHL_B32_]], implicit-def $scc
- ; CHECK-NEXT: [[S_ADD_U32_20:%[0-9]+]].sub1:sreg_64 = S_ADDC_U32 undef %42:sreg_32, [[S_ASHR_I32_]], implicit-def dead $scc, implicit $scc
- ; CHECK-NEXT: undef [[S_ADD_U32_21:%[0-9]+]].sub0:sreg_64 = S_ADD_U32 [[COPY11]], [[S_LSHL_B32_]], implicit-def $scc
- ; CHECK-NEXT: [[S_ADD_U32_21:%[0-9]+]].sub1:sreg_64 = S_ADDC_U32 undef %39:sreg_32, [[S_ASHR_I32_]], implicit-def dead $scc, implicit $scc
- ; CHECK-NEXT: undef [[S_ADD_U32_22:%[0-9]+]].sub0:sreg_64 = S_ADD_U32 [[COPY11]], [[S_LSHL_B32_1]], implicit-def $scc
- ; CHECK-NEXT: [[S_LOAD_DWORDX4_IMM8:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM [[S_ADD_U32_8]], 0, 0 :: (invariant load (s128) from %ir.121, addrspace 4)
- ; CHECK-NEXT: [[S_ADD_U32_22:%[0-9]+]].sub1:sreg_64 = S_ADDC_U32 undef %39:sreg_32, [[S_ASHR_I32_1]], implicit-def dead $scc, implicit $scc
- ; CHECK-NEXT: undef [[S_ADD_U32_23:%[0-9]+]].sub0:sreg_64 = S_ADD_U32 [[COPY11]], [[S_LSHL_B32_2]], implicit-def $scc
- ; CHECK-NEXT: [[S_ADD_U32_23:%[0-9]+]].sub1:sreg_64 = S_ADDC_U32 undef %39:sreg_32, [[S_ASHR_I32_2]], implicit-def dead $scc, implicit $scc
+ ; CHECK-NEXT: [[S_LOAD_DWORDX4_IMM8:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM [[S_ADD_U32_5]], 0, 0 :: (invariant load (s128) from %ir.112, addrspace 4)
+ ; CHECK-NEXT: undef [[S_ADD_U32_12:%[0-9]+]].sub0:sreg_64 = S_ADD_U32 [[COPY10]], [[S_LSHL_B32_]], implicit-def $scc
+ ; CHECK-NEXT: [[S_ADD_U32_12:%[0-9]+]].sub1:sreg_64 = S_ADDC_U32 undef %42:sreg_32, [[S_ASHR_I32_]], implicit-def dead $scc, implicit $scc
+ ; CHECK-NEXT: undef [[S_ADD_U32_13:%[0-9]+]].sub0:sreg_64 = S_ADD_U32 [[COPY11]], [[S_LSHL_B32_]], implicit-def $scc
+ ; CHECK-NEXT: [[S_ADD_U32_13:%[0-9]+]].sub1:sreg_64 = S_ADDC_U32 undef %39:sreg_32, [[S_ASHR_I32_]], implicit-def dead $scc, implicit $scc
+ ; CHECK-NEXT: undef [[S_ADD_U32_14:%[0-9]+]].sub0:sreg_64 = S_ADD_U32 [[COPY11]], [[S_LSHL_B32_1]], implicit-def $scc
+ ; CHECK-NEXT: [[S_ADD_U32_14:%[0-9]+]].sub1:sreg_64 = S_ADDC_U32 undef %39:sreg_32, [[S_ASHR_I32_1]], implicit-def dead $scc, implicit $scc
+ ; CHECK-NEXT: undef [[S_ADD_U32_15:%[0-9]+]].sub0:sreg_64 = S_ADD_U32 [[COPY11]], [[S_LSHL_B32_2]], implicit-def $scc
+ ; CHECK-NEXT: [[S_ADD_U32_15:%[0-9]+]].sub1:sreg_64 = S_ADDC_U32 undef %39:sreg_32, [[S_ASHR_I32_2]], implicit-def dead $scc, implicit $scc
; CHECK-NEXT: [[S_LSHL_B32_3:%[0-9]+]]:sreg_32 = S_LSHL_B32 [[COPY12]], 4, implicit-def dead $scc
- ; CHECK-NEXT: [[BUFFER_LOAD_FORMAT_X_IDXEN3:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_FORMAT_X_IDXEN [[V_MOV_B32_e32_]], [[S_LOAD_DWORDX4_IMM3]], 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8)
- ; CHECK-NEXT: [[S_ADD_I32_6:%[0-9]+]]:sreg_32 = S_ADD_I32 [[S_LSHL_B32_3]], 16, implicit-def dead $scc
- ; CHECK-NEXT: [[S_BUFFER_LOAD_DWORD_SGPR_IMM6:%[0-9]+]]:sreg_32_xm0_xexec = S_BUFFER_LOAD_DWORD_SGPR_IMM undef %394:sgpr_128, [[S_ADD_I32_6]], 0, 0 :: (dereferenceable invariant load (s32))
; CHECK-NEXT: [[BUFFER_LOAD_FORMAT_X_IDXEN4:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_FORMAT_X_IDXEN [[V_MOV_B32_e32_]], [[S_LOAD_DWORDX4_IMM4]], 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8)
- ; CHECK-NEXT: [[S_LOAD_DWORDX4_IMM9:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM [[COPY7]], 224, 0 :: (invariant load (s128) from %ir.126, addrspace 4)
- ; CHECK-NEXT: [[S_LOAD_DWORDX4_IMM10:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM [[S_ADD_U32_10]], 0, 0 :: (invariant load (s128) from %ir.127, addrspace 4)
+ ; CHECK-NEXT: [[S_ADD_I32_6:%[0-9]+]]:sreg_32 = S_ADD_I32 [[S_LSHL_B32_3]], 16, implicit-def dead $scc
+ ; CHECK-NEXT: [[S_BUFFER_LOAD_DWORD_SGPR_IMM6:%[0-9]+]]:sreg_32_xm0_xexec = S_BUFFER_LOAD_DWORD_SGPR_IMM undef %383:sgpr_128, [[S_ADD_I32_6]], 0, 0 :: (dereferenceable invariant load (s32))
; CHECK-NEXT: [[BUFFER_LOAD_FORMAT_X_IDXEN5:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_FORMAT_X_IDXEN [[V_MOV_B32_e32_]], [[S_LOAD_DWORDX4_IMM5]], 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8)
- ; CHECK-NEXT: [[S_LOAD_DWORDX4_IMM11:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM [[S_ADD_U32_11]], 0, 0 :: (invariant load (s128) from %ir.132, addrspace 4)
- ; CHECK-NEXT: [[S_LOAD_DWORDX4_IMM12:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM [[S_ADD_U32_12]], 0, 0 :: (invariant load (s128) from %ir.137, addrspace 4)
- ; CHECK-NEXT: [[BUFFER_LOAD_FORMAT_X_IDXEN6:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_FORMAT_X_IDXEN [[V_MOV_B32_e32_]], [[S_LOAD_DWORDX4_IMM6]], 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8)
- ; CHECK-NEXT: [[BUFFER_LOAD_FORMAT_X_IDXEN7:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_FORMAT_X_IDXEN [[V_MOV_B32_e32_]], [[S_LOAD_DWORDX4_IMM7]], 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8)
- ; CHECK-NEXT: [[BUFFER_LOAD_FORMAT_X_IDXEN8:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_FORMAT_X_IDXEN [[V_MOV_B32_e32_]], [[S_LOAD_DWORDX4_IMM8]], 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8)
+ ; CHECK-NEXT: [[S_LOAD_DWORDX4_IMM9:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM [[S_ADD_U32_5]], 224, 0 :: (invariant load (s128) from %ir.129, addrspace 4)
+ ; CHECK-NEXT: [[S_LOAD_DWORDX4_IMM10:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM [[COPY7]], 224, 0 :: (invariant load (s128) from %ir.145, addrspace 4)
+ ; CHECK-NEXT: [[S_LOAD_DWORDX4_IMM11:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM [[S_ADD_U32_5]], 576, 0 :: (invariant load (s128) from %ir.150, addrspace 4)
+ ; CHECK-NEXT: [[BUFFER_LOAD_FORMAT_X_IDXEN6:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_FORMAT_X_IDXEN [[V_MOV_B32_e32_]], [[S_LOAD_DWORDX4_IMM8]], 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8)
+ ; CHECK-NEXT: [[S_LOAD_DWORDX4_IMM12:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM [[S_ADD_U32_6]], 224, 0 :: (invariant load (s128) from %ir.134, addrspace 4)
+ ; CHECK-NEXT: [[S_LOAD_DWORDX4_IMM13:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM [[S_ADD_U32_7]], 576, 0 :: (invariant load (s128) from %ir.162, addrspace 4)
+ ; CHECK-NEXT: [[BUFFER_LOAD_FORMAT_X_IDXEN7:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_FORMAT_X_IDXEN [[V_MOV_B32_e32_]], [[S_LOAD_DWORDX4_IMM6]], 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8)
+ ; CHECK-NEXT: [[BUFFER_LOAD_FORMAT_X_IDXEN8:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_FORMAT_X_IDXEN [[V_MOV_B32_e32_]], [[S_LOAD_DWORDX4_IMM7]], 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8)
; CHECK-NEXT: [[S_ADD_I32_7:%[0-9]+]]:sreg_32 = S_ADD_I32 [[S_BUFFER_LOAD_DWORD_SGPR_IMM4]], -217, implicit-def dead $scc
; CHECK-NEXT: [[S_ADD_I32_8:%[0-9]+]]:sreg_32 = S_ADD_I32 [[S_BUFFER_LOAD_DWORD_SGPR_IMM3]], -233, implicit-def dead $scc
; CHECK-NEXT: [[S_ADD_I32_9:%[0-9]+]]:sreg_32 = S_ADD_I32 [[S_BUFFER_LOAD_DWORD_SGPR_IMM5]], -249, implicit-def dead $scc
; CHECK-NEXT: [[S_ADD_I32_10:%[0-9]+]]:sreg_32 = S_ADD_I32 [[S_BUFFER_LOAD_DWORD_IMM3]], -297, implicit-def dead $scc
; CHECK-NEXT: [[S_ADD_I32_11:%[0-9]+]]:sreg_32 = S_ADD_I32 [[S_BUFFER_LOAD_DWORD_SGPR_IMM3]], -313, implicit-def dead $scc
+ ; CHECK-NEXT: [[S_LOAD_DWORDX4_IMM14:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM [[S_ADD_U32_8]], 224, 0 :: (invariant load (s128) from %ir.140, addrspace 4)
; CHECK-NEXT: [[S_ADD_I32_12:%[0-9]+]]:sreg_32 = S_ADD_I32 [[S_BUFFER_LOAD_DWORD_SGPR_IMM3]], -329, implicit-def dead $scc
; CHECK-NEXT: [[S_ADD_I32_13:%[0-9]+]]:sreg_32 = S_ADD_I32 [[S_BUFFER_LOAD_DWORD_SGPR_IMM3]], -345, implicit-def dead $scc
; CHECK-NEXT: [[S_ADD_I32_14:%[0-9]+]]:sreg_32 = S_ADD_I32 [[S_BUFFER_LOAD_DWORD_SGPR_IMM6]], -441, implicit-def dead $scc
- ; CHECK-NEXT: [[S_ADD_U32_24:%[0-9]+]]:sreg_32 = S_ADD_U32 [[COPY2]], 160, implicit-def $scc
- ; CHECK-NEXT: [[S_ADDC_U32_3:%[0-9]+]]:sreg_32 = S_ADDC_U32 undef %36:sreg_32, 0, implicit-def dead $scc, implicit $scc
- ; CHECK-NEXT: undef [[S_ADD_U32_25:%[0-9]+]].sub0:sreg_64 = S_ADD_U32 [[S_ADD_U32_24]], [[S_LSHL_B32_2]], implicit-def $scc
- ; CHECK-NEXT: [[S_ADD_U32_25:%[0-9]+]].sub1:sreg_64 = S_ADDC_U32 [[S_ADDC_U32_3]], [[S_ASHR_I32_2]], implicit-def dead $scc, implicit $scc
+ ; CHECK-NEXT: undef [[S_ADD_U32_16:%[0-9]+]].sub0:sreg_64 = S_ADD_U32 [[COPY2]], [[S_LSHL_B32_2]], implicit-def $scc
+ ; CHECK-NEXT: [[S_ADD_U32_16:%[0-9]+]].sub1:sreg_64 = S_ADDC_U32 undef %36:sreg_32, [[S_ASHR_I32_2]], implicit-def dead $scc, implicit $scc
; CHECK-NEXT: [[S_LSHL_B32_4:%[0-9]+]]:sreg_32 = S_LSHL_B32 [[COPY13]], 4, implicit-def dead $scc
- ; CHECK-NEXT: [[BUFFER_LOAD_FORMAT_X_IDXEN9:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_FORMAT_X_IDXEN [[V_MOV_B32_e32_]], [[S_LOAD_DWORDX4_IMM10]], 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8)
+ ; CHECK-NEXT: [[BUFFER_LOAD_FORMAT_X_IDXEN9:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_FORMAT_X_IDXEN [[V_MOV_B32_e32_]], [[S_LOAD_DWORDX4_IMM9]], 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8)
; CHECK-NEXT: [[S_ASHR_I32_4:%[0-9]+]]:sreg_32_xm0 = S_ASHR_I32 [[S_LSHL_B32_4]], 31, implicit-def dead $scc
- ; CHECK-NEXT: undef [[S_ADD_U32_26:%[0-9]+]].sub0:sreg_64 = S_ADD_U32 [[S_ADD_U32_24]], [[S_LSHL_B32_4]], implicit-def $scc
- ; CHECK-NEXT: [[S_ADD_U32_26:%[0-9]+]].sub1:sreg_64 = S_ADDC_U32 [[S_ADDC_U32_3]], [[S_ASHR_I32_4]], implicit-def dead $scc, implicit $scc
- ; CHECK-NEXT: [[S_ADD_U32_27:%[0-9]+]]:sreg_32 = S_ADD_U32 [[COPY]].sub0, 168, implicit-def $scc
- ; CHECK-NEXT: [[S_ADDC_U32_4:%[0-9]+]]:sreg_32 = S_ADDC_U32 undef %57:sreg_32, 0, implicit-def dead $scc, implicit $scc
- ; CHECK-NEXT: [[S_LOAD_DWORDX4_IMM13:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM [[S_ADD_U32_14]], 0, 0 :: (invariant load (s128) from %ir.147, addrspace 4)
+ ; CHECK-NEXT: undef [[S_ADD_U32_17:%[0-9]+]].sub0:sreg_64 = S_ADD_U32 [[COPY2]], [[S_LSHL_B32_4]], implicit-def $scc
+ ; CHECK-NEXT: [[S_ADD_U32_17:%[0-9]+]].sub1:sreg_64 = S_ADDC_U32 undef %36:sreg_32, [[S_ASHR_I32_4]], implicit-def dead $scc, implicit $scc
; CHECK-NEXT: [[S_LSHL_B32_5:%[0-9]+]]:sreg_32 = S_LSHL_B32 [[COPY5]], 3, implicit-def dead $scc
- ; CHECK-NEXT: [[BUFFER_LOAD_FORMAT_X_IDXEN10:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_FORMAT_X_IDXEN [[V_MOV_B32_e32_]], [[S_LOAD_DWORDX4_IMM11]], 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8)
+ ; CHECK-NEXT: [[BUFFER_LOAD_FORMAT_X_IDXEN10:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_FORMAT_X_IDXEN [[V_MOV_B32_e32_]], [[S_LOAD_DWORDX4_IMM12]], 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8)
; CHECK-NEXT: [[S_ASHR_I32_5:%[0-9]+]]:sreg_32_xm0 = S_ASHR_I32 [[S_LSHL_B32_5]], 31, implicit-def dead $scc
- ; CHECK-NEXT: undef [[S_ADD_U32_28:%[0-9]+]].sub0:sreg_64 = S_ADD_U32 [[S_ADD_U32_27]], [[S_LSHL_B32_5]], implicit-def $scc
- ; CHECK-NEXT: [[S_ADD_U32_28:%[0-9]+]].sub1:sreg_64 = S_ADDC_U32 [[S_ADDC_U32_4]], [[S_ASHR_I32_5]], implicit-def dead $scc, implicit $scc
- ; CHECK-NEXT: [[S_LOAD_DWORD_IMM:%[0-9]+]]:sreg_32_xm0_xexec = S_LOAD_DWORD_IMM [[S_ADD_U32_28]], 0, 0 :: (invariant load (s32) from %ir.269, align 8, addrspace 4)
- ; CHECK-NEXT: [[S_LOAD_DWORDX4_IMM14:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM [[S_ADD_U32_15]], 0, 0 :: (invariant load (s128) from %ir.154, addrspace 4)
- ; CHECK-NEXT: [[BUFFER_LOAD_FORMAT_X_IDXEN11:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_FORMAT_X_IDXEN [[V_MOV_B32_e32_]], [[S_LOAD_DWORDX4_IMM12]], 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8)
- ; CHECK-NEXT: [[S_LOAD_DWORDX4_IMM15:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM [[S_ADD_U32_16]], 0, 0 :: (invariant load (s128) from %ir.159, addrspace 4)
- ; CHECK-NEXT: [[BUFFER_LOAD_FORMAT_X_IDXEN12:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_FORMAT_X_IDXEN [[V_MOV_B32_e32_]], [[S_LOAD_DWORDX4_IMM9]], 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8)
- ; CHECK-NEXT: [[BUFFER_LOAD_FORMAT_X_IDXEN13:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_FORMAT_X_IDXEN [[V_MOV_B32_e32_]], [[S_LOAD_DWORDX4_IMM13]], 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8)
+ ; CHECK-NEXT: undef [[S_ADD_U32_18:%[0-9]+]].sub0:sreg_64 = S_ADD_U32 [[COPY]].sub0, [[S_LSHL_B32_5]], implicit-def $scc
+ ; CHECK-NEXT: [[S_ADD_U32_18:%[0-9]+]].sub1:sreg_64 = S_ADDC_U32 undef %57:sreg_32, [[S_ASHR_I32_5]], implicit-def dead $scc, implicit $scc
+ ; CHECK-NEXT: [[S_LOAD_DWORD_IMM:%[0-9]+]]:sreg_32_xm0_xexec = S_LOAD_DWORD_IMM [[S_ADD_U32_18]], 168, 0 :: (invariant load (s32) from %ir.273, align 8, addrspace 4)
+ ; CHECK-NEXT: [[S_LOAD_DWORDX4_IMM15:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM [[S_ADD_U32_8]], 576, 0 :: (invariant load (s128) from %ir.157, addrspace 4)
+ ; CHECK-NEXT: [[BUFFER_LOAD_FORMAT_X_IDXEN11:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_FORMAT_X_IDXEN [[V_MOV_B32_e32_]], [[S_LOAD_DWORDX4_IMM14]], 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8)
+ ; CHECK-NEXT: [[BUFFER_LOAD_FORMAT_X_IDXEN12:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_FORMAT_X_IDXEN [[V_MOV_B32_e32_]], [[S_LOAD_DWORDX4_IMM10]], 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8)
+ ; CHECK-NEXT: [[BUFFER_LOAD_FORMAT_X_IDXEN13:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_FORMAT_X_IDXEN [[V_MOV_B32_e32_]], [[S_LOAD_DWORDX4_IMM11]], 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8)
; CHECK-NEXT: [[S_LOAD_DWORDX2_IMM:%[0-9]+]].sub3:sgpr_128 = S_MOV_B32 553734060
; CHECK-NEXT: [[S_LOAD_DWORDX2_IMM:%[0-9]+]].sub2:sgpr_128 = S_MOV_B32 -1
; CHECK-NEXT: [[COPY15:%[0-9]+]]:sgpr_128 = COPY [[S_LOAD_DWORDX2_IMM]]
- ; CHECK-NEXT: [[S_LOAD_DWORDX4_IMM16:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM [[S_ADD_U32_17]], 0, 0 :: (invariant load (s128) from %ir.167, addrspace 4)
+ ; CHECK-NEXT: [[S_LOAD_DWORDX4_IMM16:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM [[S_ADD_U32_9]], 0, 0 :: (invariant load (s128) from %ir.170, addrspace 4)
; CHECK-NEXT: [[COPY15:%[0-9]+]].sub1:sgpr_128 = COPY [[S_MOV_B32_]].sub1
; CHECK-NEXT: [[COPY15:%[0-9]+]].sub0:sgpr_128 = COPY [[S_LOAD_DWORD_IMM]]
; CHECK-NEXT: [[S_BUFFER_LOAD_DWORD_IMM4:%[0-9]+]]:sreg_32_xm0_xexec = S_BUFFER_LOAD_DWORD_IMM [[COPY15]], 0, 0 :: (dereferenceable invariant load (s32))
- ; CHECK-NEXT: [[BUFFER_LOAD_FORMAT_X_IDXEN14:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_FORMAT_X_IDXEN [[V_MOV_B32_e32_]], [[S_LOAD_DWORDX4_IMM14]], 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8)
- ; CHECK-NEXT: [[BUFFER_LOAD_FORMAT_X_IDXEN15:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_FORMAT_X_IDXEN [[V_MOV_B32_e32_]], [[S_LOAD_DWORDX4_IMM15]], 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8)
- ; CHECK-NEXT: [[S_LOAD_DWORDX4_IMM17:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM [[S_ADD_U32_18]], 0, 0 :: (invariant load (s128) from %ir.175, addrspace 4)
- ; CHECK-NEXT: [[S_LOAD_DWORDX4_IMM18:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM [[S_ADD_U32_19]], 0, 0 :: (invariant load (s128) from %ir.180, addrspace 4)
+ ; CHECK-NEXT: [[BUFFER_LOAD_FORMAT_X_IDXEN14:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_FORMAT_X_IDXEN [[V_MOV_B32_e32_]], [[S_LOAD_DWORDX4_IMM15]], 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8)
+ ; CHECK-NEXT: [[BUFFER_LOAD_FORMAT_X_IDXEN15:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_FORMAT_X_IDXEN [[V_MOV_B32_e32_]], [[S_LOAD_DWORDX4_IMM13]], 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8)
+ ; CHECK-NEXT: [[S_LOAD_DWORDX4_IMM17:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM [[S_ADD_U32_10]], 0, 0 :: (invariant load (s128) from %ir.178, addrspace 4)
+ ; CHECK-NEXT: [[S_LOAD_DWORDX4_IMM18:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM [[S_ADD_U32_11]], 0, 0 :: (invariant load (s128) from %ir.183, addrspace 4)
; CHECK-NEXT: [[BUFFER_LOAD_FORMAT_X_IDXEN16:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_FORMAT_X_IDXEN [[V_MOV_B32_e32_]], [[S_LOAD_DWORDX4_IMM16]], 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8)
; CHECK-NEXT: [[S_LSHL_B32_6:%[0-9]+]]:sreg_32 = S_LSHL_B32 [[COPY4]], 3, implicit-def dead $scc
; CHECK-NEXT: [[BUFFER_LOAD_DWORD_OFFSET1:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_OFFSET [[S_LOAD_DWORDX4_IMM1]], 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8)
; CHECK-NEXT: [[S_ASHR_I32_6:%[0-9]+]]:sreg_32_xm0 = S_ASHR_I32 [[S_LSHL_B32_6]], 31, implicit-def dead $scc
; CHECK-NEXT: [[S_ADD_I32_15:%[0-9]+]]:sreg_32 = S_ADD_I32 [[S_BUFFER_LOAD_DWORD_IMM4]], -467, implicit-def dead $scc
- ; CHECK-NEXT: undef [[S_ADD_U32_29:%[0-9]+]].sub0:sreg_64 = S_ADD_U32 [[S_ADD_U32_27]], [[S_LSHL_B32_6]], implicit-def $scc
- ; CHECK-NEXT: [[S_ADD_U32_29:%[0-9]+]].sub1:sreg_64 = S_ADDC_U32 [[S_ADDC_U32_4]], [[S_ASHR_I32_6]], implicit-def dead $scc, implicit $scc
- ; CHECK-NEXT: [[S_LOAD_DWORDX2_IMM1:%[0-9]+]]:sreg_64_xexec = S_LOAD_DWORDX2_IMM [[S_ADD_U32_29]], 0, 0 :: (invariant load (s64) from %ir.277, addrspace 4)
+ ; CHECK-NEXT: undef [[S_ADD_U32_19:%[0-9]+]].sub0:sreg_64 = S_ADD_U32 [[COPY]].sub0, [[S_LSHL_B32_6]], implicit-def $scc
+ ; CHECK-NEXT: [[S_ADD_U32_19:%[0-9]+]].sub1:sreg_64 = S_ADDC_U32 undef %57:sreg_32, [[S_ASHR_I32_6]], implicit-def dead $scc, implicit $scc
+ ; CHECK-NEXT: [[S_LOAD_DWORDX2_IMM1:%[0-9]+]]:sreg_64_xexec = S_LOAD_DWORDX2_IMM [[S_ADD_U32_19]], 168, 0 :: (invariant load (s64) from %ir.282, addrspace 4)
; CHECK-NEXT: [[BUFFER_LOAD_DWORD_OFFSET2:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_OFFSET [[S_LOAD_DWORDX4_IMM17]], 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8)
; CHECK-NEXT: [[BUFFER_LOAD_DWORD_OFFSET3:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_OFFSET [[S_LOAD_DWORDX4_IMM18]], 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8)
- ; CHECK-NEXT: [[S_LOAD_DWORDX4_IMM19:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM [[S_ADD_U32_20]], 0, 0 :: (invariant load (s128) from %ir.202, addrspace 4)
- ; CHECK-NEXT: [[S_LOAD_DWORDX4_IMM20:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM [[S_ADD_U32_21]], 0, 0 :: (invariant load (s128) from %ir.208, addrspace 4)
+ ; CHECK-NEXT: [[S_LOAD_DWORDX4_IMM19:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM [[S_ADD_U32_12]], 0, 0 :: (invariant load (s128) from %ir.205, addrspace 4)
+ ; CHECK-NEXT: [[S_LOAD_DWORDX4_IMM20:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM [[S_ADD_U32_13]], 0, 0 :: (invariant load (s128) from %ir.211, addrspace 4)
; CHECK-NEXT: [[COPY16:%[0-9]+]]:sgpr_128 = COPY [[S_LOAD_DWORDX2_IMM]]
- ; CHECK-NEXT: [[S_LOAD_DWORDX4_IMM21:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM [[S_ADD_U32_22]], 0, 0 :: (invariant load (s128) from %ir.213, addrspace 4)
+ ; CHECK-NEXT: [[S_LOAD_DWORDX4_IMM21:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM [[S_ADD_U32_14]], 0, 0 :: (invariant load (s128) from %ir.216, addrspace 4)
+ ; CHECK-NEXT: [[S_LOAD_DWORDX4_IMM22:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM [[S_ADD_U32_15]], 0, 0 :: (invariant load (s128) from %ir.221, addrspace 4)
; CHECK-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32 = S_AND_B32 [[S_LOAD_DWORDX2_IMM1]].sub1, 65535, implicit-def dead $scc
; CHECK-NEXT: [[COPY16:%[0-9]+]].sub0:sgpr_128 = COPY [[S_LOAD_DWORDX2_IMM1]].sub0
; CHECK-NEXT: [[COPY16:%[0-9]+]].sub1:sgpr_128 = COPY [[S_AND_B32_]]
; CHECK-NEXT: [[S_BUFFER_LOAD_DWORD_IMM5:%[0-9]+]]:sreg_32_xm0_xexec = S_BUFFER_LOAD_DWORD_IMM [[COPY16]], 0, 0 :: (dereferenceable invariant load (s32))
- ; CHECK-NEXT: [[S_LOAD_DWORDX4_IMM22:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM [[S_ADD_U32_23]], 0, 0 :: (invariant load (s128) from %ir.218, addrspace 4)
; CHECK-NEXT: [[BUFFER_LOAD_FORMAT_X_IDXEN17:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_FORMAT_X_IDXEN [[V_MOV_B32_e32_]], [[S_LOAD_DWORDX4_IMM19]], 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8)
; CHECK-NEXT: [[BUFFER_LOAD_FORMAT_X_IDXEN18:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_FORMAT_X_IDXEN [[V_MOV_B32_e32_]], [[S_LOAD_DWORDX4_IMM20]], 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8)
- ; CHECK-NEXT: [[S_LSHL_B32_7:%[0-9]+]]:sreg_32 = S_LSHL_B32 [[COPY3]], 3, implicit-def dead $scc
; CHECK-NEXT: [[BUFFER_LOAD_FORMAT_X_IDXEN19:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_FORMAT_X_IDXEN [[V_MOV_B32_e32_]], [[S_LOAD_DWORDX4_IMM21]], 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8)
+ ; CHECK-NEXT: [[S_LSHL_B32_7:%[0-9]+]]:sreg_32 = S_LSHL_B32 [[COPY3]], 3, implicit-def dead $scc
+ ; CHECK-NEXT: [[BUFFER_LOAD_FORMAT_X_IDXEN20:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_FORMAT_X_IDXEN [[V_MOV_B32_e32_]], [[S_LOAD_DWORDX4_IMM22]], 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8)
; CHECK-NEXT: [[S_ASHR_I32_7:%[0-9]+]]:sreg_32_xm0 = S_ASHR_I32 [[S_LSHL_B32_7]], 31, implicit-def dead $scc
; CHECK-NEXT: [[S_ADD_I32_16:%[0-9]+]]:sreg_32 = S_ADD_I32 [[S_BUFFER_LOAD_DWORD_IMM5]], -468, implicit-def dead $scc
- ; CHECK-NEXT: undef [[S_ADD_U32_30:%[0-9]+]].sub0:sreg_64 = S_ADD_U32 [[S_ADD_U32_27]], [[S_LSHL_B32_7]], implicit-def $scc
- ; CHECK-NEXT: [[S_ADD_U32_30:%[0-9]+]].sub1:sreg_64 = S_ADDC_U32 [[S_ADDC_U32_4]], [[S_ASHR_I32_7]], implicit-def dead $scc, implicit $scc
- ; CHECK-NEXT: [[BUFFER_LOAD_FORMAT_X_IDXEN20:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_FORMAT_X_IDXEN [[V_MOV_B32_e32_]], [[S_LOAD_DWORDX4_IMM22]], 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8)
- ; CHECK-NEXT: [[S_LOAD_DWORDX2_IMM2:%[0-9]+]]:sreg_64_xexec = S_LOAD_DWORDX2_IMM [[S_ADD_U32_30]], 0, 0 :: (invariant load (s64) from %ir.287, addrspace 4)
+ ; CHECK-NEXT: undef [[S_ADD_U32_20:%[0-9]+]].sub0:sreg_64 = S_ADD_U32 [[COPY]].sub0, [[S_LSHL_B32_7]], implicit-def $scc
+ ; CHECK-NEXT: [[S_ADD_U32_20:%[0-9]+]].sub1:sreg_64 = S_ADDC_U32 undef %57:sreg_32, [[S_ASHR_I32_7]], implicit-def dead $scc, implicit $scc
+ ; CHECK-NEXT: [[S_LOAD_DWORDX2_IMM2:%[0-9]+]]:sreg_64_xexec = S_LOAD_DWORDX2_IMM [[S_ADD_U32_20]], 168, 0 :: (invariant load (s64) from %ir.293, addrspace 4)
; CHECK-NEXT: [[COPY17:%[0-9]+]]:sgpr_128 = COPY [[S_LOAD_DWORDX2_IMM]]
; CHECK-NEXT: [[S_AND_B32_1:%[0-9]+]]:sreg_32 = S_AND_B32 [[S_LOAD_DWORDX2_IMM2]].sub1, 65535, implicit-def dead $scc
; CHECK-NEXT: [[COPY17:%[0-9]+]].sub0:sgpr_128 = COPY [[S_LOAD_DWORDX2_IMM2]].sub0
; CHECK-NEXT: [[COPY17:%[0-9]+]].sub1:sgpr_128 = COPY [[S_AND_B32_1]]
; CHECK-NEXT: [[S_BUFFER_LOAD_DWORD_IMM6:%[0-9]+]]:sreg_32_xm0_xexec = S_BUFFER_LOAD_DWORD_IMM [[COPY17]], 0, 0 :: (dereferenceable invariant load (s32))
- ; CHECK-NEXT: [[S_LOAD_DWORDX4_IMM23:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM [[S_ADD_U32_25]], 0, 0 :: (invariant load (s128) from %ir.253, addrspace 4)
- ; CHECK-NEXT: [[S_LOAD_DWORD_IMM1:%[0-9]+]]:sreg_32_xm0_xexec = S_LOAD_DWORD_IMM undef %484:sreg_64, 0, 0 :: (invariant load (s32) from `ptr addrspace(4) undef`, addrspace 4)
- ; CHECK-NEXT: KILL [[S_ADD_U32_25]].sub0, [[S_ADD_U32_25]].sub1
- ; CHECK-NEXT: KILL undef %484:sreg_64
+ ; CHECK-NEXT: [[S_LOAD_DWORDX4_IMM23:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM [[S_ADD_U32_16]], 160, 0 :: (invariant load (s128) from %ir.256, addrspace 4)
+ ; CHECK-NEXT: [[S_LOAD_DWORD_IMM1:%[0-9]+]]:sreg_32_xm0_xexec = S_LOAD_DWORD_IMM undef %469:sreg_64, 0, 0 :: (invariant load (s32) from `ptr addrspace(4) undef`, addrspace 4)
+ ; CHECK-NEXT: KILL [[S_ADD_U32_16]].sub0, [[S_ADD_U32_16]].sub1
+ ; CHECK-NEXT: KILL undef %469:sreg_64
; CHECK-NEXT: KILL [[COPY17]].sub0_sub1_sub2, [[COPY17]].sub3
; CHECK-NEXT: [[S_LSHL_B32_8:%[0-9]+]]:sreg_32 = S_LSHL_B32 [[COPY14]], 3, implicit-def dead $scc
- ; CHECK-NEXT: [[S_LOAD_DWORDX4_IMM24:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM [[S_ADD_U32_26]], 0, 0 :: (invariant load (s128) from %ir.261, addrspace 4)
+ ; CHECK-NEXT: [[S_LOAD_DWORDX4_IMM24:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM [[S_ADD_U32_17]], 160, 0 :: (invariant load (s128) from %ir.265, addrspace 4)
; CHECK-NEXT: [[S_ASHR_I32_8:%[0-9]+]]:sreg_32_xm0 = S_ASHR_I32 [[S_LSHL_B32_8]], 31, implicit-def dead $scc
; CHECK-NEXT: [[S_ADD_I32_17:%[0-9]+]]:sreg_32 = S_ADD_I32 [[S_BUFFER_LOAD_DWORD_IMM6]], -469, implicit-def dead $scc
- ; CHECK-NEXT: undef [[S_ADD_U32_31:%[0-9]+]].sub0:sreg_64 = S_ADD_U32 [[S_ADD_U32_27]], [[S_LSHL_B32_8]], implicit-def $scc
- ; CHECK-NEXT: [[S_ADD_U32_31:%[0-9]+]].sub1:sreg_64 = S_ADDC_U32 [[S_ADDC_U32_4]], [[S_ASHR_I32_8]], implicit-def dead $scc, implicit $scc
- ; CHECK-NEXT: [[S_LOAD_DWORD_IMM2:%[0-9]+]]:sreg_32_xm0_xexec = S_LOAD_DWORD_IMM [[S_ADD_U32_31]], 0, 0 :: (invariant load (s32) from %ir.298, align 8, addrspace 4)
+ ; CHECK-NEXT: undef [[S_ADD_U32_21:%[0-9]+]].sub0:sreg_64 = S_ADD_U32 [[COPY]].sub0, [[S_LSHL_B32_8]], implicit-def $scc
+ ; CHECK-NEXT: [[S_ADD_U32_21:%[0-9]+]].sub1:sreg_64 = S_ADDC_U32 undef %57:sreg_32, [[S_ASHR_I32_8]], implicit-def dead $scc, implicit $scc
+ ; CHECK-NEXT: [[S_LOAD_DWORD_IMM2:%[0-9]+]]:sreg_32_xm0_xexec = S_LOAD_DWORD_IMM [[S_ADD_U32_21]], 168, 0 :: (invariant load (s32) from %ir.305, align 8, addrspace 4)
; CHECK-NEXT: [[BUFFER_LOAD_FORMAT_X_IDXEN21:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_FORMAT_X_IDXEN [[V_MOV_B32_e32_]], [[S_LOAD_DWORDX4_IMM23]], 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8)
; CHECK-NEXT: [[BUFFER_LOAD_FORMAT_X_IDXEN22:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_FORMAT_X_IDXEN [[V_MOV_B32_e32_]], [[S_LOAD_DWORDX4_IMM24]], 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8)
- ; CHECK-NEXT: KILL [[S_LOAD_DWORDX4_IMM24]]
; CHECK-NEXT: KILL [[S_LOAD_DWORDX4_IMM23]]
+ ; CHECK-NEXT: KILL [[S_LOAD_DWORDX4_IMM24]]
; CHECK-NEXT: [[S_AND_B32_2:%[0-9]+]]:sreg_32 = S_AND_B32 [[S_LOAD_DWORD_IMM1]], 65535, implicit-def dead $scc
; CHECK-NEXT: [[COPY18:%[0-9]+]]:sgpr_128 = COPY [[S_LOAD_DWORDX2_IMM]]
; CHECK-NEXT: [[COPY18:%[0-9]+]].sub1:sgpr_128 = COPY [[S_AND_B32_2]]
@@ -241,24 +224,22 @@ define amdgpu_gs void @_amdgpu_gs_main(i32 inreg %primShaderTableAddrLow, <31 x
; CHECK-NEXT: [[S_ADD_I32_21:%[0-9]+]]:sreg_32 = S_ADD_I32 [[S_BUFFER_LOAD_DWORD_SGPR_IMM3]], -507, implicit-def dead $scc
; CHECK-NEXT: [[S_ADD_I32_22:%[0-9]+]]:sreg_32 = S_ADD_I32 [[S_BUFFER_LOAD_DWORD_SGPR_IMM3]], -539, implicit-def dead $scc
; CHECK-NEXT: [[S_ADD_I32_23:%[0-9]+]]:sreg_32 = S_ADD_I32 [[S_BUFFER_LOAD_DWORD_IMM7]], -473, implicit-def dead $scc
- ; CHECK-NEXT: [[S_ADD_U32_32:%[0-9]+]]:sreg_32 = S_ADD_U32 [[COPY1]], 96, implicit-def $scc
- ; CHECK-NEXT: [[S_ADDC_U32_5:%[0-9]+]]:sreg_32 = S_ADDC_U32 undef %33:sreg_32, 0, implicit-def dead $scc, implicit $scc
- ; CHECK-NEXT: undef [[S_ADD_U32_33:%[0-9]+]].sub0:sreg_64 = S_ADD_U32 [[S_ADD_U32_32]], [[S_LSHL_B32_]], implicit-def $scc
- ; CHECK-NEXT: [[S_ADD_U32_33:%[0-9]+]].sub1:sreg_64 = S_ADDC_U32 [[S_ADDC_U32_5]], [[S_ASHR_I32_]], implicit-def dead $scc, implicit $scc
- ; CHECK-NEXT: [[S_LOAD_DWORDX4_IMM25:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM [[S_ADD_U32_33]], 0, 0 :: (invariant load (s128) from %ir.316, addrspace 4)
- ; CHECK-NEXT: undef [[S_ADD_U32_34:%[0-9]+]].sub0:sreg_64 = S_ADD_U32 [[S_ADD_U32_32]], [[S_LSHL_B32_1]], implicit-def $scc
- ; CHECK-NEXT: [[S_ADD_U32_34:%[0-9]+]].sub1:sreg_64 = S_ADDC_U32 [[S_ADDC_U32_5]], [[S_ASHR_I32_1]], implicit-def dead $scc, implicit $scc
- ; CHECK-NEXT: [[S_LOAD_DWORDX4_IMM26:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM [[S_ADD_U32_34]], 0, 0 :: (invariant load (s128) from %ir.321, addrspace 4)
- ; CHECK-NEXT: undef [[S_ADD_U32_35:%[0-9]+]].sub0:sreg_64 = S_ADD_U32 [[S_ADD_U32_32]], [[S_LSHL_B32_2]], implicit-def $scc
- ; CHECK-NEXT: [[S_ADD_U32_35:%[0-9]+]].sub1:sreg_64 = S_ADDC_U32 [[S_ADDC_U32_5]], [[S_ASHR_I32_2]], implicit-def dead $scc, implicit $scc
- ; CHECK-NEXT: [[S_LOAD_DWORDX4_IMM27:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM [[S_ADD_U32_35]], 0, 0 :: (invariant load (s128) from %ir.326, addrspace 4)
+ ; CHECK-NEXT: undef [[S_ADD_U32_22:%[0-9]+]].sub0:sreg_64 = S_ADD_U32 [[COPY1]], [[S_LSHL_B32_]], implicit-def $scc
+ ; CHECK-NEXT: [[S_ADD_U32_22:%[0-9]+]].sub1:sreg_64 = S_ADDC_U32 undef %33:sreg_32, [[S_ASHR_I32_]], implicit-def dead $scc, implicit $scc
+ ; CHECK-NEXT: [[S_LOAD_DWORDX4_IMM25:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM [[S_ADD_U32_22]], 96, 0 :: (invariant load (s128) from %ir.323, addrspace 4)
+ ; CHECK-NEXT: undef [[S_ADD_U32_23:%[0-9]+]].sub0:sreg_64 = S_ADD_U32 [[COPY1]], [[S_LSHL_B32_1]], implicit-def $scc
+ ; CHECK-NEXT: [[S_ADD_U32_23:%[0-9]+]].sub1:sreg_64 = S_ADDC_U32 undef %33:sreg_32, [[S_ASHR_I32_1]], implicit-def dead $scc, implicit $scc
+ ; CHECK-NEXT: [[S_LOAD_DWORDX4_IMM26:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM [[S_ADD_U32_23]], 96, 0 :: (invariant load (s128) from %ir.329, addrspace 4)
+ ; CHECK-NEXT: undef [[S_ADD_U32_24:%[0-9]+]].sub0:sreg_64 = S_ADD_U32 [[COPY1]], [[S_LSHL_B32_2]], implicit-def $scc
+ ; CHECK-NEXT: [[S_ADD_U32_24:%[0-9]+]].sub1:sreg_64 = S_ADDC_U32 undef %33:sreg_32, [[S_ASHR_I32_2]], implicit-def dead $scc, implicit $scc
+ ; CHECK-NEXT: [[S_LOAD_DWORDX4_IMM27:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM [[S_ADD_U32_24]], 96, 0 :: (invariant load (s128) from %ir.335, addrspace 4)
; CHECK-NEXT: [[BUFFER_LOAD_FORMAT_X_IDXEN23:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_FORMAT_X_IDXEN [[V_MOV_B32_e32_]], [[S_LOAD_DWORDX4_IMM25]], 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8)
; CHECK-NEXT: [[BUFFER_LOAD_FORMAT_X_IDXEN24:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_FORMAT_X_IDXEN [[V_MOV_B32_e32_]], [[S_LOAD_DWORDX4_IMM26]], 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8)
; CHECK-NEXT: [[BUFFER_LOAD_FORMAT_X_IDXEN25:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_FORMAT_X_IDXEN [[V_MOV_B32_e32_]], [[S_LOAD_DWORDX4_IMM27]], 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8)
- ; CHECK-NEXT: KILL [[S_LOAD_DWORDX4_IMM25]]
- ; CHECK-NEXT: KILL [[V_MOV_B32_e32_]]
; CHECK-NEXT: KILL [[S_LOAD_DWORDX4_IMM26]]
+ ; CHECK-NEXT: KILL [[V_MOV_B32_e32_]]
; CHECK-NEXT: KILL [[S_LOAD_DWORDX4_IMM27]]
+ ; CHECK-NEXT: KILL [[S_LOAD_DWORDX4_IMM25]]
; CHECK-NEXT: [[V_ADD_U32_e64_:%[0-9]+]]:vgpr_32 = V_ADD_U32_e64 -2, [[BUFFER_LOAD_FORMAT_X_IDXEN]], 0, implicit $exec
; CHECK-NEXT: [[V_ADD_U32_e64_1:%[0-9]+]]:vgpr_32 = V_ADD_U32_e64 -1, [[BUFFER_LOAD_FORMAT_X_IDXEN1]], 0, implicit $exec
; CHECK-NEXT: [[V_ADD_U32_e64_2:%[0-9]+]]:vgpr_32 = V_ADD_U32_e64 -3, [[BUFFER_LOAD_FORMAT_X_IDXEN]], 0, implicit $exec
@@ -370,13 +351,13 @@ define amdgpu_gs void @_amdgpu_gs_main(i32 inreg %primShaderTableAddrLow, <31 x
; CHECK-NEXT: [[V_OR_B32_e64_64:%[0-9]+]]:vgpr_32 = V_OR_B32_e64 [[V_OR_B32_e64_63]], [[V_ADD_U32_e64_28]], implicit $exec
; CHECK-NEXT: [[V_ADD_U32_e64_30:%[0-9]+]]:vgpr_32 = V_ADD_U32_e64 -593, [[BUFFER_LOAD_FORMAT_X_IDXEN]], 0, implicit $exec
; CHECK-NEXT: [[V_OR_B32_e64_65:%[0-9]+]]:vgpr_32 = V_OR_B32_e64 [[V_OR_B32_e64_64]], [[V_ADD_U32_e64_29]], implicit $exec
- ; CHECK-NEXT: [[S_LOAD_DWORDX8_IMM:%[0-9]+]]:sgpr_256 = S_LOAD_DWORDX8_IMM undef %559:sreg_64, 0, 0 :: (invariant load (s256) from `ptr addrspace(4) undef`, addrspace 4)
+ ; CHECK-NEXT: [[S_LOAD_DWORDX8_IMM:%[0-9]+]]:sgpr_256 = S_LOAD_DWORDX8_IMM undef %542:sreg_64, 0, 0 :: (invariant load (s256) from `ptr addrspace(4) undef`, addrspace 4)
; CHECK-NEXT: [[V_OR_B32_e64_66:%[0-9]+]]:vgpr_32 = V_OR_B32_e64 [[V_OR_B32_e64_65]], [[V_ADD_U32_e64_30]], implicit $exec
; CHECK-NEXT: [[S_ADD_I32_24:%[0-9]+]]:sreg_32 = S_ADD_I32 [[S_BUFFER_LOAD_DWORD_IMM8]], -594, implicit-def dead $scc
; CHECK-NEXT: [[V_OR_B32_e64_67:%[0-9]+]]:vgpr_32 = V_OR_B32_e64 [[S_ADD_I32_24]], [[V_OR_B32_e64_66]], implicit $exec
; CHECK-NEXT: [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U32_e64 0, [[V_OR_B32_e64_67]], implicit $exec
; CHECK-NEXT: undef [[V_CNDMASK_B32_e64_:%[0-9]+]].sub3:vreg_128 = V_CNDMASK_B32_e64 0, 0, 0, 1, [[V_CMP_EQ_U32_e64_]], implicit $exec
- ; CHECK-NEXT: IMAGE_STORE_V4_V2_gfx10 [[V_CNDMASK_B32_e64_]], undef %573:vreg_64, [[S_LOAD_DWORDX8_IMM]], 15, 1, -1, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable store (s128), addrspace 8)
+ ; CHECK-NEXT: IMAGE_STORE_V4_V2_gfx10 [[V_CNDMASK_B32_e64_]], undef %556:vreg_64, [[S_LOAD_DWORDX8_IMM]], 15, 1, -1, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable store (s128), addrspace 8)
; CHECK-NEXT: S_ENDPGM 0
.expVert:
%0 = extractelement <31 x i32> %userData, i64 2
diff --git a/llvm/test/CodeGen/Generic/replace-intrinsics-with-veclib.ll b/llvm/test/CodeGen/Generic/replace-intrinsics-with-veclib.ll
index df8b7c4..fde6cb7 100644
--- a/llvm/test/CodeGen/Generic/replace-intrinsics-with-veclib.ll
+++ b/llvm/test/CodeGen/Generic/replace-intrinsics-with-veclib.ll
@@ -1,5 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --function-signature --check-attributes
; RUN: opt -vector-library=SVML -replace-with-veclib -S < %s | FileCheck %s --check-prefixes=COMMON,SVML
+; RUN: opt -vector-library=AMDLIBM -replace-with-veclib -S < %s | FileCheck %s --check-prefixes=COMMON,AMDLIBM
; RUN: opt -vector-library=LIBMVEC-X86 -replace-with-veclib -S < %s | FileCheck %s --check-prefixes=COMMON,LIBMVEC-X86
; RUN: opt -vector-library=MASSV -replace-with-veclib -S < %s | FileCheck %s --check-prefixes=COMMON,MASSV
; RUN: opt -vector-library=Accelerate -replace-with-veclib -S < %s | FileCheck %s --check-prefixes=COMMON,ACCELERATE
@@ -13,6 +14,11 @@ define <4 x double> @exp_v4(<4 x double> %in) {
; SVML-NEXT: [[TMP1:%.*]] = call <4 x double> @__svml_exp4(<4 x double> [[IN]])
; SVML-NEXT: ret <4 x double> [[TMP1]]
;
+; AMDLIBM-LABEL: define {{[^@]+}}@exp_v4
+; AMDLIBM-SAME: (<4 x double> [[IN:%.*]]) {
+; AMDLIBM-NEXT: [[TMP1:%.*]] = call <4 x double> @amd_vrd4_exp(<4 x double> [[IN]])
+; AMDLIBM-NEXT: ret <4 x double> [[TMP1]]
+;
; LIBMVEC-X86-LABEL: define {{[^@]+}}@exp_v4
; LIBMVEC-X86-SAME: (<4 x double> [[IN:%.*]]) {
; LIBMVEC-X86-NEXT: [[TMP1:%.*]] = call <4 x double> @_ZGVdN4v_exp(<4 x double> [[IN]])
@@ -40,6 +46,11 @@ define <4 x float> @exp_f32(<4 x float> %in) {
; SVML-NEXT: [[TMP1:%.*]] = call <4 x float> @__svml_expf4(<4 x float> [[IN]])
; SVML-NEXT: ret <4 x float> [[TMP1]]
;
+; AMDLIBM-LABEL: define {{[^@]+}}@exp_f32
+; AMDLIBM-SAME: (<4 x float> [[IN:%.*]]) {
+; AMDLIBM-NEXT: [[TMP1:%.*]] = call <4 x float> @amd_vrs4_expf(<4 x float> [[IN]])
+; AMDLIBM-NEXT: ret <4 x float> [[TMP1]]
+;
; LIBMVEC-X86-LABEL: define {{[^@]+}}@exp_f32
; LIBMVEC-X86-SAME: (<4 x float> [[IN:%.*]]) {
; LIBMVEC-X86-NEXT: [[TMP1:%.*]] = call <4 x float> @_ZGVbN4v_expf(<4 x float> [[IN]])
diff --git a/llvm/test/CodeGen/Hexagon/vector-zext-v4i8.ll b/llvm/test/CodeGen/Hexagon/vector-zext-v4i8.ll
new file mode 100644
index 0000000..4d0e6db
--- /dev/null
+++ b/llvm/test/CodeGen/Hexagon/vector-zext-v4i8.ll
@@ -0,0 +1,112 @@
+; RUN: llc -march=hexagon < %s | FileCheck %s
+
+; Check that when we extract a byte from the result of a mask from predicate
+; that the results of the mask all fit in the same word.
+; CHECK: [[PRED:p[0-9]+]] = vcmpb.gtu(r{{.*}},#0)
+; CHECK: [[REG1:r[0-9]*:[0-9]*]] = mask([[PRED]])
+; CHECK: [[REG2:r[0-9]*]] = vtrunehb([[REG1]])
+; CHECK: {{r[0-9]*}} = extractu([[REG2]],#1,#8)
+
+target triple = "hexagon"
+
+%struct.pluto = type { [12 x %struct.pluto.0], [4 x %struct.pluto.0], [2 x %struct.pluto.0], [4 x %struct.pluto.0], [6 x %struct.pluto.0], [2 x [7 x %struct.pluto.0]], [4 x %struct.pluto.0], [3 x [4 x %struct.pluto.0]], [3 x %struct.pluto.0], [3 x %struct.pluto.0] }
+%struct.pluto.0 = type { i8, i8 }
+
+@global = internal unnamed_addr constant [3 x [4 x [2 x i8]]] [[4 x [2 x i8]] [[2 x i8] c"\FAV", [2 x i8] c"\EF_", [2 x i8] c"\FA=", [2 x i8] c"\09-"], [4 x [2 x i8]] [[2 x i8] c"\06E", [2 x i8] c"\F3Z", [2 x i8] c"\004", [2 x i8] c"\08+"], [4 x [2 x i8]] [[2 x i8] c"\FA]", [2 x i8] c"\F2X", [2 x i8] c"\FA,", [2 x i8] c"\047"]], align 8
+
+; Function Attrs: nofree noinline norecurse nosync nounwind memory(write)
+define dso_local void @eggs(ptr nocapture %arg, ptr nocapture readnone %arg1, i32 %arg2, i32 %arg3, i32 %arg4) local_unnamed_addr #0 {
+bb:
+ %icmp = icmp sgt i32 %arg3, 0
+ %select = select i1 %icmp, i32 %arg3, i32 0
+ br i1 false, label %bb33, label %bb5
+
+bb5: ; preds = %bb
+ %insertelement = insertelement <4 x i32> poison, i32 %select, i32 0
+ %shufflevector = shufflevector <4 x i32> %insertelement, <4 x i32> poison, <4 x i32> zeroinitializer
+ br label %bb6
+
+bb6: ; preds = %bb6, %bb5
+ %phi = phi i32 [ 0, %bb5 ], [ %add29, %bb6 ]
+ %insertelement7 = insertelement <4 x i32> poison, i32 %phi, i32 0
+ %shufflevector8 = shufflevector <4 x i32> %insertelement7, <4 x i32> poison, <4 x i32> zeroinitializer
+ %add = add <4 x i32> %shufflevector8, <i32 0, i32 1, i32 2, i32 3>
+ %add9 = add i32 %phi, 0
+ %getelementptr = getelementptr inbounds [3 x [4 x [2 x i8]]], ptr @global, i32 0, i32 %arg2, i32 %add9, i32 0
+ %getelementptr10 = getelementptr inbounds i8, ptr %getelementptr, i32 0
+ %bitcast = bitcast ptr %getelementptr10 to ptr
+ %load = load <8 x i8>, ptr %bitcast, align 1
+ %shufflevector11 = shufflevector <8 x i8> %load, <8 x i8> poison, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
+ %shufflevector12 = shufflevector <8 x i8> %load, <8 x i8> poison, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
+ %getelementptr13 = getelementptr [3 x [4 x [2 x i8]]], ptr @global, i32 0, i32 %arg2, i32 %add9, i32 1
+ %sext = sext <4 x i8> %shufflevector11 to <4 x i32>
+ %mul = mul nsw <4 x i32> %shufflevector, %sext
+ %ashr = ashr <4 x i32> %mul, <i32 4, i32 4, i32 4, i32 4>
+ %sext14 = sext <4 x i8> %shufflevector12 to <4 x i32>
+ %add15 = add nsw <4 x i32> %ashr, %sext14
+ %icmp16 = icmp sgt <4 x i32> %add15, <i32 1, i32 1, i32 1, i32 1>
+ %select17 = select <4 x i1> %icmp16, <4 x i32> %add15, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
+ %icmp18 = icmp slt <4 x i32> %select17, <i32 126, i32 126, i32 126, i32 126>
+ %select19 = select <4 x i1> %icmp18, <4 x i32> %select17, <4 x i32> <i32 126, i32 126, i32 126, i32 126>
+ %icmp20 = icmp sgt <4 x i32> %select19, <i32 63, i32 63, i32 63, i32 63>
+ %trunc = trunc <4 x i32> %select19 to <4 x i8>
+ %add21 = add nsw <4 x i8> %trunc, <i8 -64, i8 -64, i8 -64, i8 -64>
+ %getelementptr22 = getelementptr inbounds %struct.pluto, ptr %arg, i32 0, i32 1, i32 %add9, i32 0
+ %sub = sub nsw <4 x i8> <i8 63, i8 63, i8 63, i8 63>, %trunc
+ %select23 = select <4 x i1> %icmp20, <4 x i8> %add21, <4 x i8> %sub
+ %getelementptr24 = getelementptr inbounds %struct.pluto, ptr %arg, i32 0, i32 1, i32 %add9, i32 1
+ %zext = zext <4 x i1> %icmp20 to <4 x i8>
+ %getelementptr25 = getelementptr inbounds i8, ptr %getelementptr24, i32 -1
+ %bitcast26 = bitcast ptr %getelementptr25 to ptr
+ %shufflevector27 = shufflevector <4 x i8> %select23, <4 x i8> %zext, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %shufflevector28 = shufflevector <8 x i8> %shufflevector27, <8 x i8> poison, <8 x i32> <i32 0, i32 4, i32 1, i32 5, i32 2, i32 6, i32 3, i32 7>
+ store <8 x i8> %shufflevector28, ptr %bitcast26, align 1
+ %add29 = add nuw i32 %phi, 4
+ %icmp30 = icmp eq i32 %add29, 4
+ br i1 %icmp30, label %bb31, label %bb6
+
+bb31: ; preds = %bb6
+ %icmp32 = icmp eq i32 4, 4
+ br i1 %icmp32, label %bb61, label %bb33
+
+bb33: ; preds = %bb31, %bb
+ %phi34 = phi i32 [ 4, %bb31 ], [ 0, %bb ]
+ br label %bb35
+
+bb35: ; preds = %bb35, %bb33
+ %phi36 = phi i32 [ %phi34, %bb33 ], [ %add58, %bb35 ]
+ %getelementptr37 = getelementptr inbounds [3 x [4 x [2 x i8]]], ptr @global, i32 0, i32 %arg2, i32 %phi36, i32 0
+ %load38 = load i8, ptr %getelementptr37, align 2
+ %getelementptr39 = getelementptr [3 x [4 x [2 x i8]]], ptr @global, i32 0, i32 %arg2, i32 %phi36, i32 1
+ %load40 = load i8, ptr %getelementptr39, align 1
+ %sext41 = sext i8 %load38 to i32
+ %mul42 = mul nsw i32 %select, %sext41
+ %ashr43 = ashr i32 %mul42, 4
+ %sext44 = sext i8 %load40 to i32
+ %add45 = add nsw i32 %ashr43, %sext44
+ %icmp46 = icmp sgt i32 %add45, 1
+ %select47 = select i1 %icmp46, i32 %add45, i32 1
+ %icmp48 = icmp slt i32 %select47, 126
+ %select49 = select i1 %icmp48, i32 %select47, i32 126
+ %icmp50 = icmp sgt i32 %select49, 63
+ %trunc51 = trunc i32 %select49 to i8
+ %add52 = add nsw i8 %trunc51, -64
+ %getelementptr53 = getelementptr inbounds %struct.pluto, ptr %arg, i32 0, i32 1, i32 %phi36, i32 0
+ %sub54 = sub nsw i8 63, %trunc51
+ %select55 = select i1 %icmp50, i8 %add52, i8 %sub54
+ store i8 %select55, ptr %getelementptr53, align 1
+ %getelementptr56 = getelementptr inbounds %struct.pluto, ptr %arg, i32 0, i32 1, i32 %phi36, i32 1
+ %zext57 = zext i1 %icmp50 to i8
+ store i8 %zext57, ptr %getelementptr56, align 1
+ %add58 = add nuw nsw i32 %phi36, 1
+ %icmp59 = icmp eq i32 %add58, 4
+ br i1 %icmp59, label %bb60, label %bb35
+
+bb60: ; preds = %bb35
+ br label %bb61
+
+bb61: ; preds = %bb60, %bb31
+ ret void
+}
+
+attributes #0 = { nofree noinline norecurse nosync nounwind memory(write) "target-cpu"="hexagonv73" "target-features"="+hvx-length64b,+hvxv73,+v73" }
diff --git a/llvm/test/CodeGen/Mips/hf1_body.ll b/llvm/test/CodeGen/Mips/hf1_body.ll
index 184ea31..c3dea67 100644
--- a/llvm/test/CodeGen/Mips/hf1_body.ll
+++ b/llvm/test/CodeGen/Mips/hf1_body.ll
@@ -23,8 +23,8 @@ entry:
; ALL: .set reorder
; ALL: .reloc 0, R_MIPS_NONE, v_sf
; GAS: la $25, $__fn_local_v_sf
-; IAS: lw $25, %got($$__fn_local_v_sf)($gp)
-; IAS: addiu $25, $25, %lo($$__fn_local_v_sf)
+; IAS: lw $25, %got($__fn_local_v_sf)($gp)
+; IAS: addiu $25, $25, %lo($__fn_local_v_sf)
; ALL: mfc1 $4, $f12
; ALL: jr $25
; ALL: .end __fn_stub_v_sf
diff --git a/llvm/test/CodeGen/NVPTX/intrinsics.ll b/llvm/test/CodeGen/NVPTX/intrinsics.ll
index d84ee67..e160280 100644
--- a/llvm/test/CodeGen/NVPTX/intrinsics.ll
+++ b/llvm/test/CodeGen/NVPTX/intrinsics.ll
@@ -162,6 +162,17 @@ define i64 @test_cyclecounter() {
ret i64 %ret
}
+; CHECK-LABEL: test_steadycounter
+define i64 @test_steadycounter() {
+; CHECK: mov.u64 %r{{.*}}, %globaltimer;
+ %a = tail call i64 @llvm.readsteadycounter()
+; CHECK: mov.u64 %r{{.*}}, %globaltimer;
+ %b = tail call i64 @llvm.readsteadycounter()
+ %ret = add i64 %a, %b
+; CHECK: ret
+ ret i64 %ret
+}
+
declare float @llvm.fabs.f32(float)
declare double @llvm.fabs.f64(double)
declare float @llvm.nvvm.sqrt.f(float)
@@ -178,3 +189,4 @@ declare i64 @llvm.nvvm.read.ptx.sreg.clock64()
declare void @llvm.nvvm.exit()
declare i64 @llvm.nvvm.read.ptx.sreg.globaltimer()
declare i64 @llvm.readcyclecounter()
+declare i64 @llvm.readsteadycounter()
diff --git a/llvm/test/CodeGen/PowerPC/licm-remat.ll b/llvm/test/CodeGen/PowerPC/licm-remat.ll
index ffdaf5d..cf3e3ac 100644
--- a/llvm/test/CodeGen/PowerPC/licm-remat.ll
+++ b/llvm/test/CodeGen/PowerPC/licm-remat.ll
@@ -21,7 +21,7 @@ define linkonce_odr void @ZN6snappyDecompressor_(ptr %this, ptr %writer) {
; CHECK-LABEL: ZN6snappyDecompressor_:
; CHECK: # %bb.0: # %entry
; CHECK: addis 4, 2, .L__ModuleStringPool@toc@ha
-; CHECK: addi 25, 4, .L__ModuleStringPool@toc@l
+; CHECK: addi 26, 4, .L__ModuleStringPool@toc@l
; CHECK: .LBB0_2: # %for.cond
; CHECK-NOT: addis {{[0-9]+}}, 2, .L__ModuleStringPool@toc@ha
; CHECK: bctrl
diff --git a/llvm/test/CodeGen/RISCV/O3-pipeline.ll b/llvm/test/CodeGen/RISCV/O3-pipeline.ll
index e7db8ef..364c1e4 100644
--- a/llvm/test/CodeGen/RISCV/O3-pipeline.ll
+++ b/llvm/test/CodeGen/RISCV/O3-pipeline.ll
@@ -68,6 +68,7 @@
; CHECK-NEXT: Expand reduction intrinsics
; CHECK-NEXT: Natural Loop Information
; CHECK-NEXT: TLS Variable Hoist
+; CHECK-NEXT: Type Promotion
; CHECK-NEXT: CodeGen Prepare
; CHECK-NEXT: Dominator Tree Construction
; CHECK-NEXT: Exception handling preparation
diff --git a/llvm/test/CodeGen/RISCV/calling-conv-ilp32e.ll b/llvm/test/CodeGen/RISCV/calling-conv-ilp32e.ll
index 5c55113..d08cf57 100644
--- a/llvm/test/CodeGen/RISCV/calling-conv-ilp32e.ll
+++ b/llvm/test/CodeGen/RISCV/calling-conv-ilp32e.ll
@@ -731,11 +731,11 @@ define void @caller_aligned_stack() {
; ILP32E-FPELIM-SAVE-RESTORE-LABEL: caller_aligned_stack:
; ILP32E-FPELIM-SAVE-RESTORE: # %bb.0:
; ILP32E-FPELIM-SAVE-RESTORE-NEXT: call t0, __riscv_save_1
-; ILP32E-FPELIM-SAVE-RESTORE-NEXT: addi sp, sp, -48
-; ILP32E-FPELIM-SAVE-RESTORE-NEXT: .cfi_def_cfa_offset 56
+; ILP32E-FPELIM-SAVE-RESTORE-NEXT: addi sp, sp, -56
+; ILP32E-FPELIM-SAVE-RESTORE-NEXT: .cfi_def_cfa_offset 64
; ILP32E-FPELIM-SAVE-RESTORE-NEXT: .cfi_offset ra, -4
; ILP32E-FPELIM-SAVE-RESTORE-NEXT: .cfi_offset s0, -8
-; ILP32E-FPELIM-SAVE-RESTORE-NEXT: addi s0, sp, 56
+; ILP32E-FPELIM-SAVE-RESTORE-NEXT: addi s0, sp, 64
; ILP32E-FPELIM-SAVE-RESTORE-NEXT: .cfi_def_cfa s0, 0
; ILP32E-FPELIM-SAVE-RESTORE-NEXT: andi sp, sp, -16
; ILP32E-FPELIM-SAVE-RESTORE-NEXT: li a0, 18
@@ -776,18 +776,18 @@ define void @caller_aligned_stack() {
; ILP32E-FPELIM-SAVE-RESTORE-NEXT: li a4, 13
; ILP32E-FPELIM-SAVE-RESTORE-NEXT: sw a6, 32(sp)
; ILP32E-FPELIM-SAVE-RESTORE-NEXT: call callee_aligned_stack
-; ILP32E-FPELIM-SAVE-RESTORE-NEXT: addi sp, s0, -56
-; ILP32E-FPELIM-SAVE-RESTORE-NEXT: addi sp, sp, 48
+; ILP32E-FPELIM-SAVE-RESTORE-NEXT: addi sp, s0, -64
+; ILP32E-FPELIM-SAVE-RESTORE-NEXT: addi sp, sp, 56
; ILP32E-FPELIM-SAVE-RESTORE-NEXT: tail __riscv_restore_1
;
; ILP32E-WITHFP-SAVE-RESTORE-LABEL: caller_aligned_stack:
; ILP32E-WITHFP-SAVE-RESTORE: # %bb.0:
; ILP32E-WITHFP-SAVE-RESTORE-NEXT: call t0, __riscv_save_1
-; ILP32E-WITHFP-SAVE-RESTORE-NEXT: addi sp, sp, -48
-; ILP32E-WITHFP-SAVE-RESTORE-NEXT: .cfi_def_cfa_offset 56
+; ILP32E-WITHFP-SAVE-RESTORE-NEXT: addi sp, sp, -56
+; ILP32E-WITHFP-SAVE-RESTORE-NEXT: .cfi_def_cfa_offset 64
; ILP32E-WITHFP-SAVE-RESTORE-NEXT: .cfi_offset ra, -4
; ILP32E-WITHFP-SAVE-RESTORE-NEXT: .cfi_offset s0, -8
-; ILP32E-WITHFP-SAVE-RESTORE-NEXT: addi s0, sp, 56
+; ILP32E-WITHFP-SAVE-RESTORE-NEXT: addi s0, sp, 64
; ILP32E-WITHFP-SAVE-RESTORE-NEXT: .cfi_def_cfa s0, 0
; ILP32E-WITHFP-SAVE-RESTORE-NEXT: andi sp, sp, -16
; ILP32E-WITHFP-SAVE-RESTORE-NEXT: li a0, 18
@@ -828,8 +828,8 @@ define void @caller_aligned_stack() {
; ILP32E-WITHFP-SAVE-RESTORE-NEXT: li a4, 13
; ILP32E-WITHFP-SAVE-RESTORE-NEXT: sw a6, 32(sp)
; ILP32E-WITHFP-SAVE-RESTORE-NEXT: call callee_aligned_stack
-; ILP32E-WITHFP-SAVE-RESTORE-NEXT: addi sp, s0, -56
-; ILP32E-WITHFP-SAVE-RESTORE-NEXT: addi sp, sp, 48
+; ILP32E-WITHFP-SAVE-RESTORE-NEXT: addi sp, s0, -64
+; ILP32E-WITHFP-SAVE-RESTORE-NEXT: addi sp, sp, 56
; ILP32E-WITHFP-SAVE-RESTORE-NEXT: tail __riscv_restore_1
%1 = call i32 @callee_aligned_stack(i32 1, i32 11,
fp128 0xLEB851EB851EB851F400091EB851EB851, i32 12, i32 13,
@@ -1431,11 +1431,11 @@ define i32 @caller_large_scalars() {
; ILP32E-FPELIM-SAVE-RESTORE-LABEL: caller_large_scalars:
; ILP32E-FPELIM-SAVE-RESTORE: # %bb.0:
; ILP32E-FPELIM-SAVE-RESTORE-NEXT: call t0, __riscv_save_1
-; ILP32E-FPELIM-SAVE-RESTORE-NEXT: addi sp, sp, -32
-; ILP32E-FPELIM-SAVE-RESTORE-NEXT: .cfi_def_cfa_offset 40
+; ILP32E-FPELIM-SAVE-RESTORE-NEXT: addi sp, sp, -40
+; ILP32E-FPELIM-SAVE-RESTORE-NEXT: .cfi_def_cfa_offset 48
; ILP32E-FPELIM-SAVE-RESTORE-NEXT: .cfi_offset ra, -4
; ILP32E-FPELIM-SAVE-RESTORE-NEXT: .cfi_offset s0, -8
-; ILP32E-FPELIM-SAVE-RESTORE-NEXT: addi s0, sp, 40
+; ILP32E-FPELIM-SAVE-RESTORE-NEXT: addi s0, sp, 48
; ILP32E-FPELIM-SAVE-RESTORE-NEXT: .cfi_def_cfa s0, 0
; ILP32E-FPELIM-SAVE-RESTORE-NEXT: andi sp, sp, -16
; ILP32E-FPELIM-SAVE-RESTORE-NEXT: lui a0, 524272
@@ -1443,26 +1443,26 @@ define i32 @caller_large_scalars() {
; ILP32E-FPELIM-SAVE-RESTORE-NEXT: sw zero, 8(sp)
; ILP32E-FPELIM-SAVE-RESTORE-NEXT: sw zero, 4(sp)
; ILP32E-FPELIM-SAVE-RESTORE-NEXT: sw zero, 0(sp)
+; ILP32E-FPELIM-SAVE-RESTORE-NEXT: sw zero, 36(sp)
+; ILP32E-FPELIM-SAVE-RESTORE-NEXT: sw zero, 32(sp)
; ILP32E-FPELIM-SAVE-RESTORE-NEXT: sw zero, 28(sp)
-; ILP32E-FPELIM-SAVE-RESTORE-NEXT: sw zero, 24(sp)
-; ILP32E-FPELIM-SAVE-RESTORE-NEXT: sw zero, 20(sp)
; ILP32E-FPELIM-SAVE-RESTORE-NEXT: li a2, 1
-; ILP32E-FPELIM-SAVE-RESTORE-NEXT: addi a0, sp, 16
+; ILP32E-FPELIM-SAVE-RESTORE-NEXT: addi a0, sp, 24
; ILP32E-FPELIM-SAVE-RESTORE-NEXT: mv a1, sp
-; ILP32E-FPELIM-SAVE-RESTORE-NEXT: sw a2, 16(sp)
+; ILP32E-FPELIM-SAVE-RESTORE-NEXT: sw a2, 24(sp)
; ILP32E-FPELIM-SAVE-RESTORE-NEXT: call callee_large_scalars
-; ILP32E-FPELIM-SAVE-RESTORE-NEXT: addi sp, s0, -40
-; ILP32E-FPELIM-SAVE-RESTORE-NEXT: addi sp, sp, 32
+; ILP32E-FPELIM-SAVE-RESTORE-NEXT: addi sp, s0, -48
+; ILP32E-FPELIM-SAVE-RESTORE-NEXT: addi sp, sp, 40
; ILP32E-FPELIM-SAVE-RESTORE-NEXT: tail __riscv_restore_1
;
; ILP32E-WITHFP-SAVE-RESTORE-LABEL: caller_large_scalars:
; ILP32E-WITHFP-SAVE-RESTORE: # %bb.0:
; ILP32E-WITHFP-SAVE-RESTORE-NEXT: call t0, __riscv_save_1
-; ILP32E-WITHFP-SAVE-RESTORE-NEXT: addi sp, sp, -32
-; ILP32E-WITHFP-SAVE-RESTORE-NEXT: .cfi_def_cfa_offset 40
+; ILP32E-WITHFP-SAVE-RESTORE-NEXT: addi sp, sp, -40
+; ILP32E-WITHFP-SAVE-RESTORE-NEXT: .cfi_def_cfa_offset 48
; ILP32E-WITHFP-SAVE-RESTORE-NEXT: .cfi_offset ra, -4
; ILP32E-WITHFP-SAVE-RESTORE-NEXT: .cfi_offset s0, -8
-; ILP32E-WITHFP-SAVE-RESTORE-NEXT: addi s0, sp, 40
+; ILP32E-WITHFP-SAVE-RESTORE-NEXT: addi s0, sp, 48
; ILP32E-WITHFP-SAVE-RESTORE-NEXT: .cfi_def_cfa s0, 0
; ILP32E-WITHFP-SAVE-RESTORE-NEXT: andi sp, sp, -16
; ILP32E-WITHFP-SAVE-RESTORE-NEXT: lui a0, 524272
@@ -1470,16 +1470,16 @@ define i32 @caller_large_scalars() {
; ILP32E-WITHFP-SAVE-RESTORE-NEXT: sw zero, 8(sp)
; ILP32E-WITHFP-SAVE-RESTORE-NEXT: sw zero, 4(sp)
; ILP32E-WITHFP-SAVE-RESTORE-NEXT: sw zero, 0(sp)
+; ILP32E-WITHFP-SAVE-RESTORE-NEXT: sw zero, 36(sp)
+; ILP32E-WITHFP-SAVE-RESTORE-NEXT: sw zero, 32(sp)
; ILP32E-WITHFP-SAVE-RESTORE-NEXT: sw zero, 28(sp)
-; ILP32E-WITHFP-SAVE-RESTORE-NEXT: sw zero, 24(sp)
-; ILP32E-WITHFP-SAVE-RESTORE-NEXT: sw zero, 20(sp)
; ILP32E-WITHFP-SAVE-RESTORE-NEXT: li a2, 1
-; ILP32E-WITHFP-SAVE-RESTORE-NEXT: addi a0, sp, 16
+; ILP32E-WITHFP-SAVE-RESTORE-NEXT: addi a0, sp, 24
; ILP32E-WITHFP-SAVE-RESTORE-NEXT: mv a1, sp
-; ILP32E-WITHFP-SAVE-RESTORE-NEXT: sw a2, 16(sp)
+; ILP32E-WITHFP-SAVE-RESTORE-NEXT: sw a2, 24(sp)
; ILP32E-WITHFP-SAVE-RESTORE-NEXT: call callee_large_scalars
-; ILP32E-WITHFP-SAVE-RESTORE-NEXT: addi sp, s0, -40
-; ILP32E-WITHFP-SAVE-RESTORE-NEXT: addi sp, sp, 32
+; ILP32E-WITHFP-SAVE-RESTORE-NEXT: addi sp, s0, -48
+; ILP32E-WITHFP-SAVE-RESTORE-NEXT: addi sp, sp, 40
; ILP32E-WITHFP-SAVE-RESTORE-NEXT: tail __riscv_restore_1
%1 = call i32 @callee_large_scalars(i128 1, fp128 0xL00000000000000007FFF000000000000)
ret i32 %1
@@ -1688,18 +1688,18 @@ define i32 @caller_large_scalars_exhausted_regs() {
; ILP32E-FPELIM-SAVE-RESTORE-LABEL: caller_large_scalars_exhausted_regs:
; ILP32E-FPELIM-SAVE-RESTORE: # %bb.0:
; ILP32E-FPELIM-SAVE-RESTORE-NEXT: call t0, __riscv_save_1
-; ILP32E-FPELIM-SAVE-RESTORE-NEXT: addi sp, sp, -48
-; ILP32E-FPELIM-SAVE-RESTORE-NEXT: .cfi_def_cfa_offset 56
+; ILP32E-FPELIM-SAVE-RESTORE-NEXT: addi sp, sp, -56
+; ILP32E-FPELIM-SAVE-RESTORE-NEXT: .cfi_def_cfa_offset 64
; ILP32E-FPELIM-SAVE-RESTORE-NEXT: .cfi_offset ra, -4
; ILP32E-FPELIM-SAVE-RESTORE-NEXT: .cfi_offset s0, -8
-; ILP32E-FPELIM-SAVE-RESTORE-NEXT: addi s0, sp, 56
+; ILP32E-FPELIM-SAVE-RESTORE-NEXT: addi s0, sp, 64
; ILP32E-FPELIM-SAVE-RESTORE-NEXT: .cfi_def_cfa s0, 0
; ILP32E-FPELIM-SAVE-RESTORE-NEXT: andi sp, sp, -16
; ILP32E-FPELIM-SAVE-RESTORE-NEXT: addi a0, sp, 16
; ILP32E-FPELIM-SAVE-RESTORE-NEXT: sw a0, 12(sp)
; ILP32E-FPELIM-SAVE-RESTORE-NEXT: li a0, 9
; ILP32E-FPELIM-SAVE-RESTORE-NEXT: sw a0, 8(sp)
-; ILP32E-FPELIM-SAVE-RESTORE-NEXT: addi a0, sp, 32
+; ILP32E-FPELIM-SAVE-RESTORE-NEXT: addi a0, sp, 40
; ILP32E-FPELIM-SAVE-RESTORE-NEXT: sw a0, 4(sp)
; ILP32E-FPELIM-SAVE-RESTORE-NEXT: li a0, 7
; ILP32E-FPELIM-SAVE-RESTORE-NEXT: sw a0, 0(sp)
@@ -1708,37 +1708,37 @@ define i32 @caller_large_scalars_exhausted_regs() {
; ILP32E-FPELIM-SAVE-RESTORE-NEXT: sw zero, 24(sp)
; ILP32E-FPELIM-SAVE-RESTORE-NEXT: sw zero, 20(sp)
; ILP32E-FPELIM-SAVE-RESTORE-NEXT: sw zero, 16(sp)
-; ILP32E-FPELIM-SAVE-RESTORE-NEXT: sw zero, 44(sp)
-; ILP32E-FPELIM-SAVE-RESTORE-NEXT: sw zero, 40(sp)
+; ILP32E-FPELIM-SAVE-RESTORE-NEXT: sw zero, 52(sp)
+; ILP32E-FPELIM-SAVE-RESTORE-NEXT: sw zero, 48(sp)
; ILP32E-FPELIM-SAVE-RESTORE-NEXT: li a0, 8
-; ILP32E-FPELIM-SAVE-RESTORE-NEXT: sw a0, 32(sp)
+; ILP32E-FPELIM-SAVE-RESTORE-NEXT: sw a0, 40(sp)
; ILP32E-FPELIM-SAVE-RESTORE-NEXT: li a0, 1
; ILP32E-FPELIM-SAVE-RESTORE-NEXT: li a1, 2
; ILP32E-FPELIM-SAVE-RESTORE-NEXT: li a2, 3
; ILP32E-FPELIM-SAVE-RESTORE-NEXT: li a3, 4
; ILP32E-FPELIM-SAVE-RESTORE-NEXT: li a4, 5
; ILP32E-FPELIM-SAVE-RESTORE-NEXT: li a5, 6
-; ILP32E-FPELIM-SAVE-RESTORE-NEXT: sw zero, 36(sp)
+; ILP32E-FPELIM-SAVE-RESTORE-NEXT: sw zero, 44(sp)
; ILP32E-FPELIM-SAVE-RESTORE-NEXT: call callee_large_scalars_exhausted_regs
-; ILP32E-FPELIM-SAVE-RESTORE-NEXT: addi sp, s0, -56
-; ILP32E-FPELIM-SAVE-RESTORE-NEXT: addi sp, sp, 48
+; ILP32E-FPELIM-SAVE-RESTORE-NEXT: addi sp, s0, -64
+; ILP32E-FPELIM-SAVE-RESTORE-NEXT: addi sp, sp, 56
; ILP32E-FPELIM-SAVE-RESTORE-NEXT: tail __riscv_restore_1
;
; ILP32E-WITHFP-SAVE-RESTORE-LABEL: caller_large_scalars_exhausted_regs:
; ILP32E-WITHFP-SAVE-RESTORE: # %bb.0:
; ILP32E-WITHFP-SAVE-RESTORE-NEXT: call t0, __riscv_save_1
-; ILP32E-WITHFP-SAVE-RESTORE-NEXT: addi sp, sp, -48
-; ILP32E-WITHFP-SAVE-RESTORE-NEXT: .cfi_def_cfa_offset 56
+; ILP32E-WITHFP-SAVE-RESTORE-NEXT: addi sp, sp, -56
+; ILP32E-WITHFP-SAVE-RESTORE-NEXT: .cfi_def_cfa_offset 64
; ILP32E-WITHFP-SAVE-RESTORE-NEXT: .cfi_offset ra, -4
; ILP32E-WITHFP-SAVE-RESTORE-NEXT: .cfi_offset s0, -8
-; ILP32E-WITHFP-SAVE-RESTORE-NEXT: addi s0, sp, 56
+; ILP32E-WITHFP-SAVE-RESTORE-NEXT: addi s0, sp, 64
; ILP32E-WITHFP-SAVE-RESTORE-NEXT: .cfi_def_cfa s0, 0
; ILP32E-WITHFP-SAVE-RESTORE-NEXT: andi sp, sp, -16
; ILP32E-WITHFP-SAVE-RESTORE-NEXT: addi a0, sp, 16
; ILP32E-WITHFP-SAVE-RESTORE-NEXT: sw a0, 12(sp)
; ILP32E-WITHFP-SAVE-RESTORE-NEXT: li a0, 9
; ILP32E-WITHFP-SAVE-RESTORE-NEXT: sw a0, 8(sp)
-; ILP32E-WITHFP-SAVE-RESTORE-NEXT: addi a0, sp, 32
+; ILP32E-WITHFP-SAVE-RESTORE-NEXT: addi a0, sp, 40
; ILP32E-WITHFP-SAVE-RESTORE-NEXT: sw a0, 4(sp)
; ILP32E-WITHFP-SAVE-RESTORE-NEXT: li a0, 7
; ILP32E-WITHFP-SAVE-RESTORE-NEXT: sw a0, 0(sp)
@@ -1747,20 +1747,20 @@ define i32 @caller_large_scalars_exhausted_regs() {
; ILP32E-WITHFP-SAVE-RESTORE-NEXT: sw zero, 24(sp)
; ILP32E-WITHFP-SAVE-RESTORE-NEXT: sw zero, 20(sp)
; ILP32E-WITHFP-SAVE-RESTORE-NEXT: sw zero, 16(sp)
-; ILP32E-WITHFP-SAVE-RESTORE-NEXT: sw zero, 44(sp)
-; ILP32E-WITHFP-SAVE-RESTORE-NEXT: sw zero, 40(sp)
+; ILP32E-WITHFP-SAVE-RESTORE-NEXT: sw zero, 52(sp)
+; ILP32E-WITHFP-SAVE-RESTORE-NEXT: sw zero, 48(sp)
; ILP32E-WITHFP-SAVE-RESTORE-NEXT: li a0, 8
-; ILP32E-WITHFP-SAVE-RESTORE-NEXT: sw a0, 32(sp)
+; ILP32E-WITHFP-SAVE-RESTORE-NEXT: sw a0, 40(sp)
; ILP32E-WITHFP-SAVE-RESTORE-NEXT: li a0, 1
; ILP32E-WITHFP-SAVE-RESTORE-NEXT: li a1, 2
; ILP32E-WITHFP-SAVE-RESTORE-NEXT: li a2, 3
; ILP32E-WITHFP-SAVE-RESTORE-NEXT: li a3, 4
; ILP32E-WITHFP-SAVE-RESTORE-NEXT: li a4, 5
; ILP32E-WITHFP-SAVE-RESTORE-NEXT: li a5, 6
-; ILP32E-WITHFP-SAVE-RESTORE-NEXT: sw zero, 36(sp)
+; ILP32E-WITHFP-SAVE-RESTORE-NEXT: sw zero, 44(sp)
; ILP32E-WITHFP-SAVE-RESTORE-NEXT: call callee_large_scalars_exhausted_regs
-; ILP32E-WITHFP-SAVE-RESTORE-NEXT: addi sp, s0, -56
-; ILP32E-WITHFP-SAVE-RESTORE-NEXT: addi sp, sp, 48
+; ILP32E-WITHFP-SAVE-RESTORE-NEXT: addi sp, s0, -64
+; ILP32E-WITHFP-SAVE-RESTORE-NEXT: addi sp, sp, 56
; ILP32E-WITHFP-SAVE-RESTORE-NEXT: tail __riscv_restore_1
%1 = call i32 @callee_large_scalars_exhausted_regs(
i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i128 8, i32 9,
@@ -2346,33 +2346,33 @@ define void @caller_large_scalar_ret() {
; ILP32E-FPELIM-SAVE-RESTORE-LABEL: caller_large_scalar_ret:
; ILP32E-FPELIM-SAVE-RESTORE: # %bb.0:
; ILP32E-FPELIM-SAVE-RESTORE-NEXT: call t0, __riscv_save_1
-; ILP32E-FPELIM-SAVE-RESTORE-NEXT: addi sp, sp, -16
-; ILP32E-FPELIM-SAVE-RESTORE-NEXT: .cfi_def_cfa_offset 24
+; ILP32E-FPELIM-SAVE-RESTORE-NEXT: addi sp, sp, -24
+; ILP32E-FPELIM-SAVE-RESTORE-NEXT: .cfi_def_cfa_offset 32
; ILP32E-FPELIM-SAVE-RESTORE-NEXT: .cfi_offset ra, -4
; ILP32E-FPELIM-SAVE-RESTORE-NEXT: .cfi_offset s0, -8
-; ILP32E-FPELIM-SAVE-RESTORE-NEXT: addi s0, sp, 24
+; ILP32E-FPELIM-SAVE-RESTORE-NEXT: addi s0, sp, 32
; ILP32E-FPELIM-SAVE-RESTORE-NEXT: .cfi_def_cfa s0, 0
; ILP32E-FPELIM-SAVE-RESTORE-NEXT: andi sp, sp, -16
; ILP32E-FPELIM-SAVE-RESTORE-NEXT: mv a0, sp
; ILP32E-FPELIM-SAVE-RESTORE-NEXT: call callee_large_scalar_ret
-; ILP32E-FPELIM-SAVE-RESTORE-NEXT: addi sp, s0, -24
-; ILP32E-FPELIM-SAVE-RESTORE-NEXT: addi sp, sp, 16
+; ILP32E-FPELIM-SAVE-RESTORE-NEXT: addi sp, s0, -32
+; ILP32E-FPELIM-SAVE-RESTORE-NEXT: addi sp, sp, 24
; ILP32E-FPELIM-SAVE-RESTORE-NEXT: tail __riscv_restore_1
;
; ILP32E-WITHFP-SAVE-RESTORE-LABEL: caller_large_scalar_ret:
; ILP32E-WITHFP-SAVE-RESTORE: # %bb.0:
; ILP32E-WITHFP-SAVE-RESTORE-NEXT: call t0, __riscv_save_1
-; ILP32E-WITHFP-SAVE-RESTORE-NEXT: addi sp, sp, -16
-; ILP32E-WITHFP-SAVE-RESTORE-NEXT: .cfi_def_cfa_offset 24
+; ILP32E-WITHFP-SAVE-RESTORE-NEXT: addi sp, sp, -24
+; ILP32E-WITHFP-SAVE-RESTORE-NEXT: .cfi_def_cfa_offset 32
; ILP32E-WITHFP-SAVE-RESTORE-NEXT: .cfi_offset ra, -4
; ILP32E-WITHFP-SAVE-RESTORE-NEXT: .cfi_offset s0, -8
-; ILP32E-WITHFP-SAVE-RESTORE-NEXT: addi s0, sp, 24
+; ILP32E-WITHFP-SAVE-RESTORE-NEXT: addi s0, sp, 32
; ILP32E-WITHFP-SAVE-RESTORE-NEXT: .cfi_def_cfa s0, 0
; ILP32E-WITHFP-SAVE-RESTORE-NEXT: andi sp, sp, -16
; ILP32E-WITHFP-SAVE-RESTORE-NEXT: mv a0, sp
; ILP32E-WITHFP-SAVE-RESTORE-NEXT: call callee_large_scalar_ret
-; ILP32E-WITHFP-SAVE-RESTORE-NEXT: addi sp, s0, -24
-; ILP32E-WITHFP-SAVE-RESTORE-NEXT: addi sp, sp, 16
+; ILP32E-WITHFP-SAVE-RESTORE-NEXT: addi sp, s0, -32
+; ILP32E-WITHFP-SAVE-RESTORE-NEXT: addi sp, sp, 24
; ILP32E-WITHFP-SAVE-RESTORE-NEXT: tail __riscv_restore_1
%1 = call fp128 @callee_large_scalar_ret()
ret void
diff --git a/llvm/test/CodeGen/RISCV/lack-of-signed-truncation-check.ll b/llvm/test/CodeGen/RISCV/lack-of-signed-truncation-check.ll
index 9e7f2e9..6e3a505 100644
--- a/llvm/test/CodeGen/RISCV/lack-of-signed-truncation-check.ll
+++ b/llvm/test/CodeGen/RISCV/lack-of-signed-truncation-check.ll
@@ -254,21 +254,39 @@ define i1 @shifts_necmp_i64_i8(i64 %x) nounwind {
; ---------------------------------------------------------------------------- ;
define i1 @add_ultcmp_i16_i8(i16 %x) nounwind {
-; RV32-LABEL: add_ultcmp_i16_i8:
-; RV32: # %bb.0:
-; RV32-NEXT: addi a0, a0, -128
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: srli a0, a0, 24
-; RV32-NEXT: sltiu a0, a0, 255
-; RV32-NEXT: ret
+; RV32I-LABEL: add_ultcmp_i16_i8:
+; RV32I: # %bb.0:
+; RV32I-NEXT: slli a0, a0, 16
+; RV32I-NEXT: srli a0, a0, 16
+; RV32I-NEXT: addi a0, a0, -128
+; RV32I-NEXT: srli a0, a0, 8
+; RV32I-NEXT: sltiu a0, a0, 255
+; RV32I-NEXT: ret
;
-; RV64-LABEL: add_ultcmp_i16_i8:
-; RV64: # %bb.0:
-; RV64-NEXT: addi a0, a0, -128
-; RV64-NEXT: slli a0, a0, 48
-; RV64-NEXT: srli a0, a0, 56
-; RV64-NEXT: sltiu a0, a0, 255
-; RV64-NEXT: ret
+; RV64I-LABEL: add_ultcmp_i16_i8:
+; RV64I: # %bb.0:
+; RV64I-NEXT: slli a0, a0, 48
+; RV64I-NEXT: srli a0, a0, 48
+; RV64I-NEXT: addi a0, a0, -128
+; RV64I-NEXT: srli a0, a0, 8
+; RV64I-NEXT: sltiu a0, a0, 255
+; RV64I-NEXT: ret
+;
+; RV32ZBB-LABEL: add_ultcmp_i16_i8:
+; RV32ZBB: # %bb.0:
+; RV32ZBB-NEXT: zext.h a0, a0
+; RV32ZBB-NEXT: addi a0, a0, -128
+; RV32ZBB-NEXT: srli a0, a0, 8
+; RV32ZBB-NEXT: sltiu a0, a0, 255
+; RV32ZBB-NEXT: ret
+;
+; RV64ZBB-LABEL: add_ultcmp_i16_i8:
+; RV64ZBB: # %bb.0:
+; RV64ZBB-NEXT: zext.h a0, a0
+; RV64ZBB-NEXT: addi a0, a0, -128
+; RV64ZBB-NEXT: srli a0, a0, 8
+; RV64ZBB-NEXT: sltiu a0, a0, 255
+; RV64ZBB-NEXT: ret
%tmp0 = add i16 %x, -128 ; ~0U << (8-1)
%tmp1 = icmp ult i16 %tmp0, -256 ; ~0U << 8
ret i1 %tmp1
@@ -421,21 +439,39 @@ define i1 @add_ultcmp_i64_i8(i64 %x) nounwind {
; Slightly more canonical variant
define i1 @add_ulecmp_i16_i8(i16 %x) nounwind {
-; RV32-LABEL: add_ulecmp_i16_i8:
-; RV32: # %bb.0:
-; RV32-NEXT: addi a0, a0, -128
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: srli a0, a0, 24
-; RV32-NEXT: sltiu a0, a0, 255
-; RV32-NEXT: ret
+; RV32I-LABEL: add_ulecmp_i16_i8:
+; RV32I: # %bb.0:
+; RV32I-NEXT: slli a0, a0, 16
+; RV32I-NEXT: srli a0, a0, 16
+; RV32I-NEXT: addi a0, a0, -128
+; RV32I-NEXT: srli a0, a0, 8
+; RV32I-NEXT: sltiu a0, a0, 255
+; RV32I-NEXT: ret
;
-; RV64-LABEL: add_ulecmp_i16_i8:
-; RV64: # %bb.0:
-; RV64-NEXT: addi a0, a0, -128
-; RV64-NEXT: slli a0, a0, 48
-; RV64-NEXT: srli a0, a0, 56
-; RV64-NEXT: sltiu a0, a0, 255
-; RV64-NEXT: ret
+; RV64I-LABEL: add_ulecmp_i16_i8:
+; RV64I: # %bb.0:
+; RV64I-NEXT: slli a0, a0, 48
+; RV64I-NEXT: srli a0, a0, 48
+; RV64I-NEXT: addi a0, a0, -128
+; RV64I-NEXT: srli a0, a0, 8
+; RV64I-NEXT: sltiu a0, a0, 255
+; RV64I-NEXT: ret
+;
+; RV32ZBB-LABEL: add_ulecmp_i16_i8:
+; RV32ZBB: # %bb.0:
+; RV32ZBB-NEXT: zext.h a0, a0
+; RV32ZBB-NEXT: addi a0, a0, -128
+; RV32ZBB-NEXT: srli a0, a0, 8
+; RV32ZBB-NEXT: sltiu a0, a0, 255
+; RV32ZBB-NEXT: ret
+;
+; RV64ZBB-LABEL: add_ulecmp_i16_i8:
+; RV64ZBB: # %bb.0:
+; RV64ZBB-NEXT: zext.h a0, a0
+; RV64ZBB-NEXT: addi a0, a0, -128
+; RV64ZBB-NEXT: srli a0, a0, 8
+; RV64ZBB-NEXT: sltiu a0, a0, 255
+; RV64ZBB-NEXT: ret
%tmp0 = add i16 %x, -128 ; ~0U << (8-1)
%tmp1 = icmp ule i16 %tmp0, -257 ; ~0U << 8 - 1
ret i1 %tmp1
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-abs.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-abs.ll
index 6056126..d2e0113 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-abs.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-abs.ll
@@ -1,8 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v -riscv-v-fixed-length-vector-lmul-max=2 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,LMULMAX2
-; RUN: llc -mtriple=riscv64 -mattr=+v -riscv-v-fixed-length-vector-lmul-max=2 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,LMULMAX2
-; RUN: llc -mtriple=riscv32 -mattr=+v -riscv-v-fixed-length-vector-lmul-max=1 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,LMULMAX1-RV32
-; RUN: llc -mtriple=riscv64 -mattr=+v -riscv-v-fixed-length-vector-lmul-max=1 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,LMULMAX1-RV64
+; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s | FileCheck %s
define void @abs_v16i8(ptr %x) {
; CHECK-LABEL: abs_v16i8:
@@ -87,43 +85,15 @@ define void @abs_v2i64(ptr %x) {
declare <2 x i64> @llvm.abs.v2i64(<2 x i64>, i1)
define void @abs_v32i8(ptr %x) {
-; LMULMAX2-LABEL: abs_v32i8:
-; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: li a1, 32
-; LMULMAX2-NEXT: vsetvli zero, a1, e8, m2, ta, ma
-; LMULMAX2-NEXT: vle8.v v8, (a0)
-; LMULMAX2-NEXT: vrsub.vi v10, v8, 0
-; LMULMAX2-NEXT: vmax.vv v8, v8, v10
-; LMULMAX2-NEXT: vse8.v v8, (a0)
-; LMULMAX2-NEXT: ret
-;
-; LMULMAX1-RV32-LABEL: abs_v32i8:
-; LMULMAX1-RV32: # %bb.0:
-; LMULMAX1-RV32-NEXT: vsetivli zero, 16, e8, m1, ta, ma
-; LMULMAX1-RV32-NEXT: addi a1, a0, 16
-; LMULMAX1-RV32-NEXT: vle8.v v8, (a1)
-; LMULMAX1-RV32-NEXT: vle8.v v9, (a0)
-; LMULMAX1-RV32-NEXT: vrsub.vi v10, v8, 0
-; LMULMAX1-RV32-NEXT: vmax.vv v8, v8, v10
-; LMULMAX1-RV32-NEXT: vrsub.vi v10, v9, 0
-; LMULMAX1-RV32-NEXT: vmax.vv v9, v9, v10
-; LMULMAX1-RV32-NEXT: vse8.v v9, (a0)
-; LMULMAX1-RV32-NEXT: vse8.v v8, (a1)
-; LMULMAX1-RV32-NEXT: ret
-;
-; LMULMAX1-RV64-LABEL: abs_v32i8:
-; LMULMAX1-RV64: # %bb.0:
-; LMULMAX1-RV64-NEXT: vsetivli zero, 16, e8, m1, ta, ma
-; LMULMAX1-RV64-NEXT: addi a1, a0, 16
-; LMULMAX1-RV64-NEXT: vle8.v v8, (a1)
-; LMULMAX1-RV64-NEXT: vle8.v v9, (a0)
-; LMULMAX1-RV64-NEXT: vrsub.vi v10, v8, 0
-; LMULMAX1-RV64-NEXT: vmax.vv v8, v8, v10
-; LMULMAX1-RV64-NEXT: vrsub.vi v10, v9, 0
-; LMULMAX1-RV64-NEXT: vmax.vv v9, v9, v10
-; LMULMAX1-RV64-NEXT: vse8.v v9, (a0)
-; LMULMAX1-RV64-NEXT: vse8.v v8, (a1)
-; LMULMAX1-RV64-NEXT: ret
+; CHECK-LABEL: abs_v32i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a1, 32
+; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
+; CHECK-NEXT: vle8.v v8, (a0)
+; CHECK-NEXT: vrsub.vi v10, v8, 0
+; CHECK-NEXT: vmax.vv v8, v8, v10
+; CHECK-NEXT: vse8.v v8, (a0)
+; CHECK-NEXT: ret
%a = load <32 x i8>, ptr %x
%b = call <32 x i8> @llvm.abs.v32i8(<32 x i8> %a, i1 false)
store <32 x i8> %b, ptr %x
@@ -132,42 +102,14 @@ define void @abs_v32i8(ptr %x) {
declare <32 x i8> @llvm.abs.v32i8(<32 x i8>, i1)
define void @abs_v16i16(ptr %x) {
-; LMULMAX2-LABEL: abs_v16i16:
-; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; LMULMAX2-NEXT: vle16.v v8, (a0)
-; LMULMAX2-NEXT: vrsub.vi v10, v8, 0
-; LMULMAX2-NEXT: vmax.vv v8, v8, v10
-; LMULMAX2-NEXT: vse16.v v8, (a0)
-; LMULMAX2-NEXT: ret
-;
-; LMULMAX1-RV32-LABEL: abs_v16i16:
-; LMULMAX1-RV32: # %bb.0:
-; LMULMAX1-RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; LMULMAX1-RV32-NEXT: addi a1, a0, 16
-; LMULMAX1-RV32-NEXT: vle16.v v8, (a1)
-; LMULMAX1-RV32-NEXT: vle16.v v9, (a0)
-; LMULMAX1-RV32-NEXT: vrsub.vi v10, v8, 0
-; LMULMAX1-RV32-NEXT: vmax.vv v8, v8, v10
-; LMULMAX1-RV32-NEXT: vrsub.vi v10, v9, 0
-; LMULMAX1-RV32-NEXT: vmax.vv v9, v9, v10
-; LMULMAX1-RV32-NEXT: vse16.v v9, (a0)
-; LMULMAX1-RV32-NEXT: vse16.v v8, (a1)
-; LMULMAX1-RV32-NEXT: ret
-;
-; LMULMAX1-RV64-LABEL: abs_v16i16:
-; LMULMAX1-RV64: # %bb.0:
-; LMULMAX1-RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; LMULMAX1-RV64-NEXT: addi a1, a0, 16
-; LMULMAX1-RV64-NEXT: vle16.v v8, (a1)
-; LMULMAX1-RV64-NEXT: vle16.v v9, (a0)
-; LMULMAX1-RV64-NEXT: vrsub.vi v10, v8, 0
-; LMULMAX1-RV64-NEXT: vmax.vv v8, v8, v10
-; LMULMAX1-RV64-NEXT: vrsub.vi v10, v9, 0
-; LMULMAX1-RV64-NEXT: vmax.vv v9, v9, v10
-; LMULMAX1-RV64-NEXT: vse16.v v9, (a0)
-; LMULMAX1-RV64-NEXT: vse16.v v8, (a1)
-; LMULMAX1-RV64-NEXT: ret
+; CHECK-LABEL: abs_v16i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; CHECK-NEXT: vle16.v v8, (a0)
+; CHECK-NEXT: vrsub.vi v10, v8, 0
+; CHECK-NEXT: vmax.vv v8, v8, v10
+; CHECK-NEXT: vse16.v v8, (a0)
+; CHECK-NEXT: ret
%a = load <16 x i16>, ptr %x
%b = call <16 x i16> @llvm.abs.v16i16(<16 x i16> %a, i1 false)
store <16 x i16> %b, ptr %x
@@ -176,42 +118,14 @@ define void @abs_v16i16(ptr %x) {
declare <16 x i16> @llvm.abs.v16i16(<16 x i16>, i1)
define void @abs_v8i32(ptr %x) {
-; LMULMAX2-LABEL: abs_v8i32:
-; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; LMULMAX2-NEXT: vle32.v v8, (a0)
-; LMULMAX2-NEXT: vrsub.vi v10, v8, 0
-; LMULMAX2-NEXT: vmax.vv v8, v8, v10
-; LMULMAX2-NEXT: vse32.v v8, (a0)
-; LMULMAX2-NEXT: ret
-;
-; LMULMAX1-RV32-LABEL: abs_v8i32:
-; LMULMAX1-RV32: # %bb.0:
-; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; LMULMAX1-RV32-NEXT: addi a1, a0, 16
-; LMULMAX1-RV32-NEXT: vle32.v v8, (a1)
-; LMULMAX1-RV32-NEXT: vle32.v v9, (a0)
-; LMULMAX1-RV32-NEXT: vrsub.vi v10, v8, 0
-; LMULMAX1-RV32-NEXT: vmax.vv v8, v8, v10
-; LMULMAX1-RV32-NEXT: vrsub.vi v10, v9, 0
-; LMULMAX1-RV32-NEXT: vmax.vv v9, v9, v10
-; LMULMAX1-RV32-NEXT: vse32.v v9, (a0)
-; LMULMAX1-RV32-NEXT: vse32.v v8, (a1)
-; LMULMAX1-RV32-NEXT: ret
-;
-; LMULMAX1-RV64-LABEL: abs_v8i32:
-; LMULMAX1-RV64: # %bb.0:
-; LMULMAX1-RV64-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; LMULMAX1-RV64-NEXT: addi a1, a0, 16
-; LMULMAX1-RV64-NEXT: vle32.v v8, (a1)
-; LMULMAX1-RV64-NEXT: vle32.v v9, (a0)
-; LMULMAX1-RV64-NEXT: vrsub.vi v10, v8, 0
-; LMULMAX1-RV64-NEXT: vmax.vv v8, v8, v10
-; LMULMAX1-RV64-NEXT: vrsub.vi v10, v9, 0
-; LMULMAX1-RV64-NEXT: vmax.vv v9, v9, v10
-; LMULMAX1-RV64-NEXT: vse32.v v9, (a0)
-; LMULMAX1-RV64-NEXT: vse32.v v8, (a1)
-; LMULMAX1-RV64-NEXT: ret
+; CHECK-LABEL: abs_v8i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; CHECK-NEXT: vle32.v v8, (a0)
+; CHECK-NEXT: vrsub.vi v10, v8, 0
+; CHECK-NEXT: vmax.vv v8, v8, v10
+; CHECK-NEXT: vse32.v v8, (a0)
+; CHECK-NEXT: ret
%a = load <8 x i32>, ptr %x
%b = call <8 x i32> @llvm.abs.v8i32(<8 x i32> %a, i1 false)
store <8 x i32> %b, ptr %x
@@ -220,45 +134,68 @@ define void @abs_v8i32(ptr %x) {
declare <8 x i32> @llvm.abs.v8i32(<8 x i32>, i1)
define void @abs_v4i64(ptr %x) {
-; LMULMAX2-LABEL: abs_v4i64:
-; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; LMULMAX2-NEXT: vle64.v v8, (a0)
-; LMULMAX2-NEXT: vrsub.vi v10, v8, 0
-; LMULMAX2-NEXT: vmax.vv v8, v8, v10
-; LMULMAX2-NEXT: vse64.v v8, (a0)
-; LMULMAX2-NEXT: ret
-;
-; LMULMAX1-RV32-LABEL: abs_v4i64:
-; LMULMAX1-RV32: # %bb.0:
-; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; LMULMAX1-RV32-NEXT: addi a1, a0, 16
-; LMULMAX1-RV32-NEXT: vle64.v v8, (a1)
-; LMULMAX1-RV32-NEXT: vle64.v v9, (a0)
-; LMULMAX1-RV32-NEXT: vrsub.vi v10, v8, 0
-; LMULMAX1-RV32-NEXT: vmax.vv v8, v8, v10
-; LMULMAX1-RV32-NEXT: vrsub.vi v10, v9, 0
-; LMULMAX1-RV32-NEXT: vmax.vv v9, v9, v10
-; LMULMAX1-RV32-NEXT: vse64.v v9, (a0)
-; LMULMAX1-RV32-NEXT: vse64.v v8, (a1)
-; LMULMAX1-RV32-NEXT: ret
-;
-; LMULMAX1-RV64-LABEL: abs_v4i64:
-; LMULMAX1-RV64: # %bb.0:
-; LMULMAX1-RV64-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; LMULMAX1-RV64-NEXT: addi a1, a0, 16
-; LMULMAX1-RV64-NEXT: vle64.v v8, (a1)
-; LMULMAX1-RV64-NEXT: vle64.v v9, (a0)
-; LMULMAX1-RV64-NEXT: vrsub.vi v10, v8, 0
-; LMULMAX1-RV64-NEXT: vmax.vv v8, v8, v10
-; LMULMAX1-RV64-NEXT: vrsub.vi v10, v9, 0
-; LMULMAX1-RV64-NEXT: vmax.vv v9, v9, v10
-; LMULMAX1-RV64-NEXT: vse64.v v9, (a0)
-; LMULMAX1-RV64-NEXT: vse64.v v8, (a1)
-; LMULMAX1-RV64-NEXT: ret
+; CHECK-LABEL: abs_v4i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; CHECK-NEXT: vle64.v v8, (a0)
+; CHECK-NEXT: vrsub.vi v10, v8, 0
+; CHECK-NEXT: vmax.vv v8, v8, v10
+; CHECK-NEXT: vse64.v v8, (a0)
+; CHECK-NEXT: ret
%a = load <4 x i64>, ptr %x
%b = call <4 x i64> @llvm.abs.v4i64(<4 x i64> %a, i1 false)
store <4 x i64> %b, ptr %x
ret void
}
declare <4 x i64> @llvm.abs.v4i64(<4 x i64>, i1)
+
+define void @abs_v4i64_of_sext_v4i8(ptr %x) {
+; CHECK-LABEL: abs_v4i64_of_sext_v4i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; CHECK-NEXT: vle8.v v8, (a0)
+; CHECK-NEXT: vsext.vf8 v10, v8
+; CHECK-NEXT: vrsub.vi v8, v10, 0
+; CHECK-NEXT: vmax.vv v8, v10, v8
+; CHECK-NEXT: vse64.v v8, (a0)
+; CHECK-NEXT: ret
+ %a = load <4 x i8>, ptr %x
+ %a.ext = sext <4 x i8> %a to <4 x i64>
+ %b = call <4 x i64> @llvm.abs.v4i64(<4 x i64> %a.ext, i1 false)
+ store <4 x i64> %b, ptr %x
+ ret void
+}
+
+define void @abs_v4i64_of_sext_v4i16(ptr %x) {
+; CHECK-LABEL: abs_v4i64_of_sext_v4i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; CHECK-NEXT: vle16.v v8, (a0)
+; CHECK-NEXT: vsext.vf4 v10, v8
+; CHECK-NEXT: vrsub.vi v8, v10, 0
+; CHECK-NEXT: vmax.vv v8, v10, v8
+; CHECK-NEXT: vse64.v v8, (a0)
+; CHECK-NEXT: ret
+ %a = load <4 x i16>, ptr %x
+ %a.ext = sext <4 x i16> %a to <4 x i64>
+ %b = call <4 x i64> @llvm.abs.v4i64(<4 x i64> %a.ext, i1 false)
+ store <4 x i64> %b, ptr %x
+ ret void
+}
+
+define void @abs_v4i64_of_sext_v4i32(ptr %x) {
+; CHECK-LABEL: abs_v4i64_of_sext_v4i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; CHECK-NEXT: vle32.v v8, (a0)
+; CHECK-NEXT: vsext.vf2 v10, v8
+; CHECK-NEXT: vrsub.vi v8, v10, 0
+; CHECK-NEXT: vmax.vv v8, v10, v8
+; CHECK-NEXT: vse64.v v8, (a0)
+; CHECK-NEXT: ret
+ %a = load <4 x i32>, ptr %x
+ %a.ext = sext <4 x i32> %a to <4 x i64>
+ %b = call <4 x i64> @llvm.abs.v4i64(<4 x i64> %a.ext, i1 false)
+ store <4 x i64> %b, ptr %x
+ ret void
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bitreverse.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bitreverse.ll
index 1957829..012f943 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bitreverse.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bitreverse.ll
@@ -1,73 +1,40 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v -riscv-v-fixed-length-vector-lmul-max=2 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=RV32,LMULMAX2-RV32
-; RUN: llc -mtriple=riscv64 -mattr=+v -riscv-v-fixed-length-vector-lmul-max=2 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=RV64,LMULMAX2-RV64
-; RUN: llc -mtriple=riscv32 -mattr=+v -riscv-v-fixed-length-vector-lmul-max=1 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=RV32,LMULMAX1-RV32
-; RUN: llc -mtriple=riscv64 -mattr=+v -riscv-v-fixed-length-vector-lmul-max=1 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=RV64,LMULMAX1-RV64
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
+; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32
+; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64
; RUN: llc -mtriple=riscv32 -mattr=+v,+zvbb -verify-machineinstrs < %s | FileCheck %s --check-prefixes=ZVBB
; RUN: llc -mtriple=riscv64 -mattr=+v,+zvbb -verify-machineinstrs < %s | FileCheck %s --check-prefixes=ZVBB
define void @bitreverse_v8i16(ptr %x, ptr %y) {
-; RV32-LABEL: bitreverse_v8i16:
-; RV32: # %bb.0:
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vle16.v v8, (a0)
-; RV32-NEXT: vsrl.vi v9, v8, 8
-; RV32-NEXT: vsll.vi v8, v8, 8
-; RV32-NEXT: vor.vv v8, v8, v9
-; RV32-NEXT: vsrl.vi v9, v8, 4
-; RV32-NEXT: lui a1, 1
-; RV32-NEXT: addi a1, a1, -241
-; RV32-NEXT: vand.vx v9, v9, a1
-; RV32-NEXT: vand.vx v8, v8, a1
-; RV32-NEXT: vsll.vi v8, v8, 4
-; RV32-NEXT: vor.vv v8, v9, v8
-; RV32-NEXT: vsrl.vi v9, v8, 2
-; RV32-NEXT: lui a1, 3
-; RV32-NEXT: addi a1, a1, 819
-; RV32-NEXT: vand.vx v9, v9, a1
-; RV32-NEXT: vand.vx v8, v8, a1
-; RV32-NEXT: vsll.vi v8, v8, 2
-; RV32-NEXT: vor.vv v8, v9, v8
-; RV32-NEXT: vsrl.vi v9, v8, 1
-; RV32-NEXT: lui a1, 5
-; RV32-NEXT: addi a1, a1, 1365
-; RV32-NEXT: vand.vx v9, v9, a1
-; RV32-NEXT: vand.vx v8, v8, a1
-; RV32-NEXT: vadd.vv v8, v8, v8
-; RV32-NEXT: vor.vv v8, v9, v8
-; RV32-NEXT: vse16.v v8, (a0)
-; RV32-NEXT: ret
-;
-; RV64-LABEL: bitreverse_v8i16:
-; RV64: # %bb.0:
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vle16.v v8, (a0)
-; RV64-NEXT: vsrl.vi v9, v8, 8
-; RV64-NEXT: vsll.vi v8, v8, 8
-; RV64-NEXT: vor.vv v8, v8, v9
-; RV64-NEXT: vsrl.vi v9, v8, 4
-; RV64-NEXT: lui a1, 1
-; RV64-NEXT: addi a1, a1, -241
-; RV64-NEXT: vand.vx v9, v9, a1
-; RV64-NEXT: vand.vx v8, v8, a1
-; RV64-NEXT: vsll.vi v8, v8, 4
-; RV64-NEXT: vor.vv v8, v9, v8
-; RV64-NEXT: vsrl.vi v9, v8, 2
-; RV64-NEXT: lui a1, 3
-; RV64-NEXT: addi a1, a1, 819
-; RV64-NEXT: vand.vx v9, v9, a1
-; RV64-NEXT: vand.vx v8, v8, a1
-; RV64-NEXT: vsll.vi v8, v8, 2
-; RV64-NEXT: vor.vv v8, v9, v8
-; RV64-NEXT: vsrl.vi v9, v8, 1
-; RV64-NEXT: lui a1, 5
-; RV64-NEXT: addi a1, a1, 1365
-; RV64-NEXT: vand.vx v9, v9, a1
-; RV64-NEXT: vand.vx v8, v8, a1
-; RV64-NEXT: vadd.vv v8, v8, v8
-; RV64-NEXT: vor.vv v8, v9, v8
-; RV64-NEXT: vse16.v v8, (a0)
-; RV64-NEXT: ret
+; CHECK-LABEL: bitreverse_v8i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT: vle16.v v8, (a0)
+; CHECK-NEXT: vsrl.vi v9, v8, 8
+; CHECK-NEXT: vsll.vi v8, v8, 8
+; CHECK-NEXT: vor.vv v8, v8, v9
+; CHECK-NEXT: vsrl.vi v9, v8, 4
+; CHECK-NEXT: lui a1, 1
+; CHECK-NEXT: addi a1, a1, -241
+; CHECK-NEXT: vand.vx v9, v9, a1
+; CHECK-NEXT: vand.vx v8, v8, a1
+; CHECK-NEXT: vsll.vi v8, v8, 4
+; CHECK-NEXT: vor.vv v8, v9, v8
+; CHECK-NEXT: vsrl.vi v9, v8, 2
+; CHECK-NEXT: lui a1, 3
+; CHECK-NEXT: addi a1, a1, 819
+; CHECK-NEXT: vand.vx v9, v9, a1
+; CHECK-NEXT: vand.vx v8, v8, a1
+; CHECK-NEXT: vsll.vi v8, v8, 2
+; CHECK-NEXT: vor.vv v8, v9, v8
+; CHECK-NEXT: vsrl.vi v9, v8, 1
+; CHECK-NEXT: lui a1, 5
+; CHECK-NEXT: addi a1, a1, 1365
+; CHECK-NEXT: vand.vx v9, v9, a1
+; CHECK-NEXT: vand.vx v8, v8, a1
+; CHECK-NEXT: vadd.vv v8, v8, v8
+; CHECK-NEXT: vor.vv v8, v9, v8
+; CHECK-NEXT: vse16.v v8, (a0)
+; CHECK-NEXT: ret
;
; ZVBB-LABEL: bitreverse_v8i16:
; ZVBB: # %bb.0:
@@ -85,83 +52,44 @@ define void @bitreverse_v8i16(ptr %x, ptr %y) {
declare <8 x i16> @llvm.bitreverse.v8i16(<8 x i16>)
define void @bitreverse_v4i32(ptr %x, ptr %y) {
-; RV32-LABEL: bitreverse_v4i32:
-; RV32: # %bb.0:
-; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; RV32-NEXT: vle32.v v8, (a0)
-; RV32-NEXT: vsrl.vi v9, v8, 8
-; RV32-NEXT: lui a1, 16
-; RV32-NEXT: addi a1, a1, -256
-; RV32-NEXT: vand.vx v9, v9, a1
-; RV32-NEXT: vsrl.vi v10, v8, 24
-; RV32-NEXT: vor.vv v9, v9, v10
-; RV32-NEXT: vand.vx v10, v8, a1
-; RV32-NEXT: vsll.vi v10, v10, 8
-; RV32-NEXT: vsll.vi v8, v8, 24
-; RV32-NEXT: vor.vv v8, v8, v10
-; RV32-NEXT: vor.vv v8, v8, v9
-; RV32-NEXT: vsrl.vi v9, v8, 4
-; RV32-NEXT: lui a1, 61681
-; RV32-NEXT: addi a1, a1, -241
-; RV32-NEXT: vand.vx v9, v9, a1
-; RV32-NEXT: vand.vx v8, v8, a1
-; RV32-NEXT: vsll.vi v8, v8, 4
-; RV32-NEXT: vor.vv v8, v9, v8
-; RV32-NEXT: vsrl.vi v9, v8, 2
-; RV32-NEXT: lui a1, 209715
-; RV32-NEXT: addi a1, a1, 819
-; RV32-NEXT: vand.vx v9, v9, a1
-; RV32-NEXT: vand.vx v8, v8, a1
-; RV32-NEXT: vsll.vi v8, v8, 2
-; RV32-NEXT: vor.vv v8, v9, v8
-; RV32-NEXT: vsrl.vi v9, v8, 1
-; RV32-NEXT: lui a1, 349525
-; RV32-NEXT: addi a1, a1, 1365
-; RV32-NEXT: vand.vx v9, v9, a1
-; RV32-NEXT: vand.vx v8, v8, a1
-; RV32-NEXT: vadd.vv v8, v8, v8
-; RV32-NEXT: vor.vv v8, v9, v8
-; RV32-NEXT: vse32.v v8, (a0)
-; RV32-NEXT: ret
-;
-; RV64-LABEL: bitreverse_v4i32:
-; RV64: # %bb.0:
-; RV64-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; RV64-NEXT: vle32.v v8, (a0)
-; RV64-NEXT: vsrl.vi v9, v8, 8
-; RV64-NEXT: lui a1, 16
-; RV64-NEXT: addi a1, a1, -256
-; RV64-NEXT: vand.vx v9, v9, a1
-; RV64-NEXT: vsrl.vi v10, v8, 24
-; RV64-NEXT: vor.vv v9, v9, v10
-; RV64-NEXT: vand.vx v10, v8, a1
-; RV64-NEXT: vsll.vi v10, v10, 8
-; RV64-NEXT: vsll.vi v8, v8, 24
-; RV64-NEXT: vor.vv v8, v8, v10
-; RV64-NEXT: vor.vv v8, v8, v9
-; RV64-NEXT: vsrl.vi v9, v8, 4
-; RV64-NEXT: lui a1, 61681
-; RV64-NEXT: addi a1, a1, -241
-; RV64-NEXT: vand.vx v9, v9, a1
-; RV64-NEXT: vand.vx v8, v8, a1
-; RV64-NEXT: vsll.vi v8, v8, 4
-; RV64-NEXT: vor.vv v8, v9, v8
-; RV64-NEXT: vsrl.vi v9, v8, 2
-; RV64-NEXT: lui a1, 209715
-; RV64-NEXT: addi a1, a1, 819
-; RV64-NEXT: vand.vx v9, v9, a1
-; RV64-NEXT: vand.vx v8, v8, a1
-; RV64-NEXT: vsll.vi v8, v8, 2
-; RV64-NEXT: vor.vv v8, v9, v8
-; RV64-NEXT: vsrl.vi v9, v8, 1
-; RV64-NEXT: lui a1, 349525
-; RV64-NEXT: addi a1, a1, 1365
-; RV64-NEXT: vand.vx v9, v9, a1
-; RV64-NEXT: vand.vx v8, v8, a1
-; RV64-NEXT: vadd.vv v8, v8, v8
-; RV64-NEXT: vor.vv v8, v9, v8
-; RV64-NEXT: vse32.v v8, (a0)
-; RV64-NEXT: ret
+; CHECK-LABEL: bitreverse_v4i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT: vle32.v v8, (a0)
+; CHECK-NEXT: vsrl.vi v9, v8, 8
+; CHECK-NEXT: lui a1, 16
+; CHECK-NEXT: addi a1, a1, -256
+; CHECK-NEXT: vand.vx v9, v9, a1
+; CHECK-NEXT: vsrl.vi v10, v8, 24
+; CHECK-NEXT: vor.vv v9, v9, v10
+; CHECK-NEXT: vand.vx v10, v8, a1
+; CHECK-NEXT: vsll.vi v10, v10, 8
+; CHECK-NEXT: vsll.vi v8, v8, 24
+; CHECK-NEXT: vor.vv v8, v8, v10
+; CHECK-NEXT: vor.vv v8, v8, v9
+; CHECK-NEXT: vsrl.vi v9, v8, 4
+; CHECK-NEXT: lui a1, 61681
+; CHECK-NEXT: addi a1, a1, -241
+; CHECK-NEXT: vand.vx v9, v9, a1
+; CHECK-NEXT: vand.vx v8, v8, a1
+; CHECK-NEXT: vsll.vi v8, v8, 4
+; CHECK-NEXT: vor.vv v8, v9, v8
+; CHECK-NEXT: vsrl.vi v9, v8, 2
+; CHECK-NEXT: lui a1, 209715
+; CHECK-NEXT: addi a1, a1, 819
+; CHECK-NEXT: vand.vx v9, v9, a1
+; CHECK-NEXT: vand.vx v8, v8, a1
+; CHECK-NEXT: vsll.vi v8, v8, 2
+; CHECK-NEXT: vor.vv v8, v9, v8
+; CHECK-NEXT: vsrl.vi v9, v8, 1
+; CHECK-NEXT: lui a1, 349525
+; CHECK-NEXT: addi a1, a1, 1365
+; CHECK-NEXT: vand.vx v9, v9, a1
+; CHECK-NEXT: vand.vx v8, v8, a1
+; CHECK-NEXT: vadd.vv v8, v8, v8
+; CHECK-NEXT: vor.vv v8, v9, v8
+; CHECK-NEXT: vse32.v v8, (a0)
+; CHECK-NEXT: ret
;
; ZVBB-LABEL: bitreverse_v4i32:
; ZVBB: # %bb.0:
@@ -328,171 +256,36 @@ define void @bitreverse_v2i64(ptr %x, ptr %y) {
declare <2 x i64> @llvm.bitreverse.v2i64(<2 x i64>)
define void @bitreverse_v16i16(ptr %x, ptr %y) {
-; LMULMAX2-RV32-LABEL: bitreverse_v16i16:
-; LMULMAX2-RV32: # %bb.0:
-; LMULMAX2-RV32-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; LMULMAX2-RV32-NEXT: vle16.v v8, (a0)
-; LMULMAX2-RV32-NEXT: vsrl.vi v10, v8, 8
-; LMULMAX2-RV32-NEXT: vsll.vi v8, v8, 8
-; LMULMAX2-RV32-NEXT: vor.vv v8, v8, v10
-; LMULMAX2-RV32-NEXT: vsrl.vi v10, v8, 4
-; LMULMAX2-RV32-NEXT: lui a1, 1
-; LMULMAX2-RV32-NEXT: addi a1, a1, -241
-; LMULMAX2-RV32-NEXT: vand.vx v10, v10, a1
-; LMULMAX2-RV32-NEXT: vand.vx v8, v8, a1
-; LMULMAX2-RV32-NEXT: vsll.vi v8, v8, 4
-; LMULMAX2-RV32-NEXT: vor.vv v8, v10, v8
-; LMULMAX2-RV32-NEXT: vsrl.vi v10, v8, 2
-; LMULMAX2-RV32-NEXT: lui a1, 3
-; LMULMAX2-RV32-NEXT: addi a1, a1, 819
-; LMULMAX2-RV32-NEXT: vand.vx v10, v10, a1
-; LMULMAX2-RV32-NEXT: vand.vx v8, v8, a1
-; LMULMAX2-RV32-NEXT: vsll.vi v8, v8, 2
-; LMULMAX2-RV32-NEXT: vor.vv v8, v10, v8
-; LMULMAX2-RV32-NEXT: vsrl.vi v10, v8, 1
-; LMULMAX2-RV32-NEXT: lui a1, 5
-; LMULMAX2-RV32-NEXT: addi a1, a1, 1365
-; LMULMAX2-RV32-NEXT: vand.vx v10, v10, a1
-; LMULMAX2-RV32-NEXT: vand.vx v8, v8, a1
-; LMULMAX2-RV32-NEXT: vadd.vv v8, v8, v8
-; LMULMAX2-RV32-NEXT: vor.vv v8, v10, v8
-; LMULMAX2-RV32-NEXT: vse16.v v8, (a0)
-; LMULMAX2-RV32-NEXT: ret
-;
-; LMULMAX2-RV64-LABEL: bitreverse_v16i16:
-; LMULMAX2-RV64: # %bb.0:
-; LMULMAX2-RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; LMULMAX2-RV64-NEXT: vle16.v v8, (a0)
-; LMULMAX2-RV64-NEXT: vsrl.vi v10, v8, 8
-; LMULMAX2-RV64-NEXT: vsll.vi v8, v8, 8
-; LMULMAX2-RV64-NEXT: vor.vv v8, v8, v10
-; LMULMAX2-RV64-NEXT: vsrl.vi v10, v8, 4
-; LMULMAX2-RV64-NEXT: lui a1, 1
-; LMULMAX2-RV64-NEXT: addi a1, a1, -241
-; LMULMAX2-RV64-NEXT: vand.vx v10, v10, a1
-; LMULMAX2-RV64-NEXT: vand.vx v8, v8, a1
-; LMULMAX2-RV64-NEXT: vsll.vi v8, v8, 4
-; LMULMAX2-RV64-NEXT: vor.vv v8, v10, v8
-; LMULMAX2-RV64-NEXT: vsrl.vi v10, v8, 2
-; LMULMAX2-RV64-NEXT: lui a1, 3
-; LMULMAX2-RV64-NEXT: addi a1, a1, 819
-; LMULMAX2-RV64-NEXT: vand.vx v10, v10, a1
-; LMULMAX2-RV64-NEXT: vand.vx v8, v8, a1
-; LMULMAX2-RV64-NEXT: vsll.vi v8, v8, 2
-; LMULMAX2-RV64-NEXT: vor.vv v8, v10, v8
-; LMULMAX2-RV64-NEXT: vsrl.vi v10, v8, 1
-; LMULMAX2-RV64-NEXT: lui a1, 5
-; LMULMAX2-RV64-NEXT: addi a1, a1, 1365
-; LMULMAX2-RV64-NEXT: vand.vx v10, v10, a1
-; LMULMAX2-RV64-NEXT: vand.vx v8, v8, a1
-; LMULMAX2-RV64-NEXT: vadd.vv v8, v8, v8
-; LMULMAX2-RV64-NEXT: vor.vv v8, v10, v8
-; LMULMAX2-RV64-NEXT: vse16.v v8, (a0)
-; LMULMAX2-RV64-NEXT: ret
-;
-; LMULMAX1-RV32-LABEL: bitreverse_v16i16:
-; LMULMAX1-RV32: # %bb.0:
-; LMULMAX1-RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; LMULMAX1-RV32-NEXT: addi a1, a0, 16
-; LMULMAX1-RV32-NEXT: vle16.v v8, (a1)
-; LMULMAX1-RV32-NEXT: vle16.v v9, (a0)
-; LMULMAX1-RV32-NEXT: vsrl.vi v10, v8, 8
-; LMULMAX1-RV32-NEXT: vsll.vi v8, v8, 8
-; LMULMAX1-RV32-NEXT: vor.vv v8, v8, v10
-; LMULMAX1-RV32-NEXT: vsrl.vi v10, v8, 4
-; LMULMAX1-RV32-NEXT: lui a2, 1
-; LMULMAX1-RV32-NEXT: addi a2, a2, -241
-; LMULMAX1-RV32-NEXT: vand.vx v10, v10, a2
-; LMULMAX1-RV32-NEXT: vand.vx v8, v8, a2
-; LMULMAX1-RV32-NEXT: vsll.vi v8, v8, 4
-; LMULMAX1-RV32-NEXT: vor.vv v8, v10, v8
-; LMULMAX1-RV32-NEXT: vsrl.vi v10, v8, 2
-; LMULMAX1-RV32-NEXT: lui a3, 3
-; LMULMAX1-RV32-NEXT: addi a3, a3, 819
-; LMULMAX1-RV32-NEXT: vand.vx v10, v10, a3
-; LMULMAX1-RV32-NEXT: vand.vx v8, v8, a3
-; LMULMAX1-RV32-NEXT: vsll.vi v8, v8, 2
-; LMULMAX1-RV32-NEXT: vor.vv v8, v10, v8
-; LMULMAX1-RV32-NEXT: vsrl.vi v10, v8, 1
-; LMULMAX1-RV32-NEXT: lui a4, 5
-; LMULMAX1-RV32-NEXT: addi a4, a4, 1365
-; LMULMAX1-RV32-NEXT: vand.vx v10, v10, a4
-; LMULMAX1-RV32-NEXT: vand.vx v8, v8, a4
-; LMULMAX1-RV32-NEXT: vadd.vv v8, v8, v8
-; LMULMAX1-RV32-NEXT: vor.vv v8, v10, v8
-; LMULMAX1-RV32-NEXT: vsrl.vi v10, v9, 8
-; LMULMAX1-RV32-NEXT: vsll.vi v9, v9, 8
-; LMULMAX1-RV32-NEXT: vor.vv v9, v9, v10
-; LMULMAX1-RV32-NEXT: vsrl.vi v10, v9, 4
-; LMULMAX1-RV32-NEXT: vand.vx v10, v10, a2
-; LMULMAX1-RV32-NEXT: vand.vx v9, v9, a2
-; LMULMAX1-RV32-NEXT: vsll.vi v9, v9, 4
-; LMULMAX1-RV32-NEXT: vor.vv v9, v10, v9
-; LMULMAX1-RV32-NEXT: vsrl.vi v10, v9, 2
-; LMULMAX1-RV32-NEXT: vand.vx v10, v10, a3
-; LMULMAX1-RV32-NEXT: vand.vx v9, v9, a3
-; LMULMAX1-RV32-NEXT: vsll.vi v9, v9, 2
-; LMULMAX1-RV32-NEXT: vor.vv v9, v10, v9
-; LMULMAX1-RV32-NEXT: vsrl.vi v10, v9, 1
-; LMULMAX1-RV32-NEXT: vand.vx v10, v10, a4
-; LMULMAX1-RV32-NEXT: vand.vx v9, v9, a4
-; LMULMAX1-RV32-NEXT: vadd.vv v9, v9, v9
-; LMULMAX1-RV32-NEXT: vor.vv v9, v10, v9
-; LMULMAX1-RV32-NEXT: vse16.v v9, (a0)
-; LMULMAX1-RV32-NEXT: vse16.v v8, (a1)
-; LMULMAX1-RV32-NEXT: ret
-;
-; LMULMAX1-RV64-LABEL: bitreverse_v16i16:
-; LMULMAX1-RV64: # %bb.0:
-; LMULMAX1-RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; LMULMAX1-RV64-NEXT: addi a1, a0, 16
-; LMULMAX1-RV64-NEXT: vle16.v v8, (a1)
-; LMULMAX1-RV64-NEXT: vle16.v v9, (a0)
-; LMULMAX1-RV64-NEXT: vsrl.vi v10, v8, 8
-; LMULMAX1-RV64-NEXT: vsll.vi v8, v8, 8
-; LMULMAX1-RV64-NEXT: vor.vv v8, v8, v10
-; LMULMAX1-RV64-NEXT: vsrl.vi v10, v8, 4
-; LMULMAX1-RV64-NEXT: lui a2, 1
-; LMULMAX1-RV64-NEXT: addi a2, a2, -241
-; LMULMAX1-RV64-NEXT: vand.vx v10, v10, a2
-; LMULMAX1-RV64-NEXT: vand.vx v8, v8, a2
-; LMULMAX1-RV64-NEXT: vsll.vi v8, v8, 4
-; LMULMAX1-RV64-NEXT: vor.vv v8, v10, v8
-; LMULMAX1-RV64-NEXT: vsrl.vi v10, v8, 2
-; LMULMAX1-RV64-NEXT: lui a3, 3
-; LMULMAX1-RV64-NEXT: addi a3, a3, 819
-; LMULMAX1-RV64-NEXT: vand.vx v10, v10, a3
-; LMULMAX1-RV64-NEXT: vand.vx v8, v8, a3
-; LMULMAX1-RV64-NEXT: vsll.vi v8, v8, 2
-; LMULMAX1-RV64-NEXT: vor.vv v8, v10, v8
-; LMULMAX1-RV64-NEXT: vsrl.vi v10, v8, 1
-; LMULMAX1-RV64-NEXT: lui a4, 5
-; LMULMAX1-RV64-NEXT: addi a4, a4, 1365
-; LMULMAX1-RV64-NEXT: vand.vx v10, v10, a4
-; LMULMAX1-RV64-NEXT: vand.vx v8, v8, a4
-; LMULMAX1-RV64-NEXT: vadd.vv v8, v8, v8
-; LMULMAX1-RV64-NEXT: vor.vv v8, v10, v8
-; LMULMAX1-RV64-NEXT: vsrl.vi v10, v9, 8
-; LMULMAX1-RV64-NEXT: vsll.vi v9, v9, 8
-; LMULMAX1-RV64-NEXT: vor.vv v9, v9, v10
-; LMULMAX1-RV64-NEXT: vsrl.vi v10, v9, 4
-; LMULMAX1-RV64-NEXT: vand.vx v10, v10, a2
-; LMULMAX1-RV64-NEXT: vand.vx v9, v9, a2
-; LMULMAX1-RV64-NEXT: vsll.vi v9, v9, 4
-; LMULMAX1-RV64-NEXT: vor.vv v9, v10, v9
-; LMULMAX1-RV64-NEXT: vsrl.vi v10, v9, 2
-; LMULMAX1-RV64-NEXT: vand.vx v10, v10, a3
-; LMULMAX1-RV64-NEXT: vand.vx v9, v9, a3
-; LMULMAX1-RV64-NEXT: vsll.vi v9, v9, 2
-; LMULMAX1-RV64-NEXT: vor.vv v9, v10, v9
-; LMULMAX1-RV64-NEXT: vsrl.vi v10, v9, 1
-; LMULMAX1-RV64-NEXT: vand.vx v10, v10, a4
-; LMULMAX1-RV64-NEXT: vand.vx v9, v9, a4
-; LMULMAX1-RV64-NEXT: vadd.vv v9, v9, v9
-; LMULMAX1-RV64-NEXT: vor.vv v9, v10, v9
-; LMULMAX1-RV64-NEXT: vse16.v v9, (a0)
-; LMULMAX1-RV64-NEXT: vse16.v v8, (a1)
-; LMULMAX1-RV64-NEXT: ret
+; CHECK-LABEL: bitreverse_v16i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; CHECK-NEXT: vle16.v v8, (a0)
+; CHECK-NEXT: vsrl.vi v10, v8, 8
+; CHECK-NEXT: vsll.vi v8, v8, 8
+; CHECK-NEXT: vor.vv v8, v8, v10
+; CHECK-NEXT: vsrl.vi v10, v8, 4
+; CHECK-NEXT: lui a1, 1
+; CHECK-NEXT: addi a1, a1, -241
+; CHECK-NEXT: vand.vx v10, v10, a1
+; CHECK-NEXT: vand.vx v8, v8, a1
+; CHECK-NEXT: vsll.vi v8, v8, 4
+; CHECK-NEXT: vor.vv v8, v10, v8
+; CHECK-NEXT: vsrl.vi v10, v8, 2
+; CHECK-NEXT: lui a1, 3
+; CHECK-NEXT: addi a1, a1, 819
+; CHECK-NEXT: vand.vx v10, v10, a1
+; CHECK-NEXT: vand.vx v8, v8, a1
+; CHECK-NEXT: vsll.vi v8, v8, 2
+; CHECK-NEXT: vor.vv v8, v10, v8
+; CHECK-NEXT: vsrl.vi v10, v8, 1
+; CHECK-NEXT: lui a1, 5
+; CHECK-NEXT: addi a1, a1, 1365
+; CHECK-NEXT: vand.vx v10, v10, a1
+; CHECK-NEXT: vand.vx v8, v8, a1
+; CHECK-NEXT: vadd.vv v8, v8, v8
+; CHECK-NEXT: vor.vv v8, v10, v8
+; CHECK-NEXT: vse16.v v8, (a0)
+; CHECK-NEXT: ret
;
; ZVBB-LABEL: bitreverse_v16i16:
; ZVBB: # %bb.0:
@@ -510,215 +303,44 @@ define void @bitreverse_v16i16(ptr %x, ptr %y) {
declare <16 x i16> @llvm.bitreverse.v16i16(<16 x i16>)
define void @bitreverse_v8i32(ptr %x, ptr %y) {
-; LMULMAX2-RV32-LABEL: bitreverse_v8i32:
-; LMULMAX2-RV32: # %bb.0:
-; LMULMAX2-RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; LMULMAX2-RV32-NEXT: vle32.v v8, (a0)
-; LMULMAX2-RV32-NEXT: vsrl.vi v10, v8, 8
-; LMULMAX2-RV32-NEXT: lui a1, 16
-; LMULMAX2-RV32-NEXT: addi a1, a1, -256
-; LMULMAX2-RV32-NEXT: vand.vx v10, v10, a1
-; LMULMAX2-RV32-NEXT: vsrl.vi v12, v8, 24
-; LMULMAX2-RV32-NEXT: vor.vv v10, v10, v12
-; LMULMAX2-RV32-NEXT: vand.vx v12, v8, a1
-; LMULMAX2-RV32-NEXT: vsll.vi v12, v12, 8
-; LMULMAX2-RV32-NEXT: vsll.vi v8, v8, 24
-; LMULMAX2-RV32-NEXT: vor.vv v8, v8, v12
-; LMULMAX2-RV32-NEXT: vor.vv v8, v8, v10
-; LMULMAX2-RV32-NEXT: vsrl.vi v10, v8, 4
-; LMULMAX2-RV32-NEXT: lui a1, 61681
-; LMULMAX2-RV32-NEXT: addi a1, a1, -241
-; LMULMAX2-RV32-NEXT: vand.vx v10, v10, a1
-; LMULMAX2-RV32-NEXT: vand.vx v8, v8, a1
-; LMULMAX2-RV32-NEXT: vsll.vi v8, v8, 4
-; LMULMAX2-RV32-NEXT: vor.vv v8, v10, v8
-; LMULMAX2-RV32-NEXT: vsrl.vi v10, v8, 2
-; LMULMAX2-RV32-NEXT: lui a1, 209715
-; LMULMAX2-RV32-NEXT: addi a1, a1, 819
-; LMULMAX2-RV32-NEXT: vand.vx v10, v10, a1
-; LMULMAX2-RV32-NEXT: vand.vx v8, v8, a1
-; LMULMAX2-RV32-NEXT: vsll.vi v8, v8, 2
-; LMULMAX2-RV32-NEXT: vor.vv v8, v10, v8
-; LMULMAX2-RV32-NEXT: vsrl.vi v10, v8, 1
-; LMULMAX2-RV32-NEXT: lui a1, 349525
-; LMULMAX2-RV32-NEXT: addi a1, a1, 1365
-; LMULMAX2-RV32-NEXT: vand.vx v10, v10, a1
-; LMULMAX2-RV32-NEXT: vand.vx v8, v8, a1
-; LMULMAX2-RV32-NEXT: vadd.vv v8, v8, v8
-; LMULMAX2-RV32-NEXT: vor.vv v8, v10, v8
-; LMULMAX2-RV32-NEXT: vse32.v v8, (a0)
-; LMULMAX2-RV32-NEXT: ret
-;
-; LMULMAX2-RV64-LABEL: bitreverse_v8i32:
-; LMULMAX2-RV64: # %bb.0:
-; LMULMAX2-RV64-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; LMULMAX2-RV64-NEXT: vle32.v v8, (a0)
-; LMULMAX2-RV64-NEXT: vsrl.vi v10, v8, 8
-; LMULMAX2-RV64-NEXT: lui a1, 16
-; LMULMAX2-RV64-NEXT: addi a1, a1, -256
-; LMULMAX2-RV64-NEXT: vand.vx v10, v10, a1
-; LMULMAX2-RV64-NEXT: vsrl.vi v12, v8, 24
-; LMULMAX2-RV64-NEXT: vor.vv v10, v10, v12
-; LMULMAX2-RV64-NEXT: vand.vx v12, v8, a1
-; LMULMAX2-RV64-NEXT: vsll.vi v12, v12, 8
-; LMULMAX2-RV64-NEXT: vsll.vi v8, v8, 24
-; LMULMAX2-RV64-NEXT: vor.vv v8, v8, v12
-; LMULMAX2-RV64-NEXT: vor.vv v8, v8, v10
-; LMULMAX2-RV64-NEXT: vsrl.vi v10, v8, 4
-; LMULMAX2-RV64-NEXT: lui a1, 61681
-; LMULMAX2-RV64-NEXT: addi a1, a1, -241
-; LMULMAX2-RV64-NEXT: vand.vx v10, v10, a1
-; LMULMAX2-RV64-NEXT: vand.vx v8, v8, a1
-; LMULMAX2-RV64-NEXT: vsll.vi v8, v8, 4
-; LMULMAX2-RV64-NEXT: vor.vv v8, v10, v8
-; LMULMAX2-RV64-NEXT: vsrl.vi v10, v8, 2
-; LMULMAX2-RV64-NEXT: lui a1, 209715
-; LMULMAX2-RV64-NEXT: addi a1, a1, 819
-; LMULMAX2-RV64-NEXT: vand.vx v10, v10, a1
-; LMULMAX2-RV64-NEXT: vand.vx v8, v8, a1
-; LMULMAX2-RV64-NEXT: vsll.vi v8, v8, 2
-; LMULMAX2-RV64-NEXT: vor.vv v8, v10, v8
-; LMULMAX2-RV64-NEXT: vsrl.vi v10, v8, 1
-; LMULMAX2-RV64-NEXT: lui a1, 349525
-; LMULMAX2-RV64-NEXT: addi a1, a1, 1365
-; LMULMAX2-RV64-NEXT: vand.vx v10, v10, a1
-; LMULMAX2-RV64-NEXT: vand.vx v8, v8, a1
-; LMULMAX2-RV64-NEXT: vadd.vv v8, v8, v8
-; LMULMAX2-RV64-NEXT: vor.vv v8, v10, v8
-; LMULMAX2-RV64-NEXT: vse32.v v8, (a0)
-; LMULMAX2-RV64-NEXT: ret
-;
-; LMULMAX1-RV32-LABEL: bitreverse_v8i32:
-; LMULMAX1-RV32: # %bb.0:
-; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; LMULMAX1-RV32-NEXT: addi a1, a0, 16
-; LMULMAX1-RV32-NEXT: vle32.v v8, (a1)
-; LMULMAX1-RV32-NEXT: vle32.v v9, (a0)
-; LMULMAX1-RV32-NEXT: vsrl.vi v10, v8, 8
-; LMULMAX1-RV32-NEXT: lui a2, 16
-; LMULMAX1-RV32-NEXT: addi a2, a2, -256
-; LMULMAX1-RV32-NEXT: vand.vx v10, v10, a2
-; LMULMAX1-RV32-NEXT: vsrl.vi v11, v8, 24
-; LMULMAX1-RV32-NEXT: vor.vv v10, v10, v11
-; LMULMAX1-RV32-NEXT: vand.vx v11, v8, a2
-; LMULMAX1-RV32-NEXT: vsll.vi v11, v11, 8
-; LMULMAX1-RV32-NEXT: vsll.vi v8, v8, 24
-; LMULMAX1-RV32-NEXT: vor.vv v8, v8, v11
-; LMULMAX1-RV32-NEXT: vor.vv v8, v8, v10
-; LMULMAX1-RV32-NEXT: vsrl.vi v10, v8, 4
-; LMULMAX1-RV32-NEXT: lui a3, 61681
-; LMULMAX1-RV32-NEXT: addi a3, a3, -241
-; LMULMAX1-RV32-NEXT: vand.vx v10, v10, a3
-; LMULMAX1-RV32-NEXT: vand.vx v8, v8, a3
-; LMULMAX1-RV32-NEXT: vsll.vi v8, v8, 4
-; LMULMAX1-RV32-NEXT: vor.vv v8, v10, v8
-; LMULMAX1-RV32-NEXT: vsrl.vi v10, v8, 2
-; LMULMAX1-RV32-NEXT: lui a4, 209715
-; LMULMAX1-RV32-NEXT: addi a4, a4, 819
-; LMULMAX1-RV32-NEXT: vand.vx v10, v10, a4
-; LMULMAX1-RV32-NEXT: vand.vx v8, v8, a4
-; LMULMAX1-RV32-NEXT: vsll.vi v8, v8, 2
-; LMULMAX1-RV32-NEXT: vor.vv v8, v10, v8
-; LMULMAX1-RV32-NEXT: vsrl.vi v10, v8, 1
-; LMULMAX1-RV32-NEXT: lui a5, 349525
-; LMULMAX1-RV32-NEXT: addi a5, a5, 1365
-; LMULMAX1-RV32-NEXT: vand.vx v10, v10, a5
-; LMULMAX1-RV32-NEXT: vand.vx v8, v8, a5
-; LMULMAX1-RV32-NEXT: vadd.vv v8, v8, v8
-; LMULMAX1-RV32-NEXT: vor.vv v8, v10, v8
-; LMULMAX1-RV32-NEXT: vsrl.vi v10, v9, 8
-; LMULMAX1-RV32-NEXT: vand.vx v10, v10, a2
-; LMULMAX1-RV32-NEXT: vsrl.vi v11, v9, 24
-; LMULMAX1-RV32-NEXT: vor.vv v10, v10, v11
-; LMULMAX1-RV32-NEXT: vand.vx v11, v9, a2
-; LMULMAX1-RV32-NEXT: vsll.vi v11, v11, 8
-; LMULMAX1-RV32-NEXT: vsll.vi v9, v9, 24
-; LMULMAX1-RV32-NEXT: vor.vv v9, v9, v11
-; LMULMAX1-RV32-NEXT: vor.vv v9, v9, v10
-; LMULMAX1-RV32-NEXT: vsrl.vi v10, v9, 4
-; LMULMAX1-RV32-NEXT: vand.vx v10, v10, a3
-; LMULMAX1-RV32-NEXT: vand.vx v9, v9, a3
-; LMULMAX1-RV32-NEXT: vsll.vi v9, v9, 4
-; LMULMAX1-RV32-NEXT: vor.vv v9, v10, v9
-; LMULMAX1-RV32-NEXT: vsrl.vi v10, v9, 2
-; LMULMAX1-RV32-NEXT: vand.vx v10, v10, a4
-; LMULMAX1-RV32-NEXT: vand.vx v9, v9, a4
-; LMULMAX1-RV32-NEXT: vsll.vi v9, v9, 2
-; LMULMAX1-RV32-NEXT: vor.vv v9, v10, v9
-; LMULMAX1-RV32-NEXT: vsrl.vi v10, v9, 1
-; LMULMAX1-RV32-NEXT: vand.vx v10, v10, a5
-; LMULMAX1-RV32-NEXT: vand.vx v9, v9, a5
-; LMULMAX1-RV32-NEXT: vadd.vv v9, v9, v9
-; LMULMAX1-RV32-NEXT: vor.vv v9, v10, v9
-; LMULMAX1-RV32-NEXT: vse32.v v9, (a0)
-; LMULMAX1-RV32-NEXT: vse32.v v8, (a1)
-; LMULMAX1-RV32-NEXT: ret
-;
-; LMULMAX1-RV64-LABEL: bitreverse_v8i32:
-; LMULMAX1-RV64: # %bb.0:
-; LMULMAX1-RV64-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; LMULMAX1-RV64-NEXT: addi a1, a0, 16
-; LMULMAX1-RV64-NEXT: vle32.v v8, (a1)
-; LMULMAX1-RV64-NEXT: vle32.v v9, (a0)
-; LMULMAX1-RV64-NEXT: vsrl.vi v10, v8, 8
-; LMULMAX1-RV64-NEXT: lui a2, 16
-; LMULMAX1-RV64-NEXT: addi a2, a2, -256
-; LMULMAX1-RV64-NEXT: vand.vx v10, v10, a2
-; LMULMAX1-RV64-NEXT: vsrl.vi v11, v8, 24
-; LMULMAX1-RV64-NEXT: vor.vv v10, v10, v11
-; LMULMAX1-RV64-NEXT: vand.vx v11, v8, a2
-; LMULMAX1-RV64-NEXT: vsll.vi v11, v11, 8
-; LMULMAX1-RV64-NEXT: vsll.vi v8, v8, 24
-; LMULMAX1-RV64-NEXT: vor.vv v8, v8, v11
-; LMULMAX1-RV64-NEXT: vor.vv v8, v8, v10
-; LMULMAX1-RV64-NEXT: vsrl.vi v10, v8, 4
-; LMULMAX1-RV64-NEXT: lui a3, 61681
-; LMULMAX1-RV64-NEXT: addi a3, a3, -241
-; LMULMAX1-RV64-NEXT: vand.vx v10, v10, a3
-; LMULMAX1-RV64-NEXT: vand.vx v8, v8, a3
-; LMULMAX1-RV64-NEXT: vsll.vi v8, v8, 4
-; LMULMAX1-RV64-NEXT: vor.vv v8, v10, v8
-; LMULMAX1-RV64-NEXT: vsrl.vi v10, v8, 2
-; LMULMAX1-RV64-NEXT: lui a4, 209715
-; LMULMAX1-RV64-NEXT: addi a4, a4, 819
-; LMULMAX1-RV64-NEXT: vand.vx v10, v10, a4
-; LMULMAX1-RV64-NEXT: vand.vx v8, v8, a4
-; LMULMAX1-RV64-NEXT: vsll.vi v8, v8, 2
-; LMULMAX1-RV64-NEXT: vor.vv v8, v10, v8
-; LMULMAX1-RV64-NEXT: vsrl.vi v10, v8, 1
-; LMULMAX1-RV64-NEXT: lui a5, 349525
-; LMULMAX1-RV64-NEXT: addi a5, a5, 1365
-; LMULMAX1-RV64-NEXT: vand.vx v10, v10, a5
-; LMULMAX1-RV64-NEXT: vand.vx v8, v8, a5
-; LMULMAX1-RV64-NEXT: vadd.vv v8, v8, v8
-; LMULMAX1-RV64-NEXT: vor.vv v8, v10, v8
-; LMULMAX1-RV64-NEXT: vsrl.vi v10, v9, 8
-; LMULMAX1-RV64-NEXT: vand.vx v10, v10, a2
-; LMULMAX1-RV64-NEXT: vsrl.vi v11, v9, 24
-; LMULMAX1-RV64-NEXT: vor.vv v10, v10, v11
-; LMULMAX1-RV64-NEXT: vand.vx v11, v9, a2
-; LMULMAX1-RV64-NEXT: vsll.vi v11, v11, 8
-; LMULMAX1-RV64-NEXT: vsll.vi v9, v9, 24
-; LMULMAX1-RV64-NEXT: vor.vv v9, v9, v11
-; LMULMAX1-RV64-NEXT: vor.vv v9, v9, v10
-; LMULMAX1-RV64-NEXT: vsrl.vi v10, v9, 4
-; LMULMAX1-RV64-NEXT: vand.vx v10, v10, a3
-; LMULMAX1-RV64-NEXT: vand.vx v9, v9, a3
-; LMULMAX1-RV64-NEXT: vsll.vi v9, v9, 4
-; LMULMAX1-RV64-NEXT: vor.vv v9, v10, v9
-; LMULMAX1-RV64-NEXT: vsrl.vi v10, v9, 2
-; LMULMAX1-RV64-NEXT: vand.vx v10, v10, a4
-; LMULMAX1-RV64-NEXT: vand.vx v9, v9, a4
-; LMULMAX1-RV64-NEXT: vsll.vi v9, v9, 2
-; LMULMAX1-RV64-NEXT: vor.vv v9, v10, v9
-; LMULMAX1-RV64-NEXT: vsrl.vi v10, v9, 1
-; LMULMAX1-RV64-NEXT: vand.vx v10, v10, a5
-; LMULMAX1-RV64-NEXT: vand.vx v9, v9, a5
-; LMULMAX1-RV64-NEXT: vadd.vv v9, v9, v9
-; LMULMAX1-RV64-NEXT: vor.vv v9, v10, v9
-; LMULMAX1-RV64-NEXT: vse32.v v9, (a0)
-; LMULMAX1-RV64-NEXT: vse32.v v8, (a1)
-; LMULMAX1-RV64-NEXT: ret
+; CHECK-LABEL: bitreverse_v8i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; CHECK-NEXT: vle32.v v8, (a0)
+; CHECK-NEXT: vsrl.vi v10, v8, 8
+; CHECK-NEXT: lui a1, 16
+; CHECK-NEXT: addi a1, a1, -256
+; CHECK-NEXT: vand.vx v10, v10, a1
+; CHECK-NEXT: vsrl.vi v12, v8, 24
+; CHECK-NEXT: vor.vv v10, v10, v12
+; CHECK-NEXT: vand.vx v12, v8, a1
+; CHECK-NEXT: vsll.vi v12, v12, 8
+; CHECK-NEXT: vsll.vi v8, v8, 24
+; CHECK-NEXT: vor.vv v8, v8, v12
+; CHECK-NEXT: vor.vv v8, v8, v10
+; CHECK-NEXT: vsrl.vi v10, v8, 4
+; CHECK-NEXT: lui a1, 61681
+; CHECK-NEXT: addi a1, a1, -241
+; CHECK-NEXT: vand.vx v10, v10, a1
+; CHECK-NEXT: vand.vx v8, v8, a1
+; CHECK-NEXT: vsll.vi v8, v8, 4
+; CHECK-NEXT: vor.vv v8, v10, v8
+; CHECK-NEXT: vsrl.vi v10, v8, 2
+; CHECK-NEXT: lui a1, 209715
+; CHECK-NEXT: addi a1, a1, 819
+; CHECK-NEXT: vand.vx v10, v10, a1
+; CHECK-NEXT: vand.vx v8, v8, a1
+; CHECK-NEXT: vsll.vi v8, v8, 2
+; CHECK-NEXT: vor.vv v8, v10, v8
+; CHECK-NEXT: vsrl.vi v10, v8, 1
+; CHECK-NEXT: lui a1, 349525
+; CHECK-NEXT: addi a1, a1, 1365
+; CHECK-NEXT: vand.vx v10, v10, a1
+; CHECK-NEXT: vand.vx v8, v8, a1
+; CHECK-NEXT: vadd.vv v8, v8, v8
+; CHECK-NEXT: vor.vv v8, v10, v8
+; CHECK-NEXT: vse32.v v8, (a0)
+; CHECK-NEXT: ret
;
; ZVBB-LABEL: bitreverse_v8i32:
; ZVBB: # %bb.0:
@@ -736,349 +358,138 @@ define void @bitreverse_v8i32(ptr %x, ptr %y) {
declare <8 x i32> @llvm.bitreverse.v8i32(<8 x i32>)
define void @bitreverse_v4i64(ptr %x, ptr %y) {
-; LMULMAX2-RV32-LABEL: bitreverse_v4i64:
-; LMULMAX2-RV32: # %bb.0:
-; LMULMAX2-RV32-NEXT: addi sp, sp, -16
-; LMULMAX2-RV32-NEXT: .cfi_def_cfa_offset 16
-; LMULMAX2-RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; LMULMAX2-RV32-NEXT: vle64.v v8, (a0)
-; LMULMAX2-RV32-NEXT: sw zero, 12(sp)
-; LMULMAX2-RV32-NEXT: lui a1, 1044480
-; LMULMAX2-RV32-NEXT: sw a1, 8(sp)
-; LMULMAX2-RV32-NEXT: li a1, 56
-; LMULMAX2-RV32-NEXT: vsrl.vx v10, v8, a1
-; LMULMAX2-RV32-NEXT: li a2, 40
-; LMULMAX2-RV32-NEXT: vsrl.vx v12, v8, a2
-; LMULMAX2-RV32-NEXT: lui a3, 16
-; LMULMAX2-RV32-NEXT: addi a3, a3, -256
-; LMULMAX2-RV32-NEXT: vand.vx v12, v12, a3
-; LMULMAX2-RV32-NEXT: vor.vv v10, v12, v10
-; LMULMAX2-RV32-NEXT: vsrl.vi v12, v8, 24
-; LMULMAX2-RV32-NEXT: addi a4, sp, 8
-; LMULMAX2-RV32-NEXT: vlse64.v v14, (a4), zero
-; LMULMAX2-RV32-NEXT: lui a4, 4080
-; LMULMAX2-RV32-NEXT: vand.vx v12, v12, a4
-; LMULMAX2-RV32-NEXT: vsrl.vi v16, v8, 8
-; LMULMAX2-RV32-NEXT: vand.vv v16, v16, v14
-; LMULMAX2-RV32-NEXT: vor.vv v12, v16, v12
-; LMULMAX2-RV32-NEXT: vor.vv v10, v12, v10
-; LMULMAX2-RV32-NEXT: vsll.vx v12, v8, a1
-; LMULMAX2-RV32-NEXT: vand.vx v16, v8, a3
-; LMULMAX2-RV32-NEXT: vsll.vx v16, v16, a2
-; LMULMAX2-RV32-NEXT: vor.vv v12, v12, v16
-; LMULMAX2-RV32-NEXT: vand.vx v16, v8, a4
-; LMULMAX2-RV32-NEXT: vsll.vi v16, v16, 24
-; LMULMAX2-RV32-NEXT: vand.vv v8, v8, v14
-; LMULMAX2-RV32-NEXT: vsll.vi v8, v8, 8
-; LMULMAX2-RV32-NEXT: vor.vv v8, v16, v8
-; LMULMAX2-RV32-NEXT: vor.vv v8, v12, v8
-; LMULMAX2-RV32-NEXT: vor.vv v8, v8, v10
-; LMULMAX2-RV32-NEXT: vsrl.vi v10, v8, 4
-; LMULMAX2-RV32-NEXT: lui a1, 61681
-; LMULMAX2-RV32-NEXT: addi a1, a1, -241
-; LMULMAX2-RV32-NEXT: vsetvli a2, zero, e32, m2, ta, ma
-; LMULMAX2-RV32-NEXT: vmv.v.x v12, a1
-; LMULMAX2-RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; LMULMAX2-RV32-NEXT: vand.vv v10, v10, v12
-; LMULMAX2-RV32-NEXT: vand.vv v8, v8, v12
-; LMULMAX2-RV32-NEXT: vsll.vi v8, v8, 4
-; LMULMAX2-RV32-NEXT: vor.vv v8, v10, v8
-; LMULMAX2-RV32-NEXT: vsrl.vi v10, v8, 2
-; LMULMAX2-RV32-NEXT: lui a1, 209715
-; LMULMAX2-RV32-NEXT: addi a1, a1, 819
-; LMULMAX2-RV32-NEXT: vsetvli a2, zero, e32, m2, ta, ma
-; LMULMAX2-RV32-NEXT: vmv.v.x v12, a1
-; LMULMAX2-RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; LMULMAX2-RV32-NEXT: vand.vv v10, v10, v12
-; LMULMAX2-RV32-NEXT: vand.vv v8, v8, v12
-; LMULMAX2-RV32-NEXT: vsll.vi v8, v8, 2
-; LMULMAX2-RV32-NEXT: vor.vv v8, v10, v8
-; LMULMAX2-RV32-NEXT: vsrl.vi v10, v8, 1
-; LMULMAX2-RV32-NEXT: lui a1, 349525
-; LMULMAX2-RV32-NEXT: addi a1, a1, 1365
-; LMULMAX2-RV32-NEXT: vsetvli a2, zero, e32, m2, ta, ma
-; LMULMAX2-RV32-NEXT: vmv.v.x v12, a1
-; LMULMAX2-RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; LMULMAX2-RV32-NEXT: vand.vv v10, v10, v12
-; LMULMAX2-RV32-NEXT: vand.vv v8, v8, v12
-; LMULMAX2-RV32-NEXT: vadd.vv v8, v8, v8
-; LMULMAX2-RV32-NEXT: vor.vv v8, v10, v8
-; LMULMAX2-RV32-NEXT: vse64.v v8, (a0)
-; LMULMAX2-RV32-NEXT: addi sp, sp, 16
-; LMULMAX2-RV32-NEXT: ret
-;
-; LMULMAX2-RV64-LABEL: bitreverse_v4i64:
-; LMULMAX2-RV64: # %bb.0:
-; LMULMAX2-RV64-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; LMULMAX2-RV64-NEXT: vle64.v v8, (a0)
-; LMULMAX2-RV64-NEXT: li a1, 56
-; LMULMAX2-RV64-NEXT: vsrl.vx v10, v8, a1
-; LMULMAX2-RV64-NEXT: li a2, 40
-; LMULMAX2-RV64-NEXT: vsrl.vx v12, v8, a2
-; LMULMAX2-RV64-NEXT: lui a3, 16
-; LMULMAX2-RV64-NEXT: addiw a3, a3, -256
-; LMULMAX2-RV64-NEXT: vand.vx v12, v12, a3
-; LMULMAX2-RV64-NEXT: vor.vv v10, v12, v10
-; LMULMAX2-RV64-NEXT: vsrl.vi v12, v8, 24
-; LMULMAX2-RV64-NEXT: lui a4, 4080
-; LMULMAX2-RV64-NEXT: vand.vx v12, v12, a4
-; LMULMAX2-RV64-NEXT: vsrl.vi v14, v8, 8
-; LMULMAX2-RV64-NEXT: li a5, 255
-; LMULMAX2-RV64-NEXT: slli a5, a5, 24
-; LMULMAX2-RV64-NEXT: vand.vx v14, v14, a5
-; LMULMAX2-RV64-NEXT: vor.vv v12, v14, v12
-; LMULMAX2-RV64-NEXT: vor.vv v10, v12, v10
-; LMULMAX2-RV64-NEXT: vand.vx v12, v8, a5
-; LMULMAX2-RV64-NEXT: vsll.vi v12, v12, 8
-; LMULMAX2-RV64-NEXT: vand.vx v14, v8, a4
-; LMULMAX2-RV64-NEXT: vsll.vi v14, v14, 24
-; LMULMAX2-RV64-NEXT: vor.vv v12, v14, v12
-; LMULMAX2-RV64-NEXT: vsll.vx v14, v8, a1
-; LMULMAX2-RV64-NEXT: vand.vx v8, v8, a3
-; LMULMAX2-RV64-NEXT: vsll.vx v8, v8, a2
-; LMULMAX2-RV64-NEXT: vor.vv v8, v14, v8
-; LMULMAX2-RV64-NEXT: vor.vv v8, v8, v12
-; LMULMAX2-RV64-NEXT: vor.vv v8, v8, v10
-; LMULMAX2-RV64-NEXT: vsrl.vi v10, v8, 4
-; LMULMAX2-RV64-NEXT: lui a1, 61681
-; LMULMAX2-RV64-NEXT: addiw a1, a1, -241
-; LMULMAX2-RV64-NEXT: slli a2, a1, 32
-; LMULMAX2-RV64-NEXT: add a1, a1, a2
-; LMULMAX2-RV64-NEXT: vand.vx v10, v10, a1
-; LMULMAX2-RV64-NEXT: vand.vx v8, v8, a1
-; LMULMAX2-RV64-NEXT: vsll.vi v8, v8, 4
-; LMULMAX2-RV64-NEXT: vor.vv v8, v10, v8
-; LMULMAX2-RV64-NEXT: vsrl.vi v10, v8, 2
-; LMULMAX2-RV64-NEXT: lui a1, 209715
-; LMULMAX2-RV64-NEXT: addiw a1, a1, 819
-; LMULMAX2-RV64-NEXT: slli a2, a1, 32
-; LMULMAX2-RV64-NEXT: add a1, a1, a2
-; LMULMAX2-RV64-NEXT: vand.vx v10, v10, a1
-; LMULMAX2-RV64-NEXT: vand.vx v8, v8, a1
-; LMULMAX2-RV64-NEXT: vsll.vi v8, v8, 2
-; LMULMAX2-RV64-NEXT: vor.vv v8, v10, v8
-; LMULMAX2-RV64-NEXT: vsrl.vi v10, v8, 1
-; LMULMAX2-RV64-NEXT: lui a1, 349525
-; LMULMAX2-RV64-NEXT: addiw a1, a1, 1365
-; LMULMAX2-RV64-NEXT: slli a2, a1, 32
-; LMULMAX2-RV64-NEXT: add a1, a1, a2
-; LMULMAX2-RV64-NEXT: vand.vx v10, v10, a1
-; LMULMAX2-RV64-NEXT: vand.vx v8, v8, a1
-; LMULMAX2-RV64-NEXT: vadd.vv v8, v8, v8
-; LMULMAX2-RV64-NEXT: vor.vv v8, v10, v8
-; LMULMAX2-RV64-NEXT: vse64.v v8, (a0)
-; LMULMAX2-RV64-NEXT: ret
-;
-; LMULMAX1-RV32-LABEL: bitreverse_v4i64:
-; LMULMAX1-RV32: # %bb.0:
-; LMULMAX1-RV32-NEXT: addi sp, sp, -16
-; LMULMAX1-RV32-NEXT: .cfi_def_cfa_offset 16
-; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; LMULMAX1-RV32-NEXT: vle64.v v8, (a0)
-; LMULMAX1-RV32-NEXT: addi a1, a0, 16
-; LMULMAX1-RV32-NEXT: vle64.v v9, (a1)
-; LMULMAX1-RV32-NEXT: sw zero, 12(sp)
-; LMULMAX1-RV32-NEXT: lui a2, 1044480
-; LMULMAX1-RV32-NEXT: sw a2, 8(sp)
-; LMULMAX1-RV32-NEXT: li a2, 56
-; LMULMAX1-RV32-NEXT: vsrl.vx v10, v9, a2
-; LMULMAX1-RV32-NEXT: li a3, 40
-; LMULMAX1-RV32-NEXT: vsrl.vx v11, v9, a3
-; LMULMAX1-RV32-NEXT: lui a4, 16
-; LMULMAX1-RV32-NEXT: addi a4, a4, -256
-; LMULMAX1-RV32-NEXT: vand.vx v11, v11, a4
-; LMULMAX1-RV32-NEXT: vor.vv v10, v11, v10
-; LMULMAX1-RV32-NEXT: vsrl.vi v11, v9, 24
-; LMULMAX1-RV32-NEXT: addi a5, sp, 8
-; LMULMAX1-RV32-NEXT: vlse64.v v12, (a5), zero
-; LMULMAX1-RV32-NEXT: lui a5, 4080
-; LMULMAX1-RV32-NEXT: vand.vx v11, v11, a5
-; LMULMAX1-RV32-NEXT: vsrl.vi v13, v9, 8
-; LMULMAX1-RV32-NEXT: vand.vv v13, v13, v12
-; LMULMAX1-RV32-NEXT: vor.vv v11, v13, v11
-; LMULMAX1-RV32-NEXT: vor.vv v10, v11, v10
-; LMULMAX1-RV32-NEXT: vand.vv v11, v9, v12
-; LMULMAX1-RV32-NEXT: vsll.vi v11, v11, 8
-; LMULMAX1-RV32-NEXT: vand.vx v13, v9, a5
-; LMULMAX1-RV32-NEXT: vsll.vi v13, v13, 24
-; LMULMAX1-RV32-NEXT: vor.vv v11, v13, v11
-; LMULMAX1-RV32-NEXT: vsll.vx v13, v9, a2
-; LMULMAX1-RV32-NEXT: vand.vx v9, v9, a4
-; LMULMAX1-RV32-NEXT: vsll.vx v9, v9, a3
-; LMULMAX1-RV32-NEXT: vor.vv v9, v13, v9
-; LMULMAX1-RV32-NEXT: vor.vv v9, v9, v11
-; LMULMAX1-RV32-NEXT: vor.vv v9, v9, v10
-; LMULMAX1-RV32-NEXT: vsrl.vi v10, v9, 4
-; LMULMAX1-RV32-NEXT: lui a6, 61681
-; LMULMAX1-RV32-NEXT: addi a6, a6, -241
-; LMULMAX1-RV32-NEXT: vsetvli a7, zero, e32, m1, ta, ma
-; LMULMAX1-RV32-NEXT: vmv.v.x v11, a6
-; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; LMULMAX1-RV32-NEXT: vand.vv v10, v10, v11
-; LMULMAX1-RV32-NEXT: vand.vv v9, v9, v11
-; LMULMAX1-RV32-NEXT: vsll.vi v9, v9, 4
-; LMULMAX1-RV32-NEXT: vor.vv v9, v10, v9
-; LMULMAX1-RV32-NEXT: vsrl.vi v10, v9, 2
-; LMULMAX1-RV32-NEXT: lui a6, 209715
-; LMULMAX1-RV32-NEXT: addi a6, a6, 819
-; LMULMAX1-RV32-NEXT: vsetvli a7, zero, e32, m1, ta, ma
-; LMULMAX1-RV32-NEXT: vmv.v.x v13, a6
-; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; LMULMAX1-RV32-NEXT: vand.vv v10, v10, v13
-; LMULMAX1-RV32-NEXT: vand.vv v9, v9, v13
-; LMULMAX1-RV32-NEXT: vsll.vi v9, v9, 2
-; LMULMAX1-RV32-NEXT: vor.vv v9, v10, v9
-; LMULMAX1-RV32-NEXT: vsrl.vi v10, v9, 1
-; LMULMAX1-RV32-NEXT: lui a6, 349525
-; LMULMAX1-RV32-NEXT: addi a6, a6, 1365
-; LMULMAX1-RV32-NEXT: vsetvli a7, zero, e32, m1, ta, ma
-; LMULMAX1-RV32-NEXT: vmv.v.x v14, a6
-; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; LMULMAX1-RV32-NEXT: vand.vv v10, v10, v14
-; LMULMAX1-RV32-NEXT: vand.vv v9, v9, v14
-; LMULMAX1-RV32-NEXT: vadd.vv v9, v9, v9
-; LMULMAX1-RV32-NEXT: vor.vv v9, v10, v9
-; LMULMAX1-RV32-NEXT: vsrl.vx v10, v8, a2
-; LMULMAX1-RV32-NEXT: vsrl.vx v15, v8, a3
-; LMULMAX1-RV32-NEXT: vand.vx v15, v15, a4
-; LMULMAX1-RV32-NEXT: vor.vv v10, v15, v10
-; LMULMAX1-RV32-NEXT: vsrl.vi v15, v8, 24
-; LMULMAX1-RV32-NEXT: vand.vx v15, v15, a5
-; LMULMAX1-RV32-NEXT: vsrl.vi v16, v8, 8
-; LMULMAX1-RV32-NEXT: vand.vv v16, v16, v12
-; LMULMAX1-RV32-NEXT: vor.vv v15, v16, v15
-; LMULMAX1-RV32-NEXT: vor.vv v10, v15, v10
-; LMULMAX1-RV32-NEXT: vsll.vx v15, v8, a2
-; LMULMAX1-RV32-NEXT: vand.vx v16, v8, a4
-; LMULMAX1-RV32-NEXT: vsll.vx v16, v16, a3
-; LMULMAX1-RV32-NEXT: vor.vv v15, v15, v16
-; LMULMAX1-RV32-NEXT: vand.vx v16, v8, a5
-; LMULMAX1-RV32-NEXT: vsll.vi v16, v16, 24
-; LMULMAX1-RV32-NEXT: vand.vv v8, v8, v12
-; LMULMAX1-RV32-NEXT: vsll.vi v8, v8, 8
-; LMULMAX1-RV32-NEXT: vor.vv v8, v16, v8
-; LMULMAX1-RV32-NEXT: vor.vv v8, v15, v8
-; LMULMAX1-RV32-NEXT: vor.vv v8, v8, v10
-; LMULMAX1-RV32-NEXT: vsrl.vi v10, v8, 4
-; LMULMAX1-RV32-NEXT: vand.vv v10, v10, v11
-; LMULMAX1-RV32-NEXT: vand.vv v8, v8, v11
-; LMULMAX1-RV32-NEXT: vsll.vi v8, v8, 4
-; LMULMAX1-RV32-NEXT: vor.vv v8, v10, v8
-; LMULMAX1-RV32-NEXT: vsrl.vi v10, v8, 2
-; LMULMAX1-RV32-NEXT: vand.vv v10, v10, v13
-; LMULMAX1-RV32-NEXT: vand.vv v8, v8, v13
-; LMULMAX1-RV32-NEXT: vsll.vi v8, v8, 2
-; LMULMAX1-RV32-NEXT: vor.vv v8, v10, v8
-; LMULMAX1-RV32-NEXT: vsrl.vi v10, v8, 1
-; LMULMAX1-RV32-NEXT: vand.vv v10, v10, v14
-; LMULMAX1-RV32-NEXT: vand.vv v8, v8, v14
-; LMULMAX1-RV32-NEXT: vadd.vv v8, v8, v8
-; LMULMAX1-RV32-NEXT: vor.vv v8, v10, v8
-; LMULMAX1-RV32-NEXT: vse64.v v8, (a0)
-; LMULMAX1-RV32-NEXT: vse64.v v9, (a1)
-; LMULMAX1-RV32-NEXT: addi sp, sp, 16
-; LMULMAX1-RV32-NEXT: ret
+; RV32-LABEL: bitreverse_v4i64:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: .cfi_def_cfa_offset 16
+; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; RV32-NEXT: vle64.v v8, (a0)
+; RV32-NEXT: sw zero, 12(sp)
+; RV32-NEXT: lui a1, 1044480
+; RV32-NEXT: sw a1, 8(sp)
+; RV32-NEXT: li a1, 56
+; RV32-NEXT: vsrl.vx v10, v8, a1
+; RV32-NEXT: li a2, 40
+; RV32-NEXT: vsrl.vx v12, v8, a2
+; RV32-NEXT: lui a3, 16
+; RV32-NEXT: addi a3, a3, -256
+; RV32-NEXT: vand.vx v12, v12, a3
+; RV32-NEXT: vor.vv v10, v12, v10
+; RV32-NEXT: vsrl.vi v12, v8, 24
+; RV32-NEXT: addi a4, sp, 8
+; RV32-NEXT: vlse64.v v14, (a4), zero
+; RV32-NEXT: lui a4, 4080
+; RV32-NEXT: vand.vx v12, v12, a4
+; RV32-NEXT: vsrl.vi v16, v8, 8
+; RV32-NEXT: vand.vv v16, v16, v14
+; RV32-NEXT: vor.vv v12, v16, v12
+; RV32-NEXT: vor.vv v10, v12, v10
+; RV32-NEXT: vsll.vx v12, v8, a1
+; RV32-NEXT: vand.vx v16, v8, a3
+; RV32-NEXT: vsll.vx v16, v16, a2
+; RV32-NEXT: vor.vv v12, v12, v16
+; RV32-NEXT: vand.vx v16, v8, a4
+; RV32-NEXT: vsll.vi v16, v16, 24
+; RV32-NEXT: vand.vv v8, v8, v14
+; RV32-NEXT: vsll.vi v8, v8, 8
+; RV32-NEXT: vor.vv v8, v16, v8
+; RV32-NEXT: vor.vv v8, v12, v8
+; RV32-NEXT: vor.vv v8, v8, v10
+; RV32-NEXT: vsrl.vi v10, v8, 4
+; RV32-NEXT: lui a1, 61681
+; RV32-NEXT: addi a1, a1, -241
+; RV32-NEXT: vsetvli a2, zero, e32, m2, ta, ma
+; RV32-NEXT: vmv.v.x v12, a1
+; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; RV32-NEXT: vand.vv v10, v10, v12
+; RV32-NEXT: vand.vv v8, v8, v12
+; RV32-NEXT: vsll.vi v8, v8, 4
+; RV32-NEXT: vor.vv v8, v10, v8
+; RV32-NEXT: vsrl.vi v10, v8, 2
+; RV32-NEXT: lui a1, 209715
+; RV32-NEXT: addi a1, a1, 819
+; RV32-NEXT: vsetvli a2, zero, e32, m2, ta, ma
+; RV32-NEXT: vmv.v.x v12, a1
+; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; RV32-NEXT: vand.vv v10, v10, v12
+; RV32-NEXT: vand.vv v8, v8, v12
+; RV32-NEXT: vsll.vi v8, v8, 2
+; RV32-NEXT: vor.vv v8, v10, v8
+; RV32-NEXT: vsrl.vi v10, v8, 1
+; RV32-NEXT: lui a1, 349525
+; RV32-NEXT: addi a1, a1, 1365
+; RV32-NEXT: vsetvli a2, zero, e32, m2, ta, ma
+; RV32-NEXT: vmv.v.x v12, a1
+; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; RV32-NEXT: vand.vv v10, v10, v12
+; RV32-NEXT: vand.vv v8, v8, v12
+; RV32-NEXT: vadd.vv v8, v8, v8
+; RV32-NEXT: vor.vv v8, v10, v8
+; RV32-NEXT: vse64.v v8, (a0)
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
;
-; LMULMAX1-RV64-LABEL: bitreverse_v4i64:
-; LMULMAX1-RV64: # %bb.0:
-; LMULMAX1-RV64-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; LMULMAX1-RV64-NEXT: addi a1, a0, 16
-; LMULMAX1-RV64-NEXT: vle64.v v9, (a1)
-; LMULMAX1-RV64-NEXT: vle64.v v8, (a0)
-; LMULMAX1-RV64-NEXT: li a2, 56
-; LMULMAX1-RV64-NEXT: vsrl.vx v10, v9, a2
-; LMULMAX1-RV64-NEXT: li a3, 40
-; LMULMAX1-RV64-NEXT: vsrl.vx v11, v9, a3
-; LMULMAX1-RV64-NEXT: lui a4, 16
-; LMULMAX1-RV64-NEXT: addiw a4, a4, -256
-; LMULMAX1-RV64-NEXT: vand.vx v11, v11, a4
-; LMULMAX1-RV64-NEXT: vor.vv v10, v11, v10
-; LMULMAX1-RV64-NEXT: vsrl.vi v11, v9, 24
-; LMULMAX1-RV64-NEXT: lui a5, 4080
-; LMULMAX1-RV64-NEXT: vand.vx v11, v11, a5
-; LMULMAX1-RV64-NEXT: vsrl.vi v12, v9, 8
-; LMULMAX1-RV64-NEXT: li a6, 255
-; LMULMAX1-RV64-NEXT: slli a6, a6, 24
-; LMULMAX1-RV64-NEXT: vand.vx v12, v12, a6
-; LMULMAX1-RV64-NEXT: vor.vv v11, v12, v11
-; LMULMAX1-RV64-NEXT: vor.vv v10, v11, v10
-; LMULMAX1-RV64-NEXT: vand.vx v11, v9, a6
-; LMULMAX1-RV64-NEXT: vsll.vi v11, v11, 8
-; LMULMAX1-RV64-NEXT: vand.vx v12, v9, a5
-; LMULMAX1-RV64-NEXT: vsll.vi v12, v12, 24
-; LMULMAX1-RV64-NEXT: vor.vv v11, v12, v11
-; LMULMAX1-RV64-NEXT: vsll.vx v12, v9, a2
-; LMULMAX1-RV64-NEXT: vand.vx v9, v9, a4
-; LMULMAX1-RV64-NEXT: vsll.vx v9, v9, a3
-; LMULMAX1-RV64-NEXT: vor.vv v9, v12, v9
-; LMULMAX1-RV64-NEXT: vor.vv v9, v9, v11
-; LMULMAX1-RV64-NEXT: vor.vv v9, v9, v10
-; LMULMAX1-RV64-NEXT: vsrl.vi v10, v9, 4
-; LMULMAX1-RV64-NEXT: lui a7, 61681
-; LMULMAX1-RV64-NEXT: addiw a7, a7, -241
-; LMULMAX1-RV64-NEXT: slli t0, a7, 32
-; LMULMAX1-RV64-NEXT: add a7, a7, t0
-; LMULMAX1-RV64-NEXT: vand.vx v10, v10, a7
-; LMULMAX1-RV64-NEXT: vand.vx v9, v9, a7
-; LMULMAX1-RV64-NEXT: vsll.vi v9, v9, 4
-; LMULMAX1-RV64-NEXT: vor.vv v9, v10, v9
-; LMULMAX1-RV64-NEXT: vsrl.vi v10, v9, 2
-; LMULMAX1-RV64-NEXT: lui t0, 209715
-; LMULMAX1-RV64-NEXT: addiw t0, t0, 819
-; LMULMAX1-RV64-NEXT: slli t1, t0, 32
-; LMULMAX1-RV64-NEXT: add t0, t0, t1
-; LMULMAX1-RV64-NEXT: vand.vx v10, v10, t0
-; LMULMAX1-RV64-NEXT: vand.vx v9, v9, t0
-; LMULMAX1-RV64-NEXT: vsll.vi v9, v9, 2
-; LMULMAX1-RV64-NEXT: vor.vv v9, v10, v9
-; LMULMAX1-RV64-NEXT: vsrl.vi v10, v9, 1
-; LMULMAX1-RV64-NEXT: lui t1, 349525
-; LMULMAX1-RV64-NEXT: addiw t1, t1, 1365
-; LMULMAX1-RV64-NEXT: slli t2, t1, 32
-; LMULMAX1-RV64-NEXT: add t1, t1, t2
-; LMULMAX1-RV64-NEXT: vand.vx v10, v10, t1
-; LMULMAX1-RV64-NEXT: vand.vx v9, v9, t1
-; LMULMAX1-RV64-NEXT: vadd.vv v9, v9, v9
-; LMULMAX1-RV64-NEXT: vor.vv v9, v10, v9
-; LMULMAX1-RV64-NEXT: vsrl.vx v10, v8, a2
-; LMULMAX1-RV64-NEXT: vsrl.vx v11, v8, a3
-; LMULMAX1-RV64-NEXT: vand.vx v11, v11, a4
-; LMULMAX1-RV64-NEXT: vor.vv v10, v11, v10
-; LMULMAX1-RV64-NEXT: vsrl.vi v11, v8, 24
-; LMULMAX1-RV64-NEXT: vand.vx v11, v11, a5
-; LMULMAX1-RV64-NEXT: vsrl.vi v12, v8, 8
-; LMULMAX1-RV64-NEXT: vand.vx v12, v12, a6
-; LMULMAX1-RV64-NEXT: vor.vv v11, v12, v11
-; LMULMAX1-RV64-NEXT: vor.vv v10, v11, v10
-; LMULMAX1-RV64-NEXT: vand.vx v11, v8, a6
-; LMULMAX1-RV64-NEXT: vsll.vi v11, v11, 8
-; LMULMAX1-RV64-NEXT: vand.vx v12, v8, a5
-; LMULMAX1-RV64-NEXT: vsll.vi v12, v12, 24
-; LMULMAX1-RV64-NEXT: vor.vv v11, v12, v11
-; LMULMAX1-RV64-NEXT: vsll.vx v12, v8, a2
-; LMULMAX1-RV64-NEXT: vand.vx v8, v8, a4
-; LMULMAX1-RV64-NEXT: vsll.vx v8, v8, a3
-; LMULMAX1-RV64-NEXT: vor.vv v8, v12, v8
-; LMULMAX1-RV64-NEXT: vor.vv v8, v8, v11
-; LMULMAX1-RV64-NEXT: vor.vv v8, v8, v10
-; LMULMAX1-RV64-NEXT: vsrl.vi v10, v8, 4
-; LMULMAX1-RV64-NEXT: vand.vx v10, v10, a7
-; LMULMAX1-RV64-NEXT: vand.vx v8, v8, a7
-; LMULMAX1-RV64-NEXT: vsll.vi v8, v8, 4
-; LMULMAX1-RV64-NEXT: vor.vv v8, v10, v8
-; LMULMAX1-RV64-NEXT: vsrl.vi v10, v8, 2
-; LMULMAX1-RV64-NEXT: vand.vx v10, v10, t0
-; LMULMAX1-RV64-NEXT: vand.vx v8, v8, t0
-; LMULMAX1-RV64-NEXT: vsll.vi v8, v8, 2
-; LMULMAX1-RV64-NEXT: vor.vv v8, v10, v8
-; LMULMAX1-RV64-NEXT: vsrl.vi v10, v8, 1
-; LMULMAX1-RV64-NEXT: vand.vx v10, v10, t1
-; LMULMAX1-RV64-NEXT: vand.vx v8, v8, t1
-; LMULMAX1-RV64-NEXT: vadd.vv v8, v8, v8
-; LMULMAX1-RV64-NEXT: vor.vv v8, v10, v8
-; LMULMAX1-RV64-NEXT: vse64.v v8, (a0)
-; LMULMAX1-RV64-NEXT: vse64.v v9, (a1)
-; LMULMAX1-RV64-NEXT: ret
+; RV64-LABEL: bitreverse_v4i64:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; RV64-NEXT: vle64.v v8, (a0)
+; RV64-NEXT: li a1, 56
+; RV64-NEXT: vsrl.vx v10, v8, a1
+; RV64-NEXT: li a2, 40
+; RV64-NEXT: vsrl.vx v12, v8, a2
+; RV64-NEXT: lui a3, 16
+; RV64-NEXT: addiw a3, a3, -256
+; RV64-NEXT: vand.vx v12, v12, a3
+; RV64-NEXT: vor.vv v10, v12, v10
+; RV64-NEXT: vsrl.vi v12, v8, 24
+; RV64-NEXT: lui a4, 4080
+; RV64-NEXT: vand.vx v12, v12, a4
+; RV64-NEXT: vsrl.vi v14, v8, 8
+; RV64-NEXT: li a5, 255
+; RV64-NEXT: slli a5, a5, 24
+; RV64-NEXT: vand.vx v14, v14, a5
+; RV64-NEXT: vor.vv v12, v14, v12
+; RV64-NEXT: vor.vv v10, v12, v10
+; RV64-NEXT: vand.vx v12, v8, a5
+; RV64-NEXT: vsll.vi v12, v12, 8
+; RV64-NEXT: vand.vx v14, v8, a4
+; RV64-NEXT: vsll.vi v14, v14, 24
+; RV64-NEXT: vor.vv v12, v14, v12
+; RV64-NEXT: vsll.vx v14, v8, a1
+; RV64-NEXT: vand.vx v8, v8, a3
+; RV64-NEXT: vsll.vx v8, v8, a2
+; RV64-NEXT: vor.vv v8, v14, v8
+; RV64-NEXT: vor.vv v8, v8, v12
+; RV64-NEXT: vor.vv v8, v8, v10
+; RV64-NEXT: vsrl.vi v10, v8, 4
+; RV64-NEXT: lui a1, 61681
+; RV64-NEXT: addiw a1, a1, -241
+; RV64-NEXT: slli a2, a1, 32
+; RV64-NEXT: add a1, a1, a2
+; RV64-NEXT: vand.vx v10, v10, a1
+; RV64-NEXT: vand.vx v8, v8, a1
+; RV64-NEXT: vsll.vi v8, v8, 4
+; RV64-NEXT: vor.vv v8, v10, v8
+; RV64-NEXT: vsrl.vi v10, v8, 2
+; RV64-NEXT: lui a1, 209715
+; RV64-NEXT: addiw a1, a1, 819
+; RV64-NEXT: slli a2, a1, 32
+; RV64-NEXT: add a1, a1, a2
+; RV64-NEXT: vand.vx v10, v10, a1
+; RV64-NEXT: vand.vx v8, v8, a1
+; RV64-NEXT: vsll.vi v8, v8, 2
+; RV64-NEXT: vor.vv v8, v10, v8
+; RV64-NEXT: vsrl.vi v10, v8, 1
+; RV64-NEXT: lui a1, 349525
+; RV64-NEXT: addiw a1, a1, 1365
+; RV64-NEXT: slli a2, a1, 32
+; RV64-NEXT: add a1, a1, a2
+; RV64-NEXT: vand.vx v10, v10, a1
+; RV64-NEXT: vand.vx v8, v8, a1
+; RV64-NEXT: vadd.vv v8, v8, v8
+; RV64-NEXT: vor.vv v8, v10, v8
+; RV64-NEXT: vse64.v v8, (a0)
+; RV64-NEXT: ret
;
; ZVBB-LABEL: bitreverse_v4i64:
; ZVBB: # %bb.0:
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bswap.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bswap.ll
index 41f7425..d5338f9 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bswap.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bswap.ll
@@ -1,8 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v -riscv-v-fixed-length-vector-lmul-max=2 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32,LMULMAX2-RV32
-; RUN: llc -mtriple=riscv64 -mattr=+v -riscv-v-fixed-length-vector-lmul-max=2 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64,LMULMAX2-RV64
-; RUN: llc -mtriple=riscv32 -mattr=+v -riscv-v-fixed-length-vector-lmul-max=1 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32,LMULMAX1-RV32
-; RUN: llc -mtriple=riscv64 -mattr=+v -riscv-v-fixed-length-vector-lmul-max=1 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64,LMULMAX1-RV64
+; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32
+; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64
; RUN: llc -mtriple=riscv32 -mattr=+v,+zvkb -verify-machineinstrs < %s | FileCheck %s --check-prefixes=ZVKB
; RUN: llc -mtriple=riscv64 -mattr=+v,+zvkb -verify-machineinstrs < %s | FileCheck %s --check-prefixes=ZVKB
@@ -159,57 +157,15 @@ define void @bswap_v2i64(ptr %x, ptr %y) {
declare <2 x i64> @llvm.bswap.v2i64(<2 x i64>)
define void @bswap_v16i16(ptr %x, ptr %y) {
-; LMULMAX2-RV32-LABEL: bswap_v16i16:
-; LMULMAX2-RV32: # %bb.0:
-; LMULMAX2-RV32-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; LMULMAX2-RV32-NEXT: vle16.v v8, (a0)
-; LMULMAX2-RV32-NEXT: vsrl.vi v10, v8, 8
-; LMULMAX2-RV32-NEXT: vsll.vi v8, v8, 8
-; LMULMAX2-RV32-NEXT: vor.vv v8, v8, v10
-; LMULMAX2-RV32-NEXT: vse16.v v8, (a0)
-; LMULMAX2-RV32-NEXT: ret
-;
-; LMULMAX2-RV64-LABEL: bswap_v16i16:
-; LMULMAX2-RV64: # %bb.0:
-; LMULMAX2-RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; LMULMAX2-RV64-NEXT: vle16.v v8, (a0)
-; LMULMAX2-RV64-NEXT: vsrl.vi v10, v8, 8
-; LMULMAX2-RV64-NEXT: vsll.vi v8, v8, 8
-; LMULMAX2-RV64-NEXT: vor.vv v8, v8, v10
-; LMULMAX2-RV64-NEXT: vse16.v v8, (a0)
-; LMULMAX2-RV64-NEXT: ret
-;
-; LMULMAX1-RV32-LABEL: bswap_v16i16:
-; LMULMAX1-RV32: # %bb.0:
-; LMULMAX1-RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; LMULMAX1-RV32-NEXT: addi a1, a0, 16
-; LMULMAX1-RV32-NEXT: vle16.v v8, (a1)
-; LMULMAX1-RV32-NEXT: vle16.v v9, (a0)
-; LMULMAX1-RV32-NEXT: vsrl.vi v10, v8, 8
-; LMULMAX1-RV32-NEXT: vsll.vi v8, v8, 8
-; LMULMAX1-RV32-NEXT: vor.vv v8, v8, v10
-; LMULMAX1-RV32-NEXT: vsrl.vi v10, v9, 8
-; LMULMAX1-RV32-NEXT: vsll.vi v9, v9, 8
-; LMULMAX1-RV32-NEXT: vor.vv v9, v9, v10
-; LMULMAX1-RV32-NEXT: vse16.v v9, (a0)
-; LMULMAX1-RV32-NEXT: vse16.v v8, (a1)
-; LMULMAX1-RV32-NEXT: ret
-;
-; LMULMAX1-RV64-LABEL: bswap_v16i16:
-; LMULMAX1-RV64: # %bb.0:
-; LMULMAX1-RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; LMULMAX1-RV64-NEXT: addi a1, a0, 16
-; LMULMAX1-RV64-NEXT: vle16.v v8, (a1)
-; LMULMAX1-RV64-NEXT: vle16.v v9, (a0)
-; LMULMAX1-RV64-NEXT: vsrl.vi v10, v8, 8
-; LMULMAX1-RV64-NEXT: vsll.vi v8, v8, 8
-; LMULMAX1-RV64-NEXT: vor.vv v8, v8, v10
-; LMULMAX1-RV64-NEXT: vsrl.vi v10, v9, 8
-; LMULMAX1-RV64-NEXT: vsll.vi v9, v9, 8
-; LMULMAX1-RV64-NEXT: vor.vv v9, v9, v10
-; LMULMAX1-RV64-NEXT: vse16.v v9, (a0)
-; LMULMAX1-RV64-NEXT: vse16.v v8, (a1)
-; LMULMAX1-RV64-NEXT: ret
+; CHECK-LABEL: bswap_v16i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; CHECK-NEXT: vle16.v v8, (a0)
+; CHECK-NEXT: vsrl.vi v10, v8, 8
+; CHECK-NEXT: vsll.vi v8, v8, 8
+; CHECK-NEXT: vor.vv v8, v8, v10
+; CHECK-NEXT: vse16.v v8, (a0)
+; CHECK-NEXT: ret
;
; ZVKB-LABEL: bswap_v16i16:
; ZVKB: # %bb.0:
@@ -227,101 +183,23 @@ define void @bswap_v16i16(ptr %x, ptr %y) {
declare <16 x i16> @llvm.bswap.v16i16(<16 x i16>)
define void @bswap_v8i32(ptr %x, ptr %y) {
-; LMULMAX2-RV32-LABEL: bswap_v8i32:
-; LMULMAX2-RV32: # %bb.0:
-; LMULMAX2-RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; LMULMAX2-RV32-NEXT: vle32.v v8, (a0)
-; LMULMAX2-RV32-NEXT: vsrl.vi v10, v8, 8
-; LMULMAX2-RV32-NEXT: lui a1, 16
-; LMULMAX2-RV32-NEXT: addi a1, a1, -256
-; LMULMAX2-RV32-NEXT: vand.vx v10, v10, a1
-; LMULMAX2-RV32-NEXT: vsrl.vi v12, v8, 24
-; LMULMAX2-RV32-NEXT: vor.vv v10, v10, v12
-; LMULMAX2-RV32-NEXT: vand.vx v12, v8, a1
-; LMULMAX2-RV32-NEXT: vsll.vi v12, v12, 8
-; LMULMAX2-RV32-NEXT: vsll.vi v8, v8, 24
-; LMULMAX2-RV32-NEXT: vor.vv v8, v8, v12
-; LMULMAX2-RV32-NEXT: vor.vv v8, v8, v10
-; LMULMAX2-RV32-NEXT: vse32.v v8, (a0)
-; LMULMAX2-RV32-NEXT: ret
-;
-; LMULMAX2-RV64-LABEL: bswap_v8i32:
-; LMULMAX2-RV64: # %bb.0:
-; LMULMAX2-RV64-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; LMULMAX2-RV64-NEXT: vle32.v v8, (a0)
-; LMULMAX2-RV64-NEXT: vsrl.vi v10, v8, 8
-; LMULMAX2-RV64-NEXT: lui a1, 16
-; LMULMAX2-RV64-NEXT: addi a1, a1, -256
-; LMULMAX2-RV64-NEXT: vand.vx v10, v10, a1
-; LMULMAX2-RV64-NEXT: vsrl.vi v12, v8, 24
-; LMULMAX2-RV64-NEXT: vor.vv v10, v10, v12
-; LMULMAX2-RV64-NEXT: vand.vx v12, v8, a1
-; LMULMAX2-RV64-NEXT: vsll.vi v12, v12, 8
-; LMULMAX2-RV64-NEXT: vsll.vi v8, v8, 24
-; LMULMAX2-RV64-NEXT: vor.vv v8, v8, v12
-; LMULMAX2-RV64-NEXT: vor.vv v8, v8, v10
-; LMULMAX2-RV64-NEXT: vse32.v v8, (a0)
-; LMULMAX2-RV64-NEXT: ret
-;
-; LMULMAX1-RV32-LABEL: bswap_v8i32:
-; LMULMAX1-RV32: # %bb.0:
-; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; LMULMAX1-RV32-NEXT: addi a1, a0, 16
-; LMULMAX1-RV32-NEXT: vle32.v v8, (a1)
-; LMULMAX1-RV32-NEXT: vle32.v v9, (a0)
-; LMULMAX1-RV32-NEXT: vsrl.vi v10, v8, 8
-; LMULMAX1-RV32-NEXT: lui a2, 16
-; LMULMAX1-RV32-NEXT: addi a2, a2, -256
-; LMULMAX1-RV32-NEXT: vand.vx v10, v10, a2
-; LMULMAX1-RV32-NEXT: vsrl.vi v11, v8, 24
-; LMULMAX1-RV32-NEXT: vor.vv v10, v10, v11
-; LMULMAX1-RV32-NEXT: vand.vx v11, v8, a2
-; LMULMAX1-RV32-NEXT: vsll.vi v11, v11, 8
-; LMULMAX1-RV32-NEXT: vsll.vi v8, v8, 24
-; LMULMAX1-RV32-NEXT: vor.vv v8, v8, v11
-; LMULMAX1-RV32-NEXT: vor.vv v8, v8, v10
-; LMULMAX1-RV32-NEXT: vsrl.vi v10, v9, 8
-; LMULMAX1-RV32-NEXT: vand.vx v10, v10, a2
-; LMULMAX1-RV32-NEXT: vsrl.vi v11, v9, 24
-; LMULMAX1-RV32-NEXT: vor.vv v10, v10, v11
-; LMULMAX1-RV32-NEXT: vand.vx v11, v9, a2
-; LMULMAX1-RV32-NEXT: vsll.vi v11, v11, 8
-; LMULMAX1-RV32-NEXT: vsll.vi v9, v9, 24
-; LMULMAX1-RV32-NEXT: vor.vv v9, v9, v11
-; LMULMAX1-RV32-NEXT: vor.vv v9, v9, v10
-; LMULMAX1-RV32-NEXT: vse32.v v9, (a0)
-; LMULMAX1-RV32-NEXT: vse32.v v8, (a1)
-; LMULMAX1-RV32-NEXT: ret
-;
-; LMULMAX1-RV64-LABEL: bswap_v8i32:
-; LMULMAX1-RV64: # %bb.0:
-; LMULMAX1-RV64-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; LMULMAX1-RV64-NEXT: addi a1, a0, 16
-; LMULMAX1-RV64-NEXT: vle32.v v8, (a1)
-; LMULMAX1-RV64-NEXT: vle32.v v9, (a0)
-; LMULMAX1-RV64-NEXT: vsrl.vi v10, v8, 8
-; LMULMAX1-RV64-NEXT: lui a2, 16
-; LMULMAX1-RV64-NEXT: addi a2, a2, -256
-; LMULMAX1-RV64-NEXT: vand.vx v10, v10, a2
-; LMULMAX1-RV64-NEXT: vsrl.vi v11, v8, 24
-; LMULMAX1-RV64-NEXT: vor.vv v10, v10, v11
-; LMULMAX1-RV64-NEXT: vand.vx v11, v8, a2
-; LMULMAX1-RV64-NEXT: vsll.vi v11, v11, 8
-; LMULMAX1-RV64-NEXT: vsll.vi v8, v8, 24
-; LMULMAX1-RV64-NEXT: vor.vv v8, v8, v11
-; LMULMAX1-RV64-NEXT: vor.vv v8, v8, v10
-; LMULMAX1-RV64-NEXT: vsrl.vi v10, v9, 8
-; LMULMAX1-RV64-NEXT: vand.vx v10, v10, a2
-; LMULMAX1-RV64-NEXT: vsrl.vi v11, v9, 24
-; LMULMAX1-RV64-NEXT: vor.vv v10, v10, v11
-; LMULMAX1-RV64-NEXT: vand.vx v11, v9, a2
-; LMULMAX1-RV64-NEXT: vsll.vi v11, v11, 8
-; LMULMAX1-RV64-NEXT: vsll.vi v9, v9, 24
-; LMULMAX1-RV64-NEXT: vor.vv v9, v9, v11
-; LMULMAX1-RV64-NEXT: vor.vv v9, v9, v10
-; LMULMAX1-RV64-NEXT: vse32.v v9, (a0)
-; LMULMAX1-RV64-NEXT: vse32.v v8, (a1)
-; LMULMAX1-RV64-NEXT: ret
+; CHECK-LABEL: bswap_v8i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; CHECK-NEXT: vle32.v v8, (a0)
+; CHECK-NEXT: vsrl.vi v10, v8, 8
+; CHECK-NEXT: lui a1, 16
+; CHECK-NEXT: addi a1, a1, -256
+; CHECK-NEXT: vand.vx v10, v10, a1
+; CHECK-NEXT: vsrl.vi v12, v8, 24
+; CHECK-NEXT: vor.vv v10, v10, v12
+; CHECK-NEXT: vand.vx v12, v8, a1
+; CHECK-NEXT: vsll.vi v12, v12, 8
+; CHECK-NEXT: vsll.vi v8, v8, 24
+; CHECK-NEXT: vor.vv v8, v8, v12
+; CHECK-NEXT: vor.vv v8, v8, v10
+; CHECK-NEXT: vse32.v v8, (a0)
+; CHECK-NEXT: ret
;
; ZVKB-LABEL: bswap_v8i32:
; ZVKB: # %bb.0:
@@ -339,205 +217,81 @@ define void @bswap_v8i32(ptr %x, ptr %y) {
declare <8 x i32> @llvm.bswap.v8i32(<8 x i32>)
define void @bswap_v4i64(ptr %x, ptr %y) {
-; LMULMAX2-RV32-LABEL: bswap_v4i64:
-; LMULMAX2-RV32: # %bb.0:
-; LMULMAX2-RV32-NEXT: addi sp, sp, -16
-; LMULMAX2-RV32-NEXT: .cfi_def_cfa_offset 16
-; LMULMAX2-RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; LMULMAX2-RV32-NEXT: vle64.v v8, (a0)
-; LMULMAX2-RV32-NEXT: sw zero, 12(sp)
-; LMULMAX2-RV32-NEXT: lui a1, 1044480
-; LMULMAX2-RV32-NEXT: sw a1, 8(sp)
-; LMULMAX2-RV32-NEXT: li a1, 56
-; LMULMAX2-RV32-NEXT: vsrl.vx v10, v8, a1
-; LMULMAX2-RV32-NEXT: li a2, 40
-; LMULMAX2-RV32-NEXT: vsrl.vx v12, v8, a2
-; LMULMAX2-RV32-NEXT: lui a3, 16
-; LMULMAX2-RV32-NEXT: addi a3, a3, -256
-; LMULMAX2-RV32-NEXT: vand.vx v12, v12, a3
-; LMULMAX2-RV32-NEXT: vor.vv v10, v12, v10
-; LMULMAX2-RV32-NEXT: vsrl.vi v12, v8, 24
-; LMULMAX2-RV32-NEXT: addi a4, sp, 8
-; LMULMAX2-RV32-NEXT: vlse64.v v14, (a4), zero
-; LMULMAX2-RV32-NEXT: lui a4, 4080
-; LMULMAX2-RV32-NEXT: vand.vx v12, v12, a4
-; LMULMAX2-RV32-NEXT: vsrl.vi v16, v8, 8
-; LMULMAX2-RV32-NEXT: vand.vv v16, v16, v14
-; LMULMAX2-RV32-NEXT: vor.vv v12, v16, v12
-; LMULMAX2-RV32-NEXT: vor.vv v10, v12, v10
-; LMULMAX2-RV32-NEXT: vsll.vx v12, v8, a1
-; LMULMAX2-RV32-NEXT: vand.vx v16, v8, a3
-; LMULMAX2-RV32-NEXT: vsll.vx v16, v16, a2
-; LMULMAX2-RV32-NEXT: vor.vv v12, v12, v16
-; LMULMAX2-RV32-NEXT: vand.vx v16, v8, a4
-; LMULMAX2-RV32-NEXT: vsll.vi v16, v16, 24
-; LMULMAX2-RV32-NEXT: vand.vv v8, v8, v14
-; LMULMAX2-RV32-NEXT: vsll.vi v8, v8, 8
-; LMULMAX2-RV32-NEXT: vor.vv v8, v16, v8
-; LMULMAX2-RV32-NEXT: vor.vv v8, v12, v8
-; LMULMAX2-RV32-NEXT: vor.vv v8, v8, v10
-; LMULMAX2-RV32-NEXT: vse64.v v8, (a0)
-; LMULMAX2-RV32-NEXT: addi sp, sp, 16
-; LMULMAX2-RV32-NEXT: ret
-;
-; LMULMAX2-RV64-LABEL: bswap_v4i64:
-; LMULMAX2-RV64: # %bb.0:
-; LMULMAX2-RV64-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; LMULMAX2-RV64-NEXT: vle64.v v8, (a0)
-; LMULMAX2-RV64-NEXT: li a1, 56
-; LMULMAX2-RV64-NEXT: vsrl.vx v10, v8, a1
-; LMULMAX2-RV64-NEXT: li a2, 40
-; LMULMAX2-RV64-NEXT: vsrl.vx v12, v8, a2
-; LMULMAX2-RV64-NEXT: lui a3, 16
-; LMULMAX2-RV64-NEXT: addiw a3, a3, -256
-; LMULMAX2-RV64-NEXT: vand.vx v12, v12, a3
-; LMULMAX2-RV64-NEXT: vor.vv v10, v12, v10
-; LMULMAX2-RV64-NEXT: vsrl.vi v12, v8, 24
-; LMULMAX2-RV64-NEXT: lui a4, 4080
-; LMULMAX2-RV64-NEXT: vand.vx v12, v12, a4
-; LMULMAX2-RV64-NEXT: vsrl.vi v14, v8, 8
-; LMULMAX2-RV64-NEXT: li a5, 255
-; LMULMAX2-RV64-NEXT: slli a5, a5, 24
-; LMULMAX2-RV64-NEXT: vand.vx v14, v14, a5
-; LMULMAX2-RV64-NEXT: vor.vv v12, v14, v12
-; LMULMAX2-RV64-NEXT: vor.vv v10, v12, v10
-; LMULMAX2-RV64-NEXT: vand.vx v12, v8, a5
-; LMULMAX2-RV64-NEXT: vsll.vi v12, v12, 8
-; LMULMAX2-RV64-NEXT: vand.vx v14, v8, a4
-; LMULMAX2-RV64-NEXT: vsll.vi v14, v14, 24
-; LMULMAX2-RV64-NEXT: vor.vv v12, v14, v12
-; LMULMAX2-RV64-NEXT: vsll.vx v14, v8, a1
-; LMULMAX2-RV64-NEXT: vand.vx v8, v8, a3
-; LMULMAX2-RV64-NEXT: vsll.vx v8, v8, a2
-; LMULMAX2-RV64-NEXT: vor.vv v8, v14, v8
-; LMULMAX2-RV64-NEXT: vor.vv v8, v8, v12
-; LMULMAX2-RV64-NEXT: vor.vv v8, v8, v10
-; LMULMAX2-RV64-NEXT: vse64.v v8, (a0)
-; LMULMAX2-RV64-NEXT: ret
-;
-; LMULMAX1-RV32-LABEL: bswap_v4i64:
-; LMULMAX1-RV32: # %bb.0:
-; LMULMAX1-RV32-NEXT: addi sp, sp, -16
-; LMULMAX1-RV32-NEXT: .cfi_def_cfa_offset 16
-; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; LMULMAX1-RV32-NEXT: vle64.v v8, (a0)
-; LMULMAX1-RV32-NEXT: addi a1, a0, 16
-; LMULMAX1-RV32-NEXT: vle64.v v9, (a1)
-; LMULMAX1-RV32-NEXT: sw zero, 12(sp)
-; LMULMAX1-RV32-NEXT: lui a2, 1044480
-; LMULMAX1-RV32-NEXT: sw a2, 8(sp)
-; LMULMAX1-RV32-NEXT: li a2, 56
-; LMULMAX1-RV32-NEXT: vsrl.vx v10, v9, a2
-; LMULMAX1-RV32-NEXT: li a3, 40
-; LMULMAX1-RV32-NEXT: vsrl.vx v11, v9, a3
-; LMULMAX1-RV32-NEXT: lui a4, 16
-; LMULMAX1-RV32-NEXT: addi a4, a4, -256
-; LMULMAX1-RV32-NEXT: vand.vx v11, v11, a4
-; LMULMAX1-RV32-NEXT: vor.vv v10, v11, v10
-; LMULMAX1-RV32-NEXT: vsrl.vi v11, v9, 24
-; LMULMAX1-RV32-NEXT: addi a5, sp, 8
-; LMULMAX1-RV32-NEXT: vlse64.v v12, (a5), zero
-; LMULMAX1-RV32-NEXT: lui a5, 4080
-; LMULMAX1-RV32-NEXT: vand.vx v11, v11, a5
-; LMULMAX1-RV32-NEXT: vsrl.vi v13, v9, 8
-; LMULMAX1-RV32-NEXT: vand.vv v13, v13, v12
-; LMULMAX1-RV32-NEXT: vor.vv v11, v13, v11
-; LMULMAX1-RV32-NEXT: vor.vv v10, v11, v10
-; LMULMAX1-RV32-NEXT: vand.vv v11, v9, v12
-; LMULMAX1-RV32-NEXT: vsll.vi v11, v11, 8
-; LMULMAX1-RV32-NEXT: vand.vx v13, v9, a5
-; LMULMAX1-RV32-NEXT: vsll.vi v13, v13, 24
-; LMULMAX1-RV32-NEXT: vor.vv v11, v13, v11
-; LMULMAX1-RV32-NEXT: vsll.vx v13, v9, a2
-; LMULMAX1-RV32-NEXT: vand.vx v9, v9, a4
-; LMULMAX1-RV32-NEXT: vsll.vx v9, v9, a3
-; LMULMAX1-RV32-NEXT: vor.vv v9, v13, v9
-; LMULMAX1-RV32-NEXT: vor.vv v9, v9, v11
-; LMULMAX1-RV32-NEXT: vor.vv v9, v9, v10
-; LMULMAX1-RV32-NEXT: vsrl.vx v10, v8, a2
-; LMULMAX1-RV32-NEXT: vsrl.vx v11, v8, a3
-; LMULMAX1-RV32-NEXT: vand.vx v11, v11, a4
-; LMULMAX1-RV32-NEXT: vor.vv v10, v11, v10
-; LMULMAX1-RV32-NEXT: vsrl.vi v11, v8, 24
-; LMULMAX1-RV32-NEXT: vand.vx v11, v11, a5
-; LMULMAX1-RV32-NEXT: vsrl.vi v13, v8, 8
-; LMULMAX1-RV32-NEXT: vand.vv v13, v13, v12
-; LMULMAX1-RV32-NEXT: vor.vv v11, v13, v11
-; LMULMAX1-RV32-NEXT: vor.vv v10, v11, v10
-; LMULMAX1-RV32-NEXT: vsll.vx v11, v8, a2
-; LMULMAX1-RV32-NEXT: vand.vx v13, v8, a4
-; LMULMAX1-RV32-NEXT: vsll.vx v13, v13, a3
-; LMULMAX1-RV32-NEXT: vor.vv v11, v11, v13
-; LMULMAX1-RV32-NEXT: vand.vx v13, v8, a5
-; LMULMAX1-RV32-NEXT: vsll.vi v13, v13, 24
-; LMULMAX1-RV32-NEXT: vand.vv v8, v8, v12
-; LMULMAX1-RV32-NEXT: vsll.vi v8, v8, 8
-; LMULMAX1-RV32-NEXT: vor.vv v8, v13, v8
-; LMULMAX1-RV32-NEXT: vor.vv v8, v11, v8
-; LMULMAX1-RV32-NEXT: vor.vv v8, v8, v10
-; LMULMAX1-RV32-NEXT: vse64.v v8, (a0)
-; LMULMAX1-RV32-NEXT: vse64.v v9, (a1)
-; LMULMAX1-RV32-NEXT: addi sp, sp, 16
-; LMULMAX1-RV32-NEXT: ret
+; RV32-LABEL: bswap_v4i64:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: .cfi_def_cfa_offset 16
+; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; RV32-NEXT: vle64.v v8, (a0)
+; RV32-NEXT: sw zero, 12(sp)
+; RV32-NEXT: lui a1, 1044480
+; RV32-NEXT: sw a1, 8(sp)
+; RV32-NEXT: li a1, 56
+; RV32-NEXT: vsrl.vx v10, v8, a1
+; RV32-NEXT: li a2, 40
+; RV32-NEXT: vsrl.vx v12, v8, a2
+; RV32-NEXT: lui a3, 16
+; RV32-NEXT: addi a3, a3, -256
+; RV32-NEXT: vand.vx v12, v12, a3
+; RV32-NEXT: vor.vv v10, v12, v10
+; RV32-NEXT: vsrl.vi v12, v8, 24
+; RV32-NEXT: addi a4, sp, 8
+; RV32-NEXT: vlse64.v v14, (a4), zero
+; RV32-NEXT: lui a4, 4080
+; RV32-NEXT: vand.vx v12, v12, a4
+; RV32-NEXT: vsrl.vi v16, v8, 8
+; RV32-NEXT: vand.vv v16, v16, v14
+; RV32-NEXT: vor.vv v12, v16, v12
+; RV32-NEXT: vor.vv v10, v12, v10
+; RV32-NEXT: vsll.vx v12, v8, a1
+; RV32-NEXT: vand.vx v16, v8, a3
+; RV32-NEXT: vsll.vx v16, v16, a2
+; RV32-NEXT: vor.vv v12, v12, v16
+; RV32-NEXT: vand.vx v16, v8, a4
+; RV32-NEXT: vsll.vi v16, v16, 24
+; RV32-NEXT: vand.vv v8, v8, v14
+; RV32-NEXT: vsll.vi v8, v8, 8
+; RV32-NEXT: vor.vv v8, v16, v8
+; RV32-NEXT: vor.vv v8, v12, v8
+; RV32-NEXT: vor.vv v8, v8, v10
+; RV32-NEXT: vse64.v v8, (a0)
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
;
-; LMULMAX1-RV64-LABEL: bswap_v4i64:
-; LMULMAX1-RV64: # %bb.0:
-; LMULMAX1-RV64-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; LMULMAX1-RV64-NEXT: addi a1, a0, 16
-; LMULMAX1-RV64-NEXT: vle64.v v8, (a1)
-; LMULMAX1-RV64-NEXT: vle64.v v9, (a0)
-; LMULMAX1-RV64-NEXT: li a2, 56
-; LMULMAX1-RV64-NEXT: vsrl.vx v10, v8, a2
-; LMULMAX1-RV64-NEXT: li a3, 40
-; LMULMAX1-RV64-NEXT: vsrl.vx v11, v8, a3
-; LMULMAX1-RV64-NEXT: lui a4, 16
-; LMULMAX1-RV64-NEXT: addiw a4, a4, -256
-; LMULMAX1-RV64-NEXT: vand.vx v11, v11, a4
-; LMULMAX1-RV64-NEXT: vor.vv v10, v11, v10
-; LMULMAX1-RV64-NEXT: vsrl.vi v11, v8, 24
-; LMULMAX1-RV64-NEXT: lui a5, 4080
-; LMULMAX1-RV64-NEXT: vand.vx v11, v11, a5
-; LMULMAX1-RV64-NEXT: vsrl.vi v12, v8, 8
-; LMULMAX1-RV64-NEXT: li a6, 255
-; LMULMAX1-RV64-NEXT: slli a6, a6, 24
-; LMULMAX1-RV64-NEXT: vand.vx v12, v12, a6
-; LMULMAX1-RV64-NEXT: vor.vv v11, v12, v11
-; LMULMAX1-RV64-NEXT: vor.vv v10, v11, v10
-; LMULMAX1-RV64-NEXT: vand.vx v11, v8, a6
-; LMULMAX1-RV64-NEXT: vsll.vi v11, v11, 8
-; LMULMAX1-RV64-NEXT: vand.vx v12, v8, a5
-; LMULMAX1-RV64-NEXT: vsll.vi v12, v12, 24
-; LMULMAX1-RV64-NEXT: vor.vv v11, v12, v11
-; LMULMAX1-RV64-NEXT: vsll.vx v12, v8, a2
-; LMULMAX1-RV64-NEXT: vand.vx v8, v8, a4
-; LMULMAX1-RV64-NEXT: vsll.vx v8, v8, a3
-; LMULMAX1-RV64-NEXT: vor.vv v8, v12, v8
-; LMULMAX1-RV64-NEXT: vor.vv v8, v8, v11
-; LMULMAX1-RV64-NEXT: vor.vv v8, v8, v10
-; LMULMAX1-RV64-NEXT: vsrl.vx v10, v9, a2
-; LMULMAX1-RV64-NEXT: vsrl.vx v11, v9, a3
-; LMULMAX1-RV64-NEXT: vand.vx v11, v11, a4
-; LMULMAX1-RV64-NEXT: vor.vv v10, v11, v10
-; LMULMAX1-RV64-NEXT: vsrl.vi v11, v9, 24
-; LMULMAX1-RV64-NEXT: vand.vx v11, v11, a5
-; LMULMAX1-RV64-NEXT: vsrl.vi v12, v9, 8
-; LMULMAX1-RV64-NEXT: vand.vx v12, v12, a6
-; LMULMAX1-RV64-NEXT: vor.vv v11, v12, v11
-; LMULMAX1-RV64-NEXT: vor.vv v10, v11, v10
-; LMULMAX1-RV64-NEXT: vand.vx v11, v9, a6
-; LMULMAX1-RV64-NEXT: vsll.vi v11, v11, 8
-; LMULMAX1-RV64-NEXT: vand.vx v12, v9, a5
-; LMULMAX1-RV64-NEXT: vsll.vi v12, v12, 24
-; LMULMAX1-RV64-NEXT: vor.vv v11, v12, v11
-; LMULMAX1-RV64-NEXT: vsll.vx v12, v9, a2
-; LMULMAX1-RV64-NEXT: vand.vx v9, v9, a4
-; LMULMAX1-RV64-NEXT: vsll.vx v9, v9, a3
-; LMULMAX1-RV64-NEXT: vor.vv v9, v12, v9
-; LMULMAX1-RV64-NEXT: vor.vv v9, v9, v11
-; LMULMAX1-RV64-NEXT: vor.vv v9, v9, v10
-; LMULMAX1-RV64-NEXT: vse64.v v9, (a0)
-; LMULMAX1-RV64-NEXT: vse64.v v8, (a1)
-; LMULMAX1-RV64-NEXT: ret
+; RV64-LABEL: bswap_v4i64:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; RV64-NEXT: vle64.v v8, (a0)
+; RV64-NEXT: li a1, 56
+; RV64-NEXT: vsrl.vx v10, v8, a1
+; RV64-NEXT: li a2, 40
+; RV64-NEXT: vsrl.vx v12, v8, a2
+; RV64-NEXT: lui a3, 16
+; RV64-NEXT: addiw a3, a3, -256
+; RV64-NEXT: vand.vx v12, v12, a3
+; RV64-NEXT: vor.vv v10, v12, v10
+; RV64-NEXT: vsrl.vi v12, v8, 24
+; RV64-NEXT: lui a4, 4080
+; RV64-NEXT: vand.vx v12, v12, a4
+; RV64-NEXT: vsrl.vi v14, v8, 8
+; RV64-NEXT: li a5, 255
+; RV64-NEXT: slli a5, a5, 24
+; RV64-NEXT: vand.vx v14, v14, a5
+; RV64-NEXT: vor.vv v12, v14, v12
+; RV64-NEXT: vor.vv v10, v12, v10
+; RV64-NEXT: vand.vx v12, v8, a5
+; RV64-NEXT: vsll.vi v12, v12, 8
+; RV64-NEXT: vand.vx v14, v8, a4
+; RV64-NEXT: vsll.vi v14, v14, 24
+; RV64-NEXT: vor.vv v12, v14, v12
+; RV64-NEXT: vsll.vx v14, v8, a1
+; RV64-NEXT: vand.vx v8, v8, a3
+; RV64-NEXT: vsll.vx v8, v8, a2
+; RV64-NEXT: vor.vv v8, v14, v8
+; RV64-NEXT: vor.vv v8, v8, v12
+; RV64-NEXT: vor.vv v8, v8, v10
+; RV64-NEXT: vse64.v v8, (a0)
+; RV64-NEXT: ret
;
; ZVKB-LABEL: bswap_v4i64:
; ZVKB: # %bb.0:
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-calling-conv-fastcc.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-calling-conv-fastcc.ll
index 9ec15e5..63cd42e 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-calling-conv-fastcc.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-calling-conv-fastcc.ll
@@ -1,6 +1,5 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+v -riscv-v-fixed-length-vector-lmul-max=8 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,LMULMAX8
-; RUN: llc -mtriple=riscv64 -mattr=+v -riscv-v-fixed-length-vector-lmul-max=4 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,LMULMAX4
+; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s | FileCheck %s
define fastcc <4 x i8> @ret_v4i8(ptr %p) {
; CHECK-LABEL: ret_v4i8:
@@ -33,19 +32,11 @@ define fastcc <8 x i32> @ret_v8i32(ptr %p) {
}
define fastcc <16 x i64> @ret_v16i64(ptr %p) {
-; LMULMAX8-LABEL: ret_v16i64:
-; LMULMAX8: # %bb.0:
-; LMULMAX8-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; LMULMAX8-NEXT: vle64.v v8, (a0)
-; LMULMAX8-NEXT: ret
-;
-; LMULMAX4-LABEL: ret_v16i64:
-; LMULMAX4: # %bb.0:
-; LMULMAX4-NEXT: vsetivli zero, 8, e64, m4, ta, ma
-; LMULMAX4-NEXT: vle64.v v8, (a0)
-; LMULMAX4-NEXT: addi a0, a0, 64
-; LMULMAX4-NEXT: vle64.v v12, (a0)
-; LMULMAX4-NEXT: ret
+; CHECK-LABEL: ret_v16i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma
+; CHECK-NEXT: vle64.v v8, (a0)
+; CHECK-NEXT: ret
%v = load <16 x i64>, ptr %p
ret <16 x i64> %v
}
@@ -73,86 +64,39 @@ define fastcc <32 x i1> @ret_mask_v32i1(ptr %p) {
; Return the vector via registers v8-v23
define fastcc <64 x i32> @ret_split_v64i32(ptr %x) {
-; LMULMAX8-LABEL: ret_split_v64i32:
-; LMULMAX8: # %bb.0:
-; LMULMAX8-NEXT: li a1, 32
-; LMULMAX8-NEXT: vsetvli zero, a1, e32, m8, ta, ma
-; LMULMAX8-NEXT: vle32.v v8, (a0)
-; LMULMAX8-NEXT: addi a0, a0, 128
-; LMULMAX8-NEXT: vle32.v v16, (a0)
-; LMULMAX8-NEXT: ret
-;
-; LMULMAX4-LABEL: ret_split_v64i32:
-; LMULMAX4: # %bb.0:
-; LMULMAX4-NEXT: vsetivli zero, 16, e32, m4, ta, ma
-; LMULMAX4-NEXT: vle32.v v8, (a0)
-; LMULMAX4-NEXT: addi a1, a0, 64
-; LMULMAX4-NEXT: vle32.v v12, (a1)
-; LMULMAX4-NEXT: addi a1, a0, 128
-; LMULMAX4-NEXT: vle32.v v16, (a1)
-; LMULMAX4-NEXT: addi a0, a0, 192
-; LMULMAX4-NEXT: vle32.v v20, (a0)
-; LMULMAX4-NEXT: ret
+; CHECK-LABEL: ret_split_v64i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a1, 32
+; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
+; CHECK-NEXT: vle32.v v8, (a0)
+; CHECK-NEXT: addi a0, a0, 128
+; CHECK-NEXT: vle32.v v16, (a0)
+; CHECK-NEXT: ret
%v = load <64 x i32>, ptr %x
ret <64 x i32> %v
}
; Return the vector fully via the stack
define fastcc <128 x i32> @ret_split_v128i32(ptr %x) {
-; LMULMAX8-LABEL: ret_split_v128i32:
-; LMULMAX8: # %bb.0:
-; LMULMAX8-NEXT: addi a2, a1, 128
-; LMULMAX8-NEXT: li a3, 32
-; LMULMAX8-NEXT: vsetvli zero, a3, e32, m8, ta, ma
-; LMULMAX8-NEXT: vle32.v v8, (a2)
-; LMULMAX8-NEXT: addi a2, a1, 256
-; LMULMAX8-NEXT: vle32.v v16, (a1)
-; LMULMAX8-NEXT: addi a1, a1, 384
-; LMULMAX8-NEXT: vle32.v v24, (a1)
-; LMULMAX8-NEXT: vle32.v v0, (a2)
-; LMULMAX8-NEXT: vse32.v v16, (a0)
-; LMULMAX8-NEXT: addi a1, a0, 384
-; LMULMAX8-NEXT: vse32.v v24, (a1)
-; LMULMAX8-NEXT: addi a1, a0, 256
-; LMULMAX8-NEXT: vse32.v v0, (a1)
-; LMULMAX8-NEXT: addi a0, a0, 128
-; LMULMAX8-NEXT: vse32.v v8, (a0)
-; LMULMAX8-NEXT: ret
-;
-; LMULMAX4-LABEL: ret_split_v128i32:
-; LMULMAX4: # %bb.0:
-; LMULMAX4-NEXT: addi a2, a1, 64
-; LMULMAX4-NEXT: vsetivli zero, 16, e32, m4, ta, ma
-; LMULMAX4-NEXT: vle32.v v8, (a2)
-; LMULMAX4-NEXT: addi a2, a1, 128
-; LMULMAX4-NEXT: vle32.v v12, (a2)
-; LMULMAX4-NEXT: addi a2, a1, 192
-; LMULMAX4-NEXT: vle32.v v16, (a2)
-; LMULMAX4-NEXT: addi a2, a1, 256
-; LMULMAX4-NEXT: vle32.v v20, (a2)
-; LMULMAX4-NEXT: addi a2, a1, 320
-; LMULMAX4-NEXT: vle32.v v24, (a2)
-; LMULMAX4-NEXT: addi a2, a1, 384
-; LMULMAX4-NEXT: vle32.v v28, (a1)
-; LMULMAX4-NEXT: addi a1, a1, 448
-; LMULMAX4-NEXT: vle32.v v0, (a1)
-; LMULMAX4-NEXT: vle32.v v4, (a2)
-; LMULMAX4-NEXT: vse32.v v28, (a0)
-; LMULMAX4-NEXT: addi a1, a0, 448
-; LMULMAX4-NEXT: vse32.v v0, (a1)
-; LMULMAX4-NEXT: addi a1, a0, 384
-; LMULMAX4-NEXT: vse32.v v4, (a1)
-; LMULMAX4-NEXT: addi a1, a0, 320
-; LMULMAX4-NEXT: vse32.v v24, (a1)
-; LMULMAX4-NEXT: addi a1, a0, 256
-; LMULMAX4-NEXT: vse32.v v20, (a1)
-; LMULMAX4-NEXT: addi a1, a0, 192
-; LMULMAX4-NEXT: vse32.v v16, (a1)
-; LMULMAX4-NEXT: addi a1, a0, 128
-; LMULMAX4-NEXT: vse32.v v12, (a1)
-; LMULMAX4-NEXT: addi a0, a0, 64
-; LMULMAX4-NEXT: vse32.v v8, (a0)
-; LMULMAX4-NEXT: ret
+; CHECK-LABEL: ret_split_v128i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a2, a1, 128
+; CHECK-NEXT: li a3, 32
+; CHECK-NEXT: vsetvli zero, a3, e32, m8, ta, ma
+; CHECK-NEXT: vle32.v v8, (a2)
+; CHECK-NEXT: addi a2, a1, 256
+; CHECK-NEXT: vle32.v v16, (a1)
+; CHECK-NEXT: addi a1, a1, 384
+; CHECK-NEXT: vle32.v v24, (a1)
+; CHECK-NEXT: vle32.v v0, (a2)
+; CHECK-NEXT: vse32.v v16, (a0)
+; CHECK-NEXT: addi a1, a0, 384
+; CHECK-NEXT: vse32.v v24, (a1)
+; CHECK-NEXT: addi a1, a0, 256
+; CHECK-NEXT: vse32.v v0, (a1)
+; CHECK-NEXT: addi a0, a0, 128
+; CHECK-NEXT: vse32.v v8, (a0)
+; CHECK-NEXT: ret
%v = load <128 x i32>, ptr %x
ret <128 x i32> %v
}
@@ -209,29 +153,15 @@ define fastcc <32 x i1> @ret_v32i1_param_v32i1_v32i1(<32 x i1> %v, <32 x i1> %w)
}
define fastcc <32 x i32> @ret_v32i32_param_v32i32_v32i32_v32i32_i32(<32 x i32> %x, <32 x i32> %y, <32 x i32> %z, i32 %w) {
-; LMULMAX8-LABEL: ret_v32i32_param_v32i32_v32i32_v32i32_i32:
-; LMULMAX8: # %bb.0:
-; LMULMAX8-NEXT: li a2, 32
-; LMULMAX8-NEXT: vsetvli zero, a2, e32, m8, ta, ma
-; LMULMAX8-NEXT: vle32.v v24, (a0)
-; LMULMAX8-NEXT: vadd.vv v8, v8, v16
-; LMULMAX8-NEXT: vadd.vv v8, v8, v24
-; LMULMAX8-NEXT: vadd.vx v8, v8, a1
-; LMULMAX8-NEXT: ret
-;
-; LMULMAX4-LABEL: ret_v32i32_param_v32i32_v32i32_v32i32_i32:
-; LMULMAX4: # %bb.0:
-; LMULMAX4-NEXT: vsetivli zero, 16, e32, m4, ta, ma
-; LMULMAX4-NEXT: addi a1, a0, 64
-; LMULMAX4-NEXT: vle32.v v24, (a1)
-; LMULMAX4-NEXT: vle32.v v28, (a0)
-; LMULMAX4-NEXT: vadd.vv v8, v8, v16
-; LMULMAX4-NEXT: vadd.vv v12, v12, v20
-; LMULMAX4-NEXT: vadd.vv v12, v12, v24
-; LMULMAX4-NEXT: vadd.vv v8, v8, v28
-; LMULMAX4-NEXT: vadd.vx v8, v8, a2
-; LMULMAX4-NEXT: vadd.vx v12, v12, a2
-; LMULMAX4-NEXT: ret
+; CHECK-LABEL: ret_v32i32_param_v32i32_v32i32_v32i32_i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a2, 32
+; CHECK-NEXT: vsetvli zero, a2, e32, m8, ta, ma
+; CHECK-NEXT: vle32.v v24, (a0)
+; CHECK-NEXT: vadd.vv v8, v8, v16
+; CHECK-NEXT: vadd.vv v8, v8, v24
+; CHECK-NEXT: vadd.vx v8, v8, a1
+; CHECK-NEXT: ret
%r = add <32 x i32> %x, %y
%s = add <32 x i32> %r, %z
%head = insertelement <32 x i32> poison, i32 %w, i32 0
@@ -244,98 +174,50 @@ declare <32 x i32> @ext2(<32 x i32>, <32 x i32>, i32, i32)
declare <32 x i32> @ext3(<32 x i32>, <32 x i32>, <32 x i32>, i32, i32)
define fastcc <32 x i32> @ret_v32i32_call_v32i32_v32i32_i32(<32 x i32> %x, <32 x i32> %y, i32 %w) {
-; LMULMAX8-LABEL: ret_v32i32_call_v32i32_v32i32_i32:
-; LMULMAX8: # %bb.0:
-; LMULMAX8-NEXT: addi sp, sp, -16
-; LMULMAX8-NEXT: .cfi_def_cfa_offset 16
-; LMULMAX8-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; LMULMAX8-NEXT: .cfi_offset ra, -8
-; LMULMAX8-NEXT: vmv8r.v v24, v8
-; LMULMAX8-NEXT: li a1, 2
-; LMULMAX8-NEXT: vmv8r.v v8, v16
-; LMULMAX8-NEXT: vmv8r.v v16, v24
-; LMULMAX8-NEXT: call ext2
-; LMULMAX8-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
-; LMULMAX8-NEXT: addi sp, sp, 16
-; LMULMAX8-NEXT: ret
-;
-; LMULMAX4-LABEL: ret_v32i32_call_v32i32_v32i32_i32:
-; LMULMAX4: # %bb.0:
-; LMULMAX4-NEXT: addi sp, sp, -16
-; LMULMAX4-NEXT: .cfi_def_cfa_offset 16
-; LMULMAX4-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; LMULMAX4-NEXT: .cfi_offset ra, -8
-; LMULMAX4-NEXT: vmv4r.v v24, v12
-; LMULMAX4-NEXT: vmv4r.v v28, v8
-; LMULMAX4-NEXT: li a1, 2
-; LMULMAX4-NEXT: vmv4r.v v8, v16
-; LMULMAX4-NEXT: vmv4r.v v12, v20
-; LMULMAX4-NEXT: vmv4r.v v16, v28
-; LMULMAX4-NEXT: vmv4r.v v20, v24
-; LMULMAX4-NEXT: call ext2
-; LMULMAX4-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
-; LMULMAX4-NEXT: addi sp, sp, 16
-; LMULMAX4-NEXT: ret
+; CHECK-LABEL: ret_v32i32_call_v32i32_v32i32_i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi sp, sp, -16
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-NEXT: .cfi_offset ra, -8
+; CHECK-NEXT: vmv8r.v v24, v8
+; CHECK-NEXT: li a1, 2
+; CHECK-NEXT: vmv8r.v v8, v16
+; CHECK-NEXT: vmv8r.v v16, v24
+; CHECK-NEXT: call ext2
+; CHECK-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-NEXT: addi sp, sp, 16
+; CHECK-NEXT: ret
%t = call fastcc <32 x i32> @ext2(<32 x i32> %y, <32 x i32> %x, i32 %w, i32 2)
ret <32 x i32> %t
}
define fastcc <32 x i32> @ret_v32i32_call_v32i32_v32i32_v32i32_i32(<32 x i32> %x, <32 x i32> %y, <32 x i32> %z, i32 %w) {
-; LMULMAX8-LABEL: ret_v32i32_call_v32i32_v32i32_v32i32_i32:
-; LMULMAX8: # %bb.0:
-; LMULMAX8-NEXT: addi sp, sp, -256
-; LMULMAX8-NEXT: .cfi_def_cfa_offset 256
-; LMULMAX8-NEXT: sd ra, 248(sp) # 8-byte Folded Spill
-; LMULMAX8-NEXT: sd s0, 240(sp) # 8-byte Folded Spill
-; LMULMAX8-NEXT: .cfi_offset ra, -8
-; LMULMAX8-NEXT: .cfi_offset s0, -16
-; LMULMAX8-NEXT: addi s0, sp, 256
-; LMULMAX8-NEXT: .cfi_def_cfa s0, 0
-; LMULMAX8-NEXT: andi sp, sp, -128
-; LMULMAX8-NEXT: li a2, 32
-; LMULMAX8-NEXT: vsetvli zero, a2, e32, m8, ta, ma
-; LMULMAX8-NEXT: vle32.v v24, (a0)
-; LMULMAX8-NEXT: mv a3, sp
-; LMULMAX8-NEXT: mv a0, sp
-; LMULMAX8-NEXT: li a2, 42
-; LMULMAX8-NEXT: vse32.v v8, (a3)
-; LMULMAX8-NEXT: vmv.v.v v8, v24
-; LMULMAX8-NEXT: call ext3
-; LMULMAX8-NEXT: addi sp, s0, -256
-; LMULMAX8-NEXT: ld ra, 248(sp) # 8-byte Folded Reload
-; LMULMAX8-NEXT: ld s0, 240(sp) # 8-byte Folded Reload
-; LMULMAX8-NEXT: addi sp, sp, 256
-; LMULMAX8-NEXT: ret
-;
-; LMULMAX4-LABEL: ret_v32i32_call_v32i32_v32i32_v32i32_i32:
-; LMULMAX4: # %bb.0:
-; LMULMAX4-NEXT: addi sp, sp, -256
-; LMULMAX4-NEXT: .cfi_def_cfa_offset 256
-; LMULMAX4-NEXT: sd ra, 248(sp) # 8-byte Folded Spill
-; LMULMAX4-NEXT: sd s0, 240(sp) # 8-byte Folded Spill
-; LMULMAX4-NEXT: .cfi_offset ra, -8
-; LMULMAX4-NEXT: .cfi_offset s0, -16
-; LMULMAX4-NEXT: addi s0, sp, 256
-; LMULMAX4-NEXT: .cfi_def_cfa s0, 0
-; LMULMAX4-NEXT: andi sp, sp, -128
-; LMULMAX4-NEXT: vsetivli zero, 16, e32, m4, ta, ma
-; LMULMAX4-NEXT: vle32.v v24, (a0)
-; LMULMAX4-NEXT: addi a0, a0, 64
-; LMULMAX4-NEXT: vle32.v v28, (a0)
-; LMULMAX4-NEXT: addi a0, sp, 64
-; LMULMAX4-NEXT: vse32.v v12, (a0)
-; LMULMAX4-NEXT: mv a1, sp
-; LMULMAX4-NEXT: mv a0, sp
-; LMULMAX4-NEXT: li a3, 42
-; LMULMAX4-NEXT: vse32.v v8, (a1)
-; LMULMAX4-NEXT: vmv.v.v v8, v24
-; LMULMAX4-NEXT: vmv.v.v v12, v28
-; LMULMAX4-NEXT: call ext3
-; LMULMAX4-NEXT: addi sp, s0, -256
-; LMULMAX4-NEXT: ld ra, 248(sp) # 8-byte Folded Reload
-; LMULMAX4-NEXT: ld s0, 240(sp) # 8-byte Folded Reload
-; LMULMAX4-NEXT: addi sp, sp, 256
-; LMULMAX4-NEXT: ret
+; CHECK-LABEL: ret_v32i32_call_v32i32_v32i32_v32i32_i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi sp, sp, -256
+; CHECK-NEXT: .cfi_def_cfa_offset 256
+; CHECK-NEXT: sd ra, 248(sp) # 8-byte Folded Spill
+; CHECK-NEXT: sd s0, 240(sp) # 8-byte Folded Spill
+; CHECK-NEXT: .cfi_offset ra, -8
+; CHECK-NEXT: .cfi_offset s0, -16
+; CHECK-NEXT: addi s0, sp, 256
+; CHECK-NEXT: .cfi_def_cfa s0, 0
+; CHECK-NEXT: andi sp, sp, -128
+; CHECK-NEXT: li a2, 32
+; CHECK-NEXT: vsetvli zero, a2, e32, m8, ta, ma
+; CHECK-NEXT: vle32.v v24, (a0)
+; CHECK-NEXT: mv a3, sp
+; CHECK-NEXT: mv a0, sp
+; CHECK-NEXT: li a2, 42
+; CHECK-NEXT: vse32.v v8, (a3)
+; CHECK-NEXT: vmv.v.v v8, v24
+; CHECK-NEXT: call ext3
+; CHECK-NEXT: addi sp, s0, -256
+; CHECK-NEXT: ld ra, 248(sp) # 8-byte Folded Reload
+; CHECK-NEXT: ld s0, 240(sp) # 8-byte Folded Reload
+; CHECK-NEXT: addi sp, sp, 256
+; CHECK-NEXT: ret
%t = call fastcc <32 x i32> @ext3(<32 x i32> %z, <32 x i32> %y, <32 x i32> %x, i32 %w, i32 42)
ret <32 x i32> %t
}
@@ -344,127 +226,67 @@ define fastcc <32 x i32> @ret_v32i32_call_v32i32_v32i32_v32i32_i32(<32 x i32> %x
; stack, but with fastcc can pass indirectly with the extra GPR registers
; allowed.
define fastcc <32 x i32> @vector_arg_indirect_stack(i32 %0, i32 %1, i32 %2, i32 %3, i32 %4, i32 %5, i32 %6, i32 %7, <32 x i32> %x, <32 x i32> %y, <32 x i32> %z, i32 %8) {
-; LMULMAX8-LABEL: vector_arg_indirect_stack:
-; LMULMAX8: # %bb.0:
-; LMULMAX8-NEXT: li a0, 32
-; LMULMAX8-NEXT: vsetvli zero, a0, e32, m8, ta, ma
-; LMULMAX8-NEXT: vle32.v v16, (t2)
-; LMULMAX8-NEXT: vadd.vv v8, v8, v16
-; LMULMAX8-NEXT: ret
-;
-; LMULMAX4-LABEL: vector_arg_indirect_stack:
-; LMULMAX4: # %bb.0:
-; LMULMAX4-NEXT: addi a0, t2, 64
-; LMULMAX4-NEXT: vsetivli zero, 16, e32, m4, ta, ma
-; LMULMAX4-NEXT: vle32.v v16, (t2)
-; LMULMAX4-NEXT: vle32.v v20, (a0)
-; LMULMAX4-NEXT: vadd.vv v8, v8, v16
-; LMULMAX4-NEXT: vadd.vv v12, v12, v20
-; LMULMAX4-NEXT: ret
+; CHECK-LABEL: vector_arg_indirect_stack:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a0, 32
+; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
+; CHECK-NEXT: vle32.v v16, (t2)
+; CHECK-NEXT: vadd.vv v8, v8, v16
+; CHECK-NEXT: ret
%s = add <32 x i32> %x, %z
ret <32 x i32> %s
}
; Calling the function above. Ensure we pass the arguments correctly.
define fastcc <32 x i32> @pass_vector_arg_indirect_stack(<32 x i32> %x, <32 x i32> %y, <32 x i32> %z) {
-; LMULMAX8-LABEL: pass_vector_arg_indirect_stack:
-; LMULMAX8: # %bb.0:
-; LMULMAX8-NEXT: addi sp, sp, -256
-; LMULMAX8-NEXT: .cfi_def_cfa_offset 256
-; LMULMAX8-NEXT: sd ra, 248(sp) # 8-byte Folded Spill
-; LMULMAX8-NEXT: sd s0, 240(sp) # 8-byte Folded Spill
-; LMULMAX8-NEXT: .cfi_offset ra, -8
-; LMULMAX8-NEXT: .cfi_offset s0, -16
-; LMULMAX8-NEXT: addi s0, sp, 256
-; LMULMAX8-NEXT: .cfi_def_cfa s0, 0
-; LMULMAX8-NEXT: andi sp, sp, -128
-; LMULMAX8-NEXT: li a0, 32
-; LMULMAX8-NEXT: vsetvli zero, a0, e32, m8, ta, ma
-; LMULMAX8-NEXT: vmv.v.i v8, 0
-; LMULMAX8-NEXT: mv a0, sp
-; LMULMAX8-NEXT: li a1, 1
-; LMULMAX8-NEXT: li a2, 2
-; LMULMAX8-NEXT: li a3, 3
-; LMULMAX8-NEXT: li a4, 4
-; LMULMAX8-NEXT: li a5, 5
-; LMULMAX8-NEXT: li a6, 6
-; LMULMAX8-NEXT: li a7, 7
-; LMULMAX8-NEXT: mv t2, sp
-; LMULMAX8-NEXT: li t3, 8
-; LMULMAX8-NEXT: vse32.v v8, (a0)
-; LMULMAX8-NEXT: li a0, 0
-; LMULMAX8-NEXT: vmv.v.i v16, 0
-; LMULMAX8-NEXT: call vector_arg_indirect_stack
-; LMULMAX8-NEXT: addi sp, s0, -256
-; LMULMAX8-NEXT: ld ra, 248(sp) # 8-byte Folded Reload
-; LMULMAX8-NEXT: ld s0, 240(sp) # 8-byte Folded Reload
-; LMULMAX8-NEXT: addi sp, sp, 256
-; LMULMAX8-NEXT: ret
-;
-; LMULMAX4-LABEL: pass_vector_arg_indirect_stack:
-; LMULMAX4: # %bb.0:
-; LMULMAX4-NEXT: addi sp, sp, -256
-; LMULMAX4-NEXT: .cfi_def_cfa_offset 256
-; LMULMAX4-NEXT: sd ra, 248(sp) # 8-byte Folded Spill
-; LMULMAX4-NEXT: sd s0, 240(sp) # 8-byte Folded Spill
-; LMULMAX4-NEXT: .cfi_offset ra, -8
-; LMULMAX4-NEXT: .cfi_offset s0, -16
-; LMULMAX4-NEXT: addi s0, sp, 256
-; LMULMAX4-NEXT: .cfi_def_cfa s0, 0
-; LMULMAX4-NEXT: andi sp, sp, -128
-; LMULMAX4-NEXT: addi a0, sp, 64
-; LMULMAX4-NEXT: vsetivli zero, 16, e32, m4, ta, ma
-; LMULMAX4-NEXT: vmv.v.i v8, 0
-; LMULMAX4-NEXT: vse32.v v8, (a0)
-; LMULMAX4-NEXT: mv a0, sp
-; LMULMAX4-NEXT: li a1, 1
-; LMULMAX4-NEXT: li a2, 2
-; LMULMAX4-NEXT: li a3, 3
-; LMULMAX4-NEXT: li a4, 4
-; LMULMAX4-NEXT: li a5, 5
-; LMULMAX4-NEXT: li a6, 6
-; LMULMAX4-NEXT: li a7, 7
-; LMULMAX4-NEXT: mv t2, sp
-; LMULMAX4-NEXT: li t4, 8
-; LMULMAX4-NEXT: vse32.v v8, (a0)
-; LMULMAX4-NEXT: li a0, 0
-; LMULMAX4-NEXT: vmv.v.i v12, 0
-; LMULMAX4-NEXT: vmv.v.i v16, 0
-; LMULMAX4-NEXT: vmv.v.i v20, 0
-; LMULMAX4-NEXT: call vector_arg_indirect_stack
-; LMULMAX4-NEXT: addi sp, s0, -256
-; LMULMAX4-NEXT: ld ra, 248(sp) # 8-byte Folded Reload
-; LMULMAX4-NEXT: ld s0, 240(sp) # 8-byte Folded Reload
-; LMULMAX4-NEXT: addi sp, sp, 256
-; LMULMAX4-NEXT: ret
+; CHECK-LABEL: pass_vector_arg_indirect_stack:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi sp, sp, -256
+; CHECK-NEXT: .cfi_def_cfa_offset 256
+; CHECK-NEXT: sd ra, 248(sp) # 8-byte Folded Spill
+; CHECK-NEXT: sd s0, 240(sp) # 8-byte Folded Spill
+; CHECK-NEXT: .cfi_offset ra, -8
+; CHECK-NEXT: .cfi_offset s0, -16
+; CHECK-NEXT: addi s0, sp, 256
+; CHECK-NEXT: .cfi_def_cfa s0, 0
+; CHECK-NEXT: andi sp, sp, -128
+; CHECK-NEXT: li a0, 32
+; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
+; CHECK-NEXT: vmv.v.i v8, 0
+; CHECK-NEXT: mv a0, sp
+; CHECK-NEXT: li a1, 1
+; CHECK-NEXT: li a2, 2
+; CHECK-NEXT: li a3, 3
+; CHECK-NEXT: li a4, 4
+; CHECK-NEXT: li a5, 5
+; CHECK-NEXT: li a6, 6
+; CHECK-NEXT: li a7, 7
+; CHECK-NEXT: mv t2, sp
+; CHECK-NEXT: li t3, 8
+; CHECK-NEXT: vse32.v v8, (a0)
+; CHECK-NEXT: li a0, 0
+; CHECK-NEXT: vmv.v.i v16, 0
+; CHECK-NEXT: call vector_arg_indirect_stack
+; CHECK-NEXT: addi sp, s0, -256
+; CHECK-NEXT: ld ra, 248(sp) # 8-byte Folded Reload
+; CHECK-NEXT: ld s0, 240(sp) # 8-byte Folded Reload
+; CHECK-NEXT: addi sp, sp, 256
+; CHECK-NEXT: ret
%s = call fastcc <32 x i32> @vector_arg_indirect_stack(i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, <32 x i32> zeroinitializer, <32 x i32> zeroinitializer, <32 x i32> zeroinitializer, i32 8)
ret <32 x i32> %s
}
; A pathological test case where even with fastcc we must use the stack for arguments %13 and %z
define fastcc <32 x i32> @vector_arg_direct_stack(i32 %0, i32 %1, i32 %2, i32 %3, i32 %4, i32 %5, i32 %6, i32 %7, i32 %8, i32 %9, i32 %10, i32 %11, i32 %12, i32 %13, <32 x i32> %x, <32 x i32> %y, <32 x i32> %z, i32 %last) {
-; LMULMAX8-LABEL: vector_arg_direct_stack:
-; LMULMAX8: # %bb.0:
-; LMULMAX8-NEXT: li a0, 32
-; LMULMAX8-NEXT: addi a1, sp, 8
-; LMULMAX8-NEXT: vsetvli zero, a0, e32, m8, ta, ma
-; LMULMAX8-NEXT: vle32.v v24, (a1)
-; LMULMAX8-NEXT: vadd.vv v8, v8, v16
-; LMULMAX8-NEXT: vadd.vv v8, v8, v24
-; LMULMAX8-NEXT: ret
-;
-; LMULMAX4-LABEL: vector_arg_direct_stack:
-; LMULMAX4: # %bb.0:
-; LMULMAX4-NEXT: vsetivli zero, 16, e32, m4, ta, ma
-; LMULMAX4-NEXT: addi a0, sp, 8
-; LMULMAX4-NEXT: vle32.v v24, (a0)
-; LMULMAX4-NEXT: addi a0, sp, 72
-; LMULMAX4-NEXT: vle32.v v28, (a0)
-; LMULMAX4-NEXT: vadd.vv v12, v12, v20
-; LMULMAX4-NEXT: vadd.vv v8, v8, v16
-; LMULMAX4-NEXT: vadd.vv v8, v8, v24
-; LMULMAX4-NEXT: vadd.vv v12, v12, v28
-; LMULMAX4-NEXT: ret
+; CHECK-LABEL: vector_arg_direct_stack:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a0, 32
+; CHECK-NEXT: addi a1, sp, 8
+; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
+; CHECK-NEXT: vle32.v v24, (a1)
+; CHECK-NEXT: vadd.vv v8, v8, v16
+; CHECK-NEXT: vadd.vv v8, v8, v24
+; CHECK-NEXT: ret
%s = add <32 x i32> %x, %y
%t = add <32 x i32> %s, %z
ret <32 x i32> %t
@@ -472,76 +294,39 @@ define fastcc <32 x i32> @vector_arg_direct_stack(i32 %0, i32 %1, i32 %2, i32 %3
; Calling the function above. Ensure we pass the arguments correctly.
define fastcc <32 x i32> @pass_vector_arg_direct_stack(<32 x i32> %x, <32 x i32> %y, <32 x i32> %z) {
-; LMULMAX8-LABEL: pass_vector_arg_direct_stack:
-; LMULMAX8: # %bb.0:
-; LMULMAX8-NEXT: addi sp, sp, -160
-; LMULMAX8-NEXT: .cfi_def_cfa_offset 160
-; LMULMAX8-NEXT: sd ra, 152(sp) # 8-byte Folded Spill
-; LMULMAX8-NEXT: .cfi_offset ra, -8
-; LMULMAX8-NEXT: li a0, 32
-; LMULMAX8-NEXT: vsetvli zero, a0, e32, m8, ta, ma
-; LMULMAX8-NEXT: vmv.v.i v8, 0
-; LMULMAX8-NEXT: addi a0, sp, 8
-; LMULMAX8-NEXT: vse32.v v8, (a0)
-; LMULMAX8-NEXT: li a0, 1
-; LMULMAX8-NEXT: sd a0, 136(sp)
-; LMULMAX8-NEXT: li a0, 13
-; LMULMAX8-NEXT: li a1, 1
-; LMULMAX8-NEXT: li a2, 2
-; LMULMAX8-NEXT: li a3, 3
-; LMULMAX8-NEXT: li a4, 4
-; LMULMAX8-NEXT: li a5, 5
-; LMULMAX8-NEXT: li a6, 6
-; LMULMAX8-NEXT: li a7, 7
-; LMULMAX8-NEXT: li t2, 8
-; LMULMAX8-NEXT: li t3, 9
-; LMULMAX8-NEXT: li t4, 10
-; LMULMAX8-NEXT: li t5, 11
-; LMULMAX8-NEXT: li t6, 12
-; LMULMAX8-NEXT: sd a0, 0(sp)
-; LMULMAX8-NEXT: li a0, 0
-; LMULMAX8-NEXT: vmv.v.i v16, 0
-; LMULMAX8-NEXT: call vector_arg_direct_stack
-; LMULMAX8-NEXT: ld ra, 152(sp) # 8-byte Folded Reload
-; LMULMAX8-NEXT: addi sp, sp, 160
-; LMULMAX8-NEXT: ret
-;
-; LMULMAX4-LABEL: pass_vector_arg_direct_stack:
-; LMULMAX4: # %bb.0:
-; LMULMAX4-NEXT: addi sp, sp, -160
-; LMULMAX4-NEXT: .cfi_def_cfa_offset 160
-; LMULMAX4-NEXT: sd ra, 152(sp) # 8-byte Folded Spill
-; LMULMAX4-NEXT: .cfi_offset ra, -8
-; LMULMAX4-NEXT: li a0, 1
-; LMULMAX4-NEXT: sd a0, 136(sp)
-; LMULMAX4-NEXT: li a0, 13
-; LMULMAX4-NEXT: sd a0, 0(sp)
-; LMULMAX4-NEXT: addi a0, sp, 72
-; LMULMAX4-NEXT: vsetivli zero, 16, e32, m4, ta, ma
-; LMULMAX4-NEXT: vmv.v.i v8, 0
-; LMULMAX4-NEXT: vse32.v v8, (a0)
-; LMULMAX4-NEXT: addi a0, sp, 8
-; LMULMAX4-NEXT: li a1, 1
-; LMULMAX4-NEXT: li a2, 2
-; LMULMAX4-NEXT: li a3, 3
-; LMULMAX4-NEXT: li a4, 4
-; LMULMAX4-NEXT: li a5, 5
-; LMULMAX4-NEXT: li a6, 6
-; LMULMAX4-NEXT: li a7, 7
-; LMULMAX4-NEXT: li t2, 8
-; LMULMAX4-NEXT: li t3, 9
-; LMULMAX4-NEXT: li t4, 10
-; LMULMAX4-NEXT: li t5, 11
-; LMULMAX4-NEXT: li t6, 12
-; LMULMAX4-NEXT: vse32.v v8, (a0)
-; LMULMAX4-NEXT: li a0, 0
-; LMULMAX4-NEXT: vmv.v.i v12, 0
-; LMULMAX4-NEXT: vmv.v.i v16, 0
-; LMULMAX4-NEXT: vmv.v.i v20, 0
-; LMULMAX4-NEXT: call vector_arg_direct_stack
-; LMULMAX4-NEXT: ld ra, 152(sp) # 8-byte Folded Reload
-; LMULMAX4-NEXT: addi sp, sp, 160
-; LMULMAX4-NEXT: ret
+; CHECK-LABEL: pass_vector_arg_direct_stack:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi sp, sp, -160
+; CHECK-NEXT: .cfi_def_cfa_offset 160
+; CHECK-NEXT: sd ra, 152(sp) # 8-byte Folded Spill
+; CHECK-NEXT: .cfi_offset ra, -8
+; CHECK-NEXT: li a0, 32
+; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
+; CHECK-NEXT: vmv.v.i v8, 0
+; CHECK-NEXT: addi a0, sp, 8
+; CHECK-NEXT: vse32.v v8, (a0)
+; CHECK-NEXT: li a0, 1
+; CHECK-NEXT: sd a0, 136(sp)
+; CHECK-NEXT: li a0, 13
+; CHECK-NEXT: li a1, 1
+; CHECK-NEXT: li a2, 2
+; CHECK-NEXT: li a3, 3
+; CHECK-NEXT: li a4, 4
+; CHECK-NEXT: li a5, 5
+; CHECK-NEXT: li a6, 6
+; CHECK-NEXT: li a7, 7
+; CHECK-NEXT: li t2, 8
+; CHECK-NEXT: li t3, 9
+; CHECK-NEXT: li t4, 10
+; CHECK-NEXT: li t5, 11
+; CHECK-NEXT: li t6, 12
+; CHECK-NEXT: sd a0, 0(sp)
+; CHECK-NEXT: li a0, 0
+; CHECK-NEXT: vmv.v.i v16, 0
+; CHECK-NEXT: call vector_arg_direct_stack
+; CHECK-NEXT: ld ra, 152(sp) # 8-byte Folded Reload
+; CHECK-NEXT: addi sp, sp, 160
+; CHECK-NEXT: ret
%s = call fastcc <32 x i32> @vector_arg_direct_stack(i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, <32 x i32> zeroinitializer, <32 x i32> zeroinitializer, <32 x i32> zeroinitializer, i32 1)
ret <32 x i32> %s
}
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-calling-conv.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-calling-conv.ll
index 8e3a432..3286c33 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-calling-conv.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-calling-conv.ll
@@ -1,8 +1,5 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+zvfh,+v -riscv-v-fixed-length-vector-lmul-max=8 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,LMULMAX8
-; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+zvfh,+v -riscv-v-fixed-length-vector-lmul-max=4 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,LMULMAX4
-; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+zvfh,+v -riscv-v-fixed-length-vector-lmul-max=2 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,LMULMAX2
-; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+zvfh,+v -riscv-v-fixed-length-vector-lmul-max=1 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,LMULMAX1
+; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+zvfh,+v -verify-machineinstrs < %s | FileCheck %s
define <4 x i8> @ret_v4i8(ptr %p) {
; CHECK-LABEL: ret_v4i8:
@@ -25,81 +22,21 @@ define <4 x i32> @ret_v4i32(ptr %p) {
}
define <8 x i32> @ret_v8i32(ptr %p) {
-; LMULMAX8-LABEL: ret_v8i32:
-; LMULMAX8: # %bb.0:
-; LMULMAX8-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; LMULMAX8-NEXT: vle32.v v8, (a0)
-; LMULMAX8-NEXT: ret
-;
-; LMULMAX4-LABEL: ret_v8i32:
-; LMULMAX4: # %bb.0:
-; LMULMAX4-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; LMULMAX4-NEXT: vle32.v v8, (a0)
-; LMULMAX4-NEXT: ret
-;
-; LMULMAX2-LABEL: ret_v8i32:
-; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; LMULMAX2-NEXT: vle32.v v8, (a0)
-; LMULMAX2-NEXT: ret
-;
-; LMULMAX1-LABEL: ret_v8i32:
-; LMULMAX1: # %bb.0:
-; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; LMULMAX1-NEXT: vle32.v v8, (a0)
-; LMULMAX1-NEXT: addi a0, a0, 16
-; LMULMAX1-NEXT: vle32.v v9, (a0)
-; LMULMAX1-NEXT: ret
+; CHECK-LABEL: ret_v8i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; CHECK-NEXT: vle32.v v8, (a0)
+; CHECK-NEXT: ret
%v = load <8 x i32>, ptr %p
ret <8 x i32> %v
}
define <16 x i64> @ret_v16i64(ptr %p) {
-; LMULMAX8-LABEL: ret_v16i64:
-; LMULMAX8: # %bb.0:
-; LMULMAX8-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; LMULMAX8-NEXT: vle64.v v8, (a0)
-; LMULMAX8-NEXT: ret
-;
-; LMULMAX4-LABEL: ret_v16i64:
-; LMULMAX4: # %bb.0:
-; LMULMAX4-NEXT: vsetivli zero, 8, e64, m4, ta, ma
-; LMULMAX4-NEXT: vle64.v v8, (a0)
-; LMULMAX4-NEXT: addi a0, a0, 64
-; LMULMAX4-NEXT: vle64.v v12, (a0)
-; LMULMAX4-NEXT: ret
-;
-; LMULMAX2-LABEL: ret_v16i64:
-; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; LMULMAX2-NEXT: vle64.v v8, (a0)
-; LMULMAX2-NEXT: addi a1, a0, 32
-; LMULMAX2-NEXT: vle64.v v10, (a1)
-; LMULMAX2-NEXT: addi a1, a0, 64
-; LMULMAX2-NEXT: vle64.v v12, (a1)
-; LMULMAX2-NEXT: addi a0, a0, 96
-; LMULMAX2-NEXT: vle64.v v14, (a0)
-; LMULMAX2-NEXT: ret
-;
-; LMULMAX1-LABEL: ret_v16i64:
-; LMULMAX1: # %bb.0:
-; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; LMULMAX1-NEXT: vle64.v v8, (a0)
-; LMULMAX1-NEXT: addi a1, a0, 16
-; LMULMAX1-NEXT: vle64.v v9, (a1)
-; LMULMAX1-NEXT: addi a1, a0, 32
-; LMULMAX1-NEXT: vle64.v v10, (a1)
-; LMULMAX1-NEXT: addi a1, a0, 48
-; LMULMAX1-NEXT: vle64.v v11, (a1)
-; LMULMAX1-NEXT: addi a1, a0, 64
-; LMULMAX1-NEXT: vle64.v v12, (a1)
-; LMULMAX1-NEXT: addi a1, a0, 80
-; LMULMAX1-NEXT: vle64.v v13, (a1)
-; LMULMAX1-NEXT: addi a1, a0, 96
-; LMULMAX1-NEXT: vle64.v v14, (a1)
-; LMULMAX1-NEXT: addi a0, a0, 112
-; LMULMAX1-NEXT: vle64.v v15, (a0)
-; LMULMAX1-NEXT: ret
+; CHECK-LABEL: ret_v16i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma
+; CHECK-NEXT: vle64.v v8, (a0)
+; CHECK-NEXT: ret
%v = load <16 x i64>, ptr %p
ret <16 x i64> %v
}
@@ -115,374 +52,51 @@ define <8 x i1> @ret_mask_v8i1(ptr %p) {
}
define <32 x i1> @ret_mask_v32i1(ptr %p) {
-; LMULMAX8-LABEL: ret_mask_v32i1:
-; LMULMAX8: # %bb.0:
-; LMULMAX8-NEXT: li a1, 32
-; LMULMAX8-NEXT: vsetvli zero, a1, e8, m2, ta, ma
-; LMULMAX8-NEXT: vlm.v v0, (a0)
-; LMULMAX8-NEXT: ret
-;
-; LMULMAX4-LABEL: ret_mask_v32i1:
-; LMULMAX4: # %bb.0:
-; LMULMAX4-NEXT: li a1, 32
-; LMULMAX4-NEXT: vsetvli zero, a1, e8, m2, ta, ma
-; LMULMAX4-NEXT: vlm.v v0, (a0)
-; LMULMAX4-NEXT: ret
-;
-; LMULMAX2-LABEL: ret_mask_v32i1:
-; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: li a1, 32
-; LMULMAX2-NEXT: vsetvli zero, a1, e8, m2, ta, ma
-; LMULMAX2-NEXT: vlm.v v0, (a0)
-; LMULMAX2-NEXT: ret
-;
-; LMULMAX1-LABEL: ret_mask_v32i1:
-; LMULMAX1: # %bb.0:
-; LMULMAX1-NEXT: vsetivli zero, 16, e8, m1, ta, ma
-; LMULMAX1-NEXT: vlm.v v0, (a0)
-; LMULMAX1-NEXT: addi a0, a0, 2
-; LMULMAX1-NEXT: vlm.v v8, (a0)
-; LMULMAX1-NEXT: ret
+; CHECK-LABEL: ret_mask_v32i1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a1, 32
+; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
+; CHECK-NEXT: vlm.v v0, (a0)
+; CHECK-NEXT: ret
%v = load <32 x i1>, ptr %p
ret <32 x i1> %v
}
; Return the vector via registers v8-v23
define <64 x i32> @ret_split_v64i32(ptr %x) {
-; LMULMAX8-LABEL: ret_split_v64i32:
-; LMULMAX8: # %bb.0:
-; LMULMAX8-NEXT: li a1, 32
-; LMULMAX8-NEXT: vsetvli zero, a1, e32, m8, ta, ma
-; LMULMAX8-NEXT: vle32.v v8, (a0)
-; LMULMAX8-NEXT: addi a0, a0, 128
-; LMULMAX8-NEXT: vle32.v v16, (a0)
-; LMULMAX8-NEXT: ret
-;
-; LMULMAX4-LABEL: ret_split_v64i32:
-; LMULMAX4: # %bb.0:
-; LMULMAX4-NEXT: vsetivli zero, 16, e32, m4, ta, ma
-; LMULMAX4-NEXT: vle32.v v8, (a0)
-; LMULMAX4-NEXT: addi a1, a0, 64
-; LMULMAX4-NEXT: vle32.v v12, (a1)
-; LMULMAX4-NEXT: addi a1, a0, 128
-; LMULMAX4-NEXT: vle32.v v16, (a1)
-; LMULMAX4-NEXT: addi a0, a0, 192
-; LMULMAX4-NEXT: vle32.v v20, (a0)
-; LMULMAX4-NEXT: ret
-;
-; LMULMAX2-LABEL: ret_split_v64i32:
-; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; LMULMAX2-NEXT: vle32.v v8, (a0)
-; LMULMAX2-NEXT: addi a1, a0, 32
-; LMULMAX2-NEXT: vle32.v v10, (a1)
-; LMULMAX2-NEXT: addi a1, a0, 64
-; LMULMAX2-NEXT: vle32.v v12, (a1)
-; LMULMAX2-NEXT: addi a1, a0, 96
-; LMULMAX2-NEXT: vle32.v v14, (a1)
-; LMULMAX2-NEXT: addi a1, a0, 128
-; LMULMAX2-NEXT: vle32.v v16, (a1)
-; LMULMAX2-NEXT: addi a1, a0, 160
-; LMULMAX2-NEXT: vle32.v v18, (a1)
-; LMULMAX2-NEXT: addi a1, a0, 192
-; LMULMAX2-NEXT: vle32.v v20, (a1)
-; LMULMAX2-NEXT: addi a0, a0, 224
-; LMULMAX2-NEXT: vle32.v v22, (a0)
-; LMULMAX2-NEXT: ret
-;
-; LMULMAX1-LABEL: ret_split_v64i32:
-; LMULMAX1: # %bb.0:
-; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; LMULMAX1-NEXT: vle32.v v8, (a0)
-; LMULMAX1-NEXT: addi a1, a0, 16
-; LMULMAX1-NEXT: vle32.v v9, (a1)
-; LMULMAX1-NEXT: addi a1, a0, 32
-; LMULMAX1-NEXT: vle32.v v10, (a1)
-; LMULMAX1-NEXT: addi a1, a0, 48
-; LMULMAX1-NEXT: vle32.v v11, (a1)
-; LMULMAX1-NEXT: addi a1, a0, 64
-; LMULMAX1-NEXT: vle32.v v12, (a1)
-; LMULMAX1-NEXT: addi a1, a0, 80
-; LMULMAX1-NEXT: vle32.v v13, (a1)
-; LMULMAX1-NEXT: addi a1, a0, 96
-; LMULMAX1-NEXT: vle32.v v14, (a1)
-; LMULMAX1-NEXT: addi a1, a0, 112
-; LMULMAX1-NEXT: vle32.v v15, (a1)
-; LMULMAX1-NEXT: addi a1, a0, 128
-; LMULMAX1-NEXT: vle32.v v16, (a1)
-; LMULMAX1-NEXT: addi a1, a0, 144
-; LMULMAX1-NEXT: vle32.v v17, (a1)
-; LMULMAX1-NEXT: addi a1, a0, 160
-; LMULMAX1-NEXT: vle32.v v18, (a1)
-; LMULMAX1-NEXT: addi a1, a0, 176
-; LMULMAX1-NEXT: vle32.v v19, (a1)
-; LMULMAX1-NEXT: addi a1, a0, 192
-; LMULMAX1-NEXT: vle32.v v20, (a1)
-; LMULMAX1-NEXT: addi a1, a0, 208
-; LMULMAX1-NEXT: vle32.v v21, (a1)
-; LMULMAX1-NEXT: addi a1, a0, 224
-; LMULMAX1-NEXT: vle32.v v22, (a1)
-; LMULMAX1-NEXT: addi a0, a0, 240
-; LMULMAX1-NEXT: vle32.v v23, (a0)
-; LMULMAX1-NEXT: ret
+; CHECK-LABEL: ret_split_v64i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a1, 32
+; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
+; CHECK-NEXT: vle32.v v8, (a0)
+; CHECK-NEXT: addi a0, a0, 128
+; CHECK-NEXT: vle32.v v16, (a0)
+; CHECK-NEXT: ret
%v = load <64 x i32>, ptr %x
ret <64 x i32> %v
}
; Return the vector fully via the stack
define <128 x i32> @ret_split_v128i32(ptr %x) {
-; LMULMAX8-LABEL: ret_split_v128i32:
-; LMULMAX8: # %bb.0:
-; LMULMAX8-NEXT: addi a2, a1, 128
-; LMULMAX8-NEXT: li a3, 32
-; LMULMAX8-NEXT: vsetvli zero, a3, e32, m8, ta, ma
-; LMULMAX8-NEXT: vle32.v v8, (a2)
-; LMULMAX8-NEXT: addi a2, a1, 256
-; LMULMAX8-NEXT: vle32.v v16, (a1)
-; LMULMAX8-NEXT: addi a1, a1, 384
-; LMULMAX8-NEXT: vle32.v v24, (a1)
-; LMULMAX8-NEXT: vle32.v v0, (a2)
-; LMULMAX8-NEXT: vse32.v v16, (a0)
-; LMULMAX8-NEXT: addi a1, a0, 384
-; LMULMAX8-NEXT: vse32.v v24, (a1)
-; LMULMAX8-NEXT: addi a1, a0, 256
-; LMULMAX8-NEXT: vse32.v v0, (a1)
-; LMULMAX8-NEXT: addi a0, a0, 128
-; LMULMAX8-NEXT: vse32.v v8, (a0)
-; LMULMAX8-NEXT: ret
-;
-; LMULMAX4-LABEL: ret_split_v128i32:
-; LMULMAX4: # %bb.0:
-; LMULMAX4-NEXT: addi a2, a1, 64
-; LMULMAX4-NEXT: vsetivli zero, 16, e32, m4, ta, ma
-; LMULMAX4-NEXT: vle32.v v8, (a2)
-; LMULMAX4-NEXT: addi a2, a1, 128
-; LMULMAX4-NEXT: vle32.v v12, (a2)
-; LMULMAX4-NEXT: addi a2, a1, 192
-; LMULMAX4-NEXT: vle32.v v16, (a2)
-; LMULMAX4-NEXT: addi a2, a1, 256
-; LMULMAX4-NEXT: vle32.v v20, (a2)
-; LMULMAX4-NEXT: addi a2, a1, 320
-; LMULMAX4-NEXT: vle32.v v24, (a2)
-; LMULMAX4-NEXT: addi a2, a1, 384
-; LMULMAX4-NEXT: vle32.v v28, (a1)
-; LMULMAX4-NEXT: addi a1, a1, 448
-; LMULMAX4-NEXT: vle32.v v0, (a1)
-; LMULMAX4-NEXT: vle32.v v4, (a2)
-; LMULMAX4-NEXT: vse32.v v28, (a0)
-; LMULMAX4-NEXT: addi a1, a0, 448
-; LMULMAX4-NEXT: vse32.v v0, (a1)
-; LMULMAX4-NEXT: addi a1, a0, 384
-; LMULMAX4-NEXT: vse32.v v4, (a1)
-; LMULMAX4-NEXT: addi a1, a0, 320
-; LMULMAX4-NEXT: vse32.v v24, (a1)
-; LMULMAX4-NEXT: addi a1, a0, 256
-; LMULMAX4-NEXT: vse32.v v20, (a1)
-; LMULMAX4-NEXT: addi a1, a0, 192
-; LMULMAX4-NEXT: vse32.v v16, (a1)
-; LMULMAX4-NEXT: addi a1, a0, 128
-; LMULMAX4-NEXT: vse32.v v12, (a1)
-; LMULMAX4-NEXT: addi a0, a0, 64
-; LMULMAX4-NEXT: vse32.v v8, (a0)
-; LMULMAX4-NEXT: ret
-;
-; LMULMAX2-LABEL: ret_split_v128i32:
-; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: addi a2, a1, 32
-; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; LMULMAX2-NEXT: vle32.v v8, (a2)
-; LMULMAX2-NEXT: addi a2, a1, 64
-; LMULMAX2-NEXT: vle32.v v10, (a2)
-; LMULMAX2-NEXT: addi a2, a1, 96
-; LMULMAX2-NEXT: vle32.v v12, (a2)
-; LMULMAX2-NEXT: addi a2, a1, 128
-; LMULMAX2-NEXT: vle32.v v14, (a2)
-; LMULMAX2-NEXT: addi a2, a1, 160
-; LMULMAX2-NEXT: vle32.v v16, (a2)
-; LMULMAX2-NEXT: addi a2, a1, 192
-; LMULMAX2-NEXT: vle32.v v18, (a2)
-; LMULMAX2-NEXT: addi a2, a1, 224
-; LMULMAX2-NEXT: vle32.v v20, (a2)
-; LMULMAX2-NEXT: addi a2, a1, 256
-; LMULMAX2-NEXT: vle32.v v22, (a2)
-; LMULMAX2-NEXT: addi a2, a1, 288
-; LMULMAX2-NEXT: vle32.v v24, (a2)
-; LMULMAX2-NEXT: addi a2, a1, 320
-; LMULMAX2-NEXT: vle32.v v26, (a2)
-; LMULMAX2-NEXT: addi a2, a1, 352
-; LMULMAX2-NEXT: vle32.v v28, (a2)
-; LMULMAX2-NEXT: addi a2, a1, 384
-; LMULMAX2-NEXT: vle32.v v30, (a2)
-; LMULMAX2-NEXT: addi a2, a1, 416
-; LMULMAX2-NEXT: vle32.v v0, (a2)
-; LMULMAX2-NEXT: addi a2, a1, 448
-; LMULMAX2-NEXT: vle32.v v2, (a1)
-; LMULMAX2-NEXT: addi a1, a1, 480
-; LMULMAX2-NEXT: vle32.v v4, (a1)
-; LMULMAX2-NEXT: vle32.v v6, (a2)
-; LMULMAX2-NEXT: vse32.v v2, (a0)
-; LMULMAX2-NEXT: addi a1, a0, 480
-; LMULMAX2-NEXT: vse32.v v4, (a1)
-; LMULMAX2-NEXT: addi a1, a0, 448
-; LMULMAX2-NEXT: vse32.v v6, (a1)
-; LMULMAX2-NEXT: addi a1, a0, 416
-; LMULMAX2-NEXT: vse32.v v0, (a1)
-; LMULMAX2-NEXT: addi a1, a0, 384
-; LMULMAX2-NEXT: vse32.v v30, (a1)
-; LMULMAX2-NEXT: addi a1, a0, 352
-; LMULMAX2-NEXT: vse32.v v28, (a1)
-; LMULMAX2-NEXT: addi a1, a0, 320
-; LMULMAX2-NEXT: vse32.v v26, (a1)
-; LMULMAX2-NEXT: addi a1, a0, 288
-; LMULMAX2-NEXT: vse32.v v24, (a1)
-; LMULMAX2-NEXT: addi a1, a0, 256
-; LMULMAX2-NEXT: vse32.v v22, (a1)
-; LMULMAX2-NEXT: addi a1, a0, 224
-; LMULMAX2-NEXT: vse32.v v20, (a1)
-; LMULMAX2-NEXT: addi a1, a0, 192
-; LMULMAX2-NEXT: vse32.v v18, (a1)
-; LMULMAX2-NEXT: addi a1, a0, 160
-; LMULMAX2-NEXT: vse32.v v16, (a1)
-; LMULMAX2-NEXT: addi a1, a0, 128
-; LMULMAX2-NEXT: vse32.v v14, (a1)
-; LMULMAX2-NEXT: addi a1, a0, 96
-; LMULMAX2-NEXT: vse32.v v12, (a1)
-; LMULMAX2-NEXT: addi a1, a0, 64
-; LMULMAX2-NEXT: vse32.v v10, (a1)
-; LMULMAX2-NEXT: addi a0, a0, 32
-; LMULMAX2-NEXT: vse32.v v8, (a0)
-; LMULMAX2-NEXT: ret
-;
-; LMULMAX1-LABEL: ret_split_v128i32:
-; LMULMAX1: # %bb.0:
-; LMULMAX1-NEXT: addi a2, a1, 16
-; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; LMULMAX1-NEXT: vle32.v v8, (a2)
-; LMULMAX1-NEXT: addi a2, a1, 32
-; LMULMAX1-NEXT: vle32.v v9, (a2)
-; LMULMAX1-NEXT: addi a2, a1, 48
-; LMULMAX1-NEXT: vle32.v v10, (a2)
-; LMULMAX1-NEXT: addi a2, a1, 64
-; LMULMAX1-NEXT: vle32.v v11, (a2)
-; LMULMAX1-NEXT: addi a2, a1, 80
-; LMULMAX1-NEXT: vle32.v v12, (a2)
-; LMULMAX1-NEXT: addi a2, a1, 96
-; LMULMAX1-NEXT: vle32.v v13, (a2)
-; LMULMAX1-NEXT: addi a2, a1, 112
-; LMULMAX1-NEXT: vle32.v v14, (a2)
-; LMULMAX1-NEXT: addi a2, a1, 128
-; LMULMAX1-NEXT: vle32.v v15, (a2)
-; LMULMAX1-NEXT: addi a2, a1, 144
-; LMULMAX1-NEXT: vle32.v v16, (a2)
-; LMULMAX1-NEXT: addi a2, a1, 160
-; LMULMAX1-NEXT: vle32.v v17, (a2)
-; LMULMAX1-NEXT: addi a2, a1, 176
-; LMULMAX1-NEXT: vle32.v v18, (a2)
-; LMULMAX1-NEXT: addi a2, a1, 192
-; LMULMAX1-NEXT: vle32.v v19, (a2)
-; LMULMAX1-NEXT: addi a2, a1, 208
-; LMULMAX1-NEXT: vle32.v v20, (a2)
-; LMULMAX1-NEXT: addi a2, a1, 224
-; LMULMAX1-NEXT: vle32.v v21, (a2)
-; LMULMAX1-NEXT: addi a2, a1, 240
-; LMULMAX1-NEXT: vle32.v v22, (a2)
-; LMULMAX1-NEXT: addi a2, a1, 256
-; LMULMAX1-NEXT: vle32.v v23, (a2)
-; LMULMAX1-NEXT: addi a2, a1, 272
-; LMULMAX1-NEXT: vle32.v v24, (a2)
-; LMULMAX1-NEXT: addi a2, a1, 288
-; LMULMAX1-NEXT: vle32.v v25, (a2)
-; LMULMAX1-NEXT: addi a2, a1, 304
-; LMULMAX1-NEXT: vle32.v v26, (a2)
-; LMULMAX1-NEXT: addi a2, a1, 320
-; LMULMAX1-NEXT: vle32.v v27, (a2)
-; LMULMAX1-NEXT: addi a2, a1, 336
-; LMULMAX1-NEXT: vle32.v v28, (a2)
-; LMULMAX1-NEXT: addi a2, a1, 352
-; LMULMAX1-NEXT: vle32.v v29, (a2)
-; LMULMAX1-NEXT: addi a2, a1, 368
-; LMULMAX1-NEXT: vle32.v v30, (a2)
-; LMULMAX1-NEXT: addi a2, a1, 384
-; LMULMAX1-NEXT: vle32.v v31, (a2)
-; LMULMAX1-NEXT: addi a2, a1, 400
-; LMULMAX1-NEXT: vle32.v v0, (a2)
-; LMULMAX1-NEXT: addi a2, a1, 416
-; LMULMAX1-NEXT: vle32.v v1, (a2)
-; LMULMAX1-NEXT: addi a2, a1, 432
-; LMULMAX1-NEXT: vle32.v v2, (a2)
-; LMULMAX1-NEXT: addi a2, a1, 448
-; LMULMAX1-NEXT: vle32.v v3, (a2)
-; LMULMAX1-NEXT: addi a2, a1, 464
-; LMULMAX1-NEXT: vle32.v v4, (a2)
-; LMULMAX1-NEXT: addi a2, a1, 480
-; LMULMAX1-NEXT: vle32.v v5, (a1)
-; LMULMAX1-NEXT: addi a1, a1, 496
-; LMULMAX1-NEXT: vle32.v v6, (a1)
-; LMULMAX1-NEXT: vle32.v v7, (a2)
-; LMULMAX1-NEXT: vse32.v v5, (a0)
-; LMULMAX1-NEXT: addi a1, a0, 496
-; LMULMAX1-NEXT: vse32.v v6, (a1)
-; LMULMAX1-NEXT: addi a1, a0, 480
-; LMULMAX1-NEXT: vse32.v v7, (a1)
-; LMULMAX1-NEXT: addi a1, a0, 464
-; LMULMAX1-NEXT: vse32.v v4, (a1)
-; LMULMAX1-NEXT: addi a1, a0, 448
-; LMULMAX1-NEXT: vse32.v v3, (a1)
-; LMULMAX1-NEXT: addi a1, a0, 432
-; LMULMAX1-NEXT: vse32.v v2, (a1)
-; LMULMAX1-NEXT: addi a1, a0, 416
-; LMULMAX1-NEXT: vse32.v v1, (a1)
-; LMULMAX1-NEXT: addi a1, a0, 400
-; LMULMAX1-NEXT: vse32.v v0, (a1)
-; LMULMAX1-NEXT: addi a1, a0, 384
-; LMULMAX1-NEXT: vse32.v v31, (a1)
-; LMULMAX1-NEXT: addi a1, a0, 368
-; LMULMAX1-NEXT: vse32.v v30, (a1)
-; LMULMAX1-NEXT: addi a1, a0, 352
-; LMULMAX1-NEXT: vse32.v v29, (a1)
-; LMULMAX1-NEXT: addi a1, a0, 336
-; LMULMAX1-NEXT: vse32.v v28, (a1)
-; LMULMAX1-NEXT: addi a1, a0, 320
-; LMULMAX1-NEXT: vse32.v v27, (a1)
-; LMULMAX1-NEXT: addi a1, a0, 304
-; LMULMAX1-NEXT: vse32.v v26, (a1)
-; LMULMAX1-NEXT: addi a1, a0, 288
-; LMULMAX1-NEXT: vse32.v v25, (a1)
-; LMULMAX1-NEXT: addi a1, a0, 272
-; LMULMAX1-NEXT: vse32.v v24, (a1)
-; LMULMAX1-NEXT: addi a1, a0, 256
-; LMULMAX1-NEXT: vse32.v v23, (a1)
-; LMULMAX1-NEXT: addi a1, a0, 240
-; LMULMAX1-NEXT: vse32.v v22, (a1)
-; LMULMAX1-NEXT: addi a1, a0, 224
-; LMULMAX1-NEXT: vse32.v v21, (a1)
-; LMULMAX1-NEXT: addi a1, a0, 208
-; LMULMAX1-NEXT: vse32.v v20, (a1)
-; LMULMAX1-NEXT: addi a1, a0, 192
-; LMULMAX1-NEXT: vse32.v v19, (a1)
-; LMULMAX1-NEXT: addi a1, a0, 176
-; LMULMAX1-NEXT: vse32.v v18, (a1)
-; LMULMAX1-NEXT: addi a1, a0, 160
-; LMULMAX1-NEXT: vse32.v v17, (a1)
-; LMULMAX1-NEXT: addi a1, a0, 144
-; LMULMAX1-NEXT: vse32.v v16, (a1)
-; LMULMAX1-NEXT: addi a1, a0, 128
-; LMULMAX1-NEXT: vse32.v v15, (a1)
-; LMULMAX1-NEXT: addi a1, a0, 112
-; LMULMAX1-NEXT: vse32.v v14, (a1)
-; LMULMAX1-NEXT: addi a1, a0, 96
-; LMULMAX1-NEXT: vse32.v v13, (a1)
-; LMULMAX1-NEXT: addi a1, a0, 80
-; LMULMAX1-NEXT: vse32.v v12, (a1)
-; LMULMAX1-NEXT: addi a1, a0, 64
-; LMULMAX1-NEXT: vse32.v v11, (a1)
-; LMULMAX1-NEXT: addi a1, a0, 48
-; LMULMAX1-NEXT: vse32.v v10, (a1)
-; LMULMAX1-NEXT: addi a1, a0, 32
-; LMULMAX1-NEXT: vse32.v v9, (a1)
-; LMULMAX1-NEXT: addi a0, a0, 16
-; LMULMAX1-NEXT: vse32.v v8, (a0)
-; LMULMAX1-NEXT: ret
+; CHECK-LABEL: ret_split_v128i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a2, a1, 128
+; CHECK-NEXT: li a3, 32
+; CHECK-NEXT: vsetvli zero, a3, e32, m8, ta, ma
+; CHECK-NEXT: vle32.v v8, (a2)
+; CHECK-NEXT: addi a2, a1, 256
+; CHECK-NEXT: vle32.v v16, (a1)
+; CHECK-NEXT: addi a1, a1, 384
+; CHECK-NEXT: vle32.v v24, (a1)
+; CHECK-NEXT: vle32.v v0, (a2)
+; CHECK-NEXT: vse32.v v16, (a0)
+; CHECK-NEXT: addi a1, a0, 384
+; CHECK-NEXT: vse32.v v24, (a1)
+; CHECK-NEXT: addi a1, a0, 256
+; CHECK-NEXT: vse32.v v0, (a1)
+; CHECK-NEXT: addi a0, a0, 128
+; CHECK-NEXT: vse32.v v8, (a0)
+; CHECK-NEXT: ret
%v = load <128 x i32>, ptr %x
ret <128 x i32> %v
}
@@ -508,30 +122,11 @@ define <4 x i8> @ret_v4i8_param_v4i8_v4i8(<4 x i8> %v, <4 x i8> %w) {
}
define <4 x i64> @ret_v4i64_param_v4i64_v4i64(<4 x i64> %v, <4 x i64> %w) {
-; LMULMAX8-LABEL: ret_v4i64_param_v4i64_v4i64:
-; LMULMAX8: # %bb.0:
-; LMULMAX8-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; LMULMAX8-NEXT: vadd.vv v8, v8, v10
-; LMULMAX8-NEXT: ret
-;
-; LMULMAX4-LABEL: ret_v4i64_param_v4i64_v4i64:
-; LMULMAX4: # %bb.0:
-; LMULMAX4-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; LMULMAX4-NEXT: vadd.vv v8, v8, v10
-; LMULMAX4-NEXT: ret
-;
-; LMULMAX2-LABEL: ret_v4i64_param_v4i64_v4i64:
-; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; LMULMAX2-NEXT: vadd.vv v8, v8, v10
-; LMULMAX2-NEXT: ret
-;
-; LMULMAX1-LABEL: ret_v4i64_param_v4i64_v4i64:
-; LMULMAX1: # %bb.0:
-; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; LMULMAX1-NEXT: vadd.vv v8, v8, v10
-; LMULMAX1-NEXT: vadd.vv v9, v9, v11
-; LMULMAX1-NEXT: ret
+; CHECK-LABEL: ret_v4i64_param_v4i64_v4i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; CHECK-NEXT: vadd.vv v8, v8, v10
+; CHECK-NEXT: ret
%r = add <4 x i64> %v, %w
ret <4 x i64> %r
}
@@ -547,130 +142,26 @@ define <8 x i1> @ret_v8i1_param_v8i1_v8i1(<8 x i1> %v, <8 x i1> %w) {
}
define <32 x i1> @ret_v32i1_param_v32i1_v32i1(<32 x i1> %v, <32 x i1> %w) {
-; LMULMAX8-LABEL: ret_v32i1_param_v32i1_v32i1:
-; LMULMAX8: # %bb.0:
-; LMULMAX8-NEXT: li a0, 32
-; LMULMAX8-NEXT: vsetvli zero, a0, e8, m2, ta, ma
-; LMULMAX8-NEXT: vmand.mm v0, v0, v8
-; LMULMAX8-NEXT: ret
-;
-; LMULMAX4-LABEL: ret_v32i1_param_v32i1_v32i1:
-; LMULMAX4: # %bb.0:
-; LMULMAX4-NEXT: li a0, 32
-; LMULMAX4-NEXT: vsetvli zero, a0, e8, m2, ta, ma
-; LMULMAX4-NEXT: vmand.mm v0, v0, v8
-; LMULMAX4-NEXT: ret
-;
-; LMULMAX2-LABEL: ret_v32i1_param_v32i1_v32i1:
-; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: li a0, 32
-; LMULMAX2-NEXT: vsetvli zero, a0, e8, m2, ta, ma
-; LMULMAX2-NEXT: vmand.mm v0, v0, v8
-; LMULMAX2-NEXT: ret
-;
-; LMULMAX1-LABEL: ret_v32i1_param_v32i1_v32i1:
-; LMULMAX1: # %bb.0:
-; LMULMAX1-NEXT: vsetivli zero, 16, e8, m1, ta, ma
-; LMULMAX1-NEXT: vmand.mm v0, v0, v9
-; LMULMAX1-NEXT: vmand.mm v8, v8, v10
-; LMULMAX1-NEXT: ret
+; CHECK-LABEL: ret_v32i1_param_v32i1_v32i1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a0, 32
+; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
+; CHECK-NEXT: vmand.mm v0, v0, v8
+; CHECK-NEXT: ret
%r = and <32 x i1> %v, %w
ret <32 x i1> %r
}
define <32 x i32> @ret_v32i32_param_v32i32_v32i32_v32i32_i32(<32 x i32> %x, <32 x i32> %y, <32 x i32> %z, i32 %w) {
-; LMULMAX8-LABEL: ret_v32i32_param_v32i32_v32i32_v32i32_i32:
-; LMULMAX8: # %bb.0:
-; LMULMAX8-NEXT: li a2, 32
-; LMULMAX8-NEXT: vsetvli zero, a2, e32, m8, ta, ma
-; LMULMAX8-NEXT: vle32.v v24, (a0)
-; LMULMAX8-NEXT: vadd.vv v8, v8, v16
-; LMULMAX8-NEXT: vadd.vv v8, v8, v24
-; LMULMAX8-NEXT: vadd.vx v8, v8, a1
-; LMULMAX8-NEXT: ret
-;
-; LMULMAX4-LABEL: ret_v32i32_param_v32i32_v32i32_v32i32_i32:
-; LMULMAX4: # %bb.0:
-; LMULMAX4-NEXT: vsetivli zero, 16, e32, m4, ta, ma
-; LMULMAX4-NEXT: addi a1, a0, 64
-; LMULMAX4-NEXT: vle32.v v24, (a1)
-; LMULMAX4-NEXT: vle32.v v28, (a0)
-; LMULMAX4-NEXT: vadd.vv v8, v8, v16
-; LMULMAX4-NEXT: vadd.vv v12, v12, v20
-; LMULMAX4-NEXT: vadd.vv v12, v12, v24
-; LMULMAX4-NEXT: vadd.vv v8, v8, v28
-; LMULMAX4-NEXT: vadd.vx v8, v8, a2
-; LMULMAX4-NEXT: vadd.vx v12, v12, a2
-; LMULMAX4-NEXT: ret
-;
-; LMULMAX2-LABEL: ret_v32i32_param_v32i32_v32i32_v32i32_i32:
-; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; LMULMAX2-NEXT: vle32.v v24, (a0)
-; LMULMAX2-NEXT: addi a1, a0, 32
-; LMULMAX2-NEXT: vle32.v v26, (a1)
-; LMULMAX2-NEXT: addi a1, a0, 64
-; LMULMAX2-NEXT: vle32.v v28, (a1)
-; LMULMAX2-NEXT: addi a0, a0, 96
-; LMULMAX2-NEXT: vle32.v v30, (a0)
-; LMULMAX2-NEXT: vadd.vv v8, v8, v16
-; LMULMAX2-NEXT: vadd.vv v10, v10, v18
-; LMULMAX2-NEXT: vadd.vv v12, v12, v20
-; LMULMAX2-NEXT: vadd.vv v14, v14, v22
-; LMULMAX2-NEXT: vadd.vv v14, v14, v30
-; LMULMAX2-NEXT: vadd.vv v12, v12, v28
-; LMULMAX2-NEXT: vadd.vv v10, v10, v26
-; LMULMAX2-NEXT: vadd.vv v8, v8, v24
-; LMULMAX2-NEXT: vadd.vx v8, v8, a4
-; LMULMAX2-NEXT: vadd.vx v10, v10, a4
-; LMULMAX2-NEXT: vadd.vx v12, v12, a4
-; LMULMAX2-NEXT: vadd.vx v14, v14, a4
-; LMULMAX2-NEXT: ret
-;
-; LMULMAX1-LABEL: ret_v32i32_param_v32i32_v32i32_v32i32_i32:
-; LMULMAX1: # %bb.0:
-; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; LMULMAX1-NEXT: vle32.v v24, (a0)
-; LMULMAX1-NEXT: addi a1, a0, 16
-; LMULMAX1-NEXT: vle32.v v25, (a1)
-; LMULMAX1-NEXT: addi a1, a0, 32
-; LMULMAX1-NEXT: vle32.v v26, (a1)
-; LMULMAX1-NEXT: addi a1, a0, 48
-; LMULMAX1-NEXT: vle32.v v27, (a1)
-; LMULMAX1-NEXT: addi a1, a0, 64
-; LMULMAX1-NEXT: vle32.v v28, (a1)
-; LMULMAX1-NEXT: addi a1, a0, 80
-; LMULMAX1-NEXT: vle32.v v29, (a1)
-; LMULMAX1-NEXT: addi a1, a0, 96
-; LMULMAX1-NEXT: vle32.v v30, (a1)
-; LMULMAX1-NEXT: addi a0, a0, 112
-; LMULMAX1-NEXT: vle32.v v31, (a0)
-; LMULMAX1-NEXT: lw a0, 0(sp)
-; LMULMAX1-NEXT: vadd.vv v8, v8, v16
-; LMULMAX1-NEXT: vadd.vv v9, v9, v17
-; LMULMAX1-NEXT: vadd.vv v10, v10, v18
-; LMULMAX1-NEXT: vadd.vv v11, v11, v19
-; LMULMAX1-NEXT: vadd.vv v12, v12, v20
-; LMULMAX1-NEXT: vadd.vv v13, v13, v21
-; LMULMAX1-NEXT: vadd.vv v14, v14, v22
-; LMULMAX1-NEXT: vadd.vv v15, v15, v23
-; LMULMAX1-NEXT: vadd.vv v15, v15, v31
-; LMULMAX1-NEXT: vadd.vv v14, v14, v30
-; LMULMAX1-NEXT: vadd.vv v13, v13, v29
-; LMULMAX1-NEXT: vadd.vv v12, v12, v28
-; LMULMAX1-NEXT: vadd.vv v11, v11, v27
-; LMULMAX1-NEXT: vadd.vv v10, v10, v26
-; LMULMAX1-NEXT: vadd.vv v9, v9, v25
-; LMULMAX1-NEXT: vadd.vv v8, v8, v24
-; LMULMAX1-NEXT: vadd.vx v8, v8, a0
-; LMULMAX1-NEXT: vadd.vx v9, v9, a0
-; LMULMAX1-NEXT: vadd.vx v10, v10, a0
-; LMULMAX1-NEXT: vadd.vx v11, v11, a0
-; LMULMAX1-NEXT: vadd.vx v12, v12, a0
-; LMULMAX1-NEXT: vadd.vx v13, v13, a0
-; LMULMAX1-NEXT: vadd.vx v14, v14, a0
-; LMULMAX1-NEXT: vadd.vx v15, v15, a0
-; LMULMAX1-NEXT: ret
+; CHECK-LABEL: ret_v32i32_param_v32i32_v32i32_v32i32_i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a2, 32
+; CHECK-NEXT: vsetvli zero, a2, e32, m8, ta, ma
+; CHECK-NEXT: vle32.v v24, (a0)
+; CHECK-NEXT: vadd.vv v8, v8, v16
+; CHECK-NEXT: vadd.vv v8, v8, v24
+; CHECK-NEXT: vadd.vx v8, v8, a1
+; CHECK-NEXT: ret
%r = add <32 x i32> %x, %y
%s = add <32 x i32> %r, %z
%head = insertelement <32 x i32> poison, i32 %w, i32 0
@@ -683,499 +174,105 @@ declare <32 x i32> @ext2(<32 x i32>, <32 x i32>, i32, i32)
declare <32 x i32> @ext3(<32 x i32>, <32 x i32>, <32 x i32>, i32, i32)
define <32 x i32> @ret_v32i32_call_v32i32_v32i32_i32(<32 x i32> %x, <32 x i32> %y, i32 %w) {
-; LMULMAX8-LABEL: ret_v32i32_call_v32i32_v32i32_i32:
-; LMULMAX8: # %bb.0:
-; LMULMAX8-NEXT: addi sp, sp, -16
-; LMULMAX8-NEXT: .cfi_def_cfa_offset 16
-; LMULMAX8-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; LMULMAX8-NEXT: .cfi_offset ra, -8
-; LMULMAX8-NEXT: vmv8r.v v24, v8
-; LMULMAX8-NEXT: li a1, 2
-; LMULMAX8-NEXT: vmv8r.v v8, v16
-; LMULMAX8-NEXT: vmv8r.v v16, v24
-; LMULMAX8-NEXT: call ext2
-; LMULMAX8-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
-; LMULMAX8-NEXT: addi sp, sp, 16
-; LMULMAX8-NEXT: ret
-;
-; LMULMAX4-LABEL: ret_v32i32_call_v32i32_v32i32_i32:
-; LMULMAX4: # %bb.0:
-; LMULMAX4-NEXT: addi sp, sp, -16
-; LMULMAX4-NEXT: .cfi_def_cfa_offset 16
-; LMULMAX4-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; LMULMAX4-NEXT: .cfi_offset ra, -8
-; LMULMAX4-NEXT: vmv4r.v v24, v12
-; LMULMAX4-NEXT: vmv4r.v v28, v8
-; LMULMAX4-NEXT: li a1, 2
-; LMULMAX4-NEXT: vmv4r.v v8, v16
-; LMULMAX4-NEXT: vmv4r.v v12, v20
-; LMULMAX4-NEXT: vmv4r.v v16, v28
-; LMULMAX4-NEXT: vmv4r.v v20, v24
-; LMULMAX4-NEXT: call ext2
-; LMULMAX4-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
-; LMULMAX4-NEXT: addi sp, sp, 16
-; LMULMAX4-NEXT: ret
-;
-; LMULMAX2-LABEL: ret_v32i32_call_v32i32_v32i32_i32:
-; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: addi sp, sp, -16
-; LMULMAX2-NEXT: .cfi_def_cfa_offset 16
-; LMULMAX2-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; LMULMAX2-NEXT: .cfi_offset ra, -8
-; LMULMAX2-NEXT: vmv2r.v v24, v14
-; LMULMAX2-NEXT: vmv2r.v v26, v12
-; LMULMAX2-NEXT: vmv2r.v v28, v10
-; LMULMAX2-NEXT: vmv2r.v v30, v8
-; LMULMAX2-NEXT: li a1, 2
-; LMULMAX2-NEXT: vmv2r.v v8, v16
-; LMULMAX2-NEXT: vmv2r.v v10, v18
-; LMULMAX2-NEXT: vmv2r.v v12, v20
-; LMULMAX2-NEXT: vmv2r.v v14, v22
-; LMULMAX2-NEXT: vmv2r.v v16, v30
-; LMULMAX2-NEXT: vmv2r.v v18, v28
-; LMULMAX2-NEXT: vmv2r.v v20, v26
-; LMULMAX2-NEXT: vmv2r.v v22, v24
-; LMULMAX2-NEXT: call ext2
-; LMULMAX2-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
-; LMULMAX2-NEXT: addi sp, sp, 16
-; LMULMAX2-NEXT: ret
-;
-; LMULMAX1-LABEL: ret_v32i32_call_v32i32_v32i32_i32:
-; LMULMAX1: # %bb.0:
-; LMULMAX1-NEXT: addi sp, sp, -16
-; LMULMAX1-NEXT: .cfi_def_cfa_offset 16
-; LMULMAX1-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; LMULMAX1-NEXT: .cfi_offset ra, -8
-; LMULMAX1-NEXT: vmv1r.v v24, v15
-; LMULMAX1-NEXT: vmv1r.v v25, v14
-; LMULMAX1-NEXT: vmv1r.v v26, v13
-; LMULMAX1-NEXT: vmv1r.v v27, v12
-; LMULMAX1-NEXT: vmv1r.v v28, v11
-; LMULMAX1-NEXT: vmv1r.v v29, v10
-; LMULMAX1-NEXT: vmv1r.v v30, v9
-; LMULMAX1-NEXT: vmv1r.v v31, v8
-; LMULMAX1-NEXT: li a1, 2
-; LMULMAX1-NEXT: vmv1r.v v8, v16
-; LMULMAX1-NEXT: vmv1r.v v9, v17
-; LMULMAX1-NEXT: vmv1r.v v10, v18
-; LMULMAX1-NEXT: vmv1r.v v11, v19
-; LMULMAX1-NEXT: vmv1r.v v12, v20
-; LMULMAX1-NEXT: vmv1r.v v13, v21
-; LMULMAX1-NEXT: vmv1r.v v14, v22
-; LMULMAX1-NEXT: vmv1r.v v15, v23
-; LMULMAX1-NEXT: vmv1r.v v16, v31
-; LMULMAX1-NEXT: vmv1r.v v17, v30
-; LMULMAX1-NEXT: vmv1r.v v18, v29
-; LMULMAX1-NEXT: vmv1r.v v19, v28
-; LMULMAX1-NEXT: vmv1r.v v20, v27
-; LMULMAX1-NEXT: vmv1r.v v21, v26
-; LMULMAX1-NEXT: vmv1r.v v22, v25
-; LMULMAX1-NEXT: vmv1r.v v23, v24
-; LMULMAX1-NEXT: call ext2
-; LMULMAX1-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
-; LMULMAX1-NEXT: addi sp, sp, 16
-; LMULMAX1-NEXT: ret
+; CHECK-LABEL: ret_v32i32_call_v32i32_v32i32_i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi sp, sp, -16
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-NEXT: .cfi_offset ra, -8
+; CHECK-NEXT: vmv8r.v v24, v8
+; CHECK-NEXT: li a1, 2
+; CHECK-NEXT: vmv8r.v v8, v16
+; CHECK-NEXT: vmv8r.v v16, v24
+; CHECK-NEXT: call ext2
+; CHECK-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-NEXT: addi sp, sp, 16
+; CHECK-NEXT: ret
%t = call <32 x i32> @ext2(<32 x i32> %y, <32 x i32> %x, i32 %w, i32 2)
ret <32 x i32> %t
}
define <32 x i32> @ret_v32i32_call_v32i32_v32i32_v32i32_i32(<32 x i32> %x, <32 x i32> %y, <32 x i32> %z, i32 %w) {
-; LMULMAX8-LABEL: ret_v32i32_call_v32i32_v32i32_v32i32_i32:
-; LMULMAX8: # %bb.0:
-; LMULMAX8-NEXT: addi sp, sp, -256
-; LMULMAX8-NEXT: .cfi_def_cfa_offset 256
-; LMULMAX8-NEXT: sd ra, 248(sp) # 8-byte Folded Spill
-; LMULMAX8-NEXT: sd s0, 240(sp) # 8-byte Folded Spill
-; LMULMAX8-NEXT: .cfi_offset ra, -8
-; LMULMAX8-NEXT: .cfi_offset s0, -16
-; LMULMAX8-NEXT: addi s0, sp, 256
-; LMULMAX8-NEXT: .cfi_def_cfa s0, 0
-; LMULMAX8-NEXT: andi sp, sp, -128
-; LMULMAX8-NEXT: li a2, 32
-; LMULMAX8-NEXT: vsetvli zero, a2, e32, m8, ta, ma
-; LMULMAX8-NEXT: vle32.v v24, (a0)
-; LMULMAX8-NEXT: mv a3, sp
-; LMULMAX8-NEXT: mv a0, sp
-; LMULMAX8-NEXT: li a2, 42
-; LMULMAX8-NEXT: vse32.v v8, (a3)
-; LMULMAX8-NEXT: vmv.v.v v8, v24
-; LMULMAX8-NEXT: call ext3
-; LMULMAX8-NEXT: addi sp, s0, -256
-; LMULMAX8-NEXT: ld ra, 248(sp) # 8-byte Folded Reload
-; LMULMAX8-NEXT: ld s0, 240(sp) # 8-byte Folded Reload
-; LMULMAX8-NEXT: addi sp, sp, 256
-; LMULMAX8-NEXT: ret
-;
-; LMULMAX4-LABEL: ret_v32i32_call_v32i32_v32i32_v32i32_i32:
-; LMULMAX4: # %bb.0:
-; LMULMAX4-NEXT: addi sp, sp, -256
-; LMULMAX4-NEXT: .cfi_def_cfa_offset 256
-; LMULMAX4-NEXT: sd ra, 248(sp) # 8-byte Folded Spill
-; LMULMAX4-NEXT: sd s0, 240(sp) # 8-byte Folded Spill
-; LMULMAX4-NEXT: .cfi_offset ra, -8
-; LMULMAX4-NEXT: .cfi_offset s0, -16
-; LMULMAX4-NEXT: addi s0, sp, 256
-; LMULMAX4-NEXT: .cfi_def_cfa s0, 0
-; LMULMAX4-NEXT: andi sp, sp, -128
-; LMULMAX4-NEXT: vsetivli zero, 16, e32, m4, ta, ma
-; LMULMAX4-NEXT: vle32.v v24, (a0)
-; LMULMAX4-NEXT: addi a0, a0, 64
-; LMULMAX4-NEXT: vle32.v v28, (a0)
-; LMULMAX4-NEXT: addi a0, sp, 64
-; LMULMAX4-NEXT: vse32.v v12, (a0)
-; LMULMAX4-NEXT: mv a1, sp
-; LMULMAX4-NEXT: mv a0, sp
-; LMULMAX4-NEXT: li a3, 42
-; LMULMAX4-NEXT: vse32.v v8, (a1)
-; LMULMAX4-NEXT: vmv.v.v v8, v24
-; LMULMAX4-NEXT: vmv.v.v v12, v28
-; LMULMAX4-NEXT: call ext3
-; LMULMAX4-NEXT: addi sp, s0, -256
-; LMULMAX4-NEXT: ld ra, 248(sp) # 8-byte Folded Reload
-; LMULMAX4-NEXT: ld s0, 240(sp) # 8-byte Folded Reload
-; LMULMAX4-NEXT: addi sp, sp, 256
-; LMULMAX4-NEXT: ret
-;
-; LMULMAX2-LABEL: ret_v32i32_call_v32i32_v32i32_v32i32_i32:
-; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: addi sp, sp, -256
-; LMULMAX2-NEXT: .cfi_def_cfa_offset 256
-; LMULMAX2-NEXT: sd ra, 248(sp) # 8-byte Folded Spill
-; LMULMAX2-NEXT: sd s0, 240(sp) # 8-byte Folded Spill
-; LMULMAX2-NEXT: .cfi_offset ra, -8
-; LMULMAX2-NEXT: .cfi_offset s0, -16
-; LMULMAX2-NEXT: addi s0, sp, 256
-; LMULMAX2-NEXT: .cfi_def_cfa s0, 0
-; LMULMAX2-NEXT: andi sp, sp, -128
-; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; LMULMAX2-NEXT: vle32.v v24, (a0)
-; LMULMAX2-NEXT: addi a1, a0, 32
-; LMULMAX2-NEXT: vle32.v v26, (a1)
-; LMULMAX2-NEXT: addi a1, a0, 64
-; LMULMAX2-NEXT: vle32.v v28, (a1)
-; LMULMAX2-NEXT: addi a0, a0, 96
-; LMULMAX2-NEXT: vle32.v v30, (a0)
-; LMULMAX2-NEXT: addi a0, sp, 96
-; LMULMAX2-NEXT: vse32.v v14, (a0)
-; LMULMAX2-NEXT: addi a0, sp, 64
-; LMULMAX2-NEXT: vse32.v v12, (a0)
-; LMULMAX2-NEXT: addi a0, sp, 32
-; LMULMAX2-NEXT: vse32.v v10, (a0)
-; LMULMAX2-NEXT: mv a1, sp
-; LMULMAX2-NEXT: mv a0, sp
-; LMULMAX2-NEXT: li a5, 42
-; LMULMAX2-NEXT: vse32.v v8, (a1)
-; LMULMAX2-NEXT: vmv.v.v v8, v24
-; LMULMAX2-NEXT: vmv.v.v v10, v26
-; LMULMAX2-NEXT: vmv.v.v v12, v28
-; LMULMAX2-NEXT: vmv.v.v v14, v30
-; LMULMAX2-NEXT: call ext3
-; LMULMAX2-NEXT: addi sp, s0, -256
-; LMULMAX2-NEXT: ld ra, 248(sp) # 8-byte Folded Reload
-; LMULMAX2-NEXT: ld s0, 240(sp) # 8-byte Folded Reload
-; LMULMAX2-NEXT: addi sp, sp, 256
-; LMULMAX2-NEXT: ret
-;
-; LMULMAX1-LABEL: ret_v32i32_call_v32i32_v32i32_v32i32_i32:
-; LMULMAX1: # %bb.0:
-; LMULMAX1-NEXT: addi sp, sp, -256
-; LMULMAX1-NEXT: .cfi_def_cfa_offset 256
-; LMULMAX1-NEXT: sd ra, 248(sp) # 8-byte Folded Spill
-; LMULMAX1-NEXT: sd s0, 240(sp) # 8-byte Folded Spill
-; LMULMAX1-NEXT: sd s1, 232(sp) # 8-byte Folded Spill
-; LMULMAX1-NEXT: .cfi_offset ra, -8
-; LMULMAX1-NEXT: .cfi_offset s0, -16
-; LMULMAX1-NEXT: .cfi_offset s1, -24
-; LMULMAX1-NEXT: addi s0, sp, 256
-; LMULMAX1-NEXT: .cfi_def_cfa s0, 0
-; LMULMAX1-NEXT: andi sp, sp, -128
-; LMULMAX1-NEXT: mv s1, sp
-; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; LMULMAX1-NEXT: vle32.v v24, (a0)
-; LMULMAX1-NEXT: addi a1, a0, 16
-; LMULMAX1-NEXT: vle32.v v25, (a1)
-; LMULMAX1-NEXT: addi a1, a0, 32
-; LMULMAX1-NEXT: vle32.v v26, (a1)
-; LMULMAX1-NEXT: addi a1, a0, 48
-; LMULMAX1-NEXT: vle32.v v27, (a1)
-; LMULMAX1-NEXT: addi a1, a0, 64
-; LMULMAX1-NEXT: vle32.v v28, (a1)
-; LMULMAX1-NEXT: addi a1, a0, 80
-; LMULMAX1-NEXT: vle32.v v29, (a1)
-; LMULMAX1-NEXT: addi a1, a0, 96
-; LMULMAX1-NEXT: vle32.v v30, (a1)
-; LMULMAX1-NEXT: addi a0, a0, 112
-; LMULMAX1-NEXT: vle32.v v31, (a0)
-; LMULMAX1-NEXT: ld a1, 0(s0)
-; LMULMAX1-NEXT: addi sp, sp, -16
-; LMULMAX1-NEXT: addi a0, s1, 112
-; LMULMAX1-NEXT: vse32.v v15, (a0)
-; LMULMAX1-NEXT: addi a0, s1, 96
-; LMULMAX1-NEXT: vse32.v v14, (a0)
-; LMULMAX1-NEXT: addi a0, s1, 80
-; LMULMAX1-NEXT: vse32.v v13, (a0)
-; LMULMAX1-NEXT: addi a0, s1, 64
-; LMULMAX1-NEXT: vse32.v v12, (a0)
-; LMULMAX1-NEXT: addi a0, s1, 48
-; LMULMAX1-NEXT: vse32.v v11, (a0)
-; LMULMAX1-NEXT: addi a0, s1, 32
-; LMULMAX1-NEXT: vse32.v v10, (a0)
-; LMULMAX1-NEXT: addi a0, s1, 16
-; LMULMAX1-NEXT: vse32.v v9, (a0)
-; LMULMAX1-NEXT: mv a0, s1
-; LMULMAX1-NEXT: vse32.v v8, (a0)
-; LMULMAX1-NEXT: li a0, 42
-; LMULMAX1-NEXT: sd a0, 8(sp)
-; LMULMAX1-NEXT: mv a0, s1
-; LMULMAX1-NEXT: sd a1, 0(sp)
-; LMULMAX1-NEXT: vmv.v.v v8, v24
-; LMULMAX1-NEXT: vmv.v.v v9, v25
-; LMULMAX1-NEXT: vmv.v.v v10, v26
-; LMULMAX1-NEXT: vmv.v.v v11, v27
-; LMULMAX1-NEXT: vmv.v.v v12, v28
-; LMULMAX1-NEXT: vmv.v.v v13, v29
-; LMULMAX1-NEXT: vmv.v.v v14, v30
-; LMULMAX1-NEXT: vmv.v.v v15, v31
-; LMULMAX1-NEXT: call ext3
-; LMULMAX1-NEXT: addi sp, sp, 16
-; LMULMAX1-NEXT: addi sp, s0, -256
-; LMULMAX1-NEXT: ld ra, 248(sp) # 8-byte Folded Reload
-; LMULMAX1-NEXT: ld s0, 240(sp) # 8-byte Folded Reload
-; LMULMAX1-NEXT: ld s1, 232(sp) # 8-byte Folded Reload
-; LMULMAX1-NEXT: addi sp, sp, 256
-; LMULMAX1-NEXT: ret
+; CHECK-LABEL: ret_v32i32_call_v32i32_v32i32_v32i32_i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi sp, sp, -256
+; CHECK-NEXT: .cfi_def_cfa_offset 256
+; CHECK-NEXT: sd ra, 248(sp) # 8-byte Folded Spill
+; CHECK-NEXT: sd s0, 240(sp) # 8-byte Folded Spill
+; CHECK-NEXT: .cfi_offset ra, -8
+; CHECK-NEXT: .cfi_offset s0, -16
+; CHECK-NEXT: addi s0, sp, 256
+; CHECK-NEXT: .cfi_def_cfa s0, 0
+; CHECK-NEXT: andi sp, sp, -128
+; CHECK-NEXT: li a2, 32
+; CHECK-NEXT: vsetvli zero, a2, e32, m8, ta, ma
+; CHECK-NEXT: vle32.v v24, (a0)
+; CHECK-NEXT: mv a3, sp
+; CHECK-NEXT: mv a0, sp
+; CHECK-NEXT: li a2, 42
+; CHECK-NEXT: vse32.v v8, (a3)
+; CHECK-NEXT: vmv.v.v v8, v24
+; CHECK-NEXT: call ext3
+; CHECK-NEXT: addi sp, s0, -256
+; CHECK-NEXT: ld ra, 248(sp) # 8-byte Folded Reload
+; CHECK-NEXT: ld s0, 240(sp) # 8-byte Folded Reload
+; CHECK-NEXT: addi sp, sp, 256
+; CHECK-NEXT: ret
%t = call <32 x i32> @ext3(<32 x i32> %z, <32 x i32> %y, <32 x i32> %x, i32 %w, i32 42)
ret <32 x i32> %t
}
; Test various configurations of split vector types where the values are split
; across both registers and the stack.
-; LMUL8: Ins: v8,v9,v10,v11,v12, v16m8 y[0:31], a0+0 z[0:31]
-; LMUL4: Ins: v8,v9,v10,v11,v12, v16m4 y[0:15], v20m4 y[16:31], a0+0 z[0:15],
; a0+64 z[16:31]
-; LMUL2: Ins: v8,v9,v10,v11,v12, v14m2 y[0:7], v16m2 y[8:15], v18m2 y[16:23],
; v20m2 y[24:31], v22m2 z[0:7], a1+0 z[8:15], a1+32 z[16:23],
; a1+64 z[24:31]
-; LMUL1: Ins: v8,v9,v10,v11,v12, v13 y[0:3], v14 y[4:7], v15 y[8:11],
; v16 y[12:15], v17 y[16:19], v18 y[20:23], v19 y[24:27],
; v20 y[28:31], v21 z[0:3], v22 z[4:7], v23 z[8:11],
; a1+0 z[12:15], a1+16 z[16:19], a1+32 z[20:23], a1+48 z[24:27],
; a1+64 z[28:31]
define <32 x i32> @split_vector_args(<2 x i32>,<2 x i32>,<2 x i32>,<2 x i32>,<2 x i32>, <32 x i32> %y, <32 x i32> %z) {
-; LMULMAX8-LABEL: split_vector_args:
-; LMULMAX8: # %bb.0:
-; LMULMAX8-NEXT: li a1, 32
-; LMULMAX8-NEXT: vsetvli zero, a1, e32, m8, ta, ma
-; LMULMAX8-NEXT: vle32.v v8, (a0)
-; LMULMAX8-NEXT: vadd.vv v8, v16, v8
-; LMULMAX8-NEXT: ret
-;
-; LMULMAX4-LABEL: split_vector_args:
-; LMULMAX4: # %bb.0:
-; LMULMAX4-NEXT: addi a1, a0, 64
-; LMULMAX4-NEXT: vsetivli zero, 16, e32, m4, ta, ma
-; LMULMAX4-NEXT: vle32.v v8, (a0)
-; LMULMAX4-NEXT: vle32.v v12, (a1)
-; LMULMAX4-NEXT: vadd.vv v8, v16, v8
-; LMULMAX4-NEXT: vadd.vv v12, v20, v12
-; LMULMAX4-NEXT: ret
-;
-; LMULMAX2-LABEL: split_vector_args:
-; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: addi a1, a0, 64
-; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; LMULMAX2-NEXT: vle32.v v10, (a0)
-; LMULMAX2-NEXT: addi a0, a0, 32
-; LMULMAX2-NEXT: vle32.v v12, (a0)
-; LMULMAX2-NEXT: vle32.v v24, (a1)
-; LMULMAX2-NEXT: vadd.vv v8, v14, v22
-; LMULMAX2-NEXT: vadd.vv v10, v16, v10
-; LMULMAX2-NEXT: vadd.vv v12, v18, v12
-; LMULMAX2-NEXT: vadd.vv v14, v20, v24
-; LMULMAX2-NEXT: ret
-;
-; LMULMAX1-LABEL: split_vector_args:
-; LMULMAX1: # %bb.0:
-; LMULMAX1-NEXT: addi a1, a0, 64
-; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; LMULMAX1-NEXT: vle32.v v24, (a1)
-; LMULMAX1-NEXT: addi a1, a0, 48
-; LMULMAX1-NEXT: vle32.v v25, (a1)
-; LMULMAX1-NEXT: addi a1, a0, 32
-; LMULMAX1-NEXT: vle32.v v26, (a1)
-; LMULMAX1-NEXT: addi a1, a0, 16
-; LMULMAX1-NEXT: vle32.v v12, (a1)
-; LMULMAX1-NEXT: vle32.v v11, (a0)
-; LMULMAX1-NEXT: vadd.vv v8, v13, v21
-; LMULMAX1-NEXT: vadd.vv v9, v14, v22
-; LMULMAX1-NEXT: vadd.vv v10, v15, v23
-; LMULMAX1-NEXT: vadd.vv v11, v16, v11
-; LMULMAX1-NEXT: vadd.vv v12, v17, v12
-; LMULMAX1-NEXT: vadd.vv v13, v18, v26
-; LMULMAX1-NEXT: vadd.vv v14, v19, v25
-; LMULMAX1-NEXT: vadd.vv v15, v20, v24
-; LMULMAX1-NEXT: ret
+; CHECK-LABEL: split_vector_args:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a1, 32
+; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
+; CHECK-NEXT: vle32.v v8, (a0)
+; CHECK-NEXT: vadd.vv v8, v16, v8
+; CHECK-NEXT: ret
%v0 = add <32 x i32> %y, %z
ret <32 x i32> %v0
}
define <32 x i32> @call_split_vector_args(ptr %pa, ptr %pb) {
-; LMULMAX8-LABEL: call_split_vector_args:
-; LMULMAX8: # %bb.0:
-; LMULMAX8-NEXT: addi sp, sp, -256
-; LMULMAX8-NEXT: .cfi_def_cfa_offset 256
-; LMULMAX8-NEXT: sd ra, 248(sp) # 8-byte Folded Spill
-; LMULMAX8-NEXT: sd s0, 240(sp) # 8-byte Folded Spill
-; LMULMAX8-NEXT: .cfi_offset ra, -8
-; LMULMAX8-NEXT: .cfi_offset s0, -16
-; LMULMAX8-NEXT: addi s0, sp, 256
-; LMULMAX8-NEXT: .cfi_def_cfa s0, 0
-; LMULMAX8-NEXT: andi sp, sp, -128
-; LMULMAX8-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
-; LMULMAX8-NEXT: vle32.v v8, (a0)
-; LMULMAX8-NEXT: li a0, 32
-; LMULMAX8-NEXT: vsetvli zero, a0, e32, m8, ta, ma
-; LMULMAX8-NEXT: vle32.v v16, (a1)
-; LMULMAX8-NEXT: mv a1, sp
-; LMULMAX8-NEXT: mv a0, sp
-; LMULMAX8-NEXT: vse32.v v16, (a1)
-; LMULMAX8-NEXT: vmv1r.v v9, v8
-; LMULMAX8-NEXT: vmv1r.v v10, v8
-; LMULMAX8-NEXT: vmv1r.v v11, v8
-; LMULMAX8-NEXT: vmv1r.v v12, v8
-; LMULMAX8-NEXT: call split_vector_args
-; LMULMAX8-NEXT: addi sp, s0, -256
-; LMULMAX8-NEXT: ld ra, 248(sp) # 8-byte Folded Reload
-; LMULMAX8-NEXT: ld s0, 240(sp) # 8-byte Folded Reload
-; LMULMAX8-NEXT: addi sp, sp, 256
-; LMULMAX8-NEXT: ret
-;
-; LMULMAX4-LABEL: call_split_vector_args:
-; LMULMAX4: # %bb.0:
-; LMULMAX4-NEXT: addi sp, sp, -256
-; LMULMAX4-NEXT: .cfi_def_cfa_offset 256
-; LMULMAX4-NEXT: sd ra, 248(sp) # 8-byte Folded Spill
-; LMULMAX4-NEXT: sd s0, 240(sp) # 8-byte Folded Spill
-; LMULMAX4-NEXT: .cfi_offset ra, -8
-; LMULMAX4-NEXT: .cfi_offset s0, -16
-; LMULMAX4-NEXT: addi s0, sp, 256
-; LMULMAX4-NEXT: .cfi_def_cfa s0, 0
-; LMULMAX4-NEXT: andi sp, sp, -128
-; LMULMAX4-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
-; LMULMAX4-NEXT: vle32.v v8, (a0)
-; LMULMAX4-NEXT: vsetivli zero, 16, e32, m4, ta, ma
-; LMULMAX4-NEXT: vle32.v v16, (a1)
-; LMULMAX4-NEXT: addi a0, a1, 64
-; LMULMAX4-NEXT: vle32.v v20, (a0)
-; LMULMAX4-NEXT: addi a0, sp, 64
-; LMULMAX4-NEXT: vse32.v v20, (a0)
-; LMULMAX4-NEXT: mv a1, sp
-; LMULMAX4-NEXT: mv a0, sp
-; LMULMAX4-NEXT: vse32.v v16, (a1)
-; LMULMAX4-NEXT: vmv1r.v v9, v8
-; LMULMAX4-NEXT: vmv1r.v v10, v8
-; LMULMAX4-NEXT: vmv1r.v v11, v8
-; LMULMAX4-NEXT: vmv1r.v v12, v8
-; LMULMAX4-NEXT: call split_vector_args
-; LMULMAX4-NEXT: addi sp, s0, -256
-; LMULMAX4-NEXT: ld ra, 248(sp) # 8-byte Folded Reload
-; LMULMAX4-NEXT: ld s0, 240(sp) # 8-byte Folded Reload
-; LMULMAX4-NEXT: addi sp, sp, 256
-; LMULMAX4-NEXT: ret
-;
-; LMULMAX2-LABEL: call_split_vector_args:
-; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: addi sp, sp, -128
-; LMULMAX2-NEXT: .cfi_def_cfa_offset 128
-; LMULMAX2-NEXT: sd ra, 120(sp) # 8-byte Folded Spill
-; LMULMAX2-NEXT: sd s0, 112(sp) # 8-byte Folded Spill
-; LMULMAX2-NEXT: .cfi_offset ra, -8
-; LMULMAX2-NEXT: .cfi_offset s0, -16
-; LMULMAX2-NEXT: addi s0, sp, 128
-; LMULMAX2-NEXT: .cfi_def_cfa s0, 0
-; LMULMAX2-NEXT: andi sp, sp, -128
-; LMULMAX2-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
-; LMULMAX2-NEXT: vle32.v v8, (a0)
-; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; LMULMAX2-NEXT: vle32.v v14, (a1)
-; LMULMAX2-NEXT: addi a0, a1, 32
-; LMULMAX2-NEXT: vle32.v v16, (a0)
-; LMULMAX2-NEXT: addi a0, a1, 64
-; LMULMAX2-NEXT: vle32.v v18, (a0)
-; LMULMAX2-NEXT: addi a0, a1, 96
-; LMULMAX2-NEXT: vle32.v v20, (a0)
-; LMULMAX2-NEXT: addi a0, sp, 64
-; LMULMAX2-NEXT: vse32.v v20, (a0)
-; LMULMAX2-NEXT: addi a0, sp, 32
-; LMULMAX2-NEXT: vse32.v v18, (a0)
-; LMULMAX2-NEXT: mv a1, sp
-; LMULMAX2-NEXT: mv a0, sp
-; LMULMAX2-NEXT: vse32.v v16, (a1)
-; LMULMAX2-NEXT: vmv1r.v v9, v8
-; LMULMAX2-NEXT: vmv1r.v v10, v8
-; LMULMAX2-NEXT: vmv1r.v v11, v8
-; LMULMAX2-NEXT: vmv1r.v v12, v8
-; LMULMAX2-NEXT: vmv.v.v v22, v14
-; LMULMAX2-NEXT: call split_vector_args
-; LMULMAX2-NEXT: addi sp, s0, -128
-; LMULMAX2-NEXT: ld ra, 120(sp) # 8-byte Folded Reload
-; LMULMAX2-NEXT: ld s0, 112(sp) # 8-byte Folded Reload
-; LMULMAX2-NEXT: addi sp, sp, 128
-; LMULMAX2-NEXT: ret
-;
-; LMULMAX1-LABEL: call_split_vector_args:
-; LMULMAX1: # %bb.0:
-; LMULMAX1-NEXT: addi sp, sp, -128
-; LMULMAX1-NEXT: .cfi_def_cfa_offset 128
-; LMULMAX1-NEXT: sd ra, 120(sp) # 8-byte Folded Spill
-; LMULMAX1-NEXT: sd s0, 112(sp) # 8-byte Folded Spill
-; LMULMAX1-NEXT: .cfi_offset ra, -8
-; LMULMAX1-NEXT: .cfi_offset s0, -16
-; LMULMAX1-NEXT: addi s0, sp, 128
-; LMULMAX1-NEXT: .cfi_def_cfa s0, 0
-; LMULMAX1-NEXT: andi sp, sp, -128
-; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
-; LMULMAX1-NEXT: vle32.v v8, (a0)
-; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; LMULMAX1-NEXT: vle32.v v13, (a1)
-; LMULMAX1-NEXT: addi a0, a1, 32
-; LMULMAX1-NEXT: vle32.v v15, (a0)
-; LMULMAX1-NEXT: addi a0, a1, 16
-; LMULMAX1-NEXT: vle32.v v14, (a0)
-; LMULMAX1-NEXT: addi a0, a1, 48
-; LMULMAX1-NEXT: vle32.v v16, (a0)
-; LMULMAX1-NEXT: addi a0, a1, 64
-; LMULMAX1-NEXT: vle32.v v17, (a0)
-; LMULMAX1-NEXT: addi a0, a1, 80
-; LMULMAX1-NEXT: vle32.v v18, (a0)
-; LMULMAX1-NEXT: addi a0, a1, 96
-; LMULMAX1-NEXT: vle32.v v19, (a0)
-; LMULMAX1-NEXT: addi a0, a1, 112
-; LMULMAX1-NEXT: vle32.v v20, (a0)
-; LMULMAX1-NEXT: addi a0, sp, 64
-; LMULMAX1-NEXT: vse32.v v20, (a0)
-; LMULMAX1-NEXT: addi a0, sp, 48
-; LMULMAX1-NEXT: vse32.v v19, (a0)
-; LMULMAX1-NEXT: addi a0, sp, 32
-; LMULMAX1-NEXT: vse32.v v18, (a0)
-; LMULMAX1-NEXT: addi a0, sp, 16
-; LMULMAX1-NEXT: vse32.v v17, (a0)
-; LMULMAX1-NEXT: mv a1, sp
-; LMULMAX1-NEXT: mv a0, sp
-; LMULMAX1-NEXT: vse32.v v16, (a1)
-; LMULMAX1-NEXT: vmv1r.v v9, v8
-; LMULMAX1-NEXT: vmv1r.v v10, v8
-; LMULMAX1-NEXT: vmv1r.v v11, v8
-; LMULMAX1-NEXT: vmv1r.v v12, v8
-; LMULMAX1-NEXT: vmv.v.v v21, v13
-; LMULMAX1-NEXT: vmv.v.v v22, v14
-; LMULMAX1-NEXT: vmv.v.v v23, v15
-; LMULMAX1-NEXT: call split_vector_args
-; LMULMAX1-NEXT: addi sp, s0, -128
-; LMULMAX1-NEXT: ld ra, 120(sp) # 8-byte Folded Reload
-; LMULMAX1-NEXT: ld s0, 112(sp) # 8-byte Folded Reload
-; LMULMAX1-NEXT: addi sp, sp, 128
-; LMULMAX1-NEXT: ret
+; CHECK-LABEL: call_split_vector_args:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi sp, sp, -256
+; CHECK-NEXT: .cfi_def_cfa_offset 256
+; CHECK-NEXT: sd ra, 248(sp) # 8-byte Folded Spill
+; CHECK-NEXT: sd s0, 240(sp) # 8-byte Folded Spill
+; CHECK-NEXT: .cfi_offset ra, -8
+; CHECK-NEXT: .cfi_offset s0, -16
+; CHECK-NEXT: addi s0, sp, 256
+; CHECK-NEXT: .cfi_def_cfa s0, 0
+; CHECK-NEXT: andi sp, sp, -128
+; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
+; CHECK-NEXT: vle32.v v8, (a0)
+; CHECK-NEXT: li a0, 32
+; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
+; CHECK-NEXT: vle32.v v16, (a1)
+; CHECK-NEXT: mv a1, sp
+; CHECK-NEXT: mv a0, sp
+; CHECK-NEXT: vse32.v v16, (a1)
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: call split_vector_args
+; CHECK-NEXT: addi sp, s0, -256
+; CHECK-NEXT: ld ra, 248(sp) # 8-byte Folded Reload
+; CHECK-NEXT: ld s0, 240(sp) # 8-byte Folded Reload
+; CHECK-NEXT: addi sp, sp, 256
+; CHECK-NEXT: ret
%a = load <2 x i32>, ptr %pa
%b = load <32 x i32>, ptr %pb
%r = call <32 x i32> @split_vector_args(<2 x i32> %a, <2 x i32> %a, <2 x i32> %a, <2 x i32> %a, <2 x i32> %a, <32 x i32> %b, <32 x i32> %b)
@@ -1185,217 +282,44 @@ define <32 x i32> @call_split_vector_args(ptr %pa, ptr %pb) {
; A rather pathological test case in which we exhaust all vector registers and
; all scalar registers, forcing %z and %8 to go through the stack.
define <32 x i32> @vector_arg_via_stack(i32 %0, i32 %1, i32 %2, i32 %3, i32 %4, i32 %5, i32 %6, i32 %7, <32 x i32> %x, <32 x i32> %y, <32 x i32> %z, i32 %8) {
-; LMULMAX8-LABEL: vector_arg_via_stack:
-; LMULMAX8: # %bb.0:
-; LMULMAX8-NEXT: li a0, 32
-; LMULMAX8-NEXT: vsetvli zero, a0, e32, m8, ta, ma
-; LMULMAX8-NEXT: vle32.v v16, (sp)
-; LMULMAX8-NEXT: vadd.vv v8, v8, v16
-; LMULMAX8-NEXT: ret
-;
-; LMULMAX4-LABEL: vector_arg_via_stack:
-; LMULMAX4: # %bb.0:
-; LMULMAX4-NEXT: vsetivli zero, 16, e32, m4, ta, ma
-; LMULMAX4-NEXT: vle32.v v16, (sp)
-; LMULMAX4-NEXT: addi a0, sp, 64
-; LMULMAX4-NEXT: vle32.v v20, (a0)
-; LMULMAX4-NEXT: vadd.vv v8, v8, v16
-; LMULMAX4-NEXT: vadd.vv v12, v12, v20
-; LMULMAX4-NEXT: ret
-;
-; LMULMAX2-LABEL: vector_arg_via_stack:
-; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; LMULMAX2-NEXT: addi a0, sp, 64
-; LMULMAX2-NEXT: vle32.v v16, (a0)
-; LMULMAX2-NEXT: vle32.v v18, (sp)
-; LMULMAX2-NEXT: addi a0, sp, 32
-; LMULMAX2-NEXT: vle32.v v20, (a0)
-; LMULMAX2-NEXT: addi a0, sp, 96
-; LMULMAX2-NEXT: vle32.v v22, (a0)
-; LMULMAX2-NEXT: vadd.vv v8, v8, v18
-; LMULMAX2-NEXT: vadd.vv v10, v10, v20
-; LMULMAX2-NEXT: vadd.vv v12, v12, v16
-; LMULMAX2-NEXT: vadd.vv v14, v14, v22
-; LMULMAX2-NEXT: ret
-;
-; LMULMAX1-LABEL: vector_arg_via_stack:
-; LMULMAX1: # %bb.0:
-; LMULMAX1-NEXT: addi a0, sp, 112
-; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; LMULMAX1-NEXT: vle32.v v16, (a0)
-; LMULMAX1-NEXT: addi a0, sp, 96
-; LMULMAX1-NEXT: vle32.v v17, (a0)
-; LMULMAX1-NEXT: addi a0, sp, 80
-; LMULMAX1-NEXT: vle32.v v18, (a0)
-; LMULMAX1-NEXT: addi a0, sp, 64
-; LMULMAX1-NEXT: vle32.v v19, (a0)
-; LMULMAX1-NEXT: addi a0, sp, 32
-; LMULMAX1-NEXT: vle32.v v20, (a0)
-; LMULMAX1-NEXT: vle32.v v21, (sp)
-; LMULMAX1-NEXT: addi a0, sp, 16
-; LMULMAX1-NEXT: vle32.v v22, (a0)
-; LMULMAX1-NEXT: addi a0, sp, 48
-; LMULMAX1-NEXT: vle32.v v23, (a0)
-; LMULMAX1-NEXT: vadd.vv v8, v8, v21
-; LMULMAX1-NEXT: vadd.vv v9, v9, v22
-; LMULMAX1-NEXT: vadd.vv v10, v10, v20
-; LMULMAX1-NEXT: vadd.vv v11, v11, v23
-; LMULMAX1-NEXT: vadd.vv v12, v12, v19
-; LMULMAX1-NEXT: vadd.vv v13, v13, v18
-; LMULMAX1-NEXT: vadd.vv v14, v14, v17
-; LMULMAX1-NEXT: vadd.vv v15, v15, v16
-; LMULMAX1-NEXT: ret
+; CHECK-LABEL: vector_arg_via_stack:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a0, 32
+; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
+; CHECK-NEXT: vle32.v v16, (sp)
+; CHECK-NEXT: vadd.vv v8, v8, v16
+; CHECK-NEXT: ret
%s = add <32 x i32> %x, %z
ret <32 x i32> %s
}
; Calling the function above. Ensure we pass the arguments correctly.
define <32 x i32> @pass_vector_arg_via_stack(<32 x i32> %x, <32 x i32> %y, <32 x i32> %z) {
-; LMULMAX8-LABEL: pass_vector_arg_via_stack:
-; LMULMAX8: # %bb.0:
-; LMULMAX8-NEXT: addi sp, sp, -144
-; LMULMAX8-NEXT: .cfi_def_cfa_offset 144
-; LMULMAX8-NEXT: sd ra, 136(sp) # 8-byte Folded Spill
-; LMULMAX8-NEXT: .cfi_offset ra, -8
-; LMULMAX8-NEXT: li a0, 32
-; LMULMAX8-NEXT: vsetvli zero, a0, e32, m8, ta, ma
-; LMULMAX8-NEXT: vmv.v.i v8, 0
-; LMULMAX8-NEXT: vse32.v v8, (sp)
-; LMULMAX8-NEXT: li a0, 8
-; LMULMAX8-NEXT: li a1, 1
-; LMULMAX8-NEXT: li a2, 2
-; LMULMAX8-NEXT: li a3, 3
-; LMULMAX8-NEXT: li a4, 4
-; LMULMAX8-NEXT: li a5, 5
-; LMULMAX8-NEXT: li a6, 6
-; LMULMAX8-NEXT: li a7, 7
-; LMULMAX8-NEXT: sd a0, 128(sp)
-; LMULMAX8-NEXT: li a0, 0
-; LMULMAX8-NEXT: vmv.v.i v16, 0
-; LMULMAX8-NEXT: call vector_arg_via_stack
-; LMULMAX8-NEXT: ld ra, 136(sp) # 8-byte Folded Reload
-; LMULMAX8-NEXT: addi sp, sp, 144
-; LMULMAX8-NEXT: ret
-;
-; LMULMAX4-LABEL: pass_vector_arg_via_stack:
-; LMULMAX4: # %bb.0:
-; LMULMAX4-NEXT: addi sp, sp, -144
-; LMULMAX4-NEXT: .cfi_def_cfa_offset 144
-; LMULMAX4-NEXT: sd ra, 136(sp) # 8-byte Folded Spill
-; LMULMAX4-NEXT: .cfi_offset ra, -8
-; LMULMAX4-NEXT: li a0, 8
-; LMULMAX4-NEXT: sd a0, 128(sp)
-; LMULMAX4-NEXT: vsetivli zero, 16, e32, m4, ta, ma
-; LMULMAX4-NEXT: vmv.v.i v8, 0
-; LMULMAX4-NEXT: vse32.v v8, (sp)
-; LMULMAX4-NEXT: addi a0, sp, 64
-; LMULMAX4-NEXT: li a1, 1
-; LMULMAX4-NEXT: li a2, 2
-; LMULMAX4-NEXT: li a3, 3
-; LMULMAX4-NEXT: li a4, 4
-; LMULMAX4-NEXT: li a5, 5
-; LMULMAX4-NEXT: li a6, 6
-; LMULMAX4-NEXT: li a7, 7
-; LMULMAX4-NEXT: vse32.v v8, (a0)
-; LMULMAX4-NEXT: li a0, 0
-; LMULMAX4-NEXT: vmv.v.i v12, 0
-; LMULMAX4-NEXT: vmv.v.i v16, 0
-; LMULMAX4-NEXT: vmv.v.i v20, 0
-; LMULMAX4-NEXT: call vector_arg_via_stack
-; LMULMAX4-NEXT: ld ra, 136(sp) # 8-byte Folded Reload
-; LMULMAX4-NEXT: addi sp, sp, 144
-; LMULMAX4-NEXT: ret
-;
-; LMULMAX2-LABEL: pass_vector_arg_via_stack:
-; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: addi sp, sp, -144
-; LMULMAX2-NEXT: .cfi_def_cfa_offset 144
-; LMULMAX2-NEXT: sd ra, 136(sp) # 8-byte Folded Spill
-; LMULMAX2-NEXT: .cfi_offset ra, -8
-; LMULMAX2-NEXT: li a0, 8
-; LMULMAX2-NEXT: sd a0, 128(sp)
-; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; LMULMAX2-NEXT: vmv.v.i v8, 0
-; LMULMAX2-NEXT: vse32.v v8, (sp)
-; LMULMAX2-NEXT: addi a0, sp, 96
-; LMULMAX2-NEXT: vse32.v v8, (a0)
-; LMULMAX2-NEXT: addi a0, sp, 64
-; LMULMAX2-NEXT: vse32.v v8, (a0)
-; LMULMAX2-NEXT: addi a0, sp, 32
-; LMULMAX2-NEXT: li a1, 1
-; LMULMAX2-NEXT: li a2, 2
-; LMULMAX2-NEXT: li a3, 3
-; LMULMAX2-NEXT: li a4, 4
-; LMULMAX2-NEXT: li a5, 5
-; LMULMAX2-NEXT: li a6, 6
-; LMULMAX2-NEXT: li a7, 7
-; LMULMAX2-NEXT: vse32.v v8, (a0)
-; LMULMAX2-NEXT: li a0, 0
-; LMULMAX2-NEXT: vmv.v.i v10, 0
-; LMULMAX2-NEXT: vmv.v.i v12, 0
-; LMULMAX2-NEXT: vmv.v.i v14, 0
-; LMULMAX2-NEXT: vmv.v.i v16, 0
-; LMULMAX2-NEXT: vmv.v.i v18, 0
-; LMULMAX2-NEXT: vmv.v.i v20, 0
-; LMULMAX2-NEXT: vmv.v.i v22, 0
-; LMULMAX2-NEXT: call vector_arg_via_stack
-; LMULMAX2-NEXT: ld ra, 136(sp) # 8-byte Folded Reload
-; LMULMAX2-NEXT: addi sp, sp, 144
-; LMULMAX2-NEXT: ret
-;
-; LMULMAX1-LABEL: pass_vector_arg_via_stack:
-; LMULMAX1: # %bb.0:
-; LMULMAX1-NEXT: addi sp, sp, -144
-; LMULMAX1-NEXT: .cfi_def_cfa_offset 144
-; LMULMAX1-NEXT: sd ra, 136(sp) # 8-byte Folded Spill
-; LMULMAX1-NEXT: .cfi_offset ra, -8
-; LMULMAX1-NEXT: li a0, 8
-; LMULMAX1-NEXT: sd a0, 128(sp)
-; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; LMULMAX1-NEXT: vmv.v.i v8, 0
-; LMULMAX1-NEXT: vse32.v v8, (sp)
-; LMULMAX1-NEXT: addi a0, sp, 112
-; LMULMAX1-NEXT: vse32.v v8, (a0)
-; LMULMAX1-NEXT: addi a0, sp, 96
-; LMULMAX1-NEXT: vse32.v v8, (a0)
-; LMULMAX1-NEXT: addi a0, sp, 80
-; LMULMAX1-NEXT: vse32.v v8, (a0)
-; LMULMAX1-NEXT: addi a0, sp, 64
-; LMULMAX1-NEXT: vse32.v v8, (a0)
-; LMULMAX1-NEXT: addi a0, sp, 48
-; LMULMAX1-NEXT: vse32.v v8, (a0)
-; LMULMAX1-NEXT: addi a0, sp, 32
-; LMULMAX1-NEXT: vse32.v v8, (a0)
-; LMULMAX1-NEXT: addi a0, sp, 16
-; LMULMAX1-NEXT: li a1, 1
-; LMULMAX1-NEXT: li a2, 2
-; LMULMAX1-NEXT: li a3, 3
-; LMULMAX1-NEXT: li a4, 4
-; LMULMAX1-NEXT: li a5, 5
-; LMULMAX1-NEXT: li a6, 6
-; LMULMAX1-NEXT: li a7, 7
-; LMULMAX1-NEXT: vse32.v v8, (a0)
-; LMULMAX1-NEXT: li a0, 0
-; LMULMAX1-NEXT: vmv.v.i v9, 0
-; LMULMAX1-NEXT: vmv.v.i v10, 0
-; LMULMAX1-NEXT: vmv.v.i v11, 0
-; LMULMAX1-NEXT: vmv.v.i v12, 0
-; LMULMAX1-NEXT: vmv.v.i v13, 0
-; LMULMAX1-NEXT: vmv.v.i v14, 0
-; LMULMAX1-NEXT: vmv.v.i v15, 0
-; LMULMAX1-NEXT: vmv.v.i v16, 0
-; LMULMAX1-NEXT: vmv.v.i v17, 0
-; LMULMAX1-NEXT: vmv.v.i v18, 0
-; LMULMAX1-NEXT: vmv.v.i v19, 0
-; LMULMAX1-NEXT: vmv.v.i v20, 0
-; LMULMAX1-NEXT: vmv.v.i v21, 0
-; LMULMAX1-NEXT: vmv.v.i v22, 0
-; LMULMAX1-NEXT: vmv.v.i v23, 0
-; LMULMAX1-NEXT: call vector_arg_via_stack
-; LMULMAX1-NEXT: ld ra, 136(sp) # 8-byte Folded Reload
-; LMULMAX1-NEXT: addi sp, sp, 144
-; LMULMAX1-NEXT: ret
+; CHECK-LABEL: pass_vector_arg_via_stack:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi sp, sp, -144
+; CHECK-NEXT: .cfi_def_cfa_offset 144
+; CHECK-NEXT: sd ra, 136(sp) # 8-byte Folded Spill
+; CHECK-NEXT: .cfi_offset ra, -8
+; CHECK-NEXT: li a0, 32
+; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
+; CHECK-NEXT: vmv.v.i v8, 0
+; CHECK-NEXT: vse32.v v8, (sp)
+; CHECK-NEXT: li a0, 8
+; CHECK-NEXT: li a1, 1
+; CHECK-NEXT: li a2, 2
+; CHECK-NEXT: li a3, 3
+; CHECK-NEXT: li a4, 4
+; CHECK-NEXT: li a5, 5
+; CHECK-NEXT: li a6, 6
+; CHECK-NEXT: li a7, 7
+; CHECK-NEXT: sd a0, 128(sp)
+; CHECK-NEXT: li a0, 0
+; CHECK-NEXT: vmv.v.i v16, 0
+; CHECK-NEXT: call vector_arg_via_stack
+; CHECK-NEXT: ld ra, 136(sp) # 8-byte Folded Reload
+; CHECK-NEXT: addi sp, sp, 144
+; CHECK-NEXT: ret
%s = call <32 x i32> @vector_arg_via_stack(i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, <32 x i32> zeroinitializer, <32 x i32> zeroinitializer, <32 x i32> zeroinitializer, i32 8)
ret <32 x i32> %s
}
@@ -1415,194 +339,42 @@ define <4 x i1> @vector_mask_arg_via_stack(i32 %0, i32 %1, i32 %2, i32 %3, i32 %
; Calling the function above. Ensure we pass the mask arguments correctly. We
; legalize stores of small masks such that the value is at least byte-sized.
define <4 x i1> @pass_vector_mask_arg_via_stack(<4 x i1> %v) {
-; LMULMAX8-LABEL: pass_vector_mask_arg_via_stack:
-; LMULMAX8: # %bb.0:
-; LMULMAX8-NEXT: addi sp, sp, -160
-; LMULMAX8-NEXT: .cfi_def_cfa_offset 160
-; LMULMAX8-NEXT: sd ra, 152(sp) # 8-byte Folded Spill
-; LMULMAX8-NEXT: .cfi_offset ra, -8
-; LMULMAX8-NEXT: li a0, 32
-; LMULMAX8-NEXT: vsetvli zero, a0, e32, m8, ta, ma
-; LMULMAX8-NEXT: vmv.v.i v8, 0
-; LMULMAX8-NEXT: vse32.v v8, (sp)
-; LMULMAX8-NEXT: li a0, 8
-; LMULMAX8-NEXT: sd a0, 128(sp)
-; LMULMAX8-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
-; LMULMAX8-NEXT: vmv.v.i v16, 0
-; LMULMAX8-NEXT: vmerge.vim v16, v16, 1, v0
-; LMULMAX8-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
-; LMULMAX8-NEXT: vmv.v.i v17, 0
-; LMULMAX8-NEXT: vsetivli zero, 4, e8, mf2, tu, ma
-; LMULMAX8-NEXT: vmv.v.v v17, v16
-; LMULMAX8-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
-; LMULMAX8-NEXT: vmsne.vi v16, v17, 0
-; LMULMAX8-NEXT: addi a0, sp, 136
-; LMULMAX8-NEXT: li a5, 5
-; LMULMAX8-NEXT: li a6, 6
-; LMULMAX8-NEXT: li a7, 7
-; LMULMAX8-NEXT: vsm.v v16, (a0)
-; LMULMAX8-NEXT: li a0, 0
-; LMULMAX8-NEXT: li a1, 0
-; LMULMAX8-NEXT: li a2, 0
-; LMULMAX8-NEXT: li a3, 0
-; LMULMAX8-NEXT: li a4, 0
-; LMULMAX8-NEXT: vmv8r.v v16, v8
-; LMULMAX8-NEXT: call vector_mask_arg_via_stack
-; LMULMAX8-NEXT: ld ra, 152(sp) # 8-byte Folded Reload
-; LMULMAX8-NEXT: addi sp, sp, 160
-; LMULMAX8-NEXT: ret
-;
-; LMULMAX4-LABEL: pass_vector_mask_arg_via_stack:
-; LMULMAX4: # %bb.0:
-; LMULMAX4-NEXT: addi sp, sp, -160
-; LMULMAX4-NEXT: .cfi_def_cfa_offset 160
-; LMULMAX4-NEXT: sd ra, 152(sp) # 8-byte Folded Spill
-; LMULMAX4-NEXT: .cfi_offset ra, -8
-; LMULMAX4-NEXT: li a0, 8
-; LMULMAX4-NEXT: sd a0, 128(sp)
-; LMULMAX4-NEXT: vsetivli zero, 16, e32, m4, ta, ma
-; LMULMAX4-NEXT: vmv.v.i v8, 0
-; LMULMAX4-NEXT: vse32.v v8, (sp)
-; LMULMAX4-NEXT: addi a0, sp, 64
-; LMULMAX4-NEXT: vse32.v v8, (a0)
-; LMULMAX4-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
-; LMULMAX4-NEXT: vmv.v.i v12, 0
-; LMULMAX4-NEXT: vmerge.vim v12, v12, 1, v0
-; LMULMAX4-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
-; LMULMAX4-NEXT: vmv.v.i v13, 0
-; LMULMAX4-NEXT: vsetivli zero, 4, e8, mf2, tu, ma
-; LMULMAX4-NEXT: vmv.v.v v13, v12
-; LMULMAX4-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
-; LMULMAX4-NEXT: vmsne.vi v12, v13, 0
-; LMULMAX4-NEXT: addi a0, sp, 136
-; LMULMAX4-NEXT: li a5, 5
-; LMULMAX4-NEXT: li a6, 6
-; LMULMAX4-NEXT: li a7, 7
-; LMULMAX4-NEXT: vsm.v v12, (a0)
-; LMULMAX4-NEXT: li a0, 0
-; LMULMAX4-NEXT: li a1, 0
-; LMULMAX4-NEXT: li a2, 0
-; LMULMAX4-NEXT: li a3, 0
-; LMULMAX4-NEXT: li a4, 0
-; LMULMAX4-NEXT: vmv4r.v v12, v8
-; LMULMAX4-NEXT: vmv4r.v v16, v8
-; LMULMAX4-NEXT: vmv4r.v v20, v8
-; LMULMAX4-NEXT: call vector_mask_arg_via_stack
-; LMULMAX4-NEXT: ld ra, 152(sp) # 8-byte Folded Reload
-; LMULMAX4-NEXT: addi sp, sp, 160
-; LMULMAX4-NEXT: ret
-;
-; LMULMAX2-LABEL: pass_vector_mask_arg_via_stack:
-; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: addi sp, sp, -160
-; LMULMAX2-NEXT: .cfi_def_cfa_offset 160
-; LMULMAX2-NEXT: sd ra, 152(sp) # 8-byte Folded Spill
-; LMULMAX2-NEXT: .cfi_offset ra, -8
-; LMULMAX2-NEXT: li a0, 8
-; LMULMAX2-NEXT: sd a0, 128(sp)
-; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; LMULMAX2-NEXT: vmv.v.i v8, 0
-; LMULMAX2-NEXT: vse32.v v8, (sp)
-; LMULMAX2-NEXT: addi a0, sp, 96
-; LMULMAX2-NEXT: vse32.v v8, (a0)
-; LMULMAX2-NEXT: addi a0, sp, 64
-; LMULMAX2-NEXT: vse32.v v8, (a0)
-; LMULMAX2-NEXT: addi a0, sp, 32
-; LMULMAX2-NEXT: vse32.v v8, (a0)
-; LMULMAX2-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
-; LMULMAX2-NEXT: vmv.v.i v10, 0
-; LMULMAX2-NEXT: vmerge.vim v10, v10, 1, v0
-; LMULMAX2-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
-; LMULMAX2-NEXT: vmv.v.i v11, 0
-; LMULMAX2-NEXT: vsetivli zero, 4, e8, mf2, tu, ma
-; LMULMAX2-NEXT: vmv.v.v v11, v10
-; LMULMAX2-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
-; LMULMAX2-NEXT: vmsne.vi v10, v11, 0
-; LMULMAX2-NEXT: addi a0, sp, 136
-; LMULMAX2-NEXT: li a5, 5
-; LMULMAX2-NEXT: li a6, 6
-; LMULMAX2-NEXT: li a7, 7
-; LMULMAX2-NEXT: vsm.v v10, (a0)
-; LMULMAX2-NEXT: li a0, 0
-; LMULMAX2-NEXT: li a1, 0
-; LMULMAX2-NEXT: li a2, 0
-; LMULMAX2-NEXT: li a3, 0
-; LMULMAX2-NEXT: li a4, 0
-; LMULMAX2-NEXT: vmv2r.v v10, v8
-; LMULMAX2-NEXT: vmv2r.v v12, v8
-; LMULMAX2-NEXT: vmv2r.v v14, v8
-; LMULMAX2-NEXT: vmv2r.v v16, v8
-; LMULMAX2-NEXT: vmv2r.v v18, v8
-; LMULMAX2-NEXT: vmv2r.v v20, v8
-; LMULMAX2-NEXT: vmv2r.v v22, v8
-; LMULMAX2-NEXT: call vector_mask_arg_via_stack
-; LMULMAX2-NEXT: ld ra, 152(sp) # 8-byte Folded Reload
-; LMULMAX2-NEXT: addi sp, sp, 160
-; LMULMAX2-NEXT: ret
-;
-; LMULMAX1-LABEL: pass_vector_mask_arg_via_stack:
-; LMULMAX1: # %bb.0:
-; LMULMAX1-NEXT: addi sp, sp, -160
-; LMULMAX1-NEXT: .cfi_def_cfa_offset 160
-; LMULMAX1-NEXT: sd ra, 152(sp) # 8-byte Folded Spill
-; LMULMAX1-NEXT: .cfi_offset ra, -8
-; LMULMAX1-NEXT: li a0, 8
-; LMULMAX1-NEXT: sd a0, 128(sp)
-; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; LMULMAX1-NEXT: vmv.v.i v8, 0
-; LMULMAX1-NEXT: vse32.v v8, (sp)
-; LMULMAX1-NEXT: addi a0, sp, 112
-; LMULMAX1-NEXT: vse32.v v8, (a0)
-; LMULMAX1-NEXT: addi a0, sp, 96
-; LMULMAX1-NEXT: vse32.v v8, (a0)
-; LMULMAX1-NEXT: addi a0, sp, 80
-; LMULMAX1-NEXT: vse32.v v8, (a0)
-; LMULMAX1-NEXT: addi a0, sp, 64
-; LMULMAX1-NEXT: vse32.v v8, (a0)
-; LMULMAX1-NEXT: addi a0, sp, 48
-; LMULMAX1-NEXT: vse32.v v8, (a0)
-; LMULMAX1-NEXT: addi a0, sp, 32
-; LMULMAX1-NEXT: vse32.v v8, (a0)
-; LMULMAX1-NEXT: addi a0, sp, 16
-; LMULMAX1-NEXT: vse32.v v8, (a0)
-; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf4, ta, ma
-; LMULMAX1-NEXT: vmv.v.i v9, 0
-; LMULMAX1-NEXT: vmerge.vim v9, v9, 1, v0
-; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
-; LMULMAX1-NEXT: vmv.v.i v10, 0
-; LMULMAX1-NEXT: vsetivli zero, 4, e8, mf2, tu, ma
-; LMULMAX1-NEXT: vmv.v.v v10, v9
-; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
-; LMULMAX1-NEXT: vmsne.vi v9, v10, 0
-; LMULMAX1-NEXT: addi a0, sp, 136
-; LMULMAX1-NEXT: li a5, 5
-; LMULMAX1-NEXT: li a6, 6
-; LMULMAX1-NEXT: li a7, 7
-; LMULMAX1-NEXT: vsm.v v9, (a0)
-; LMULMAX1-NEXT: li a0, 0
-; LMULMAX1-NEXT: li a1, 0
-; LMULMAX1-NEXT: li a2, 0
-; LMULMAX1-NEXT: li a3, 0
-; LMULMAX1-NEXT: li a4, 0
-; LMULMAX1-NEXT: vmv1r.v v9, v8
-; LMULMAX1-NEXT: vmv1r.v v10, v8
-; LMULMAX1-NEXT: vmv1r.v v11, v8
-; LMULMAX1-NEXT: vmv1r.v v12, v8
-; LMULMAX1-NEXT: vmv1r.v v13, v8
-; LMULMAX1-NEXT: vmv1r.v v14, v8
-; LMULMAX1-NEXT: vmv1r.v v15, v8
-; LMULMAX1-NEXT: vmv1r.v v16, v8
-; LMULMAX1-NEXT: vmv1r.v v17, v8
-; LMULMAX1-NEXT: vmv1r.v v18, v8
-; LMULMAX1-NEXT: vmv1r.v v19, v8
-; LMULMAX1-NEXT: vmv1r.v v20, v8
-; LMULMAX1-NEXT: vmv1r.v v21, v8
-; LMULMAX1-NEXT: vmv1r.v v22, v8
-; LMULMAX1-NEXT: vmv1r.v v23, v8
-; LMULMAX1-NEXT: call vector_mask_arg_via_stack
-; LMULMAX1-NEXT: ld ra, 152(sp) # 8-byte Folded Reload
-; LMULMAX1-NEXT: addi sp, sp, 160
-; LMULMAX1-NEXT: ret
+; CHECK-LABEL: pass_vector_mask_arg_via_stack:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi sp, sp, -160
+; CHECK-NEXT: .cfi_def_cfa_offset 160
+; CHECK-NEXT: sd ra, 152(sp) # 8-byte Folded Spill
+; CHECK-NEXT: .cfi_offset ra, -8
+; CHECK-NEXT: li a0, 32
+; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
+; CHECK-NEXT: vmv.v.i v8, 0
+; CHECK-NEXT: vse32.v v8, (sp)
+; CHECK-NEXT: li a0, 8
+; CHECK-NEXT: sd a0, 128(sp)
+; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
+; CHECK-NEXT: vmv.v.i v16, 0
+; CHECK-NEXT: vmerge.vim v16, v16, 1, v0
+; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
+; CHECK-NEXT: vmv.v.i v17, 0
+; CHECK-NEXT: vsetivli zero, 4, e8, mf2, tu, ma
+; CHECK-NEXT: vmv.v.v v17, v16
+; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
+; CHECK-NEXT: vmsne.vi v16, v17, 0
+; CHECK-NEXT: addi a0, sp, 136
+; CHECK-NEXT: li a5, 5
+; CHECK-NEXT: li a6, 6
+; CHECK-NEXT: li a7, 7
+; CHECK-NEXT: vsm.v v16, (a0)
+; CHECK-NEXT: li a0, 0
+; CHECK-NEXT: li a1, 0
+; CHECK-NEXT: li a2, 0
+; CHECK-NEXT: li a3, 0
+; CHECK-NEXT: li a4, 0
+; CHECK-NEXT: vmv8r.v v16, v8
+; CHECK-NEXT: call vector_mask_arg_via_stack
+; CHECK-NEXT: ld ra, 152(sp) # 8-byte Folded Reload
+; CHECK-NEXT: addi sp, sp, 160
+; CHECK-NEXT: ret
%r = call <4 x i1> @vector_mask_arg_via_stack(i32 0, i32 0, i32 0, i32 0, i32 0, i32 5, i32 6, i32 7, <32 x i32> zeroinitializer, <32 x i32> zeroinitializer, <32 x i32> zeroinitializer, i32 8, <4 x i1> %v, <4 x i1> %v)
ret <4 x i1> %r
}
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ctlz.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ctlz.ll
index a4a2cd1..3e5a89b 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ctlz.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ctlz.ll
@@ -1,61 +1,71 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+m,+zve64x,+zvl128b -riscv-v-fixed-length-vector-lmul-max=2 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,LMULMAX2,LMULMAX2-RV32,LMULMAX2-RV32I
-; RUN: llc -mtriple=riscv64 -mattr=+m,+zve64x,+zvl128b -riscv-v-fixed-length-vector-lmul-max=2 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,LMULMAX2,LMULMAX2-RV64,LMULMAX2-RV64I
-; RUN: llc -mtriple=riscv32 -mattr=+m,+zve64x,+zvl128b -riscv-v-fixed-length-vector-lmul-max=1 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,LMULMAX1,LMULMAX1-RV32
-; RUN: llc -mtriple=riscv64 -mattr=+m,+zve64x,+zvl128b -riscv-v-fixed-length-vector-lmul-max=1 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,LMULMAX1,LMULMAX1-RV64
-; RUN: llc -mtriple=riscv32 -mattr=+m,+zve64f,+zvl128b,+f -riscv-v-fixed-length-vector-lmul-max=2 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,LMULMAX2,LMULMAX2-RV32,LMULMAX2-RV32F
-; RUN: llc -mtriple=riscv64 -mattr=+m,+zve64f,+zvl128b,+f -riscv-v-fixed-length-vector-lmul-max=2 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,LMULMAX2,LMULMAX2-RV64,LMULMAX2-RV64F
-; RUN: llc -mtriple=riscv32 -mattr=+m,+v,+d -riscv-v-fixed-length-vector-lmul-max=2 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,LMULMAX2,LMULMAX2-RV32,LMULMAX2-RV32D
-; RUN: llc -mtriple=riscv64 -mattr=+m,+v,+d -riscv-v-fixed-length-vector-lmul-max=2 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,LMULMAX2,LMULMAX2-RV64,LMULMAX2-RV64D
-; RUN: llc -mtriple=riscv32 -mattr=+m,+v,+d -riscv-v-fixed-length-vector-lmul-max=1 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,LMULMAX1,LMULMAX1-RV32
-; RUN: llc -mtriple=riscv64 -mattr=+m,+v,+d -riscv-v-fixed-length-vector-lmul-max=1 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,LMULMAX1,LMULMAX1-RV64
-; RUN: llc -mtriple=riscv32 -mattr=+m,+v,+d -riscv-v-fixed-length-vector-lmul-max=8 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=LMULMAX8
-; RUN: llc -mtriple=riscv64 -mattr=+m,+v,+d -riscv-v-fixed-length-vector-lmul-max=8 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=LMULMAX8
+; RUN: llc -mtriple=riscv32 -mattr=+m,+zve64x,+zvl128b -verify-machineinstrs < %s | FileCheck %s --check-prefixes=RVI,RV32I
+; RUN: llc -mtriple=riscv64 -mattr=+m,+zve64x,+zvl128b -verify-machineinstrs < %s | FileCheck %s --check-prefixes=RVI,RV64I
+; RUN: llc -mtriple=riscv32 -mattr=+m,+zve64f,+zvl128b,+f -verify-machineinstrs < %s | FileCheck %s --check-prefixes=RVF,RV32F
+; RUN: llc -mtriple=riscv64 -mattr=+m,+zve64f,+zvl128b,+f -verify-machineinstrs < %s | FileCheck %s --check-prefixes=RVF,RV64F
+; RUN: llc -mtriple=riscv32 -mattr=+m,+v,+d -verify-machineinstrs < %s | FileCheck %s --check-prefixes=RVD,RV32D
+; RUN: llc -mtriple=riscv64 -mattr=+m,+v,+d -verify-machineinstrs < %s | FileCheck %s --check-prefixes=RVD,RV64D
; RUN: llc -mtriple=riscv32 -mattr=+v,+zvbb -verify-machineinstrs < %s | FileCheck %s --check-prefixes=ZVBB
; RUN: llc -mtriple=riscv64 -mattr=+v,+zvbb -verify-machineinstrs < %s | FileCheck %s --check-prefixes=ZVBB
define void @ctlz_v16i8(ptr %x, ptr %y) nounwind {
-; CHECK-LABEL: ctlz_v16i8:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma
-; CHECK-NEXT: vle8.v v8, (a0)
-; CHECK-NEXT: vsrl.vi v9, v8, 1
-; CHECK-NEXT: vor.vv v8, v8, v9
-; CHECK-NEXT: vsrl.vi v9, v8, 2
-; CHECK-NEXT: vor.vv v8, v8, v9
-; CHECK-NEXT: vsrl.vi v9, v8, 4
-; CHECK-NEXT: vor.vv v8, v8, v9
-; CHECK-NEXT: vnot.v v8, v8
-; CHECK-NEXT: vsrl.vi v9, v8, 1
-; CHECK-NEXT: li a1, 85
-; CHECK-NEXT: vand.vx v9, v9, a1
-; CHECK-NEXT: vsub.vv v8, v8, v9
-; CHECK-NEXT: li a1, 51
-; CHECK-NEXT: vand.vx v9, v8, a1
-; CHECK-NEXT: vsrl.vi v8, v8, 2
-; CHECK-NEXT: vand.vx v8, v8, a1
-; CHECK-NEXT: vadd.vv v8, v9, v8
-; CHECK-NEXT: vsrl.vi v9, v8, 4
-; CHECK-NEXT: vadd.vv v8, v8, v9
-; CHECK-NEXT: vand.vi v8, v8, 15
-; CHECK-NEXT: vse8.v v8, (a0)
-; CHECK-NEXT: ret
-;
-; LMULMAX8-LABEL: ctlz_v16i8:
-; LMULMAX8: # %bb.0:
-; LMULMAX8-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; LMULMAX8-NEXT: vle8.v v8, (a0)
-; LMULMAX8-NEXT: vzext.vf2 v10, v8
-; LMULMAX8-NEXT: vfwcvt.f.xu.v v12, v10
-; LMULMAX8-NEXT: vnsrl.wi v8, v12, 23
-; LMULMAX8-NEXT: vsetvli zero, zero, e8, m1, ta, ma
-; LMULMAX8-NEXT: vnsrl.wi v10, v8, 0
-; LMULMAX8-NEXT: li a1, 134
-; LMULMAX8-NEXT: vrsub.vx v8, v10, a1
-; LMULMAX8-NEXT: li a1, 8
-; LMULMAX8-NEXT: vminu.vx v8, v8, a1
-; LMULMAX8-NEXT: vse8.v v8, (a0)
-; LMULMAX8-NEXT: ret
+; RVI-LABEL: ctlz_v16i8:
+; RVI: # %bb.0:
+; RVI-NEXT: vsetivli zero, 16, e8, m1, ta, ma
+; RVI-NEXT: vle8.v v8, (a0)
+; RVI-NEXT: vsrl.vi v9, v8, 1
+; RVI-NEXT: vor.vv v8, v8, v9
+; RVI-NEXT: vsrl.vi v9, v8, 2
+; RVI-NEXT: vor.vv v8, v8, v9
+; RVI-NEXT: vsrl.vi v9, v8, 4
+; RVI-NEXT: vor.vv v8, v8, v9
+; RVI-NEXT: vnot.v v8, v8
+; RVI-NEXT: vsrl.vi v9, v8, 1
+; RVI-NEXT: li a1, 85
+; RVI-NEXT: vand.vx v9, v9, a1
+; RVI-NEXT: vsub.vv v8, v8, v9
+; RVI-NEXT: li a1, 51
+; RVI-NEXT: vand.vx v9, v8, a1
+; RVI-NEXT: vsrl.vi v8, v8, 2
+; RVI-NEXT: vand.vx v8, v8, a1
+; RVI-NEXT: vadd.vv v8, v9, v8
+; RVI-NEXT: vsrl.vi v9, v8, 4
+; RVI-NEXT: vadd.vv v8, v8, v9
+; RVI-NEXT: vand.vi v8, v8, 15
+; RVI-NEXT: vse8.v v8, (a0)
+; RVI-NEXT: ret
+;
+; RVF-LABEL: ctlz_v16i8:
+; RVF: # %bb.0:
+; RVF-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RVF-NEXT: vle8.v v8, (a0)
+; RVF-NEXT: vzext.vf2 v10, v8
+; RVF-NEXT: vfwcvt.f.xu.v v12, v10
+; RVF-NEXT: vnsrl.wi v8, v12, 23
+; RVF-NEXT: vsetvli zero, zero, e8, m1, ta, ma
+; RVF-NEXT: vnsrl.wi v10, v8, 0
+; RVF-NEXT: li a1, 134
+; RVF-NEXT: vrsub.vx v8, v10, a1
+; RVF-NEXT: li a1, 8
+; RVF-NEXT: vminu.vx v8, v8, a1
+; RVF-NEXT: vse8.v v8, (a0)
+; RVF-NEXT: ret
+;
+; RVD-LABEL: ctlz_v16i8:
+; RVD: # %bb.0:
+; RVD-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RVD-NEXT: vle8.v v8, (a0)
+; RVD-NEXT: vzext.vf2 v10, v8
+; RVD-NEXT: vfwcvt.f.xu.v v12, v10
+; RVD-NEXT: vnsrl.wi v8, v12, 23
+; RVD-NEXT: vsetvli zero, zero, e8, m1, ta, ma
+; RVD-NEXT: vnsrl.wi v10, v8, 0
+; RVD-NEXT: li a1, 134
+; RVD-NEXT: vrsub.vx v8, v10, a1
+; RVD-NEXT: li a1, 8
+; RVD-NEXT: vminu.vx v8, v8, a1
+; RVD-NEXT: vse8.v v8, (a0)
+; RVD-NEXT: ret
;
; ZVBB-LABEL: ctlz_v16i8:
; ZVBB: # %bb.0:
@@ -73,175 +83,66 @@ define void @ctlz_v16i8(ptr %x, ptr %y) nounwind {
declare <16 x i8> @llvm.ctlz.v16i8(<16 x i8>, i1)
define void @ctlz_v8i16(ptr %x, ptr %y) nounwind {
-; LMULMAX2-RV32I-LABEL: ctlz_v8i16:
-; LMULMAX2-RV32I: # %bb.0:
-; LMULMAX2-RV32I-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; LMULMAX2-RV32I-NEXT: vle16.v v8, (a0)
-; LMULMAX2-RV32I-NEXT: vsrl.vi v9, v8, 1
-; LMULMAX2-RV32I-NEXT: vor.vv v8, v8, v9
-; LMULMAX2-RV32I-NEXT: vsrl.vi v9, v8, 2
-; LMULMAX2-RV32I-NEXT: vor.vv v8, v8, v9
-; LMULMAX2-RV32I-NEXT: vsrl.vi v9, v8, 4
-; LMULMAX2-RV32I-NEXT: vor.vv v8, v8, v9
-; LMULMAX2-RV32I-NEXT: vsrl.vi v9, v8, 8
-; LMULMAX2-RV32I-NEXT: vor.vv v8, v8, v9
-; LMULMAX2-RV32I-NEXT: vnot.v v8, v8
-; LMULMAX2-RV32I-NEXT: vsrl.vi v9, v8, 1
-; LMULMAX2-RV32I-NEXT: lui a1, 5
-; LMULMAX2-RV32I-NEXT: addi a1, a1, 1365
-; LMULMAX2-RV32I-NEXT: vand.vx v9, v9, a1
-; LMULMAX2-RV32I-NEXT: vsub.vv v8, v8, v9
-; LMULMAX2-RV32I-NEXT: lui a1, 3
-; LMULMAX2-RV32I-NEXT: addi a1, a1, 819
-; LMULMAX2-RV32I-NEXT: vand.vx v9, v8, a1
-; LMULMAX2-RV32I-NEXT: vsrl.vi v8, v8, 2
-; LMULMAX2-RV32I-NEXT: vand.vx v8, v8, a1
-; LMULMAX2-RV32I-NEXT: vadd.vv v8, v9, v8
-; LMULMAX2-RV32I-NEXT: vsrl.vi v9, v8, 4
-; LMULMAX2-RV32I-NEXT: vadd.vv v8, v8, v9
-; LMULMAX2-RV32I-NEXT: lui a1, 1
-; LMULMAX2-RV32I-NEXT: addi a1, a1, -241
-; LMULMAX2-RV32I-NEXT: vand.vx v8, v8, a1
-; LMULMAX2-RV32I-NEXT: li a1, 257
-; LMULMAX2-RV32I-NEXT: vmul.vx v8, v8, a1
-; LMULMAX2-RV32I-NEXT: vsrl.vi v8, v8, 8
-; LMULMAX2-RV32I-NEXT: vse16.v v8, (a0)
-; LMULMAX2-RV32I-NEXT: ret
-;
-; LMULMAX2-RV64I-LABEL: ctlz_v8i16:
-; LMULMAX2-RV64I: # %bb.0:
-; LMULMAX2-RV64I-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; LMULMAX2-RV64I-NEXT: vle16.v v8, (a0)
-; LMULMAX2-RV64I-NEXT: vsrl.vi v9, v8, 1
-; LMULMAX2-RV64I-NEXT: vor.vv v8, v8, v9
-; LMULMAX2-RV64I-NEXT: vsrl.vi v9, v8, 2
-; LMULMAX2-RV64I-NEXT: vor.vv v8, v8, v9
-; LMULMAX2-RV64I-NEXT: vsrl.vi v9, v8, 4
-; LMULMAX2-RV64I-NEXT: vor.vv v8, v8, v9
-; LMULMAX2-RV64I-NEXT: vsrl.vi v9, v8, 8
-; LMULMAX2-RV64I-NEXT: vor.vv v8, v8, v9
-; LMULMAX2-RV64I-NEXT: vnot.v v8, v8
-; LMULMAX2-RV64I-NEXT: vsrl.vi v9, v8, 1
-; LMULMAX2-RV64I-NEXT: lui a1, 5
-; LMULMAX2-RV64I-NEXT: addi a1, a1, 1365
-; LMULMAX2-RV64I-NEXT: vand.vx v9, v9, a1
-; LMULMAX2-RV64I-NEXT: vsub.vv v8, v8, v9
-; LMULMAX2-RV64I-NEXT: lui a1, 3
-; LMULMAX2-RV64I-NEXT: addi a1, a1, 819
-; LMULMAX2-RV64I-NEXT: vand.vx v9, v8, a1
-; LMULMAX2-RV64I-NEXT: vsrl.vi v8, v8, 2
-; LMULMAX2-RV64I-NEXT: vand.vx v8, v8, a1
-; LMULMAX2-RV64I-NEXT: vadd.vv v8, v9, v8
-; LMULMAX2-RV64I-NEXT: vsrl.vi v9, v8, 4
-; LMULMAX2-RV64I-NEXT: vadd.vv v8, v8, v9
-; LMULMAX2-RV64I-NEXT: lui a1, 1
-; LMULMAX2-RV64I-NEXT: addi a1, a1, -241
-; LMULMAX2-RV64I-NEXT: vand.vx v8, v8, a1
-; LMULMAX2-RV64I-NEXT: li a1, 257
-; LMULMAX2-RV64I-NEXT: vmul.vx v8, v8, a1
-; LMULMAX2-RV64I-NEXT: vsrl.vi v8, v8, 8
-; LMULMAX2-RV64I-NEXT: vse16.v v8, (a0)
-; LMULMAX2-RV64I-NEXT: ret
-;
-; LMULMAX1-LABEL: ctlz_v8i16:
-; LMULMAX1: # %bb.0:
-; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; LMULMAX1-NEXT: vle16.v v8, (a0)
-; LMULMAX1-NEXT: vsrl.vi v9, v8, 1
-; LMULMAX1-NEXT: vor.vv v8, v8, v9
-; LMULMAX1-NEXT: vsrl.vi v9, v8, 2
-; LMULMAX1-NEXT: vor.vv v8, v8, v9
-; LMULMAX1-NEXT: vsrl.vi v9, v8, 4
-; LMULMAX1-NEXT: vor.vv v8, v8, v9
-; LMULMAX1-NEXT: vsrl.vi v9, v8, 8
-; LMULMAX1-NEXT: vor.vv v8, v8, v9
-; LMULMAX1-NEXT: vnot.v v8, v8
-; LMULMAX1-NEXT: vsrl.vi v9, v8, 1
-; LMULMAX1-NEXT: lui a1, 5
-; LMULMAX1-NEXT: addi a1, a1, 1365
-; LMULMAX1-NEXT: vand.vx v9, v9, a1
-; LMULMAX1-NEXT: vsub.vv v8, v8, v9
-; LMULMAX1-NEXT: lui a1, 3
-; LMULMAX1-NEXT: addi a1, a1, 819
-; LMULMAX1-NEXT: vand.vx v9, v8, a1
-; LMULMAX1-NEXT: vsrl.vi v8, v8, 2
-; LMULMAX1-NEXT: vand.vx v8, v8, a1
-; LMULMAX1-NEXT: vadd.vv v8, v9, v8
-; LMULMAX1-NEXT: vsrl.vi v9, v8, 4
-; LMULMAX1-NEXT: vadd.vv v8, v8, v9
-; LMULMAX1-NEXT: lui a1, 1
-; LMULMAX1-NEXT: addi a1, a1, -241
-; LMULMAX1-NEXT: vand.vx v8, v8, a1
-; LMULMAX1-NEXT: li a1, 257
-; LMULMAX1-NEXT: vmul.vx v8, v8, a1
-; LMULMAX1-NEXT: vsrl.vi v8, v8, 8
-; LMULMAX1-NEXT: vse16.v v8, (a0)
-; LMULMAX1-NEXT: ret
-;
-; LMULMAX2-RV32F-LABEL: ctlz_v8i16:
-; LMULMAX2-RV32F: # %bb.0:
-; LMULMAX2-RV32F-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; LMULMAX2-RV32F-NEXT: vle16.v v8, (a0)
-; LMULMAX2-RV32F-NEXT: vfwcvt.f.xu.v v10, v8
-; LMULMAX2-RV32F-NEXT: vnsrl.wi v8, v10, 23
-; LMULMAX2-RV32F-NEXT: li a1, 142
-; LMULMAX2-RV32F-NEXT: vrsub.vx v8, v8, a1
-; LMULMAX2-RV32F-NEXT: li a1, 16
-; LMULMAX2-RV32F-NEXT: vminu.vx v8, v8, a1
-; LMULMAX2-RV32F-NEXT: vse16.v v8, (a0)
-; LMULMAX2-RV32F-NEXT: ret
-;
-; LMULMAX2-RV64F-LABEL: ctlz_v8i16:
-; LMULMAX2-RV64F: # %bb.0:
-; LMULMAX2-RV64F-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; LMULMAX2-RV64F-NEXT: vle16.v v8, (a0)
-; LMULMAX2-RV64F-NEXT: vfwcvt.f.xu.v v10, v8
-; LMULMAX2-RV64F-NEXT: vnsrl.wi v8, v10, 23
-; LMULMAX2-RV64F-NEXT: li a1, 142
-; LMULMAX2-RV64F-NEXT: vrsub.vx v8, v8, a1
-; LMULMAX2-RV64F-NEXT: li a1, 16
-; LMULMAX2-RV64F-NEXT: vminu.vx v8, v8, a1
-; LMULMAX2-RV64F-NEXT: vse16.v v8, (a0)
-; LMULMAX2-RV64F-NEXT: ret
-;
-; LMULMAX2-RV32D-LABEL: ctlz_v8i16:
-; LMULMAX2-RV32D: # %bb.0:
-; LMULMAX2-RV32D-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; LMULMAX2-RV32D-NEXT: vle16.v v8, (a0)
-; LMULMAX2-RV32D-NEXT: vfwcvt.f.xu.v v10, v8
-; LMULMAX2-RV32D-NEXT: vnsrl.wi v8, v10, 23
-; LMULMAX2-RV32D-NEXT: li a1, 142
-; LMULMAX2-RV32D-NEXT: vrsub.vx v8, v8, a1
-; LMULMAX2-RV32D-NEXT: li a1, 16
-; LMULMAX2-RV32D-NEXT: vminu.vx v8, v8, a1
-; LMULMAX2-RV32D-NEXT: vse16.v v8, (a0)
-; LMULMAX2-RV32D-NEXT: ret
-;
-; LMULMAX2-RV64D-LABEL: ctlz_v8i16:
-; LMULMAX2-RV64D: # %bb.0:
-; LMULMAX2-RV64D-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; LMULMAX2-RV64D-NEXT: vle16.v v8, (a0)
-; LMULMAX2-RV64D-NEXT: vfwcvt.f.xu.v v10, v8
-; LMULMAX2-RV64D-NEXT: vnsrl.wi v8, v10, 23
-; LMULMAX2-RV64D-NEXT: li a1, 142
-; LMULMAX2-RV64D-NEXT: vrsub.vx v8, v8, a1
-; LMULMAX2-RV64D-NEXT: li a1, 16
-; LMULMAX2-RV64D-NEXT: vminu.vx v8, v8, a1
-; LMULMAX2-RV64D-NEXT: vse16.v v8, (a0)
-; LMULMAX2-RV64D-NEXT: ret
-;
-; LMULMAX8-LABEL: ctlz_v8i16:
-; LMULMAX8: # %bb.0:
-; LMULMAX8-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; LMULMAX8-NEXT: vle16.v v8, (a0)
-; LMULMAX8-NEXT: vfwcvt.f.xu.v v10, v8
-; LMULMAX8-NEXT: vnsrl.wi v8, v10, 23
-; LMULMAX8-NEXT: li a1, 142
-; LMULMAX8-NEXT: vrsub.vx v8, v8, a1
-; LMULMAX8-NEXT: li a1, 16
-; LMULMAX8-NEXT: vminu.vx v8, v8, a1
-; LMULMAX8-NEXT: vse16.v v8, (a0)
-; LMULMAX8-NEXT: ret
+; RVI-LABEL: ctlz_v8i16:
+; RVI: # %bb.0:
+; RVI-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RVI-NEXT: vle16.v v8, (a0)
+; RVI-NEXT: vsrl.vi v9, v8, 1
+; RVI-NEXT: vor.vv v8, v8, v9
+; RVI-NEXT: vsrl.vi v9, v8, 2
+; RVI-NEXT: vor.vv v8, v8, v9
+; RVI-NEXT: vsrl.vi v9, v8, 4
+; RVI-NEXT: vor.vv v8, v8, v9
+; RVI-NEXT: vsrl.vi v9, v8, 8
+; RVI-NEXT: vor.vv v8, v8, v9
+; RVI-NEXT: vnot.v v8, v8
+; RVI-NEXT: vsrl.vi v9, v8, 1
+; RVI-NEXT: lui a1, 5
+; RVI-NEXT: addi a1, a1, 1365
+; RVI-NEXT: vand.vx v9, v9, a1
+; RVI-NEXT: vsub.vv v8, v8, v9
+; RVI-NEXT: lui a1, 3
+; RVI-NEXT: addi a1, a1, 819
+; RVI-NEXT: vand.vx v9, v8, a1
+; RVI-NEXT: vsrl.vi v8, v8, 2
+; RVI-NEXT: vand.vx v8, v8, a1
+; RVI-NEXT: vadd.vv v8, v9, v8
+; RVI-NEXT: vsrl.vi v9, v8, 4
+; RVI-NEXT: vadd.vv v8, v8, v9
+; RVI-NEXT: lui a1, 1
+; RVI-NEXT: addi a1, a1, -241
+; RVI-NEXT: vand.vx v8, v8, a1
+; RVI-NEXT: li a1, 257
+; RVI-NEXT: vmul.vx v8, v8, a1
+; RVI-NEXT: vsrl.vi v8, v8, 8
+; RVI-NEXT: vse16.v v8, (a0)
+; RVI-NEXT: ret
+;
+; RVF-LABEL: ctlz_v8i16:
+; RVF: # %bb.0:
+; RVF-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RVF-NEXT: vle16.v v8, (a0)
+; RVF-NEXT: vfwcvt.f.xu.v v10, v8
+; RVF-NEXT: vnsrl.wi v8, v10, 23
+; RVF-NEXT: li a1, 142
+; RVF-NEXT: vrsub.vx v8, v8, a1
+; RVF-NEXT: li a1, 16
+; RVF-NEXT: vminu.vx v8, v8, a1
+; RVF-NEXT: vse16.v v8, (a0)
+; RVF-NEXT: ret
+;
+; RVD-LABEL: ctlz_v8i16:
+; RVD: # %bb.0:
+; RVD-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RVD-NEXT: vle16.v v8, (a0)
+; RVD-NEXT: vfwcvt.f.xu.v v10, v8
+; RVD-NEXT: vnsrl.wi v8, v10, 23
+; RVD-NEXT: li a1, 142
+; RVD-NEXT: vrsub.vx v8, v8, a1
+; RVD-NEXT: li a1, 16
+; RVD-NEXT: vminu.vx v8, v8, a1
+; RVD-NEXT: vse16.v v8, (a0)
+; RVD-NEXT: ret
;
; ZVBB-LABEL: ctlz_v8i16:
; ZVBB: # %bb.0:
@@ -259,153 +160,72 @@ define void @ctlz_v8i16(ptr %x, ptr %y) nounwind {
declare <8 x i16> @llvm.ctlz.v8i16(<8 x i16>, i1)
define void @ctlz_v4i32(ptr %x, ptr %y) nounwind {
-; LMULMAX2-RV32I-LABEL: ctlz_v4i32:
-; LMULMAX2-RV32I: # %bb.0:
-; LMULMAX2-RV32I-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; LMULMAX2-RV32I-NEXT: vle32.v v8, (a0)
-; LMULMAX2-RV32I-NEXT: vsrl.vi v9, v8, 1
-; LMULMAX2-RV32I-NEXT: vor.vv v8, v8, v9
-; LMULMAX2-RV32I-NEXT: vsrl.vi v9, v8, 2
-; LMULMAX2-RV32I-NEXT: vor.vv v8, v8, v9
-; LMULMAX2-RV32I-NEXT: vsrl.vi v9, v8, 4
-; LMULMAX2-RV32I-NEXT: vor.vv v8, v8, v9
-; LMULMAX2-RV32I-NEXT: vsrl.vi v9, v8, 8
-; LMULMAX2-RV32I-NEXT: vor.vv v8, v8, v9
-; LMULMAX2-RV32I-NEXT: vsrl.vi v9, v8, 16
-; LMULMAX2-RV32I-NEXT: vor.vv v8, v8, v9
-; LMULMAX2-RV32I-NEXT: vnot.v v8, v8
-; LMULMAX2-RV32I-NEXT: vsrl.vi v9, v8, 1
-; LMULMAX2-RV32I-NEXT: lui a1, 349525
-; LMULMAX2-RV32I-NEXT: addi a1, a1, 1365
-; LMULMAX2-RV32I-NEXT: vand.vx v9, v9, a1
-; LMULMAX2-RV32I-NEXT: vsub.vv v8, v8, v9
-; LMULMAX2-RV32I-NEXT: lui a1, 209715
-; LMULMAX2-RV32I-NEXT: addi a1, a1, 819
-; LMULMAX2-RV32I-NEXT: vand.vx v9, v8, a1
-; LMULMAX2-RV32I-NEXT: vsrl.vi v8, v8, 2
-; LMULMAX2-RV32I-NEXT: vand.vx v8, v8, a1
-; LMULMAX2-RV32I-NEXT: vadd.vv v8, v9, v8
-; LMULMAX2-RV32I-NEXT: vsrl.vi v9, v8, 4
-; LMULMAX2-RV32I-NEXT: vadd.vv v8, v8, v9
-; LMULMAX2-RV32I-NEXT: lui a1, 61681
-; LMULMAX2-RV32I-NEXT: addi a1, a1, -241
-; LMULMAX2-RV32I-NEXT: vand.vx v8, v8, a1
-; LMULMAX2-RV32I-NEXT: lui a1, 4112
-; LMULMAX2-RV32I-NEXT: addi a1, a1, 257
-; LMULMAX2-RV32I-NEXT: vmul.vx v8, v8, a1
-; LMULMAX2-RV32I-NEXT: vsrl.vi v8, v8, 24
-; LMULMAX2-RV32I-NEXT: vse32.v v8, (a0)
-; LMULMAX2-RV32I-NEXT: ret
-;
-; LMULMAX2-RV64I-LABEL: ctlz_v4i32:
-; LMULMAX2-RV64I: # %bb.0:
-; LMULMAX2-RV64I-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; LMULMAX2-RV64I-NEXT: vle32.v v8, (a0)
-; LMULMAX2-RV64I-NEXT: vsrl.vi v9, v8, 1
-; LMULMAX2-RV64I-NEXT: vor.vv v8, v8, v9
-; LMULMAX2-RV64I-NEXT: vsrl.vi v9, v8, 2
-; LMULMAX2-RV64I-NEXT: vor.vv v8, v8, v9
-; LMULMAX2-RV64I-NEXT: vsrl.vi v9, v8, 4
-; LMULMAX2-RV64I-NEXT: vor.vv v8, v8, v9
-; LMULMAX2-RV64I-NEXT: vsrl.vi v9, v8, 8
-; LMULMAX2-RV64I-NEXT: vor.vv v8, v8, v9
-; LMULMAX2-RV64I-NEXT: vsrl.vi v9, v8, 16
-; LMULMAX2-RV64I-NEXT: vor.vv v8, v8, v9
-; LMULMAX2-RV64I-NEXT: vnot.v v8, v8
-; LMULMAX2-RV64I-NEXT: vsrl.vi v9, v8, 1
-; LMULMAX2-RV64I-NEXT: lui a1, 349525
-; LMULMAX2-RV64I-NEXT: addi a1, a1, 1365
-; LMULMAX2-RV64I-NEXT: vand.vx v9, v9, a1
-; LMULMAX2-RV64I-NEXT: vsub.vv v8, v8, v9
-; LMULMAX2-RV64I-NEXT: lui a1, 209715
-; LMULMAX2-RV64I-NEXT: addi a1, a1, 819
-; LMULMAX2-RV64I-NEXT: vand.vx v9, v8, a1
-; LMULMAX2-RV64I-NEXT: vsrl.vi v8, v8, 2
-; LMULMAX2-RV64I-NEXT: vand.vx v8, v8, a1
-; LMULMAX2-RV64I-NEXT: vadd.vv v8, v9, v8
-; LMULMAX2-RV64I-NEXT: vsrl.vi v9, v8, 4
-; LMULMAX2-RV64I-NEXT: vadd.vv v8, v8, v9
-; LMULMAX2-RV64I-NEXT: lui a1, 61681
-; LMULMAX2-RV64I-NEXT: addi a1, a1, -241
-; LMULMAX2-RV64I-NEXT: vand.vx v8, v8, a1
-; LMULMAX2-RV64I-NEXT: lui a1, 4112
-; LMULMAX2-RV64I-NEXT: addi a1, a1, 257
-; LMULMAX2-RV64I-NEXT: vmul.vx v8, v8, a1
-; LMULMAX2-RV64I-NEXT: vsrl.vi v8, v8, 24
-; LMULMAX2-RV64I-NEXT: vse32.v v8, (a0)
-; LMULMAX2-RV64I-NEXT: ret
-;
-; LMULMAX2-RV32F-LABEL: ctlz_v4i32:
-; LMULMAX2-RV32F: # %bb.0:
-; LMULMAX2-RV32F-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; LMULMAX2-RV32F-NEXT: vle32.v v8, (a0)
-; LMULMAX2-RV32F-NEXT: fsrmi a1, 1
-; LMULMAX2-RV32F-NEXT: vfcvt.f.xu.v v8, v8
-; LMULMAX2-RV32F-NEXT: fsrm a1
-; LMULMAX2-RV32F-NEXT: vsrl.vi v8, v8, 23
-; LMULMAX2-RV32F-NEXT: li a1, 158
-; LMULMAX2-RV32F-NEXT: vrsub.vx v8, v8, a1
-; LMULMAX2-RV32F-NEXT: li a1, 32
-; LMULMAX2-RV32F-NEXT: vminu.vx v8, v8, a1
-; LMULMAX2-RV32F-NEXT: vse32.v v8, (a0)
-; LMULMAX2-RV32F-NEXT: ret
-;
-; LMULMAX2-RV64F-LABEL: ctlz_v4i32:
-; LMULMAX2-RV64F: # %bb.0:
-; LMULMAX2-RV64F-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; LMULMAX2-RV64F-NEXT: vle32.v v8, (a0)
-; LMULMAX2-RV64F-NEXT: fsrmi a1, 1
-; LMULMAX2-RV64F-NEXT: vfcvt.f.xu.v v8, v8
-; LMULMAX2-RV64F-NEXT: fsrm a1
-; LMULMAX2-RV64F-NEXT: vsrl.vi v8, v8, 23
-; LMULMAX2-RV64F-NEXT: li a1, 158
-; LMULMAX2-RV64F-NEXT: vrsub.vx v8, v8, a1
-; LMULMAX2-RV64F-NEXT: li a1, 32
-; LMULMAX2-RV64F-NEXT: vminu.vx v8, v8, a1
-; LMULMAX2-RV64F-NEXT: vse32.v v8, (a0)
-; LMULMAX2-RV64F-NEXT: ret
-;
-; LMULMAX2-RV32D-LABEL: ctlz_v4i32:
-; LMULMAX2-RV32D: # %bb.0:
-; LMULMAX2-RV32D-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; LMULMAX2-RV32D-NEXT: vle32.v v8, (a0)
-; LMULMAX2-RV32D-NEXT: vfwcvt.f.xu.v v10, v8
-; LMULMAX2-RV32D-NEXT: li a1, 52
-; LMULMAX2-RV32D-NEXT: vnsrl.wx v8, v10, a1
-; LMULMAX2-RV32D-NEXT: li a1, 1054
-; LMULMAX2-RV32D-NEXT: vrsub.vx v8, v8, a1
-; LMULMAX2-RV32D-NEXT: li a1, 32
-; LMULMAX2-RV32D-NEXT: vminu.vx v8, v8, a1
-; LMULMAX2-RV32D-NEXT: vse32.v v8, (a0)
-; LMULMAX2-RV32D-NEXT: ret
-;
-; LMULMAX2-RV64D-LABEL: ctlz_v4i32:
-; LMULMAX2-RV64D: # %bb.0:
-; LMULMAX2-RV64D-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; LMULMAX2-RV64D-NEXT: vle32.v v8, (a0)
-; LMULMAX2-RV64D-NEXT: vfwcvt.f.xu.v v10, v8
-; LMULMAX2-RV64D-NEXT: li a1, 52
-; LMULMAX2-RV64D-NEXT: vnsrl.wx v8, v10, a1
-; LMULMAX2-RV64D-NEXT: li a1, 1054
-; LMULMAX2-RV64D-NEXT: vrsub.vx v8, v8, a1
-; LMULMAX2-RV64D-NEXT: li a1, 32
-; LMULMAX2-RV64D-NEXT: vminu.vx v8, v8, a1
-; LMULMAX2-RV64D-NEXT: vse32.v v8, (a0)
-; LMULMAX2-RV64D-NEXT: ret
-;
-; LMULMAX8-LABEL: ctlz_v4i32:
-; LMULMAX8: # %bb.0:
-; LMULMAX8-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; LMULMAX8-NEXT: vle32.v v8, (a0)
-; LMULMAX8-NEXT: vfwcvt.f.xu.v v10, v8
-; LMULMAX8-NEXT: li a1, 52
-; LMULMAX8-NEXT: vnsrl.wx v8, v10, a1
-; LMULMAX8-NEXT: li a1, 1054
-; LMULMAX8-NEXT: vrsub.vx v8, v8, a1
-; LMULMAX8-NEXT: li a1, 32
-; LMULMAX8-NEXT: vminu.vx v8, v8, a1
-; LMULMAX8-NEXT: vse32.v v8, (a0)
-; LMULMAX8-NEXT: ret
+; RVI-LABEL: ctlz_v4i32:
+; RVI: # %bb.0:
+; RVI-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; RVI-NEXT: vle32.v v8, (a0)
+; RVI-NEXT: vsrl.vi v9, v8, 1
+; RVI-NEXT: vor.vv v8, v8, v9
+; RVI-NEXT: vsrl.vi v9, v8, 2
+; RVI-NEXT: vor.vv v8, v8, v9
+; RVI-NEXT: vsrl.vi v9, v8, 4
+; RVI-NEXT: vor.vv v8, v8, v9
+; RVI-NEXT: vsrl.vi v9, v8, 8
+; RVI-NEXT: vor.vv v8, v8, v9
+; RVI-NEXT: vsrl.vi v9, v8, 16
+; RVI-NEXT: vor.vv v8, v8, v9
+; RVI-NEXT: vnot.v v8, v8
+; RVI-NEXT: vsrl.vi v9, v8, 1
+; RVI-NEXT: lui a1, 349525
+; RVI-NEXT: addi a1, a1, 1365
+; RVI-NEXT: vand.vx v9, v9, a1
+; RVI-NEXT: vsub.vv v8, v8, v9
+; RVI-NEXT: lui a1, 209715
+; RVI-NEXT: addi a1, a1, 819
+; RVI-NEXT: vand.vx v9, v8, a1
+; RVI-NEXT: vsrl.vi v8, v8, 2
+; RVI-NEXT: vand.vx v8, v8, a1
+; RVI-NEXT: vadd.vv v8, v9, v8
+; RVI-NEXT: vsrl.vi v9, v8, 4
+; RVI-NEXT: vadd.vv v8, v8, v9
+; RVI-NEXT: lui a1, 61681
+; RVI-NEXT: addi a1, a1, -241
+; RVI-NEXT: vand.vx v8, v8, a1
+; RVI-NEXT: lui a1, 4112
+; RVI-NEXT: addi a1, a1, 257
+; RVI-NEXT: vmul.vx v8, v8, a1
+; RVI-NEXT: vsrl.vi v8, v8, 24
+; RVI-NEXT: vse32.v v8, (a0)
+; RVI-NEXT: ret
+;
+; RVF-LABEL: ctlz_v4i32:
+; RVF: # %bb.0:
+; RVF-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; RVF-NEXT: vle32.v v8, (a0)
+; RVF-NEXT: fsrmi a1, 1
+; RVF-NEXT: vfcvt.f.xu.v v8, v8
+; RVF-NEXT: fsrm a1
+; RVF-NEXT: vsrl.vi v8, v8, 23
+; RVF-NEXT: li a1, 158
+; RVF-NEXT: vrsub.vx v8, v8, a1
+; RVF-NEXT: li a1, 32
+; RVF-NEXT: vminu.vx v8, v8, a1
+; RVF-NEXT: vse32.v v8, (a0)
+; RVF-NEXT: ret
+;
+; RVD-LABEL: ctlz_v4i32:
+; RVD: # %bb.0:
+; RVD-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; RVD-NEXT: vle32.v v8, (a0)
+; RVD-NEXT: vfwcvt.f.xu.v v10, v8
+; RVD-NEXT: li a1, 52
+; RVD-NEXT: vnsrl.wx v8, v10, a1
+; RVD-NEXT: li a1, 1054
+; RVD-NEXT: vrsub.vx v8, v8, a1
+; RVD-NEXT: li a1, 32
+; RVD-NEXT: vminu.vx v8, v8, a1
+; RVD-NEXT: vse32.v v8, (a0)
+; RVD-NEXT: ret
;
; ZVBB-LABEL: ctlz_v4i32:
; ZVBB: # %bb.0:
@@ -423,192 +243,160 @@ define void @ctlz_v4i32(ptr %x, ptr %y) nounwind {
declare <4 x i32> @llvm.ctlz.v4i32(<4 x i32>, i1)
define void @ctlz_v2i64(ptr %x, ptr %y) nounwind {
-; LMULMAX2-RV32I-LABEL: ctlz_v2i64:
-; LMULMAX2-RV32I: # %bb.0:
-; LMULMAX2-RV32I-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; LMULMAX2-RV32I-NEXT: vle64.v v8, (a0)
-; LMULMAX2-RV32I-NEXT: vsrl.vi v9, v8, 1
-; LMULMAX2-RV32I-NEXT: vor.vv v8, v8, v9
-; LMULMAX2-RV32I-NEXT: vsrl.vi v9, v8, 2
-; LMULMAX2-RV32I-NEXT: vor.vv v8, v8, v9
-; LMULMAX2-RV32I-NEXT: vsrl.vi v9, v8, 4
-; LMULMAX2-RV32I-NEXT: vor.vv v8, v8, v9
-; LMULMAX2-RV32I-NEXT: vsrl.vi v9, v8, 8
-; LMULMAX2-RV32I-NEXT: vor.vv v8, v8, v9
-; LMULMAX2-RV32I-NEXT: vsrl.vi v9, v8, 16
-; LMULMAX2-RV32I-NEXT: vor.vv v8, v8, v9
-; LMULMAX2-RV32I-NEXT: li a1, 32
-; LMULMAX2-RV32I-NEXT: vsrl.vx v9, v8, a1
-; LMULMAX2-RV32I-NEXT: vor.vv v8, v8, v9
-; LMULMAX2-RV32I-NEXT: vnot.v v8, v8
-; LMULMAX2-RV32I-NEXT: vsrl.vi v9, v8, 1
-; LMULMAX2-RV32I-NEXT: lui a1, 349525
-; LMULMAX2-RV32I-NEXT: addi a1, a1, 1365
-; LMULMAX2-RV32I-NEXT: vsetvli a2, zero, e32, m1, ta, ma
-; LMULMAX2-RV32I-NEXT: vmv.v.x v10, a1
-; LMULMAX2-RV32I-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; LMULMAX2-RV32I-NEXT: vand.vv v9, v9, v10
-; LMULMAX2-RV32I-NEXT: vsub.vv v8, v8, v9
-; LMULMAX2-RV32I-NEXT: lui a1, 209715
-; LMULMAX2-RV32I-NEXT: addi a1, a1, 819
-; LMULMAX2-RV32I-NEXT: vsetvli a2, zero, e32, m1, ta, ma
-; LMULMAX2-RV32I-NEXT: vmv.v.x v9, a1
-; LMULMAX2-RV32I-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; LMULMAX2-RV32I-NEXT: vand.vv v10, v8, v9
-; LMULMAX2-RV32I-NEXT: vsrl.vi v8, v8, 2
-; LMULMAX2-RV32I-NEXT: vand.vv v8, v8, v9
-; LMULMAX2-RV32I-NEXT: vadd.vv v8, v10, v8
-; LMULMAX2-RV32I-NEXT: vsrl.vi v9, v8, 4
-; LMULMAX2-RV32I-NEXT: vadd.vv v8, v8, v9
-; LMULMAX2-RV32I-NEXT: lui a1, 61681
-; LMULMAX2-RV32I-NEXT: addi a1, a1, -241
-; LMULMAX2-RV32I-NEXT: vsetvli a2, zero, e32, m1, ta, ma
-; LMULMAX2-RV32I-NEXT: vmv.v.x v9, a1
-; LMULMAX2-RV32I-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; LMULMAX2-RV32I-NEXT: vand.vv v8, v8, v9
-; LMULMAX2-RV32I-NEXT: lui a1, 4112
-; LMULMAX2-RV32I-NEXT: addi a1, a1, 257
-; LMULMAX2-RV32I-NEXT: vsetvli a2, zero, e32, m1, ta, ma
-; LMULMAX2-RV32I-NEXT: vmv.v.x v9, a1
-; LMULMAX2-RV32I-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; LMULMAX2-RV32I-NEXT: vmul.vv v8, v8, v9
-; LMULMAX2-RV32I-NEXT: li a1, 56
-; LMULMAX2-RV32I-NEXT: vsrl.vx v8, v8, a1
-; LMULMAX2-RV32I-NEXT: vse64.v v8, (a0)
-; LMULMAX2-RV32I-NEXT: ret
-;
-; LMULMAX2-RV64I-LABEL: ctlz_v2i64:
-; LMULMAX2-RV64I: # %bb.0:
-; LMULMAX2-RV64I-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; LMULMAX2-RV64I-NEXT: vle64.v v8, (a0)
-; LMULMAX2-RV64I-NEXT: vsrl.vi v9, v8, 1
-; LMULMAX2-RV64I-NEXT: vor.vv v8, v8, v9
-; LMULMAX2-RV64I-NEXT: vsrl.vi v9, v8, 2
-; LMULMAX2-RV64I-NEXT: vor.vv v8, v8, v9
-; LMULMAX2-RV64I-NEXT: vsrl.vi v9, v8, 4
-; LMULMAX2-RV64I-NEXT: vor.vv v8, v8, v9
-; LMULMAX2-RV64I-NEXT: vsrl.vi v9, v8, 8
-; LMULMAX2-RV64I-NEXT: vor.vv v8, v8, v9
-; LMULMAX2-RV64I-NEXT: vsrl.vi v9, v8, 16
-; LMULMAX2-RV64I-NEXT: vor.vv v8, v8, v9
-; LMULMAX2-RV64I-NEXT: li a1, 32
-; LMULMAX2-RV64I-NEXT: vsrl.vx v9, v8, a1
-; LMULMAX2-RV64I-NEXT: vor.vv v8, v8, v9
-; LMULMAX2-RV64I-NEXT: vnot.v v8, v8
-; LMULMAX2-RV64I-NEXT: vsrl.vi v9, v8, 1
-; LMULMAX2-RV64I-NEXT: lui a1, 349525
-; LMULMAX2-RV64I-NEXT: addiw a1, a1, 1365
-; LMULMAX2-RV64I-NEXT: slli a2, a1, 32
-; LMULMAX2-RV64I-NEXT: add a1, a1, a2
-; LMULMAX2-RV64I-NEXT: vand.vx v9, v9, a1
-; LMULMAX2-RV64I-NEXT: vsub.vv v8, v8, v9
-; LMULMAX2-RV64I-NEXT: lui a1, 209715
-; LMULMAX2-RV64I-NEXT: addiw a1, a1, 819
-; LMULMAX2-RV64I-NEXT: slli a2, a1, 32
-; LMULMAX2-RV64I-NEXT: add a1, a1, a2
-; LMULMAX2-RV64I-NEXT: vand.vx v9, v8, a1
-; LMULMAX2-RV64I-NEXT: vsrl.vi v8, v8, 2
-; LMULMAX2-RV64I-NEXT: vand.vx v8, v8, a1
-; LMULMAX2-RV64I-NEXT: vadd.vv v8, v9, v8
-; LMULMAX2-RV64I-NEXT: vsrl.vi v9, v8, 4
-; LMULMAX2-RV64I-NEXT: vadd.vv v8, v8, v9
-; LMULMAX2-RV64I-NEXT: lui a1, 61681
-; LMULMAX2-RV64I-NEXT: addiw a1, a1, -241
-; LMULMAX2-RV64I-NEXT: slli a2, a1, 32
-; LMULMAX2-RV64I-NEXT: add a1, a1, a2
-; LMULMAX2-RV64I-NEXT: vand.vx v8, v8, a1
-; LMULMAX2-RV64I-NEXT: lui a1, 4112
-; LMULMAX2-RV64I-NEXT: addiw a1, a1, 257
-; LMULMAX2-RV64I-NEXT: slli a2, a1, 32
-; LMULMAX2-RV64I-NEXT: add a1, a1, a2
-; LMULMAX2-RV64I-NEXT: vmul.vx v8, v8, a1
-; LMULMAX2-RV64I-NEXT: li a1, 56
-; LMULMAX2-RV64I-NEXT: vsrl.vx v8, v8, a1
-; LMULMAX2-RV64I-NEXT: vse64.v v8, (a0)
-; LMULMAX2-RV64I-NEXT: ret
-;
-; LMULMAX2-RV32F-LABEL: ctlz_v2i64:
-; LMULMAX2-RV32F: # %bb.0:
-; LMULMAX2-RV32F-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; LMULMAX2-RV32F-NEXT: vle64.v v8, (a0)
-; LMULMAX2-RV32F-NEXT: li a1, 190
-; LMULMAX2-RV32F-NEXT: vmv.v.x v9, a1
-; LMULMAX2-RV32F-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
-; LMULMAX2-RV32F-NEXT: fsrmi a1, 1
-; LMULMAX2-RV32F-NEXT: vfncvt.f.xu.w v10, v8
-; LMULMAX2-RV32F-NEXT: fsrm a1
-; LMULMAX2-RV32F-NEXT: vsrl.vi v8, v10, 23
-; LMULMAX2-RV32F-NEXT: vwsubu.wv v9, v9, v8
-; LMULMAX2-RV32F-NEXT: li a1, 64
-; LMULMAX2-RV32F-NEXT: vsetvli zero, zero, e64, m1, ta, ma
-; LMULMAX2-RV32F-NEXT: vminu.vx v8, v9, a1
-; LMULMAX2-RV32F-NEXT: vse64.v v8, (a0)
-; LMULMAX2-RV32F-NEXT: ret
-;
-; LMULMAX2-RV64F-LABEL: ctlz_v2i64:
-; LMULMAX2-RV64F: # %bb.0:
-; LMULMAX2-RV64F-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
-; LMULMAX2-RV64F-NEXT: vle64.v v8, (a0)
-; LMULMAX2-RV64F-NEXT: li a1, 190
-; LMULMAX2-RV64F-NEXT: vmv.v.x v9, a1
-; LMULMAX2-RV64F-NEXT: fsrmi a1, 1
-; LMULMAX2-RV64F-NEXT: vfncvt.f.xu.w v10, v8
-; LMULMAX2-RV64F-NEXT: fsrm a1
-; LMULMAX2-RV64F-NEXT: vsrl.vi v8, v10, 23
-; LMULMAX2-RV64F-NEXT: vwsubu.vv v10, v9, v8
-; LMULMAX2-RV64F-NEXT: li a1, 64
-; LMULMAX2-RV64F-NEXT: vsetvli zero, zero, e64, m1, ta, ma
-; LMULMAX2-RV64F-NEXT: vminu.vx v8, v10, a1
-; LMULMAX2-RV64F-NEXT: vse64.v v8, (a0)
-; LMULMAX2-RV64F-NEXT: ret
-;
-; LMULMAX2-RV32D-LABEL: ctlz_v2i64:
-; LMULMAX2-RV32D: # %bb.0:
-; LMULMAX2-RV32D-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; LMULMAX2-RV32D-NEXT: vle64.v v8, (a0)
-; LMULMAX2-RV32D-NEXT: fsrmi a1, 1
-; LMULMAX2-RV32D-NEXT: vfcvt.f.xu.v v8, v8
-; LMULMAX2-RV32D-NEXT: fsrm a1
-; LMULMAX2-RV32D-NEXT: li a1, 52
-; LMULMAX2-RV32D-NEXT: vsrl.vx v8, v8, a1
-; LMULMAX2-RV32D-NEXT: li a1, 1086
-; LMULMAX2-RV32D-NEXT: vrsub.vx v8, v8, a1
-; LMULMAX2-RV32D-NEXT: li a1, 64
-; LMULMAX2-RV32D-NEXT: vminu.vx v8, v8, a1
-; LMULMAX2-RV32D-NEXT: vse64.v v8, (a0)
-; LMULMAX2-RV32D-NEXT: ret
-;
-; LMULMAX2-RV64D-LABEL: ctlz_v2i64:
-; LMULMAX2-RV64D: # %bb.0:
-; LMULMAX2-RV64D-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; LMULMAX2-RV64D-NEXT: vle64.v v8, (a0)
-; LMULMAX2-RV64D-NEXT: fsrmi a1, 1
-; LMULMAX2-RV64D-NEXT: vfcvt.f.xu.v v8, v8
-; LMULMAX2-RV64D-NEXT: fsrm a1
-; LMULMAX2-RV64D-NEXT: li a1, 52
-; LMULMAX2-RV64D-NEXT: vsrl.vx v8, v8, a1
-; LMULMAX2-RV64D-NEXT: li a1, 1086
-; LMULMAX2-RV64D-NEXT: vrsub.vx v8, v8, a1
-; LMULMAX2-RV64D-NEXT: li a1, 64
-; LMULMAX2-RV64D-NEXT: vminu.vx v8, v8, a1
-; LMULMAX2-RV64D-NEXT: vse64.v v8, (a0)
-; LMULMAX2-RV64D-NEXT: ret
-;
-; LMULMAX8-LABEL: ctlz_v2i64:
-; LMULMAX8: # %bb.0:
-; LMULMAX8-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; LMULMAX8-NEXT: vle64.v v8, (a0)
-; LMULMAX8-NEXT: fsrmi a1, 1
-; LMULMAX8-NEXT: vfcvt.f.xu.v v8, v8
-; LMULMAX8-NEXT: fsrm a1
-; LMULMAX8-NEXT: li a1, 52
-; LMULMAX8-NEXT: vsrl.vx v8, v8, a1
-; LMULMAX8-NEXT: li a1, 1086
-; LMULMAX8-NEXT: vrsub.vx v8, v8, a1
-; LMULMAX8-NEXT: li a1, 64
-; LMULMAX8-NEXT: vminu.vx v8, v8, a1
-; LMULMAX8-NEXT: vse64.v v8, (a0)
-; LMULMAX8-NEXT: ret
+; RV32I-LABEL: ctlz_v2i64:
+; RV32I: # %bb.0:
+; RV32I-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; RV32I-NEXT: vle64.v v8, (a0)
+; RV32I-NEXT: vsrl.vi v9, v8, 1
+; RV32I-NEXT: vor.vv v8, v8, v9
+; RV32I-NEXT: vsrl.vi v9, v8, 2
+; RV32I-NEXT: vor.vv v8, v8, v9
+; RV32I-NEXT: vsrl.vi v9, v8, 4
+; RV32I-NEXT: vor.vv v8, v8, v9
+; RV32I-NEXT: vsrl.vi v9, v8, 8
+; RV32I-NEXT: vor.vv v8, v8, v9
+; RV32I-NEXT: vsrl.vi v9, v8, 16
+; RV32I-NEXT: vor.vv v8, v8, v9
+; RV32I-NEXT: li a1, 32
+; RV32I-NEXT: vsrl.vx v9, v8, a1
+; RV32I-NEXT: vor.vv v8, v8, v9
+; RV32I-NEXT: vnot.v v8, v8
+; RV32I-NEXT: vsrl.vi v9, v8, 1
+; RV32I-NEXT: lui a1, 349525
+; RV32I-NEXT: addi a1, a1, 1365
+; RV32I-NEXT: vsetvli a2, zero, e32, m1, ta, ma
+; RV32I-NEXT: vmv.v.x v10, a1
+; RV32I-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; RV32I-NEXT: vand.vv v9, v9, v10
+; RV32I-NEXT: vsub.vv v8, v8, v9
+; RV32I-NEXT: lui a1, 209715
+; RV32I-NEXT: addi a1, a1, 819
+; RV32I-NEXT: vsetvli a2, zero, e32, m1, ta, ma
+; RV32I-NEXT: vmv.v.x v9, a1
+; RV32I-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; RV32I-NEXT: vand.vv v10, v8, v9
+; RV32I-NEXT: vsrl.vi v8, v8, 2
+; RV32I-NEXT: vand.vv v8, v8, v9
+; RV32I-NEXT: vadd.vv v8, v10, v8
+; RV32I-NEXT: vsrl.vi v9, v8, 4
+; RV32I-NEXT: vadd.vv v8, v8, v9
+; RV32I-NEXT: lui a1, 61681
+; RV32I-NEXT: addi a1, a1, -241
+; RV32I-NEXT: vsetvli a2, zero, e32, m1, ta, ma
+; RV32I-NEXT: vmv.v.x v9, a1
+; RV32I-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; RV32I-NEXT: vand.vv v8, v8, v9
+; RV32I-NEXT: lui a1, 4112
+; RV32I-NEXT: addi a1, a1, 257
+; RV32I-NEXT: vsetvli a2, zero, e32, m1, ta, ma
+; RV32I-NEXT: vmv.v.x v9, a1
+; RV32I-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; RV32I-NEXT: vmul.vv v8, v8, v9
+; RV32I-NEXT: li a1, 56
+; RV32I-NEXT: vsrl.vx v8, v8, a1
+; RV32I-NEXT: vse64.v v8, (a0)
+; RV32I-NEXT: ret
+;
+; RV64I-LABEL: ctlz_v2i64:
+; RV64I: # %bb.0:
+; RV64I-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; RV64I-NEXT: vle64.v v8, (a0)
+; RV64I-NEXT: vsrl.vi v9, v8, 1
+; RV64I-NEXT: vor.vv v8, v8, v9
+; RV64I-NEXT: vsrl.vi v9, v8, 2
+; RV64I-NEXT: vor.vv v8, v8, v9
+; RV64I-NEXT: vsrl.vi v9, v8, 4
+; RV64I-NEXT: vor.vv v8, v8, v9
+; RV64I-NEXT: vsrl.vi v9, v8, 8
+; RV64I-NEXT: vor.vv v8, v8, v9
+; RV64I-NEXT: vsrl.vi v9, v8, 16
+; RV64I-NEXT: vor.vv v8, v8, v9
+; RV64I-NEXT: li a1, 32
+; RV64I-NEXT: vsrl.vx v9, v8, a1
+; RV64I-NEXT: vor.vv v8, v8, v9
+; RV64I-NEXT: vnot.v v8, v8
+; RV64I-NEXT: vsrl.vi v9, v8, 1
+; RV64I-NEXT: lui a1, 349525
+; RV64I-NEXT: addiw a1, a1, 1365
+; RV64I-NEXT: slli a2, a1, 32
+; RV64I-NEXT: add a1, a1, a2
+; RV64I-NEXT: vand.vx v9, v9, a1
+; RV64I-NEXT: vsub.vv v8, v8, v9
+; RV64I-NEXT: lui a1, 209715
+; RV64I-NEXT: addiw a1, a1, 819
+; RV64I-NEXT: slli a2, a1, 32
+; RV64I-NEXT: add a1, a1, a2
+; RV64I-NEXT: vand.vx v9, v8, a1
+; RV64I-NEXT: vsrl.vi v8, v8, 2
+; RV64I-NEXT: vand.vx v8, v8, a1
+; RV64I-NEXT: vadd.vv v8, v9, v8
+; RV64I-NEXT: vsrl.vi v9, v8, 4
+; RV64I-NEXT: vadd.vv v8, v8, v9
+; RV64I-NEXT: lui a1, 61681
+; RV64I-NEXT: addiw a1, a1, -241
+; RV64I-NEXT: slli a2, a1, 32
+; RV64I-NEXT: add a1, a1, a2
+; RV64I-NEXT: vand.vx v8, v8, a1
+; RV64I-NEXT: lui a1, 4112
+; RV64I-NEXT: addiw a1, a1, 257
+; RV64I-NEXT: slli a2, a1, 32
+; RV64I-NEXT: add a1, a1, a2
+; RV64I-NEXT: vmul.vx v8, v8, a1
+; RV64I-NEXT: li a1, 56
+; RV64I-NEXT: vsrl.vx v8, v8, a1
+; RV64I-NEXT: vse64.v v8, (a0)
+; RV64I-NEXT: ret
+;
+; RV32F-LABEL: ctlz_v2i64:
+; RV32F: # %bb.0:
+; RV32F-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; RV32F-NEXT: vle64.v v8, (a0)
+; RV32F-NEXT: li a1, 190
+; RV32F-NEXT: vmv.v.x v9, a1
+; RV32F-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
+; RV32F-NEXT: fsrmi a1, 1
+; RV32F-NEXT: vfncvt.f.xu.w v10, v8
+; RV32F-NEXT: fsrm a1
+; RV32F-NEXT: vsrl.vi v8, v10, 23
+; RV32F-NEXT: vwsubu.wv v9, v9, v8
+; RV32F-NEXT: li a1, 64
+; RV32F-NEXT: vsetvli zero, zero, e64, m1, ta, ma
+; RV32F-NEXT: vminu.vx v8, v9, a1
+; RV32F-NEXT: vse64.v v8, (a0)
+; RV32F-NEXT: ret
+;
+; RV64F-LABEL: ctlz_v2i64:
+; RV64F: # %bb.0:
+; RV64F-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
+; RV64F-NEXT: vle64.v v8, (a0)
+; RV64F-NEXT: li a1, 190
+; RV64F-NEXT: vmv.v.x v9, a1
+; RV64F-NEXT: fsrmi a1, 1
+; RV64F-NEXT: vfncvt.f.xu.w v10, v8
+; RV64F-NEXT: fsrm a1
+; RV64F-NEXT: vsrl.vi v8, v10, 23
+; RV64F-NEXT: vwsubu.vv v10, v9, v8
+; RV64F-NEXT: li a1, 64
+; RV64F-NEXT: vsetvli zero, zero, e64, m1, ta, ma
+; RV64F-NEXT: vminu.vx v8, v10, a1
+; RV64F-NEXT: vse64.v v8, (a0)
+; RV64F-NEXT: ret
+;
+; RVD-LABEL: ctlz_v2i64:
+; RVD: # %bb.0:
+; RVD-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; RVD-NEXT: vle64.v v8, (a0)
+; RVD-NEXT: fsrmi a1, 1
+; RVD-NEXT: vfcvt.f.xu.v v8, v8
+; RVD-NEXT: fsrm a1
+; RVD-NEXT: li a1, 52
+; RVD-NEXT: vsrl.vx v8, v8, a1
+; RVD-NEXT: li a1, 1086
+; RVD-NEXT: vrsub.vx v8, v8, a1
+; RVD-NEXT: li a1, 64
+; RVD-NEXT: vminu.vx v8, v8, a1
+; RVD-NEXT: vse64.v v8, (a0)
+; RVD-NEXT: ret
;
; ZVBB-LABEL: ctlz_v2i64:
; ZVBB: # %bb.0:
@@ -626,95 +414,66 @@ define void @ctlz_v2i64(ptr %x, ptr %y) nounwind {
declare <2 x i64> @llvm.ctlz.v2i64(<2 x i64>, i1)
define void @ctlz_v32i8(ptr %x, ptr %y) nounwind {
-; LMULMAX2-LABEL: ctlz_v32i8:
-; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: li a1, 32
-; LMULMAX2-NEXT: vsetvli zero, a1, e8, m2, ta, ma
-; LMULMAX2-NEXT: vle8.v v8, (a0)
-; LMULMAX2-NEXT: vsrl.vi v10, v8, 1
-; LMULMAX2-NEXT: vor.vv v8, v8, v10
-; LMULMAX2-NEXT: vsrl.vi v10, v8, 2
-; LMULMAX2-NEXT: vor.vv v8, v8, v10
-; LMULMAX2-NEXT: vsrl.vi v10, v8, 4
-; LMULMAX2-NEXT: vor.vv v8, v8, v10
-; LMULMAX2-NEXT: vnot.v v8, v8
-; LMULMAX2-NEXT: vsrl.vi v10, v8, 1
-; LMULMAX2-NEXT: li a1, 85
-; LMULMAX2-NEXT: vand.vx v10, v10, a1
-; LMULMAX2-NEXT: vsub.vv v8, v8, v10
-; LMULMAX2-NEXT: li a1, 51
-; LMULMAX2-NEXT: vand.vx v10, v8, a1
-; LMULMAX2-NEXT: vsrl.vi v8, v8, 2
-; LMULMAX2-NEXT: vand.vx v8, v8, a1
-; LMULMAX2-NEXT: vadd.vv v8, v10, v8
-; LMULMAX2-NEXT: vsrl.vi v10, v8, 4
-; LMULMAX2-NEXT: vadd.vv v8, v8, v10
-; LMULMAX2-NEXT: vand.vi v8, v8, 15
-; LMULMAX2-NEXT: vse8.v v8, (a0)
-; LMULMAX2-NEXT: ret
-;
-; LMULMAX1-LABEL: ctlz_v32i8:
-; LMULMAX1: # %bb.0:
-; LMULMAX1-NEXT: vsetivli zero, 16, e8, m1, ta, ma
-; LMULMAX1-NEXT: addi a1, a0, 16
-; LMULMAX1-NEXT: vle8.v v8, (a1)
-; LMULMAX1-NEXT: vle8.v v9, (a0)
-; LMULMAX1-NEXT: vsrl.vi v10, v8, 1
-; LMULMAX1-NEXT: vor.vv v8, v8, v10
-; LMULMAX1-NEXT: vsrl.vi v10, v8, 2
-; LMULMAX1-NEXT: vor.vv v8, v8, v10
-; LMULMAX1-NEXT: vsrl.vi v10, v8, 4
-; LMULMAX1-NEXT: vor.vv v8, v8, v10
-; LMULMAX1-NEXT: vnot.v v8, v8
-; LMULMAX1-NEXT: vsrl.vi v10, v8, 1
-; LMULMAX1-NEXT: li a2, 85
-; LMULMAX1-NEXT: vand.vx v10, v10, a2
-; LMULMAX1-NEXT: vsub.vv v8, v8, v10
-; LMULMAX1-NEXT: li a3, 51
-; LMULMAX1-NEXT: vand.vx v10, v8, a3
-; LMULMAX1-NEXT: vsrl.vi v8, v8, 2
-; LMULMAX1-NEXT: vand.vx v8, v8, a3
-; LMULMAX1-NEXT: vadd.vv v8, v10, v8
-; LMULMAX1-NEXT: vsrl.vi v10, v8, 4
-; LMULMAX1-NEXT: vadd.vv v8, v8, v10
-; LMULMAX1-NEXT: vand.vi v8, v8, 15
-; LMULMAX1-NEXT: vsrl.vi v10, v9, 1
-; LMULMAX1-NEXT: vor.vv v9, v9, v10
-; LMULMAX1-NEXT: vsrl.vi v10, v9, 2
-; LMULMAX1-NEXT: vor.vv v9, v9, v10
-; LMULMAX1-NEXT: vsrl.vi v10, v9, 4
-; LMULMAX1-NEXT: vor.vv v9, v9, v10
-; LMULMAX1-NEXT: vnot.v v9, v9
-; LMULMAX1-NEXT: vsrl.vi v10, v9, 1
-; LMULMAX1-NEXT: vand.vx v10, v10, a2
-; LMULMAX1-NEXT: vsub.vv v9, v9, v10
-; LMULMAX1-NEXT: vand.vx v10, v9, a3
-; LMULMAX1-NEXT: vsrl.vi v9, v9, 2
-; LMULMAX1-NEXT: vand.vx v9, v9, a3
-; LMULMAX1-NEXT: vadd.vv v9, v10, v9
-; LMULMAX1-NEXT: vsrl.vi v10, v9, 4
-; LMULMAX1-NEXT: vadd.vv v9, v9, v10
-; LMULMAX1-NEXT: vand.vi v9, v9, 15
-; LMULMAX1-NEXT: vse8.v v9, (a0)
-; LMULMAX1-NEXT: vse8.v v8, (a1)
-; LMULMAX1-NEXT: ret
-;
-; LMULMAX8-LABEL: ctlz_v32i8:
-; LMULMAX8: # %bb.0:
-; LMULMAX8-NEXT: li a1, 32
-; LMULMAX8-NEXT: vsetvli zero, a1, e16, m4, ta, ma
-; LMULMAX8-NEXT: vle8.v v8, (a0)
-; LMULMAX8-NEXT: vzext.vf2 v12, v8
-; LMULMAX8-NEXT: vfwcvt.f.xu.v v16, v12
-; LMULMAX8-NEXT: vnsrl.wi v8, v16, 23
-; LMULMAX8-NEXT: vsetvli zero, zero, e8, m2, ta, ma
-; LMULMAX8-NEXT: vnsrl.wi v12, v8, 0
-; LMULMAX8-NEXT: li a1, 134
-; LMULMAX8-NEXT: vrsub.vx v8, v12, a1
-; LMULMAX8-NEXT: li a1, 8
-; LMULMAX8-NEXT: vminu.vx v8, v8, a1
-; LMULMAX8-NEXT: vse8.v v8, (a0)
-; LMULMAX8-NEXT: ret
+; RVI-LABEL: ctlz_v32i8:
+; RVI: # %bb.0:
+; RVI-NEXT: li a1, 32
+; RVI-NEXT: vsetvli zero, a1, e8, m2, ta, ma
+; RVI-NEXT: vle8.v v8, (a0)
+; RVI-NEXT: vsrl.vi v10, v8, 1
+; RVI-NEXT: vor.vv v8, v8, v10
+; RVI-NEXT: vsrl.vi v10, v8, 2
+; RVI-NEXT: vor.vv v8, v8, v10
+; RVI-NEXT: vsrl.vi v10, v8, 4
+; RVI-NEXT: vor.vv v8, v8, v10
+; RVI-NEXT: vnot.v v8, v8
+; RVI-NEXT: vsrl.vi v10, v8, 1
+; RVI-NEXT: li a1, 85
+; RVI-NEXT: vand.vx v10, v10, a1
+; RVI-NEXT: vsub.vv v8, v8, v10
+; RVI-NEXT: li a1, 51
+; RVI-NEXT: vand.vx v10, v8, a1
+; RVI-NEXT: vsrl.vi v8, v8, 2
+; RVI-NEXT: vand.vx v8, v8, a1
+; RVI-NEXT: vadd.vv v8, v10, v8
+; RVI-NEXT: vsrl.vi v10, v8, 4
+; RVI-NEXT: vadd.vv v8, v8, v10
+; RVI-NEXT: vand.vi v8, v8, 15
+; RVI-NEXT: vse8.v v8, (a0)
+; RVI-NEXT: ret
+;
+; RVF-LABEL: ctlz_v32i8:
+; RVF: # %bb.0:
+; RVF-NEXT: li a1, 32
+; RVF-NEXT: vsetvli zero, a1, e16, m4, ta, ma
+; RVF-NEXT: vle8.v v8, (a0)
+; RVF-NEXT: vzext.vf2 v12, v8
+; RVF-NEXT: vfwcvt.f.xu.v v16, v12
+; RVF-NEXT: vnsrl.wi v8, v16, 23
+; RVF-NEXT: vsetvli zero, zero, e8, m2, ta, ma
+; RVF-NEXT: vnsrl.wi v12, v8, 0
+; RVF-NEXT: li a1, 134
+; RVF-NEXT: vrsub.vx v8, v12, a1
+; RVF-NEXT: li a1, 8
+; RVF-NEXT: vminu.vx v8, v8, a1
+; RVF-NEXT: vse8.v v8, (a0)
+; RVF-NEXT: ret
+;
+; RVD-LABEL: ctlz_v32i8:
+; RVD: # %bb.0:
+; RVD-NEXT: li a1, 32
+; RVD-NEXT: vsetvli zero, a1, e16, m4, ta, ma
+; RVD-NEXT: vle8.v v8, (a0)
+; RVD-NEXT: vzext.vf2 v12, v8
+; RVD-NEXT: vfwcvt.f.xu.v v16, v12
+; RVD-NEXT: vnsrl.wi v8, v16, 23
+; RVD-NEXT: vsetvli zero, zero, e8, m2, ta, ma
+; RVD-NEXT: vnsrl.wi v12, v8, 0
+; RVD-NEXT: li a1, 134
+; RVD-NEXT: vrsub.vx v8, v12, a1
+; RVD-NEXT: li a1, 8
+; RVD-NEXT: vminu.vx v8, v8, a1
+; RVD-NEXT: vse8.v v8, (a0)
+; RVD-NEXT: ret
;
; ZVBB-LABEL: ctlz_v32i8:
; ZVBB: # %bb.0:
@@ -733,112 +492,66 @@ define void @ctlz_v32i8(ptr %x, ptr %y) nounwind {
declare <32 x i8> @llvm.ctlz.v32i8(<32 x i8>, i1)
define void @ctlz_v16i16(ptr %x, ptr %y) nounwind {
-; LMULMAX2-LABEL: ctlz_v16i16:
-; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; LMULMAX2-NEXT: vle16.v v8, (a0)
-; LMULMAX2-NEXT: vsrl.vi v10, v8, 1
-; LMULMAX2-NEXT: vor.vv v8, v8, v10
-; LMULMAX2-NEXT: vsrl.vi v10, v8, 2
-; LMULMAX2-NEXT: vor.vv v8, v8, v10
-; LMULMAX2-NEXT: vsrl.vi v10, v8, 4
-; LMULMAX2-NEXT: vor.vv v8, v8, v10
-; LMULMAX2-NEXT: vsrl.vi v10, v8, 8
-; LMULMAX2-NEXT: vor.vv v8, v8, v10
-; LMULMAX2-NEXT: vnot.v v8, v8
-; LMULMAX2-NEXT: vsrl.vi v10, v8, 1
-; LMULMAX2-NEXT: lui a1, 5
-; LMULMAX2-NEXT: addi a1, a1, 1365
-; LMULMAX2-NEXT: vand.vx v10, v10, a1
-; LMULMAX2-NEXT: vsub.vv v8, v8, v10
-; LMULMAX2-NEXT: lui a1, 3
-; LMULMAX2-NEXT: addi a1, a1, 819
-; LMULMAX2-NEXT: vand.vx v10, v8, a1
-; LMULMAX2-NEXT: vsrl.vi v8, v8, 2
-; LMULMAX2-NEXT: vand.vx v8, v8, a1
-; LMULMAX2-NEXT: vadd.vv v8, v10, v8
-; LMULMAX2-NEXT: vsrl.vi v10, v8, 4
-; LMULMAX2-NEXT: vadd.vv v8, v8, v10
-; LMULMAX2-NEXT: lui a1, 1
-; LMULMAX2-NEXT: addi a1, a1, -241
-; LMULMAX2-NEXT: vand.vx v8, v8, a1
-; LMULMAX2-NEXT: li a1, 257
-; LMULMAX2-NEXT: vmul.vx v8, v8, a1
-; LMULMAX2-NEXT: vsrl.vi v8, v8, 8
-; LMULMAX2-NEXT: vse16.v v8, (a0)
-; LMULMAX2-NEXT: ret
-;
-; LMULMAX1-LABEL: ctlz_v16i16:
-; LMULMAX1: # %bb.0:
-; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; LMULMAX1-NEXT: addi a1, a0, 16
-; LMULMAX1-NEXT: vle16.v v8, (a1)
-; LMULMAX1-NEXT: vle16.v v9, (a0)
-; LMULMAX1-NEXT: vsrl.vi v10, v8, 1
-; LMULMAX1-NEXT: vor.vv v8, v8, v10
-; LMULMAX1-NEXT: vsrl.vi v10, v8, 2
-; LMULMAX1-NEXT: vor.vv v8, v8, v10
-; LMULMAX1-NEXT: vsrl.vi v10, v8, 4
-; LMULMAX1-NEXT: vor.vv v8, v8, v10
-; LMULMAX1-NEXT: vsrl.vi v10, v8, 8
-; LMULMAX1-NEXT: vor.vv v8, v8, v10
-; LMULMAX1-NEXT: vnot.v v8, v8
-; LMULMAX1-NEXT: vsrl.vi v10, v8, 1
-; LMULMAX1-NEXT: lui a2, 5
-; LMULMAX1-NEXT: addi a2, a2, 1365
-; LMULMAX1-NEXT: vand.vx v10, v10, a2
-; LMULMAX1-NEXT: vsub.vv v8, v8, v10
-; LMULMAX1-NEXT: lui a3, 3
-; LMULMAX1-NEXT: addi a3, a3, 819
-; LMULMAX1-NEXT: vand.vx v10, v8, a3
-; LMULMAX1-NEXT: vsrl.vi v8, v8, 2
-; LMULMAX1-NEXT: vand.vx v8, v8, a3
-; LMULMAX1-NEXT: vadd.vv v8, v10, v8
-; LMULMAX1-NEXT: vsrl.vi v10, v8, 4
-; LMULMAX1-NEXT: vadd.vv v8, v8, v10
-; LMULMAX1-NEXT: lui a4, 1
-; LMULMAX1-NEXT: addi a4, a4, -241
-; LMULMAX1-NEXT: vand.vx v8, v8, a4
-; LMULMAX1-NEXT: li a5, 257
-; LMULMAX1-NEXT: vmul.vx v8, v8, a5
-; LMULMAX1-NEXT: vsrl.vi v8, v8, 8
-; LMULMAX1-NEXT: vsrl.vi v10, v9, 1
-; LMULMAX1-NEXT: vor.vv v9, v9, v10
-; LMULMAX1-NEXT: vsrl.vi v10, v9, 2
-; LMULMAX1-NEXT: vor.vv v9, v9, v10
-; LMULMAX1-NEXT: vsrl.vi v10, v9, 4
-; LMULMAX1-NEXT: vor.vv v9, v9, v10
-; LMULMAX1-NEXT: vsrl.vi v10, v9, 8
-; LMULMAX1-NEXT: vor.vv v9, v9, v10
-; LMULMAX1-NEXT: vnot.v v9, v9
-; LMULMAX1-NEXT: vsrl.vi v10, v9, 1
-; LMULMAX1-NEXT: vand.vx v10, v10, a2
-; LMULMAX1-NEXT: vsub.vv v9, v9, v10
-; LMULMAX1-NEXT: vand.vx v10, v9, a3
-; LMULMAX1-NEXT: vsrl.vi v9, v9, 2
-; LMULMAX1-NEXT: vand.vx v9, v9, a3
-; LMULMAX1-NEXT: vadd.vv v9, v10, v9
-; LMULMAX1-NEXT: vsrl.vi v10, v9, 4
-; LMULMAX1-NEXT: vadd.vv v9, v9, v10
-; LMULMAX1-NEXT: vand.vx v9, v9, a4
-; LMULMAX1-NEXT: vmul.vx v9, v9, a5
-; LMULMAX1-NEXT: vsrl.vi v9, v9, 8
-; LMULMAX1-NEXT: vse16.v v9, (a0)
-; LMULMAX1-NEXT: vse16.v v8, (a1)
-; LMULMAX1-NEXT: ret
-;
-; LMULMAX8-LABEL: ctlz_v16i16:
-; LMULMAX8: # %bb.0:
-; LMULMAX8-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; LMULMAX8-NEXT: vle16.v v8, (a0)
-; LMULMAX8-NEXT: vfwcvt.f.xu.v v12, v8
-; LMULMAX8-NEXT: vnsrl.wi v8, v12, 23
-; LMULMAX8-NEXT: li a1, 142
-; LMULMAX8-NEXT: vrsub.vx v8, v8, a1
-; LMULMAX8-NEXT: li a1, 16
-; LMULMAX8-NEXT: vminu.vx v8, v8, a1
-; LMULMAX8-NEXT: vse16.v v8, (a0)
-; LMULMAX8-NEXT: ret
+; RVI-LABEL: ctlz_v16i16:
+; RVI: # %bb.0:
+; RVI-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RVI-NEXT: vle16.v v8, (a0)
+; RVI-NEXT: vsrl.vi v10, v8, 1
+; RVI-NEXT: vor.vv v8, v8, v10
+; RVI-NEXT: vsrl.vi v10, v8, 2
+; RVI-NEXT: vor.vv v8, v8, v10
+; RVI-NEXT: vsrl.vi v10, v8, 4
+; RVI-NEXT: vor.vv v8, v8, v10
+; RVI-NEXT: vsrl.vi v10, v8, 8
+; RVI-NEXT: vor.vv v8, v8, v10
+; RVI-NEXT: vnot.v v8, v8
+; RVI-NEXT: vsrl.vi v10, v8, 1
+; RVI-NEXT: lui a1, 5
+; RVI-NEXT: addi a1, a1, 1365
+; RVI-NEXT: vand.vx v10, v10, a1
+; RVI-NEXT: vsub.vv v8, v8, v10
+; RVI-NEXT: lui a1, 3
+; RVI-NEXT: addi a1, a1, 819
+; RVI-NEXT: vand.vx v10, v8, a1
+; RVI-NEXT: vsrl.vi v8, v8, 2
+; RVI-NEXT: vand.vx v8, v8, a1
+; RVI-NEXT: vadd.vv v8, v10, v8
+; RVI-NEXT: vsrl.vi v10, v8, 4
+; RVI-NEXT: vadd.vv v8, v8, v10
+; RVI-NEXT: lui a1, 1
+; RVI-NEXT: addi a1, a1, -241
+; RVI-NEXT: vand.vx v8, v8, a1
+; RVI-NEXT: li a1, 257
+; RVI-NEXT: vmul.vx v8, v8, a1
+; RVI-NEXT: vsrl.vi v8, v8, 8
+; RVI-NEXT: vse16.v v8, (a0)
+; RVI-NEXT: ret
+;
+; RVF-LABEL: ctlz_v16i16:
+; RVF: # %bb.0:
+; RVF-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RVF-NEXT: vle16.v v8, (a0)
+; RVF-NEXT: vfwcvt.f.xu.v v12, v8
+; RVF-NEXT: vnsrl.wi v8, v12, 23
+; RVF-NEXT: li a1, 142
+; RVF-NEXT: vrsub.vx v8, v8, a1
+; RVF-NEXT: li a1, 16
+; RVF-NEXT: vminu.vx v8, v8, a1
+; RVF-NEXT: vse16.v v8, (a0)
+; RVF-NEXT: ret
+;
+; RVD-LABEL: ctlz_v16i16:
+; RVD: # %bb.0:
+; RVD-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RVD-NEXT: vle16.v v8, (a0)
+; RVD-NEXT: vfwcvt.f.xu.v v12, v8
+; RVD-NEXT: vnsrl.wi v8, v12, 23
+; RVD-NEXT: li a1, 142
+; RVD-NEXT: vrsub.vx v8, v8, a1
+; RVD-NEXT: li a1, 16
+; RVD-NEXT: vminu.vx v8, v8, a1
+; RVD-NEXT: vse16.v v8, (a0)
+; RVD-NEXT: ret
;
; ZVBB-LABEL: ctlz_v16i16:
; ZVBB: # %bb.0:
@@ -856,155 +569,72 @@ define void @ctlz_v16i16(ptr %x, ptr %y) nounwind {
declare <16 x i16> @llvm.ctlz.v16i16(<16 x i16>, i1)
define void @ctlz_v8i32(ptr %x, ptr %y) nounwind {
-; LMULMAX2-RV32I-LABEL: ctlz_v8i32:
-; LMULMAX2-RV32I: # %bb.0:
-; LMULMAX2-RV32I-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; LMULMAX2-RV32I-NEXT: vle32.v v8, (a0)
-; LMULMAX2-RV32I-NEXT: vsrl.vi v10, v8, 1
-; LMULMAX2-RV32I-NEXT: vor.vv v8, v8, v10
-; LMULMAX2-RV32I-NEXT: vsrl.vi v10, v8, 2
-; LMULMAX2-RV32I-NEXT: vor.vv v8, v8, v10
-; LMULMAX2-RV32I-NEXT: vsrl.vi v10, v8, 4
-; LMULMAX2-RV32I-NEXT: vor.vv v8, v8, v10
-; LMULMAX2-RV32I-NEXT: vsrl.vi v10, v8, 8
-; LMULMAX2-RV32I-NEXT: vor.vv v8, v8, v10
-; LMULMAX2-RV32I-NEXT: vsrl.vi v10, v8, 16
-; LMULMAX2-RV32I-NEXT: vor.vv v8, v8, v10
-; LMULMAX2-RV32I-NEXT: vnot.v v8, v8
-; LMULMAX2-RV32I-NEXT: vsrl.vi v10, v8, 1
-; LMULMAX2-RV32I-NEXT: lui a1, 349525
-; LMULMAX2-RV32I-NEXT: addi a1, a1, 1365
-; LMULMAX2-RV32I-NEXT: vand.vx v10, v10, a1
-; LMULMAX2-RV32I-NEXT: vsub.vv v8, v8, v10
-; LMULMAX2-RV32I-NEXT: lui a1, 209715
-; LMULMAX2-RV32I-NEXT: addi a1, a1, 819
-; LMULMAX2-RV32I-NEXT: vand.vx v10, v8, a1
-; LMULMAX2-RV32I-NEXT: vsrl.vi v8, v8, 2
-; LMULMAX2-RV32I-NEXT: vand.vx v8, v8, a1
-; LMULMAX2-RV32I-NEXT: vadd.vv v8, v10, v8
-; LMULMAX2-RV32I-NEXT: vsrl.vi v10, v8, 4
-; LMULMAX2-RV32I-NEXT: vadd.vv v8, v8, v10
-; LMULMAX2-RV32I-NEXT: lui a1, 61681
-; LMULMAX2-RV32I-NEXT: addi a1, a1, -241
-; LMULMAX2-RV32I-NEXT: vand.vx v8, v8, a1
-; LMULMAX2-RV32I-NEXT: lui a1, 4112
-; LMULMAX2-RV32I-NEXT: addi a1, a1, 257
-; LMULMAX2-RV32I-NEXT: vmul.vx v8, v8, a1
-; LMULMAX2-RV32I-NEXT: vsrl.vi v8, v8, 24
-; LMULMAX2-RV32I-NEXT: vse32.v v8, (a0)
-; LMULMAX2-RV32I-NEXT: ret
-;
-; LMULMAX2-RV64I-LABEL: ctlz_v8i32:
-; LMULMAX2-RV64I: # %bb.0:
-; LMULMAX2-RV64I-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; LMULMAX2-RV64I-NEXT: vle32.v v8, (a0)
-; LMULMAX2-RV64I-NEXT: vsrl.vi v10, v8, 1
-; LMULMAX2-RV64I-NEXT: vor.vv v8, v8, v10
-; LMULMAX2-RV64I-NEXT: vsrl.vi v10, v8, 2
-; LMULMAX2-RV64I-NEXT: vor.vv v8, v8, v10
-; LMULMAX2-RV64I-NEXT: vsrl.vi v10, v8, 4
-; LMULMAX2-RV64I-NEXT: vor.vv v8, v8, v10
-; LMULMAX2-RV64I-NEXT: vsrl.vi v10, v8, 8
-; LMULMAX2-RV64I-NEXT: vor.vv v8, v8, v10
-; LMULMAX2-RV64I-NEXT: vsrl.vi v10, v8, 16
-; LMULMAX2-RV64I-NEXT: vor.vv v8, v8, v10
-; LMULMAX2-RV64I-NEXT: vnot.v v8, v8
-; LMULMAX2-RV64I-NEXT: vsrl.vi v10, v8, 1
-; LMULMAX2-RV64I-NEXT: lui a1, 349525
-; LMULMAX2-RV64I-NEXT: addi a1, a1, 1365
-; LMULMAX2-RV64I-NEXT: vand.vx v10, v10, a1
-; LMULMAX2-RV64I-NEXT: vsub.vv v8, v8, v10
-; LMULMAX2-RV64I-NEXT: lui a1, 209715
-; LMULMAX2-RV64I-NEXT: addi a1, a1, 819
-; LMULMAX2-RV64I-NEXT: vand.vx v10, v8, a1
-; LMULMAX2-RV64I-NEXT: vsrl.vi v8, v8, 2
-; LMULMAX2-RV64I-NEXT: vand.vx v8, v8, a1
-; LMULMAX2-RV64I-NEXT: vadd.vv v8, v10, v8
-; LMULMAX2-RV64I-NEXT: vsrl.vi v10, v8, 4
-; LMULMAX2-RV64I-NEXT: vadd.vv v8, v8, v10
-; LMULMAX2-RV64I-NEXT: lui a1, 61681
-; LMULMAX2-RV64I-NEXT: addi a1, a1, -241
-; LMULMAX2-RV64I-NEXT: vand.vx v8, v8, a1
-; LMULMAX2-RV64I-NEXT: lui a1, 4112
-; LMULMAX2-RV64I-NEXT: addi a1, a1, 257
-; LMULMAX2-RV64I-NEXT: vmul.vx v8, v8, a1
-; LMULMAX2-RV64I-NEXT: vsrl.vi v8, v8, 24
-; LMULMAX2-RV64I-NEXT: vse32.v v8, (a0)
-; LMULMAX2-RV64I-NEXT: ret
-;
-; LMULMAX2-RV32F-LABEL: ctlz_v8i32:
-; LMULMAX2-RV32F: # %bb.0:
-; LMULMAX2-RV32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; LMULMAX2-RV32F-NEXT: vle32.v v8, (a0)
-; LMULMAX2-RV32F-NEXT: fsrmi a1, 1
-; LMULMAX2-RV32F-NEXT: vfcvt.f.xu.v v8, v8
-; LMULMAX2-RV32F-NEXT: fsrm a1
-; LMULMAX2-RV32F-NEXT: vsrl.vi v8, v8, 23
-; LMULMAX2-RV32F-NEXT: li a1, 158
-; LMULMAX2-RV32F-NEXT: vrsub.vx v8, v8, a1
-; LMULMAX2-RV32F-NEXT: li a1, 32
-; LMULMAX2-RV32F-NEXT: vminu.vx v8, v8, a1
-; LMULMAX2-RV32F-NEXT: vse32.v v8, (a0)
-; LMULMAX2-RV32F-NEXT: ret
-;
-; LMULMAX2-RV64F-LABEL: ctlz_v8i32:
-; LMULMAX2-RV64F: # %bb.0:
-; LMULMAX2-RV64F-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; LMULMAX2-RV64F-NEXT: vle32.v v8, (a0)
-; LMULMAX2-RV64F-NEXT: fsrmi a1, 1
-; LMULMAX2-RV64F-NEXT: vfcvt.f.xu.v v8, v8
-; LMULMAX2-RV64F-NEXT: fsrm a1
-; LMULMAX2-RV64F-NEXT: vsrl.vi v8, v8, 23
-; LMULMAX2-RV64F-NEXT: li a1, 158
-; LMULMAX2-RV64F-NEXT: vrsub.vx v8, v8, a1
-; LMULMAX2-RV64F-NEXT: li a1, 32
-; LMULMAX2-RV64F-NEXT: vminu.vx v8, v8, a1
-; LMULMAX2-RV64F-NEXT: vse32.v v8, (a0)
-; LMULMAX2-RV64F-NEXT: ret
-;
-; LMULMAX2-RV32D-LABEL: ctlz_v8i32:
-; LMULMAX2-RV32D: # %bb.0:
-; LMULMAX2-RV32D-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; LMULMAX2-RV32D-NEXT: vle32.v v8, (a0)
-; LMULMAX2-RV32D-NEXT: fsrmi a1, 1
-; LMULMAX2-RV32D-NEXT: vfcvt.f.xu.v v8, v8
-; LMULMAX2-RV32D-NEXT: fsrm a1
-; LMULMAX2-RV32D-NEXT: vsrl.vi v8, v8, 23
-; LMULMAX2-RV32D-NEXT: li a1, 158
-; LMULMAX2-RV32D-NEXT: vrsub.vx v8, v8, a1
-; LMULMAX2-RV32D-NEXT: li a1, 32
-; LMULMAX2-RV32D-NEXT: vminu.vx v8, v8, a1
-; LMULMAX2-RV32D-NEXT: vse32.v v8, (a0)
-; LMULMAX2-RV32D-NEXT: ret
-;
-; LMULMAX2-RV64D-LABEL: ctlz_v8i32:
-; LMULMAX2-RV64D: # %bb.0:
-; LMULMAX2-RV64D-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; LMULMAX2-RV64D-NEXT: vle32.v v8, (a0)
-; LMULMAX2-RV64D-NEXT: fsrmi a1, 1
-; LMULMAX2-RV64D-NEXT: vfcvt.f.xu.v v8, v8
-; LMULMAX2-RV64D-NEXT: fsrm a1
-; LMULMAX2-RV64D-NEXT: vsrl.vi v8, v8, 23
-; LMULMAX2-RV64D-NEXT: li a1, 158
-; LMULMAX2-RV64D-NEXT: vrsub.vx v8, v8, a1
-; LMULMAX2-RV64D-NEXT: li a1, 32
-; LMULMAX2-RV64D-NEXT: vminu.vx v8, v8, a1
-; LMULMAX2-RV64D-NEXT: vse32.v v8, (a0)
-; LMULMAX2-RV64D-NEXT: ret
-;
-; LMULMAX8-LABEL: ctlz_v8i32:
-; LMULMAX8: # %bb.0:
-; LMULMAX8-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; LMULMAX8-NEXT: vle32.v v8, (a0)
-; LMULMAX8-NEXT: vfwcvt.f.xu.v v12, v8
-; LMULMAX8-NEXT: li a1, 52
-; LMULMAX8-NEXT: vnsrl.wx v8, v12, a1
-; LMULMAX8-NEXT: li a1, 1054
-; LMULMAX8-NEXT: vrsub.vx v8, v8, a1
-; LMULMAX8-NEXT: li a1, 32
-; LMULMAX8-NEXT: vminu.vx v8, v8, a1
-; LMULMAX8-NEXT: vse32.v v8, (a0)
-; LMULMAX8-NEXT: ret
+; RVI-LABEL: ctlz_v8i32:
+; RVI: # %bb.0:
+; RVI-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; RVI-NEXT: vle32.v v8, (a0)
+; RVI-NEXT: vsrl.vi v10, v8, 1
+; RVI-NEXT: vor.vv v8, v8, v10
+; RVI-NEXT: vsrl.vi v10, v8, 2
+; RVI-NEXT: vor.vv v8, v8, v10
+; RVI-NEXT: vsrl.vi v10, v8, 4
+; RVI-NEXT: vor.vv v8, v8, v10
+; RVI-NEXT: vsrl.vi v10, v8, 8
+; RVI-NEXT: vor.vv v8, v8, v10
+; RVI-NEXT: vsrl.vi v10, v8, 16
+; RVI-NEXT: vor.vv v8, v8, v10
+; RVI-NEXT: vnot.v v8, v8
+; RVI-NEXT: vsrl.vi v10, v8, 1
+; RVI-NEXT: lui a1, 349525
+; RVI-NEXT: addi a1, a1, 1365
+; RVI-NEXT: vand.vx v10, v10, a1
+; RVI-NEXT: vsub.vv v8, v8, v10
+; RVI-NEXT: lui a1, 209715
+; RVI-NEXT: addi a1, a1, 819
+; RVI-NEXT: vand.vx v10, v8, a1
+; RVI-NEXT: vsrl.vi v8, v8, 2
+; RVI-NEXT: vand.vx v8, v8, a1
+; RVI-NEXT: vadd.vv v8, v10, v8
+; RVI-NEXT: vsrl.vi v10, v8, 4
+; RVI-NEXT: vadd.vv v8, v8, v10
+; RVI-NEXT: lui a1, 61681
+; RVI-NEXT: addi a1, a1, -241
+; RVI-NEXT: vand.vx v8, v8, a1
+; RVI-NEXT: lui a1, 4112
+; RVI-NEXT: addi a1, a1, 257
+; RVI-NEXT: vmul.vx v8, v8, a1
+; RVI-NEXT: vsrl.vi v8, v8, 24
+; RVI-NEXT: vse32.v v8, (a0)
+; RVI-NEXT: ret
+;
+; RVF-LABEL: ctlz_v8i32:
+; RVF: # %bb.0:
+; RVF-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; RVF-NEXT: vle32.v v8, (a0)
+; RVF-NEXT: fsrmi a1, 1
+; RVF-NEXT: vfcvt.f.xu.v v8, v8
+; RVF-NEXT: fsrm a1
+; RVF-NEXT: vsrl.vi v8, v8, 23
+; RVF-NEXT: li a1, 158
+; RVF-NEXT: vrsub.vx v8, v8, a1
+; RVF-NEXT: li a1, 32
+; RVF-NEXT: vminu.vx v8, v8, a1
+; RVF-NEXT: vse32.v v8, (a0)
+; RVF-NEXT: ret
+;
+; RVD-LABEL: ctlz_v8i32:
+; RVD: # %bb.0:
+; RVD-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; RVD-NEXT: vle32.v v8, (a0)
+; RVD-NEXT: vfwcvt.f.xu.v v12, v8
+; RVD-NEXT: li a1, 52
+; RVD-NEXT: vnsrl.wx v8, v12, a1
+; RVD-NEXT: li a1, 1054
+; RVD-NEXT: vrsub.vx v8, v8, a1
+; RVD-NEXT: li a1, 32
+; RVD-NEXT: vminu.vx v8, v8, a1
+; RVD-NEXT: vse32.v v8, (a0)
+; RVD-NEXT: ret
;
; ZVBB-LABEL: ctlz_v8i32:
; ZVBB: # %bb.0:
@@ -1022,192 +652,160 @@ define void @ctlz_v8i32(ptr %x, ptr %y) nounwind {
declare <8 x i32> @llvm.ctlz.v8i32(<8 x i32>, i1)
define void @ctlz_v4i64(ptr %x, ptr %y) nounwind {
-; LMULMAX2-RV32I-LABEL: ctlz_v4i64:
-; LMULMAX2-RV32I: # %bb.0:
-; LMULMAX2-RV32I-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; LMULMAX2-RV32I-NEXT: vle64.v v8, (a0)
-; LMULMAX2-RV32I-NEXT: vsrl.vi v10, v8, 1
-; LMULMAX2-RV32I-NEXT: vor.vv v8, v8, v10
-; LMULMAX2-RV32I-NEXT: vsrl.vi v10, v8, 2
-; LMULMAX2-RV32I-NEXT: vor.vv v8, v8, v10
-; LMULMAX2-RV32I-NEXT: vsrl.vi v10, v8, 4
-; LMULMAX2-RV32I-NEXT: vor.vv v8, v8, v10
-; LMULMAX2-RV32I-NEXT: vsrl.vi v10, v8, 8
-; LMULMAX2-RV32I-NEXT: vor.vv v8, v8, v10
-; LMULMAX2-RV32I-NEXT: vsrl.vi v10, v8, 16
-; LMULMAX2-RV32I-NEXT: vor.vv v8, v8, v10
-; LMULMAX2-RV32I-NEXT: li a1, 32
-; LMULMAX2-RV32I-NEXT: vsrl.vx v10, v8, a1
-; LMULMAX2-RV32I-NEXT: vor.vv v8, v8, v10
-; LMULMAX2-RV32I-NEXT: vnot.v v8, v8
-; LMULMAX2-RV32I-NEXT: vsrl.vi v10, v8, 1
-; LMULMAX2-RV32I-NEXT: lui a1, 349525
-; LMULMAX2-RV32I-NEXT: addi a1, a1, 1365
-; LMULMAX2-RV32I-NEXT: vsetvli a2, zero, e32, m2, ta, ma
-; LMULMAX2-RV32I-NEXT: vmv.v.x v12, a1
-; LMULMAX2-RV32I-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; LMULMAX2-RV32I-NEXT: vand.vv v10, v10, v12
-; LMULMAX2-RV32I-NEXT: vsub.vv v8, v8, v10
-; LMULMAX2-RV32I-NEXT: lui a1, 209715
-; LMULMAX2-RV32I-NEXT: addi a1, a1, 819
-; LMULMAX2-RV32I-NEXT: vsetvli a2, zero, e32, m2, ta, ma
-; LMULMAX2-RV32I-NEXT: vmv.v.x v10, a1
-; LMULMAX2-RV32I-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; LMULMAX2-RV32I-NEXT: vand.vv v12, v8, v10
-; LMULMAX2-RV32I-NEXT: vsrl.vi v8, v8, 2
-; LMULMAX2-RV32I-NEXT: vand.vv v8, v8, v10
-; LMULMAX2-RV32I-NEXT: vadd.vv v8, v12, v8
-; LMULMAX2-RV32I-NEXT: vsrl.vi v10, v8, 4
-; LMULMAX2-RV32I-NEXT: vadd.vv v8, v8, v10
-; LMULMAX2-RV32I-NEXT: lui a1, 61681
-; LMULMAX2-RV32I-NEXT: addi a1, a1, -241
-; LMULMAX2-RV32I-NEXT: vsetvli a2, zero, e32, m2, ta, ma
-; LMULMAX2-RV32I-NEXT: vmv.v.x v10, a1
-; LMULMAX2-RV32I-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; LMULMAX2-RV32I-NEXT: vand.vv v8, v8, v10
-; LMULMAX2-RV32I-NEXT: lui a1, 4112
-; LMULMAX2-RV32I-NEXT: addi a1, a1, 257
-; LMULMAX2-RV32I-NEXT: vsetvli a2, zero, e32, m2, ta, ma
-; LMULMAX2-RV32I-NEXT: vmv.v.x v10, a1
-; LMULMAX2-RV32I-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; LMULMAX2-RV32I-NEXT: vmul.vv v8, v8, v10
-; LMULMAX2-RV32I-NEXT: li a1, 56
-; LMULMAX2-RV32I-NEXT: vsrl.vx v8, v8, a1
-; LMULMAX2-RV32I-NEXT: vse64.v v8, (a0)
-; LMULMAX2-RV32I-NEXT: ret
-;
-; LMULMAX2-RV64I-LABEL: ctlz_v4i64:
-; LMULMAX2-RV64I: # %bb.0:
-; LMULMAX2-RV64I-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; LMULMAX2-RV64I-NEXT: vle64.v v8, (a0)
-; LMULMAX2-RV64I-NEXT: vsrl.vi v10, v8, 1
-; LMULMAX2-RV64I-NEXT: vor.vv v8, v8, v10
-; LMULMAX2-RV64I-NEXT: vsrl.vi v10, v8, 2
-; LMULMAX2-RV64I-NEXT: vor.vv v8, v8, v10
-; LMULMAX2-RV64I-NEXT: vsrl.vi v10, v8, 4
-; LMULMAX2-RV64I-NEXT: vor.vv v8, v8, v10
-; LMULMAX2-RV64I-NEXT: vsrl.vi v10, v8, 8
-; LMULMAX2-RV64I-NEXT: vor.vv v8, v8, v10
-; LMULMAX2-RV64I-NEXT: vsrl.vi v10, v8, 16
-; LMULMAX2-RV64I-NEXT: vor.vv v8, v8, v10
-; LMULMAX2-RV64I-NEXT: li a1, 32
-; LMULMAX2-RV64I-NEXT: vsrl.vx v10, v8, a1
-; LMULMAX2-RV64I-NEXT: vor.vv v8, v8, v10
-; LMULMAX2-RV64I-NEXT: vnot.v v8, v8
-; LMULMAX2-RV64I-NEXT: vsrl.vi v10, v8, 1
-; LMULMAX2-RV64I-NEXT: lui a1, 349525
-; LMULMAX2-RV64I-NEXT: addiw a1, a1, 1365
-; LMULMAX2-RV64I-NEXT: slli a2, a1, 32
-; LMULMAX2-RV64I-NEXT: add a1, a1, a2
-; LMULMAX2-RV64I-NEXT: vand.vx v10, v10, a1
-; LMULMAX2-RV64I-NEXT: vsub.vv v8, v8, v10
-; LMULMAX2-RV64I-NEXT: lui a1, 209715
-; LMULMAX2-RV64I-NEXT: addiw a1, a1, 819
-; LMULMAX2-RV64I-NEXT: slli a2, a1, 32
-; LMULMAX2-RV64I-NEXT: add a1, a1, a2
-; LMULMAX2-RV64I-NEXT: vand.vx v10, v8, a1
-; LMULMAX2-RV64I-NEXT: vsrl.vi v8, v8, 2
-; LMULMAX2-RV64I-NEXT: vand.vx v8, v8, a1
-; LMULMAX2-RV64I-NEXT: vadd.vv v8, v10, v8
-; LMULMAX2-RV64I-NEXT: vsrl.vi v10, v8, 4
-; LMULMAX2-RV64I-NEXT: vadd.vv v8, v8, v10
-; LMULMAX2-RV64I-NEXT: lui a1, 61681
-; LMULMAX2-RV64I-NEXT: addiw a1, a1, -241
-; LMULMAX2-RV64I-NEXT: slli a2, a1, 32
-; LMULMAX2-RV64I-NEXT: add a1, a1, a2
-; LMULMAX2-RV64I-NEXT: vand.vx v8, v8, a1
-; LMULMAX2-RV64I-NEXT: lui a1, 4112
-; LMULMAX2-RV64I-NEXT: addiw a1, a1, 257
-; LMULMAX2-RV64I-NEXT: slli a2, a1, 32
-; LMULMAX2-RV64I-NEXT: add a1, a1, a2
-; LMULMAX2-RV64I-NEXT: vmul.vx v8, v8, a1
-; LMULMAX2-RV64I-NEXT: li a1, 56
-; LMULMAX2-RV64I-NEXT: vsrl.vx v8, v8, a1
-; LMULMAX2-RV64I-NEXT: vse64.v v8, (a0)
-; LMULMAX2-RV64I-NEXT: ret
-;
-; LMULMAX2-RV32F-LABEL: ctlz_v4i64:
-; LMULMAX2-RV32F: # %bb.0:
-; LMULMAX2-RV32F-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; LMULMAX2-RV32F-NEXT: vle64.v v8, (a0)
-; LMULMAX2-RV32F-NEXT: li a1, 190
-; LMULMAX2-RV32F-NEXT: vmv.v.x v10, a1
-; LMULMAX2-RV32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma
-; LMULMAX2-RV32F-NEXT: fsrmi a1, 1
-; LMULMAX2-RV32F-NEXT: vfncvt.f.xu.w v12, v8
-; LMULMAX2-RV32F-NEXT: fsrm a1
-; LMULMAX2-RV32F-NEXT: vsrl.vi v8, v12, 23
-; LMULMAX2-RV32F-NEXT: vwsubu.wv v10, v10, v8
-; LMULMAX2-RV32F-NEXT: li a1, 64
-; LMULMAX2-RV32F-NEXT: vsetvli zero, zero, e64, m2, ta, ma
-; LMULMAX2-RV32F-NEXT: vminu.vx v8, v10, a1
-; LMULMAX2-RV32F-NEXT: vse64.v v8, (a0)
-; LMULMAX2-RV32F-NEXT: ret
-;
-; LMULMAX2-RV64F-LABEL: ctlz_v4i64:
-; LMULMAX2-RV64F: # %bb.0:
-; LMULMAX2-RV64F-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; LMULMAX2-RV64F-NEXT: vle64.v v8, (a0)
-; LMULMAX2-RV64F-NEXT: li a1, 190
-; LMULMAX2-RV64F-NEXT: vmv.v.x v10, a1
-; LMULMAX2-RV64F-NEXT: fsrmi a1, 1
-; LMULMAX2-RV64F-NEXT: vfncvt.f.xu.w v11, v8
-; LMULMAX2-RV64F-NEXT: fsrm a1
-; LMULMAX2-RV64F-NEXT: vsrl.vi v8, v11, 23
-; LMULMAX2-RV64F-NEXT: vwsubu.vv v12, v10, v8
-; LMULMAX2-RV64F-NEXT: li a1, 64
-; LMULMAX2-RV64F-NEXT: vsetvli zero, zero, e64, m2, ta, ma
-; LMULMAX2-RV64F-NEXT: vminu.vx v8, v12, a1
-; LMULMAX2-RV64F-NEXT: vse64.v v8, (a0)
-; LMULMAX2-RV64F-NEXT: ret
-;
-; LMULMAX2-RV32D-LABEL: ctlz_v4i64:
-; LMULMAX2-RV32D: # %bb.0:
-; LMULMAX2-RV32D-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; LMULMAX2-RV32D-NEXT: vle64.v v8, (a0)
-; LMULMAX2-RV32D-NEXT: fsrmi a1, 1
-; LMULMAX2-RV32D-NEXT: vfcvt.f.xu.v v8, v8
-; LMULMAX2-RV32D-NEXT: fsrm a1
-; LMULMAX2-RV32D-NEXT: li a1, 52
-; LMULMAX2-RV32D-NEXT: vsrl.vx v8, v8, a1
-; LMULMAX2-RV32D-NEXT: li a1, 1086
-; LMULMAX2-RV32D-NEXT: vrsub.vx v8, v8, a1
-; LMULMAX2-RV32D-NEXT: li a1, 64
-; LMULMAX2-RV32D-NEXT: vminu.vx v8, v8, a1
-; LMULMAX2-RV32D-NEXT: vse64.v v8, (a0)
-; LMULMAX2-RV32D-NEXT: ret
-;
-; LMULMAX2-RV64D-LABEL: ctlz_v4i64:
-; LMULMAX2-RV64D: # %bb.0:
-; LMULMAX2-RV64D-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; LMULMAX2-RV64D-NEXT: vle64.v v8, (a0)
-; LMULMAX2-RV64D-NEXT: fsrmi a1, 1
-; LMULMAX2-RV64D-NEXT: vfcvt.f.xu.v v8, v8
-; LMULMAX2-RV64D-NEXT: fsrm a1
-; LMULMAX2-RV64D-NEXT: li a1, 52
-; LMULMAX2-RV64D-NEXT: vsrl.vx v8, v8, a1
-; LMULMAX2-RV64D-NEXT: li a1, 1086
-; LMULMAX2-RV64D-NEXT: vrsub.vx v8, v8, a1
-; LMULMAX2-RV64D-NEXT: li a1, 64
-; LMULMAX2-RV64D-NEXT: vminu.vx v8, v8, a1
-; LMULMAX2-RV64D-NEXT: vse64.v v8, (a0)
-; LMULMAX2-RV64D-NEXT: ret
-;
-; LMULMAX8-LABEL: ctlz_v4i64:
-; LMULMAX8: # %bb.0:
-; LMULMAX8-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; LMULMAX8-NEXT: vle64.v v8, (a0)
-; LMULMAX8-NEXT: fsrmi a1, 1
-; LMULMAX8-NEXT: vfcvt.f.xu.v v8, v8
-; LMULMAX8-NEXT: fsrm a1
-; LMULMAX8-NEXT: li a1, 52
-; LMULMAX8-NEXT: vsrl.vx v8, v8, a1
-; LMULMAX8-NEXT: li a1, 1086
-; LMULMAX8-NEXT: vrsub.vx v8, v8, a1
-; LMULMAX8-NEXT: li a1, 64
-; LMULMAX8-NEXT: vminu.vx v8, v8, a1
-; LMULMAX8-NEXT: vse64.v v8, (a0)
-; LMULMAX8-NEXT: ret
+; RV32I-LABEL: ctlz_v4i64:
+; RV32I: # %bb.0:
+; RV32I-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; RV32I-NEXT: vle64.v v8, (a0)
+; RV32I-NEXT: vsrl.vi v10, v8, 1
+; RV32I-NEXT: vor.vv v8, v8, v10
+; RV32I-NEXT: vsrl.vi v10, v8, 2
+; RV32I-NEXT: vor.vv v8, v8, v10
+; RV32I-NEXT: vsrl.vi v10, v8, 4
+; RV32I-NEXT: vor.vv v8, v8, v10
+; RV32I-NEXT: vsrl.vi v10, v8, 8
+; RV32I-NEXT: vor.vv v8, v8, v10
+; RV32I-NEXT: vsrl.vi v10, v8, 16
+; RV32I-NEXT: vor.vv v8, v8, v10
+; RV32I-NEXT: li a1, 32
+; RV32I-NEXT: vsrl.vx v10, v8, a1
+; RV32I-NEXT: vor.vv v8, v8, v10
+; RV32I-NEXT: vnot.v v8, v8
+; RV32I-NEXT: vsrl.vi v10, v8, 1
+; RV32I-NEXT: lui a1, 349525
+; RV32I-NEXT: addi a1, a1, 1365
+; RV32I-NEXT: vsetvli a2, zero, e32, m2, ta, ma
+; RV32I-NEXT: vmv.v.x v12, a1
+; RV32I-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; RV32I-NEXT: vand.vv v10, v10, v12
+; RV32I-NEXT: vsub.vv v8, v8, v10
+; RV32I-NEXT: lui a1, 209715
+; RV32I-NEXT: addi a1, a1, 819
+; RV32I-NEXT: vsetvli a2, zero, e32, m2, ta, ma
+; RV32I-NEXT: vmv.v.x v10, a1
+; RV32I-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; RV32I-NEXT: vand.vv v12, v8, v10
+; RV32I-NEXT: vsrl.vi v8, v8, 2
+; RV32I-NEXT: vand.vv v8, v8, v10
+; RV32I-NEXT: vadd.vv v8, v12, v8
+; RV32I-NEXT: vsrl.vi v10, v8, 4
+; RV32I-NEXT: vadd.vv v8, v8, v10
+; RV32I-NEXT: lui a1, 61681
+; RV32I-NEXT: addi a1, a1, -241
+; RV32I-NEXT: vsetvli a2, zero, e32, m2, ta, ma
+; RV32I-NEXT: vmv.v.x v10, a1
+; RV32I-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; RV32I-NEXT: vand.vv v8, v8, v10
+; RV32I-NEXT: lui a1, 4112
+; RV32I-NEXT: addi a1, a1, 257
+; RV32I-NEXT: vsetvli a2, zero, e32, m2, ta, ma
+; RV32I-NEXT: vmv.v.x v10, a1
+; RV32I-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; RV32I-NEXT: vmul.vv v8, v8, v10
+; RV32I-NEXT: li a1, 56
+; RV32I-NEXT: vsrl.vx v8, v8, a1
+; RV32I-NEXT: vse64.v v8, (a0)
+; RV32I-NEXT: ret
+;
+; RV64I-LABEL: ctlz_v4i64:
+; RV64I: # %bb.0:
+; RV64I-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; RV64I-NEXT: vle64.v v8, (a0)
+; RV64I-NEXT: vsrl.vi v10, v8, 1
+; RV64I-NEXT: vor.vv v8, v8, v10
+; RV64I-NEXT: vsrl.vi v10, v8, 2
+; RV64I-NEXT: vor.vv v8, v8, v10
+; RV64I-NEXT: vsrl.vi v10, v8, 4
+; RV64I-NEXT: vor.vv v8, v8, v10
+; RV64I-NEXT: vsrl.vi v10, v8, 8
+; RV64I-NEXT: vor.vv v8, v8, v10
+; RV64I-NEXT: vsrl.vi v10, v8, 16
+; RV64I-NEXT: vor.vv v8, v8, v10
+; RV64I-NEXT: li a1, 32
+; RV64I-NEXT: vsrl.vx v10, v8, a1
+; RV64I-NEXT: vor.vv v8, v8, v10
+; RV64I-NEXT: vnot.v v8, v8
+; RV64I-NEXT: vsrl.vi v10, v8, 1
+; RV64I-NEXT: lui a1, 349525
+; RV64I-NEXT: addiw a1, a1, 1365
+; RV64I-NEXT: slli a2, a1, 32
+; RV64I-NEXT: add a1, a1, a2
+; RV64I-NEXT: vand.vx v10, v10, a1
+; RV64I-NEXT: vsub.vv v8, v8, v10
+; RV64I-NEXT: lui a1, 209715
+; RV64I-NEXT: addiw a1, a1, 819
+; RV64I-NEXT: slli a2, a1, 32
+; RV64I-NEXT: add a1, a1, a2
+; RV64I-NEXT: vand.vx v10, v8, a1
+; RV64I-NEXT: vsrl.vi v8, v8, 2
+; RV64I-NEXT: vand.vx v8, v8, a1
+; RV64I-NEXT: vadd.vv v8, v10, v8
+; RV64I-NEXT: vsrl.vi v10, v8, 4
+; RV64I-NEXT: vadd.vv v8, v8, v10
+; RV64I-NEXT: lui a1, 61681
+; RV64I-NEXT: addiw a1, a1, -241
+; RV64I-NEXT: slli a2, a1, 32
+; RV64I-NEXT: add a1, a1, a2
+; RV64I-NEXT: vand.vx v8, v8, a1
+; RV64I-NEXT: lui a1, 4112
+; RV64I-NEXT: addiw a1, a1, 257
+; RV64I-NEXT: slli a2, a1, 32
+; RV64I-NEXT: add a1, a1, a2
+; RV64I-NEXT: vmul.vx v8, v8, a1
+; RV64I-NEXT: li a1, 56
+; RV64I-NEXT: vsrl.vx v8, v8, a1
+; RV64I-NEXT: vse64.v v8, (a0)
+; RV64I-NEXT: ret
+;
+; RV32F-LABEL: ctlz_v4i64:
+; RV32F: # %bb.0:
+; RV32F-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; RV32F-NEXT: vle64.v v8, (a0)
+; RV32F-NEXT: li a1, 190
+; RV32F-NEXT: vmv.v.x v10, a1
+; RV32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; RV32F-NEXT: fsrmi a1, 1
+; RV32F-NEXT: vfncvt.f.xu.w v12, v8
+; RV32F-NEXT: fsrm a1
+; RV32F-NEXT: vsrl.vi v8, v12, 23
+; RV32F-NEXT: vwsubu.wv v10, v10, v8
+; RV32F-NEXT: li a1, 64
+; RV32F-NEXT: vsetvli zero, zero, e64, m2, ta, ma
+; RV32F-NEXT: vminu.vx v8, v10, a1
+; RV32F-NEXT: vse64.v v8, (a0)
+; RV32F-NEXT: ret
+;
+; RV64F-LABEL: ctlz_v4i64:
+; RV64F: # %bb.0:
+; RV64F-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; RV64F-NEXT: vle64.v v8, (a0)
+; RV64F-NEXT: li a1, 190
+; RV64F-NEXT: vmv.v.x v10, a1
+; RV64F-NEXT: fsrmi a1, 1
+; RV64F-NEXT: vfncvt.f.xu.w v11, v8
+; RV64F-NEXT: fsrm a1
+; RV64F-NEXT: vsrl.vi v8, v11, 23
+; RV64F-NEXT: vwsubu.vv v12, v10, v8
+; RV64F-NEXT: li a1, 64
+; RV64F-NEXT: vsetvli zero, zero, e64, m2, ta, ma
+; RV64F-NEXT: vminu.vx v8, v12, a1
+; RV64F-NEXT: vse64.v v8, (a0)
+; RV64F-NEXT: ret
+;
+; RVD-LABEL: ctlz_v4i64:
+; RVD: # %bb.0:
+; RVD-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; RVD-NEXT: vle64.v v8, (a0)
+; RVD-NEXT: fsrmi a1, 1
+; RVD-NEXT: vfcvt.f.xu.v v8, v8
+; RVD-NEXT: fsrm a1
+; RVD-NEXT: li a1, 52
+; RVD-NEXT: vsrl.vx v8, v8, a1
+; RVD-NEXT: li a1, 1086
+; RVD-NEXT: vrsub.vx v8, v8, a1
+; RVD-NEXT: li a1, 64
+; RVD-NEXT: vminu.vx v8, v8, a1
+; RVD-NEXT: vse64.v v8, (a0)
+; RVD-NEXT: ret
;
; ZVBB-LABEL: ctlz_v4i64:
; ZVBB: # %bb.0:
@@ -1225,45 +823,59 @@ define void @ctlz_v4i64(ptr %x, ptr %y) nounwind {
declare <4 x i64> @llvm.ctlz.v4i64(<4 x i64>, i1)
define void @ctlz_zero_undef_v16i8(ptr %x, ptr %y) nounwind {
-; CHECK-LABEL: ctlz_zero_undef_v16i8:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma
-; CHECK-NEXT: vle8.v v8, (a0)
-; CHECK-NEXT: vsrl.vi v9, v8, 1
-; CHECK-NEXT: vor.vv v8, v8, v9
-; CHECK-NEXT: vsrl.vi v9, v8, 2
-; CHECK-NEXT: vor.vv v8, v8, v9
-; CHECK-NEXT: vsrl.vi v9, v8, 4
-; CHECK-NEXT: vor.vv v8, v8, v9
-; CHECK-NEXT: vnot.v v8, v8
-; CHECK-NEXT: vsrl.vi v9, v8, 1
-; CHECK-NEXT: li a1, 85
-; CHECK-NEXT: vand.vx v9, v9, a1
-; CHECK-NEXT: vsub.vv v8, v8, v9
-; CHECK-NEXT: li a1, 51
-; CHECK-NEXT: vand.vx v9, v8, a1
-; CHECK-NEXT: vsrl.vi v8, v8, 2
-; CHECK-NEXT: vand.vx v8, v8, a1
-; CHECK-NEXT: vadd.vv v8, v9, v8
-; CHECK-NEXT: vsrl.vi v9, v8, 4
-; CHECK-NEXT: vadd.vv v8, v8, v9
-; CHECK-NEXT: vand.vi v8, v8, 15
-; CHECK-NEXT: vse8.v v8, (a0)
-; CHECK-NEXT: ret
-;
-; LMULMAX8-LABEL: ctlz_zero_undef_v16i8:
-; LMULMAX8: # %bb.0:
-; LMULMAX8-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; LMULMAX8-NEXT: vle8.v v8, (a0)
-; LMULMAX8-NEXT: vzext.vf2 v10, v8
-; LMULMAX8-NEXT: vfwcvt.f.xu.v v12, v10
-; LMULMAX8-NEXT: vnsrl.wi v8, v12, 23
-; LMULMAX8-NEXT: vsetvli zero, zero, e8, m1, ta, ma
-; LMULMAX8-NEXT: vnsrl.wi v10, v8, 0
-; LMULMAX8-NEXT: li a1, 134
-; LMULMAX8-NEXT: vrsub.vx v8, v10, a1
-; LMULMAX8-NEXT: vse8.v v8, (a0)
-; LMULMAX8-NEXT: ret
+; RVI-LABEL: ctlz_zero_undef_v16i8:
+; RVI: # %bb.0:
+; RVI-NEXT: vsetivli zero, 16, e8, m1, ta, ma
+; RVI-NEXT: vle8.v v8, (a0)
+; RVI-NEXT: vsrl.vi v9, v8, 1
+; RVI-NEXT: vor.vv v8, v8, v9
+; RVI-NEXT: vsrl.vi v9, v8, 2
+; RVI-NEXT: vor.vv v8, v8, v9
+; RVI-NEXT: vsrl.vi v9, v8, 4
+; RVI-NEXT: vor.vv v8, v8, v9
+; RVI-NEXT: vnot.v v8, v8
+; RVI-NEXT: vsrl.vi v9, v8, 1
+; RVI-NEXT: li a1, 85
+; RVI-NEXT: vand.vx v9, v9, a1
+; RVI-NEXT: vsub.vv v8, v8, v9
+; RVI-NEXT: li a1, 51
+; RVI-NEXT: vand.vx v9, v8, a1
+; RVI-NEXT: vsrl.vi v8, v8, 2
+; RVI-NEXT: vand.vx v8, v8, a1
+; RVI-NEXT: vadd.vv v8, v9, v8
+; RVI-NEXT: vsrl.vi v9, v8, 4
+; RVI-NEXT: vadd.vv v8, v8, v9
+; RVI-NEXT: vand.vi v8, v8, 15
+; RVI-NEXT: vse8.v v8, (a0)
+; RVI-NEXT: ret
+;
+; RVF-LABEL: ctlz_zero_undef_v16i8:
+; RVF: # %bb.0:
+; RVF-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RVF-NEXT: vle8.v v8, (a0)
+; RVF-NEXT: vzext.vf2 v10, v8
+; RVF-NEXT: vfwcvt.f.xu.v v12, v10
+; RVF-NEXT: vnsrl.wi v8, v12, 23
+; RVF-NEXT: vsetvli zero, zero, e8, m1, ta, ma
+; RVF-NEXT: vnsrl.wi v10, v8, 0
+; RVF-NEXT: li a1, 134
+; RVF-NEXT: vrsub.vx v8, v10, a1
+; RVF-NEXT: vse8.v v8, (a0)
+; RVF-NEXT: ret
+;
+; RVD-LABEL: ctlz_zero_undef_v16i8:
+; RVD: # %bb.0:
+; RVD-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RVD-NEXT: vle8.v v8, (a0)
+; RVD-NEXT: vzext.vf2 v10, v8
+; RVD-NEXT: vfwcvt.f.xu.v v12, v10
+; RVD-NEXT: vnsrl.wi v8, v12, 23
+; RVD-NEXT: vsetvli zero, zero, e8, m1, ta, ma
+; RVD-NEXT: vnsrl.wi v10, v8, 0
+; RVD-NEXT: li a1, 134
+; RVD-NEXT: vrsub.vx v8, v10, a1
+; RVD-NEXT: vse8.v v8, (a0)
+; RVD-NEXT: ret
;
; ZVBB-LABEL: ctlz_zero_undef_v16i8:
; ZVBB: # %bb.0:
@@ -1280,165 +892,62 @@ define void @ctlz_zero_undef_v16i8(ptr %x, ptr %y) nounwind {
}
define void @ctlz_zero_undef_v8i16(ptr %x, ptr %y) nounwind {
-; LMULMAX2-RV32I-LABEL: ctlz_zero_undef_v8i16:
-; LMULMAX2-RV32I: # %bb.0:
-; LMULMAX2-RV32I-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; LMULMAX2-RV32I-NEXT: vle16.v v8, (a0)
-; LMULMAX2-RV32I-NEXT: vsrl.vi v9, v8, 1
-; LMULMAX2-RV32I-NEXT: vor.vv v8, v8, v9
-; LMULMAX2-RV32I-NEXT: vsrl.vi v9, v8, 2
-; LMULMAX2-RV32I-NEXT: vor.vv v8, v8, v9
-; LMULMAX2-RV32I-NEXT: vsrl.vi v9, v8, 4
-; LMULMAX2-RV32I-NEXT: vor.vv v8, v8, v9
-; LMULMAX2-RV32I-NEXT: vsrl.vi v9, v8, 8
-; LMULMAX2-RV32I-NEXT: vor.vv v8, v8, v9
-; LMULMAX2-RV32I-NEXT: vnot.v v8, v8
-; LMULMAX2-RV32I-NEXT: vsrl.vi v9, v8, 1
-; LMULMAX2-RV32I-NEXT: lui a1, 5
-; LMULMAX2-RV32I-NEXT: addi a1, a1, 1365
-; LMULMAX2-RV32I-NEXT: vand.vx v9, v9, a1
-; LMULMAX2-RV32I-NEXT: vsub.vv v8, v8, v9
-; LMULMAX2-RV32I-NEXT: lui a1, 3
-; LMULMAX2-RV32I-NEXT: addi a1, a1, 819
-; LMULMAX2-RV32I-NEXT: vand.vx v9, v8, a1
-; LMULMAX2-RV32I-NEXT: vsrl.vi v8, v8, 2
-; LMULMAX2-RV32I-NEXT: vand.vx v8, v8, a1
-; LMULMAX2-RV32I-NEXT: vadd.vv v8, v9, v8
-; LMULMAX2-RV32I-NEXT: vsrl.vi v9, v8, 4
-; LMULMAX2-RV32I-NEXT: vadd.vv v8, v8, v9
-; LMULMAX2-RV32I-NEXT: lui a1, 1
-; LMULMAX2-RV32I-NEXT: addi a1, a1, -241
-; LMULMAX2-RV32I-NEXT: vand.vx v8, v8, a1
-; LMULMAX2-RV32I-NEXT: li a1, 257
-; LMULMAX2-RV32I-NEXT: vmul.vx v8, v8, a1
-; LMULMAX2-RV32I-NEXT: vsrl.vi v8, v8, 8
-; LMULMAX2-RV32I-NEXT: vse16.v v8, (a0)
-; LMULMAX2-RV32I-NEXT: ret
-;
-; LMULMAX2-RV64I-LABEL: ctlz_zero_undef_v8i16:
-; LMULMAX2-RV64I: # %bb.0:
-; LMULMAX2-RV64I-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; LMULMAX2-RV64I-NEXT: vle16.v v8, (a0)
-; LMULMAX2-RV64I-NEXT: vsrl.vi v9, v8, 1
-; LMULMAX2-RV64I-NEXT: vor.vv v8, v8, v9
-; LMULMAX2-RV64I-NEXT: vsrl.vi v9, v8, 2
-; LMULMAX2-RV64I-NEXT: vor.vv v8, v8, v9
-; LMULMAX2-RV64I-NEXT: vsrl.vi v9, v8, 4
-; LMULMAX2-RV64I-NEXT: vor.vv v8, v8, v9
-; LMULMAX2-RV64I-NEXT: vsrl.vi v9, v8, 8
-; LMULMAX2-RV64I-NEXT: vor.vv v8, v8, v9
-; LMULMAX2-RV64I-NEXT: vnot.v v8, v8
-; LMULMAX2-RV64I-NEXT: vsrl.vi v9, v8, 1
-; LMULMAX2-RV64I-NEXT: lui a1, 5
-; LMULMAX2-RV64I-NEXT: addi a1, a1, 1365
-; LMULMAX2-RV64I-NEXT: vand.vx v9, v9, a1
-; LMULMAX2-RV64I-NEXT: vsub.vv v8, v8, v9
-; LMULMAX2-RV64I-NEXT: lui a1, 3
-; LMULMAX2-RV64I-NEXT: addi a1, a1, 819
-; LMULMAX2-RV64I-NEXT: vand.vx v9, v8, a1
-; LMULMAX2-RV64I-NEXT: vsrl.vi v8, v8, 2
-; LMULMAX2-RV64I-NEXT: vand.vx v8, v8, a1
-; LMULMAX2-RV64I-NEXT: vadd.vv v8, v9, v8
-; LMULMAX2-RV64I-NEXT: vsrl.vi v9, v8, 4
-; LMULMAX2-RV64I-NEXT: vadd.vv v8, v8, v9
-; LMULMAX2-RV64I-NEXT: lui a1, 1
-; LMULMAX2-RV64I-NEXT: addi a1, a1, -241
-; LMULMAX2-RV64I-NEXT: vand.vx v8, v8, a1
-; LMULMAX2-RV64I-NEXT: li a1, 257
-; LMULMAX2-RV64I-NEXT: vmul.vx v8, v8, a1
-; LMULMAX2-RV64I-NEXT: vsrl.vi v8, v8, 8
-; LMULMAX2-RV64I-NEXT: vse16.v v8, (a0)
-; LMULMAX2-RV64I-NEXT: ret
-;
-; LMULMAX1-LABEL: ctlz_zero_undef_v8i16:
-; LMULMAX1: # %bb.0:
-; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; LMULMAX1-NEXT: vle16.v v8, (a0)
-; LMULMAX1-NEXT: vsrl.vi v9, v8, 1
-; LMULMAX1-NEXT: vor.vv v8, v8, v9
-; LMULMAX1-NEXT: vsrl.vi v9, v8, 2
-; LMULMAX1-NEXT: vor.vv v8, v8, v9
-; LMULMAX1-NEXT: vsrl.vi v9, v8, 4
-; LMULMAX1-NEXT: vor.vv v8, v8, v9
-; LMULMAX1-NEXT: vsrl.vi v9, v8, 8
-; LMULMAX1-NEXT: vor.vv v8, v8, v9
-; LMULMAX1-NEXT: vnot.v v8, v8
-; LMULMAX1-NEXT: vsrl.vi v9, v8, 1
-; LMULMAX1-NEXT: lui a1, 5
-; LMULMAX1-NEXT: addi a1, a1, 1365
-; LMULMAX1-NEXT: vand.vx v9, v9, a1
-; LMULMAX1-NEXT: vsub.vv v8, v8, v9
-; LMULMAX1-NEXT: lui a1, 3
-; LMULMAX1-NEXT: addi a1, a1, 819
-; LMULMAX1-NEXT: vand.vx v9, v8, a1
-; LMULMAX1-NEXT: vsrl.vi v8, v8, 2
-; LMULMAX1-NEXT: vand.vx v8, v8, a1
-; LMULMAX1-NEXT: vadd.vv v8, v9, v8
-; LMULMAX1-NEXT: vsrl.vi v9, v8, 4
-; LMULMAX1-NEXT: vadd.vv v8, v8, v9
-; LMULMAX1-NEXT: lui a1, 1
-; LMULMAX1-NEXT: addi a1, a1, -241
-; LMULMAX1-NEXT: vand.vx v8, v8, a1
-; LMULMAX1-NEXT: li a1, 257
-; LMULMAX1-NEXT: vmul.vx v8, v8, a1
-; LMULMAX1-NEXT: vsrl.vi v8, v8, 8
-; LMULMAX1-NEXT: vse16.v v8, (a0)
-; LMULMAX1-NEXT: ret
-;
-; LMULMAX2-RV32F-LABEL: ctlz_zero_undef_v8i16:
-; LMULMAX2-RV32F: # %bb.0:
-; LMULMAX2-RV32F-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; LMULMAX2-RV32F-NEXT: vle16.v v8, (a0)
-; LMULMAX2-RV32F-NEXT: vfwcvt.f.xu.v v10, v8
-; LMULMAX2-RV32F-NEXT: vnsrl.wi v8, v10, 23
-; LMULMAX2-RV32F-NEXT: li a1, 142
-; LMULMAX2-RV32F-NEXT: vrsub.vx v8, v8, a1
-; LMULMAX2-RV32F-NEXT: vse16.v v8, (a0)
-; LMULMAX2-RV32F-NEXT: ret
-;
-; LMULMAX2-RV64F-LABEL: ctlz_zero_undef_v8i16:
-; LMULMAX2-RV64F: # %bb.0:
-; LMULMAX2-RV64F-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; LMULMAX2-RV64F-NEXT: vle16.v v8, (a0)
-; LMULMAX2-RV64F-NEXT: vfwcvt.f.xu.v v10, v8
-; LMULMAX2-RV64F-NEXT: vnsrl.wi v8, v10, 23
-; LMULMAX2-RV64F-NEXT: li a1, 142
-; LMULMAX2-RV64F-NEXT: vrsub.vx v8, v8, a1
-; LMULMAX2-RV64F-NEXT: vse16.v v8, (a0)
-; LMULMAX2-RV64F-NEXT: ret
-;
-; LMULMAX2-RV32D-LABEL: ctlz_zero_undef_v8i16:
-; LMULMAX2-RV32D: # %bb.0:
-; LMULMAX2-RV32D-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; LMULMAX2-RV32D-NEXT: vle16.v v8, (a0)
-; LMULMAX2-RV32D-NEXT: vfwcvt.f.xu.v v10, v8
-; LMULMAX2-RV32D-NEXT: vnsrl.wi v8, v10, 23
-; LMULMAX2-RV32D-NEXT: li a1, 142
-; LMULMAX2-RV32D-NEXT: vrsub.vx v8, v8, a1
-; LMULMAX2-RV32D-NEXT: vse16.v v8, (a0)
-; LMULMAX2-RV32D-NEXT: ret
-;
-; LMULMAX2-RV64D-LABEL: ctlz_zero_undef_v8i16:
-; LMULMAX2-RV64D: # %bb.0:
-; LMULMAX2-RV64D-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; LMULMAX2-RV64D-NEXT: vle16.v v8, (a0)
-; LMULMAX2-RV64D-NEXT: vfwcvt.f.xu.v v10, v8
-; LMULMAX2-RV64D-NEXT: vnsrl.wi v8, v10, 23
-; LMULMAX2-RV64D-NEXT: li a1, 142
-; LMULMAX2-RV64D-NEXT: vrsub.vx v8, v8, a1
-; LMULMAX2-RV64D-NEXT: vse16.v v8, (a0)
-; LMULMAX2-RV64D-NEXT: ret
-;
-; LMULMAX8-LABEL: ctlz_zero_undef_v8i16:
-; LMULMAX8: # %bb.0:
-; LMULMAX8-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; LMULMAX8-NEXT: vle16.v v8, (a0)
-; LMULMAX8-NEXT: vfwcvt.f.xu.v v10, v8
-; LMULMAX8-NEXT: vnsrl.wi v8, v10, 23
-; LMULMAX8-NEXT: li a1, 142
-; LMULMAX8-NEXT: vrsub.vx v8, v8, a1
-; LMULMAX8-NEXT: vse16.v v8, (a0)
-; LMULMAX8-NEXT: ret
+; RVI-LABEL: ctlz_zero_undef_v8i16:
+; RVI: # %bb.0:
+; RVI-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RVI-NEXT: vle16.v v8, (a0)
+; RVI-NEXT: vsrl.vi v9, v8, 1
+; RVI-NEXT: vor.vv v8, v8, v9
+; RVI-NEXT: vsrl.vi v9, v8, 2
+; RVI-NEXT: vor.vv v8, v8, v9
+; RVI-NEXT: vsrl.vi v9, v8, 4
+; RVI-NEXT: vor.vv v8, v8, v9
+; RVI-NEXT: vsrl.vi v9, v8, 8
+; RVI-NEXT: vor.vv v8, v8, v9
+; RVI-NEXT: vnot.v v8, v8
+; RVI-NEXT: vsrl.vi v9, v8, 1
+; RVI-NEXT: lui a1, 5
+; RVI-NEXT: addi a1, a1, 1365
+; RVI-NEXT: vand.vx v9, v9, a1
+; RVI-NEXT: vsub.vv v8, v8, v9
+; RVI-NEXT: lui a1, 3
+; RVI-NEXT: addi a1, a1, 819
+; RVI-NEXT: vand.vx v9, v8, a1
+; RVI-NEXT: vsrl.vi v8, v8, 2
+; RVI-NEXT: vand.vx v8, v8, a1
+; RVI-NEXT: vadd.vv v8, v9, v8
+; RVI-NEXT: vsrl.vi v9, v8, 4
+; RVI-NEXT: vadd.vv v8, v8, v9
+; RVI-NEXT: lui a1, 1
+; RVI-NEXT: addi a1, a1, -241
+; RVI-NEXT: vand.vx v8, v8, a1
+; RVI-NEXT: li a1, 257
+; RVI-NEXT: vmul.vx v8, v8, a1
+; RVI-NEXT: vsrl.vi v8, v8, 8
+; RVI-NEXT: vse16.v v8, (a0)
+; RVI-NEXT: ret
+;
+; RVF-LABEL: ctlz_zero_undef_v8i16:
+; RVF: # %bb.0:
+; RVF-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RVF-NEXT: vle16.v v8, (a0)
+; RVF-NEXT: vfwcvt.f.xu.v v10, v8
+; RVF-NEXT: vnsrl.wi v8, v10, 23
+; RVF-NEXT: li a1, 142
+; RVF-NEXT: vrsub.vx v8, v8, a1
+; RVF-NEXT: vse16.v v8, (a0)
+; RVF-NEXT: ret
+;
+; RVD-LABEL: ctlz_zero_undef_v8i16:
+; RVD: # %bb.0:
+; RVD-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RVD-NEXT: vle16.v v8, (a0)
+; RVD-NEXT: vfwcvt.f.xu.v v10, v8
+; RVD-NEXT: vnsrl.wi v8, v10, 23
+; RVD-NEXT: li a1, 142
+; RVD-NEXT: vrsub.vx v8, v8, a1
+; RVD-NEXT: vse16.v v8, (a0)
+; RVD-NEXT: ret
;
; ZVBB-LABEL: ctlz_zero_undef_v8i16:
; ZVBB: # %bb.0:
@@ -1455,143 +964,68 @@ define void @ctlz_zero_undef_v8i16(ptr %x, ptr %y) nounwind {
}
define void @ctlz_zero_undef_v4i32(ptr %x, ptr %y) nounwind {
-; LMULMAX2-RV32I-LABEL: ctlz_zero_undef_v4i32:
-; LMULMAX2-RV32I: # %bb.0:
-; LMULMAX2-RV32I-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; LMULMAX2-RV32I-NEXT: vle32.v v8, (a0)
-; LMULMAX2-RV32I-NEXT: vsrl.vi v9, v8, 1
-; LMULMAX2-RV32I-NEXT: vor.vv v8, v8, v9
-; LMULMAX2-RV32I-NEXT: vsrl.vi v9, v8, 2
-; LMULMAX2-RV32I-NEXT: vor.vv v8, v8, v9
-; LMULMAX2-RV32I-NEXT: vsrl.vi v9, v8, 4
-; LMULMAX2-RV32I-NEXT: vor.vv v8, v8, v9
-; LMULMAX2-RV32I-NEXT: vsrl.vi v9, v8, 8
-; LMULMAX2-RV32I-NEXT: vor.vv v8, v8, v9
-; LMULMAX2-RV32I-NEXT: vsrl.vi v9, v8, 16
-; LMULMAX2-RV32I-NEXT: vor.vv v8, v8, v9
-; LMULMAX2-RV32I-NEXT: vnot.v v8, v8
-; LMULMAX2-RV32I-NEXT: vsrl.vi v9, v8, 1
-; LMULMAX2-RV32I-NEXT: lui a1, 349525
-; LMULMAX2-RV32I-NEXT: addi a1, a1, 1365
-; LMULMAX2-RV32I-NEXT: vand.vx v9, v9, a1
-; LMULMAX2-RV32I-NEXT: vsub.vv v8, v8, v9
-; LMULMAX2-RV32I-NEXT: lui a1, 209715
-; LMULMAX2-RV32I-NEXT: addi a1, a1, 819
-; LMULMAX2-RV32I-NEXT: vand.vx v9, v8, a1
-; LMULMAX2-RV32I-NEXT: vsrl.vi v8, v8, 2
-; LMULMAX2-RV32I-NEXT: vand.vx v8, v8, a1
-; LMULMAX2-RV32I-NEXT: vadd.vv v8, v9, v8
-; LMULMAX2-RV32I-NEXT: vsrl.vi v9, v8, 4
-; LMULMAX2-RV32I-NEXT: vadd.vv v8, v8, v9
-; LMULMAX2-RV32I-NEXT: lui a1, 61681
-; LMULMAX2-RV32I-NEXT: addi a1, a1, -241
-; LMULMAX2-RV32I-NEXT: vand.vx v8, v8, a1
-; LMULMAX2-RV32I-NEXT: lui a1, 4112
-; LMULMAX2-RV32I-NEXT: addi a1, a1, 257
-; LMULMAX2-RV32I-NEXT: vmul.vx v8, v8, a1
-; LMULMAX2-RV32I-NEXT: vsrl.vi v8, v8, 24
-; LMULMAX2-RV32I-NEXT: vse32.v v8, (a0)
-; LMULMAX2-RV32I-NEXT: ret
-;
-; LMULMAX2-RV64I-LABEL: ctlz_zero_undef_v4i32:
-; LMULMAX2-RV64I: # %bb.0:
-; LMULMAX2-RV64I-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; LMULMAX2-RV64I-NEXT: vle32.v v8, (a0)
-; LMULMAX2-RV64I-NEXT: vsrl.vi v9, v8, 1
-; LMULMAX2-RV64I-NEXT: vor.vv v8, v8, v9
-; LMULMAX2-RV64I-NEXT: vsrl.vi v9, v8, 2
-; LMULMAX2-RV64I-NEXT: vor.vv v8, v8, v9
-; LMULMAX2-RV64I-NEXT: vsrl.vi v9, v8, 4
-; LMULMAX2-RV64I-NEXT: vor.vv v8, v8, v9
-; LMULMAX2-RV64I-NEXT: vsrl.vi v9, v8, 8
-; LMULMAX2-RV64I-NEXT: vor.vv v8, v8, v9
-; LMULMAX2-RV64I-NEXT: vsrl.vi v9, v8, 16
-; LMULMAX2-RV64I-NEXT: vor.vv v8, v8, v9
-; LMULMAX2-RV64I-NEXT: vnot.v v8, v8
-; LMULMAX2-RV64I-NEXT: vsrl.vi v9, v8, 1
-; LMULMAX2-RV64I-NEXT: lui a1, 349525
-; LMULMAX2-RV64I-NEXT: addi a1, a1, 1365
-; LMULMAX2-RV64I-NEXT: vand.vx v9, v9, a1
-; LMULMAX2-RV64I-NEXT: vsub.vv v8, v8, v9
-; LMULMAX2-RV64I-NEXT: lui a1, 209715
-; LMULMAX2-RV64I-NEXT: addi a1, a1, 819
-; LMULMAX2-RV64I-NEXT: vand.vx v9, v8, a1
-; LMULMAX2-RV64I-NEXT: vsrl.vi v8, v8, 2
-; LMULMAX2-RV64I-NEXT: vand.vx v8, v8, a1
-; LMULMAX2-RV64I-NEXT: vadd.vv v8, v9, v8
-; LMULMAX2-RV64I-NEXT: vsrl.vi v9, v8, 4
-; LMULMAX2-RV64I-NEXT: vadd.vv v8, v8, v9
-; LMULMAX2-RV64I-NEXT: lui a1, 61681
-; LMULMAX2-RV64I-NEXT: addi a1, a1, -241
-; LMULMAX2-RV64I-NEXT: vand.vx v8, v8, a1
-; LMULMAX2-RV64I-NEXT: lui a1, 4112
-; LMULMAX2-RV64I-NEXT: addi a1, a1, 257
-; LMULMAX2-RV64I-NEXT: vmul.vx v8, v8, a1
-; LMULMAX2-RV64I-NEXT: vsrl.vi v8, v8, 24
-; LMULMAX2-RV64I-NEXT: vse32.v v8, (a0)
-; LMULMAX2-RV64I-NEXT: ret
-;
-; LMULMAX2-RV32F-LABEL: ctlz_zero_undef_v4i32:
-; LMULMAX2-RV32F: # %bb.0:
-; LMULMAX2-RV32F-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; LMULMAX2-RV32F-NEXT: vle32.v v8, (a0)
-; LMULMAX2-RV32F-NEXT: fsrmi a1, 1
-; LMULMAX2-RV32F-NEXT: vfcvt.f.xu.v v8, v8
-; LMULMAX2-RV32F-NEXT: fsrm a1
-; LMULMAX2-RV32F-NEXT: vsrl.vi v8, v8, 23
-; LMULMAX2-RV32F-NEXT: li a1, 158
-; LMULMAX2-RV32F-NEXT: vrsub.vx v8, v8, a1
-; LMULMAX2-RV32F-NEXT: vse32.v v8, (a0)
-; LMULMAX2-RV32F-NEXT: ret
-;
-; LMULMAX2-RV64F-LABEL: ctlz_zero_undef_v4i32:
-; LMULMAX2-RV64F: # %bb.0:
-; LMULMAX2-RV64F-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; LMULMAX2-RV64F-NEXT: vle32.v v8, (a0)
-; LMULMAX2-RV64F-NEXT: fsrmi a1, 1
-; LMULMAX2-RV64F-NEXT: vfcvt.f.xu.v v8, v8
-; LMULMAX2-RV64F-NEXT: fsrm a1
-; LMULMAX2-RV64F-NEXT: vsrl.vi v8, v8, 23
-; LMULMAX2-RV64F-NEXT: li a1, 158
-; LMULMAX2-RV64F-NEXT: vrsub.vx v8, v8, a1
-; LMULMAX2-RV64F-NEXT: vse32.v v8, (a0)
-; LMULMAX2-RV64F-NEXT: ret
-;
-; LMULMAX2-RV32D-LABEL: ctlz_zero_undef_v4i32:
-; LMULMAX2-RV32D: # %bb.0:
-; LMULMAX2-RV32D-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; LMULMAX2-RV32D-NEXT: vle32.v v8, (a0)
-; LMULMAX2-RV32D-NEXT: vfwcvt.f.xu.v v10, v8
-; LMULMAX2-RV32D-NEXT: li a1, 52
-; LMULMAX2-RV32D-NEXT: vnsrl.wx v8, v10, a1
-; LMULMAX2-RV32D-NEXT: li a1, 1054
-; LMULMAX2-RV32D-NEXT: vrsub.vx v8, v8, a1
-; LMULMAX2-RV32D-NEXT: vse32.v v8, (a0)
-; LMULMAX2-RV32D-NEXT: ret
-;
-; LMULMAX2-RV64D-LABEL: ctlz_zero_undef_v4i32:
-; LMULMAX2-RV64D: # %bb.0:
-; LMULMAX2-RV64D-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; LMULMAX2-RV64D-NEXT: vle32.v v8, (a0)
-; LMULMAX2-RV64D-NEXT: vfwcvt.f.xu.v v10, v8
-; LMULMAX2-RV64D-NEXT: li a1, 52
-; LMULMAX2-RV64D-NEXT: vnsrl.wx v8, v10, a1
-; LMULMAX2-RV64D-NEXT: li a1, 1054
-; LMULMAX2-RV64D-NEXT: vrsub.vx v8, v8, a1
-; LMULMAX2-RV64D-NEXT: vse32.v v8, (a0)
-; LMULMAX2-RV64D-NEXT: ret
-;
-; LMULMAX8-LABEL: ctlz_zero_undef_v4i32:
-; LMULMAX8: # %bb.0:
-; LMULMAX8-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; LMULMAX8-NEXT: vle32.v v8, (a0)
-; LMULMAX8-NEXT: vfwcvt.f.xu.v v10, v8
-; LMULMAX8-NEXT: li a1, 52
-; LMULMAX8-NEXT: vnsrl.wx v8, v10, a1
-; LMULMAX8-NEXT: li a1, 1054
-; LMULMAX8-NEXT: vrsub.vx v8, v8, a1
-; LMULMAX8-NEXT: vse32.v v8, (a0)
-; LMULMAX8-NEXT: ret
+; RVI-LABEL: ctlz_zero_undef_v4i32:
+; RVI: # %bb.0:
+; RVI-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; RVI-NEXT: vle32.v v8, (a0)
+; RVI-NEXT: vsrl.vi v9, v8, 1
+; RVI-NEXT: vor.vv v8, v8, v9
+; RVI-NEXT: vsrl.vi v9, v8, 2
+; RVI-NEXT: vor.vv v8, v8, v9
+; RVI-NEXT: vsrl.vi v9, v8, 4
+; RVI-NEXT: vor.vv v8, v8, v9
+; RVI-NEXT: vsrl.vi v9, v8, 8
+; RVI-NEXT: vor.vv v8, v8, v9
+; RVI-NEXT: vsrl.vi v9, v8, 16
+; RVI-NEXT: vor.vv v8, v8, v9
+; RVI-NEXT: vnot.v v8, v8
+; RVI-NEXT: vsrl.vi v9, v8, 1
+; RVI-NEXT: lui a1, 349525
+; RVI-NEXT: addi a1, a1, 1365
+; RVI-NEXT: vand.vx v9, v9, a1
+; RVI-NEXT: vsub.vv v8, v8, v9
+; RVI-NEXT: lui a1, 209715
+; RVI-NEXT: addi a1, a1, 819
+; RVI-NEXT: vand.vx v9, v8, a1
+; RVI-NEXT: vsrl.vi v8, v8, 2
+; RVI-NEXT: vand.vx v8, v8, a1
+; RVI-NEXT: vadd.vv v8, v9, v8
+; RVI-NEXT: vsrl.vi v9, v8, 4
+; RVI-NEXT: vadd.vv v8, v8, v9
+; RVI-NEXT: lui a1, 61681
+; RVI-NEXT: addi a1, a1, -241
+; RVI-NEXT: vand.vx v8, v8, a1
+; RVI-NEXT: lui a1, 4112
+; RVI-NEXT: addi a1, a1, 257
+; RVI-NEXT: vmul.vx v8, v8, a1
+; RVI-NEXT: vsrl.vi v8, v8, 24
+; RVI-NEXT: vse32.v v8, (a0)
+; RVI-NEXT: ret
+;
+; RVF-LABEL: ctlz_zero_undef_v4i32:
+; RVF: # %bb.0:
+; RVF-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; RVF-NEXT: vle32.v v8, (a0)
+; RVF-NEXT: fsrmi a1, 1
+; RVF-NEXT: vfcvt.f.xu.v v8, v8
+; RVF-NEXT: fsrm a1
+; RVF-NEXT: vsrl.vi v8, v8, 23
+; RVF-NEXT: li a1, 158
+; RVF-NEXT: vrsub.vx v8, v8, a1
+; RVF-NEXT: vse32.v v8, (a0)
+; RVF-NEXT: ret
+;
+; RVD-LABEL: ctlz_zero_undef_v4i32:
+; RVD: # %bb.0:
+; RVD-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; RVD-NEXT: vle32.v v8, (a0)
+; RVD-NEXT: vfwcvt.f.xu.v v10, v8
+; RVD-NEXT: li a1, 52
+; RVD-NEXT: vnsrl.wx v8, v10, a1
+; RVD-NEXT: li a1, 1054
+; RVD-NEXT: vrsub.vx v8, v8, a1
+; RVD-NEXT: vse32.v v8, (a0)
+; RVD-NEXT: ret
;
; ZVBB-LABEL: ctlz_zero_undef_v4i32:
; ZVBB: # %bb.0:
@@ -1608,180 +1042,152 @@ define void @ctlz_zero_undef_v4i32(ptr %x, ptr %y) nounwind {
}
define void @ctlz_zero_undef_v2i64(ptr %x, ptr %y) nounwind {
-; LMULMAX2-RV32I-LABEL: ctlz_zero_undef_v2i64:
-; LMULMAX2-RV32I: # %bb.0:
-; LMULMAX2-RV32I-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; LMULMAX2-RV32I-NEXT: vle64.v v8, (a0)
-; LMULMAX2-RV32I-NEXT: vsrl.vi v9, v8, 1
-; LMULMAX2-RV32I-NEXT: vor.vv v8, v8, v9
-; LMULMAX2-RV32I-NEXT: vsrl.vi v9, v8, 2
-; LMULMAX2-RV32I-NEXT: vor.vv v8, v8, v9
-; LMULMAX2-RV32I-NEXT: vsrl.vi v9, v8, 4
-; LMULMAX2-RV32I-NEXT: vor.vv v8, v8, v9
-; LMULMAX2-RV32I-NEXT: vsrl.vi v9, v8, 8
-; LMULMAX2-RV32I-NEXT: vor.vv v8, v8, v9
-; LMULMAX2-RV32I-NEXT: vsrl.vi v9, v8, 16
-; LMULMAX2-RV32I-NEXT: vor.vv v8, v8, v9
-; LMULMAX2-RV32I-NEXT: li a1, 32
-; LMULMAX2-RV32I-NEXT: vsrl.vx v9, v8, a1
-; LMULMAX2-RV32I-NEXT: vor.vv v8, v8, v9
-; LMULMAX2-RV32I-NEXT: vnot.v v8, v8
-; LMULMAX2-RV32I-NEXT: vsrl.vi v9, v8, 1
-; LMULMAX2-RV32I-NEXT: lui a1, 349525
-; LMULMAX2-RV32I-NEXT: addi a1, a1, 1365
-; LMULMAX2-RV32I-NEXT: vsetvli a2, zero, e32, m1, ta, ma
-; LMULMAX2-RV32I-NEXT: vmv.v.x v10, a1
-; LMULMAX2-RV32I-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; LMULMAX2-RV32I-NEXT: vand.vv v9, v9, v10
-; LMULMAX2-RV32I-NEXT: vsub.vv v8, v8, v9
-; LMULMAX2-RV32I-NEXT: lui a1, 209715
-; LMULMAX2-RV32I-NEXT: addi a1, a1, 819
-; LMULMAX2-RV32I-NEXT: vsetvli a2, zero, e32, m1, ta, ma
-; LMULMAX2-RV32I-NEXT: vmv.v.x v9, a1
-; LMULMAX2-RV32I-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; LMULMAX2-RV32I-NEXT: vand.vv v10, v8, v9
-; LMULMAX2-RV32I-NEXT: vsrl.vi v8, v8, 2
-; LMULMAX2-RV32I-NEXT: vand.vv v8, v8, v9
-; LMULMAX2-RV32I-NEXT: vadd.vv v8, v10, v8
-; LMULMAX2-RV32I-NEXT: vsrl.vi v9, v8, 4
-; LMULMAX2-RV32I-NEXT: vadd.vv v8, v8, v9
-; LMULMAX2-RV32I-NEXT: lui a1, 61681
-; LMULMAX2-RV32I-NEXT: addi a1, a1, -241
-; LMULMAX2-RV32I-NEXT: vsetvli a2, zero, e32, m1, ta, ma
-; LMULMAX2-RV32I-NEXT: vmv.v.x v9, a1
-; LMULMAX2-RV32I-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; LMULMAX2-RV32I-NEXT: vand.vv v8, v8, v9
-; LMULMAX2-RV32I-NEXT: lui a1, 4112
-; LMULMAX2-RV32I-NEXT: addi a1, a1, 257
-; LMULMAX2-RV32I-NEXT: vsetvli a2, zero, e32, m1, ta, ma
-; LMULMAX2-RV32I-NEXT: vmv.v.x v9, a1
-; LMULMAX2-RV32I-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; LMULMAX2-RV32I-NEXT: vmul.vv v8, v8, v9
-; LMULMAX2-RV32I-NEXT: li a1, 56
-; LMULMAX2-RV32I-NEXT: vsrl.vx v8, v8, a1
-; LMULMAX2-RV32I-NEXT: vse64.v v8, (a0)
-; LMULMAX2-RV32I-NEXT: ret
-;
-; LMULMAX2-RV64I-LABEL: ctlz_zero_undef_v2i64:
-; LMULMAX2-RV64I: # %bb.0:
-; LMULMAX2-RV64I-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; LMULMAX2-RV64I-NEXT: vle64.v v8, (a0)
-; LMULMAX2-RV64I-NEXT: vsrl.vi v9, v8, 1
-; LMULMAX2-RV64I-NEXT: vor.vv v8, v8, v9
-; LMULMAX2-RV64I-NEXT: vsrl.vi v9, v8, 2
-; LMULMAX2-RV64I-NEXT: vor.vv v8, v8, v9
-; LMULMAX2-RV64I-NEXT: vsrl.vi v9, v8, 4
-; LMULMAX2-RV64I-NEXT: vor.vv v8, v8, v9
-; LMULMAX2-RV64I-NEXT: vsrl.vi v9, v8, 8
-; LMULMAX2-RV64I-NEXT: vor.vv v8, v8, v9
-; LMULMAX2-RV64I-NEXT: vsrl.vi v9, v8, 16
-; LMULMAX2-RV64I-NEXT: vor.vv v8, v8, v9
-; LMULMAX2-RV64I-NEXT: li a1, 32
-; LMULMAX2-RV64I-NEXT: vsrl.vx v9, v8, a1
-; LMULMAX2-RV64I-NEXT: vor.vv v8, v8, v9
-; LMULMAX2-RV64I-NEXT: vnot.v v8, v8
-; LMULMAX2-RV64I-NEXT: vsrl.vi v9, v8, 1
-; LMULMAX2-RV64I-NEXT: lui a1, 349525
-; LMULMAX2-RV64I-NEXT: addiw a1, a1, 1365
-; LMULMAX2-RV64I-NEXT: slli a2, a1, 32
-; LMULMAX2-RV64I-NEXT: add a1, a1, a2
-; LMULMAX2-RV64I-NEXT: vand.vx v9, v9, a1
-; LMULMAX2-RV64I-NEXT: vsub.vv v8, v8, v9
-; LMULMAX2-RV64I-NEXT: lui a1, 209715
-; LMULMAX2-RV64I-NEXT: addiw a1, a1, 819
-; LMULMAX2-RV64I-NEXT: slli a2, a1, 32
-; LMULMAX2-RV64I-NEXT: add a1, a1, a2
-; LMULMAX2-RV64I-NEXT: vand.vx v9, v8, a1
-; LMULMAX2-RV64I-NEXT: vsrl.vi v8, v8, 2
-; LMULMAX2-RV64I-NEXT: vand.vx v8, v8, a1
-; LMULMAX2-RV64I-NEXT: vadd.vv v8, v9, v8
-; LMULMAX2-RV64I-NEXT: vsrl.vi v9, v8, 4
-; LMULMAX2-RV64I-NEXT: vadd.vv v8, v8, v9
-; LMULMAX2-RV64I-NEXT: lui a1, 61681
-; LMULMAX2-RV64I-NEXT: addiw a1, a1, -241
-; LMULMAX2-RV64I-NEXT: slli a2, a1, 32
-; LMULMAX2-RV64I-NEXT: add a1, a1, a2
-; LMULMAX2-RV64I-NEXT: vand.vx v8, v8, a1
-; LMULMAX2-RV64I-NEXT: lui a1, 4112
-; LMULMAX2-RV64I-NEXT: addiw a1, a1, 257
-; LMULMAX2-RV64I-NEXT: slli a2, a1, 32
-; LMULMAX2-RV64I-NEXT: add a1, a1, a2
-; LMULMAX2-RV64I-NEXT: vmul.vx v8, v8, a1
-; LMULMAX2-RV64I-NEXT: li a1, 56
-; LMULMAX2-RV64I-NEXT: vsrl.vx v8, v8, a1
-; LMULMAX2-RV64I-NEXT: vse64.v v8, (a0)
-; LMULMAX2-RV64I-NEXT: ret
-;
-; LMULMAX2-RV32F-LABEL: ctlz_zero_undef_v2i64:
-; LMULMAX2-RV32F: # %bb.0:
-; LMULMAX2-RV32F-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; LMULMAX2-RV32F-NEXT: vle64.v v8, (a0)
-; LMULMAX2-RV32F-NEXT: li a1, 190
-; LMULMAX2-RV32F-NEXT: vmv.v.x v9, a1
-; LMULMAX2-RV32F-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
-; LMULMAX2-RV32F-NEXT: fsrmi a1, 1
-; LMULMAX2-RV32F-NEXT: vfncvt.f.xu.w v10, v8
-; LMULMAX2-RV32F-NEXT: fsrm a1
-; LMULMAX2-RV32F-NEXT: vsrl.vi v8, v10, 23
-; LMULMAX2-RV32F-NEXT: vwsubu.wv v9, v9, v8
-; LMULMAX2-RV32F-NEXT: vse64.v v9, (a0)
-; LMULMAX2-RV32F-NEXT: ret
-;
-; LMULMAX2-RV64F-LABEL: ctlz_zero_undef_v2i64:
-; LMULMAX2-RV64F: # %bb.0:
-; LMULMAX2-RV64F-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
-; LMULMAX2-RV64F-NEXT: vle64.v v8, (a0)
-; LMULMAX2-RV64F-NEXT: li a1, 190
-; LMULMAX2-RV64F-NEXT: vmv.v.x v9, a1
-; LMULMAX2-RV64F-NEXT: fsrmi a1, 1
-; LMULMAX2-RV64F-NEXT: vfncvt.f.xu.w v10, v8
-; LMULMAX2-RV64F-NEXT: fsrm a1
-; LMULMAX2-RV64F-NEXT: vsrl.vi v8, v10, 23
-; LMULMAX2-RV64F-NEXT: vwsubu.vv v10, v9, v8
-; LMULMAX2-RV64F-NEXT: vse64.v v10, (a0)
-; LMULMAX2-RV64F-NEXT: ret
-;
-; LMULMAX2-RV32D-LABEL: ctlz_zero_undef_v2i64:
-; LMULMAX2-RV32D: # %bb.0:
-; LMULMAX2-RV32D-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; LMULMAX2-RV32D-NEXT: vle64.v v8, (a0)
-; LMULMAX2-RV32D-NEXT: fsrmi a1, 1
-; LMULMAX2-RV32D-NEXT: vfcvt.f.xu.v v8, v8
-; LMULMAX2-RV32D-NEXT: fsrm a1
-; LMULMAX2-RV32D-NEXT: li a1, 52
-; LMULMAX2-RV32D-NEXT: vsrl.vx v8, v8, a1
-; LMULMAX2-RV32D-NEXT: li a1, 1086
-; LMULMAX2-RV32D-NEXT: vrsub.vx v8, v8, a1
-; LMULMAX2-RV32D-NEXT: vse64.v v8, (a0)
-; LMULMAX2-RV32D-NEXT: ret
-;
-; LMULMAX2-RV64D-LABEL: ctlz_zero_undef_v2i64:
-; LMULMAX2-RV64D: # %bb.0:
-; LMULMAX2-RV64D-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; LMULMAX2-RV64D-NEXT: vle64.v v8, (a0)
-; LMULMAX2-RV64D-NEXT: fsrmi a1, 1
-; LMULMAX2-RV64D-NEXT: vfcvt.f.xu.v v8, v8
-; LMULMAX2-RV64D-NEXT: fsrm a1
-; LMULMAX2-RV64D-NEXT: li a1, 52
-; LMULMAX2-RV64D-NEXT: vsrl.vx v8, v8, a1
-; LMULMAX2-RV64D-NEXT: li a1, 1086
-; LMULMAX2-RV64D-NEXT: vrsub.vx v8, v8, a1
-; LMULMAX2-RV64D-NEXT: vse64.v v8, (a0)
-; LMULMAX2-RV64D-NEXT: ret
-;
-; LMULMAX8-LABEL: ctlz_zero_undef_v2i64:
-; LMULMAX8: # %bb.0:
-; LMULMAX8-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; LMULMAX8-NEXT: vle64.v v8, (a0)
-; LMULMAX8-NEXT: fsrmi a1, 1
-; LMULMAX8-NEXT: vfcvt.f.xu.v v8, v8
-; LMULMAX8-NEXT: fsrm a1
-; LMULMAX8-NEXT: li a1, 52
-; LMULMAX8-NEXT: vsrl.vx v8, v8, a1
-; LMULMAX8-NEXT: li a1, 1086
-; LMULMAX8-NEXT: vrsub.vx v8, v8, a1
-; LMULMAX8-NEXT: vse64.v v8, (a0)
-; LMULMAX8-NEXT: ret
+; RV32I-LABEL: ctlz_zero_undef_v2i64:
+; RV32I: # %bb.0:
+; RV32I-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; RV32I-NEXT: vle64.v v8, (a0)
+; RV32I-NEXT: vsrl.vi v9, v8, 1
+; RV32I-NEXT: vor.vv v8, v8, v9
+; RV32I-NEXT: vsrl.vi v9, v8, 2
+; RV32I-NEXT: vor.vv v8, v8, v9
+; RV32I-NEXT: vsrl.vi v9, v8, 4
+; RV32I-NEXT: vor.vv v8, v8, v9
+; RV32I-NEXT: vsrl.vi v9, v8, 8
+; RV32I-NEXT: vor.vv v8, v8, v9
+; RV32I-NEXT: vsrl.vi v9, v8, 16
+; RV32I-NEXT: vor.vv v8, v8, v9
+; RV32I-NEXT: li a1, 32
+; RV32I-NEXT: vsrl.vx v9, v8, a1
+; RV32I-NEXT: vor.vv v8, v8, v9
+; RV32I-NEXT: vnot.v v8, v8
+; RV32I-NEXT: vsrl.vi v9, v8, 1
+; RV32I-NEXT: lui a1, 349525
+; RV32I-NEXT: addi a1, a1, 1365
+; RV32I-NEXT: vsetvli a2, zero, e32, m1, ta, ma
+; RV32I-NEXT: vmv.v.x v10, a1
+; RV32I-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; RV32I-NEXT: vand.vv v9, v9, v10
+; RV32I-NEXT: vsub.vv v8, v8, v9
+; RV32I-NEXT: lui a1, 209715
+; RV32I-NEXT: addi a1, a1, 819
+; RV32I-NEXT: vsetvli a2, zero, e32, m1, ta, ma
+; RV32I-NEXT: vmv.v.x v9, a1
+; RV32I-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; RV32I-NEXT: vand.vv v10, v8, v9
+; RV32I-NEXT: vsrl.vi v8, v8, 2
+; RV32I-NEXT: vand.vv v8, v8, v9
+; RV32I-NEXT: vadd.vv v8, v10, v8
+; RV32I-NEXT: vsrl.vi v9, v8, 4
+; RV32I-NEXT: vadd.vv v8, v8, v9
+; RV32I-NEXT: lui a1, 61681
+; RV32I-NEXT: addi a1, a1, -241
+; RV32I-NEXT: vsetvli a2, zero, e32, m1, ta, ma
+; RV32I-NEXT: vmv.v.x v9, a1
+; RV32I-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; RV32I-NEXT: vand.vv v8, v8, v9
+; RV32I-NEXT: lui a1, 4112
+; RV32I-NEXT: addi a1, a1, 257
+; RV32I-NEXT: vsetvli a2, zero, e32, m1, ta, ma
+; RV32I-NEXT: vmv.v.x v9, a1
+; RV32I-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; RV32I-NEXT: vmul.vv v8, v8, v9
+; RV32I-NEXT: li a1, 56
+; RV32I-NEXT: vsrl.vx v8, v8, a1
+; RV32I-NEXT: vse64.v v8, (a0)
+; RV32I-NEXT: ret
+;
+; RV64I-LABEL: ctlz_zero_undef_v2i64:
+; RV64I: # %bb.0:
+; RV64I-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; RV64I-NEXT: vle64.v v8, (a0)
+; RV64I-NEXT: vsrl.vi v9, v8, 1
+; RV64I-NEXT: vor.vv v8, v8, v9
+; RV64I-NEXT: vsrl.vi v9, v8, 2
+; RV64I-NEXT: vor.vv v8, v8, v9
+; RV64I-NEXT: vsrl.vi v9, v8, 4
+; RV64I-NEXT: vor.vv v8, v8, v9
+; RV64I-NEXT: vsrl.vi v9, v8, 8
+; RV64I-NEXT: vor.vv v8, v8, v9
+; RV64I-NEXT: vsrl.vi v9, v8, 16
+; RV64I-NEXT: vor.vv v8, v8, v9
+; RV64I-NEXT: li a1, 32
+; RV64I-NEXT: vsrl.vx v9, v8, a1
+; RV64I-NEXT: vor.vv v8, v8, v9
+; RV64I-NEXT: vnot.v v8, v8
+; RV64I-NEXT: vsrl.vi v9, v8, 1
+; RV64I-NEXT: lui a1, 349525
+; RV64I-NEXT: addiw a1, a1, 1365
+; RV64I-NEXT: slli a2, a1, 32
+; RV64I-NEXT: add a1, a1, a2
+; RV64I-NEXT: vand.vx v9, v9, a1
+; RV64I-NEXT: vsub.vv v8, v8, v9
+; RV64I-NEXT: lui a1, 209715
+; RV64I-NEXT: addiw a1, a1, 819
+; RV64I-NEXT: slli a2, a1, 32
+; RV64I-NEXT: add a1, a1, a2
+; RV64I-NEXT: vand.vx v9, v8, a1
+; RV64I-NEXT: vsrl.vi v8, v8, 2
+; RV64I-NEXT: vand.vx v8, v8, a1
+; RV64I-NEXT: vadd.vv v8, v9, v8
+; RV64I-NEXT: vsrl.vi v9, v8, 4
+; RV64I-NEXT: vadd.vv v8, v8, v9
+; RV64I-NEXT: lui a1, 61681
+; RV64I-NEXT: addiw a1, a1, -241
+; RV64I-NEXT: slli a2, a1, 32
+; RV64I-NEXT: add a1, a1, a2
+; RV64I-NEXT: vand.vx v8, v8, a1
+; RV64I-NEXT: lui a1, 4112
+; RV64I-NEXT: addiw a1, a1, 257
+; RV64I-NEXT: slli a2, a1, 32
+; RV64I-NEXT: add a1, a1, a2
+; RV64I-NEXT: vmul.vx v8, v8, a1
+; RV64I-NEXT: li a1, 56
+; RV64I-NEXT: vsrl.vx v8, v8, a1
+; RV64I-NEXT: vse64.v v8, (a0)
+; RV64I-NEXT: ret
+;
+; RV32F-LABEL: ctlz_zero_undef_v2i64:
+; RV32F: # %bb.0:
+; RV32F-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; RV32F-NEXT: vle64.v v8, (a0)
+; RV32F-NEXT: li a1, 190
+; RV32F-NEXT: vmv.v.x v9, a1
+; RV32F-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
+; RV32F-NEXT: fsrmi a1, 1
+; RV32F-NEXT: vfncvt.f.xu.w v10, v8
+; RV32F-NEXT: fsrm a1
+; RV32F-NEXT: vsrl.vi v8, v10, 23
+; RV32F-NEXT: vwsubu.wv v9, v9, v8
+; RV32F-NEXT: vse64.v v9, (a0)
+; RV32F-NEXT: ret
+;
+; RV64F-LABEL: ctlz_zero_undef_v2i64:
+; RV64F: # %bb.0:
+; RV64F-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
+; RV64F-NEXT: vle64.v v8, (a0)
+; RV64F-NEXT: li a1, 190
+; RV64F-NEXT: vmv.v.x v9, a1
+; RV64F-NEXT: fsrmi a1, 1
+; RV64F-NEXT: vfncvt.f.xu.w v10, v8
+; RV64F-NEXT: fsrm a1
+; RV64F-NEXT: vsrl.vi v8, v10, 23
+; RV64F-NEXT: vwsubu.vv v10, v9, v8
+; RV64F-NEXT: vse64.v v10, (a0)
+; RV64F-NEXT: ret
+;
+; RVD-LABEL: ctlz_zero_undef_v2i64:
+; RVD: # %bb.0:
+; RVD-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; RVD-NEXT: vle64.v v8, (a0)
+; RVD-NEXT: fsrmi a1, 1
+; RVD-NEXT: vfcvt.f.xu.v v8, v8
+; RVD-NEXT: fsrm a1
+; RVD-NEXT: li a1, 52
+; RVD-NEXT: vsrl.vx v8, v8, a1
+; RVD-NEXT: li a1, 1086
+; RVD-NEXT: vrsub.vx v8, v8, a1
+; RVD-NEXT: vse64.v v8, (a0)
+; RVD-NEXT: ret
;
; ZVBB-LABEL: ctlz_zero_undef_v2i64:
; ZVBB: # %bb.0:
@@ -1798,93 +1204,62 @@ define void @ctlz_zero_undef_v2i64(ptr %x, ptr %y) nounwind {
}
define void @ctlz_zero_undef_v32i8(ptr %x, ptr %y) nounwind {
-; LMULMAX2-LABEL: ctlz_zero_undef_v32i8:
-; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: li a1, 32
-; LMULMAX2-NEXT: vsetvli zero, a1, e8, m2, ta, ma
-; LMULMAX2-NEXT: vle8.v v8, (a0)
-; LMULMAX2-NEXT: vsrl.vi v10, v8, 1
-; LMULMAX2-NEXT: vor.vv v8, v8, v10
-; LMULMAX2-NEXT: vsrl.vi v10, v8, 2
-; LMULMAX2-NEXT: vor.vv v8, v8, v10
-; LMULMAX2-NEXT: vsrl.vi v10, v8, 4
-; LMULMAX2-NEXT: vor.vv v8, v8, v10
-; LMULMAX2-NEXT: vnot.v v8, v8
-; LMULMAX2-NEXT: vsrl.vi v10, v8, 1
-; LMULMAX2-NEXT: li a1, 85
-; LMULMAX2-NEXT: vand.vx v10, v10, a1
-; LMULMAX2-NEXT: vsub.vv v8, v8, v10
-; LMULMAX2-NEXT: li a1, 51
-; LMULMAX2-NEXT: vand.vx v10, v8, a1
-; LMULMAX2-NEXT: vsrl.vi v8, v8, 2
-; LMULMAX2-NEXT: vand.vx v8, v8, a1
-; LMULMAX2-NEXT: vadd.vv v8, v10, v8
-; LMULMAX2-NEXT: vsrl.vi v10, v8, 4
-; LMULMAX2-NEXT: vadd.vv v8, v8, v10
-; LMULMAX2-NEXT: vand.vi v8, v8, 15
-; LMULMAX2-NEXT: vse8.v v8, (a0)
-; LMULMAX2-NEXT: ret
-;
-; LMULMAX1-LABEL: ctlz_zero_undef_v32i8:
-; LMULMAX1: # %bb.0:
-; LMULMAX1-NEXT: vsetivli zero, 16, e8, m1, ta, ma
-; LMULMAX1-NEXT: addi a1, a0, 16
-; LMULMAX1-NEXT: vle8.v v8, (a1)
-; LMULMAX1-NEXT: vle8.v v9, (a0)
-; LMULMAX1-NEXT: vsrl.vi v10, v8, 1
-; LMULMAX1-NEXT: vor.vv v8, v8, v10
-; LMULMAX1-NEXT: vsrl.vi v10, v8, 2
-; LMULMAX1-NEXT: vor.vv v8, v8, v10
-; LMULMAX1-NEXT: vsrl.vi v10, v8, 4
-; LMULMAX1-NEXT: vor.vv v8, v8, v10
-; LMULMAX1-NEXT: vnot.v v8, v8
-; LMULMAX1-NEXT: vsrl.vi v10, v8, 1
-; LMULMAX1-NEXT: li a2, 85
-; LMULMAX1-NEXT: vand.vx v10, v10, a2
-; LMULMAX1-NEXT: vsub.vv v8, v8, v10
-; LMULMAX1-NEXT: li a3, 51
-; LMULMAX1-NEXT: vand.vx v10, v8, a3
-; LMULMAX1-NEXT: vsrl.vi v8, v8, 2
-; LMULMAX1-NEXT: vand.vx v8, v8, a3
-; LMULMAX1-NEXT: vadd.vv v8, v10, v8
-; LMULMAX1-NEXT: vsrl.vi v10, v8, 4
-; LMULMAX1-NEXT: vadd.vv v8, v8, v10
-; LMULMAX1-NEXT: vand.vi v8, v8, 15
-; LMULMAX1-NEXT: vsrl.vi v10, v9, 1
-; LMULMAX1-NEXT: vor.vv v9, v9, v10
-; LMULMAX1-NEXT: vsrl.vi v10, v9, 2
-; LMULMAX1-NEXT: vor.vv v9, v9, v10
-; LMULMAX1-NEXT: vsrl.vi v10, v9, 4
-; LMULMAX1-NEXT: vor.vv v9, v9, v10
-; LMULMAX1-NEXT: vnot.v v9, v9
-; LMULMAX1-NEXT: vsrl.vi v10, v9, 1
-; LMULMAX1-NEXT: vand.vx v10, v10, a2
-; LMULMAX1-NEXT: vsub.vv v9, v9, v10
-; LMULMAX1-NEXT: vand.vx v10, v9, a3
-; LMULMAX1-NEXT: vsrl.vi v9, v9, 2
-; LMULMAX1-NEXT: vand.vx v9, v9, a3
-; LMULMAX1-NEXT: vadd.vv v9, v10, v9
-; LMULMAX1-NEXT: vsrl.vi v10, v9, 4
-; LMULMAX1-NEXT: vadd.vv v9, v9, v10
-; LMULMAX1-NEXT: vand.vi v9, v9, 15
-; LMULMAX1-NEXT: vse8.v v9, (a0)
-; LMULMAX1-NEXT: vse8.v v8, (a1)
-; LMULMAX1-NEXT: ret
-;
-; LMULMAX8-LABEL: ctlz_zero_undef_v32i8:
-; LMULMAX8: # %bb.0:
-; LMULMAX8-NEXT: li a1, 32
-; LMULMAX8-NEXT: vsetvli zero, a1, e16, m4, ta, ma
-; LMULMAX8-NEXT: vle8.v v8, (a0)
-; LMULMAX8-NEXT: vzext.vf2 v12, v8
-; LMULMAX8-NEXT: vfwcvt.f.xu.v v16, v12
-; LMULMAX8-NEXT: vnsrl.wi v8, v16, 23
-; LMULMAX8-NEXT: vsetvli zero, zero, e8, m2, ta, ma
-; LMULMAX8-NEXT: vnsrl.wi v12, v8, 0
-; LMULMAX8-NEXT: li a1, 134
-; LMULMAX8-NEXT: vrsub.vx v8, v12, a1
-; LMULMAX8-NEXT: vse8.v v8, (a0)
-; LMULMAX8-NEXT: ret
+; RVI-LABEL: ctlz_zero_undef_v32i8:
+; RVI: # %bb.0:
+; RVI-NEXT: li a1, 32
+; RVI-NEXT: vsetvli zero, a1, e8, m2, ta, ma
+; RVI-NEXT: vle8.v v8, (a0)
+; RVI-NEXT: vsrl.vi v10, v8, 1
+; RVI-NEXT: vor.vv v8, v8, v10
+; RVI-NEXT: vsrl.vi v10, v8, 2
+; RVI-NEXT: vor.vv v8, v8, v10
+; RVI-NEXT: vsrl.vi v10, v8, 4
+; RVI-NEXT: vor.vv v8, v8, v10
+; RVI-NEXT: vnot.v v8, v8
+; RVI-NEXT: vsrl.vi v10, v8, 1
+; RVI-NEXT: li a1, 85
+; RVI-NEXT: vand.vx v10, v10, a1
+; RVI-NEXT: vsub.vv v8, v8, v10
+; RVI-NEXT: li a1, 51
+; RVI-NEXT: vand.vx v10, v8, a1
+; RVI-NEXT: vsrl.vi v8, v8, 2
+; RVI-NEXT: vand.vx v8, v8, a1
+; RVI-NEXT: vadd.vv v8, v10, v8
+; RVI-NEXT: vsrl.vi v10, v8, 4
+; RVI-NEXT: vadd.vv v8, v8, v10
+; RVI-NEXT: vand.vi v8, v8, 15
+; RVI-NEXT: vse8.v v8, (a0)
+; RVI-NEXT: ret
+;
+; RVF-LABEL: ctlz_zero_undef_v32i8:
+; RVF: # %bb.0:
+; RVF-NEXT: li a1, 32
+; RVF-NEXT: vsetvli zero, a1, e16, m4, ta, ma
+; RVF-NEXT: vle8.v v8, (a0)
+; RVF-NEXT: vzext.vf2 v12, v8
+; RVF-NEXT: vfwcvt.f.xu.v v16, v12
+; RVF-NEXT: vnsrl.wi v8, v16, 23
+; RVF-NEXT: vsetvli zero, zero, e8, m2, ta, ma
+; RVF-NEXT: vnsrl.wi v12, v8, 0
+; RVF-NEXT: li a1, 134
+; RVF-NEXT: vrsub.vx v8, v12, a1
+; RVF-NEXT: vse8.v v8, (a0)
+; RVF-NEXT: ret
+;
+; RVD-LABEL: ctlz_zero_undef_v32i8:
+; RVD: # %bb.0:
+; RVD-NEXT: li a1, 32
+; RVD-NEXT: vsetvli zero, a1, e16, m4, ta, ma
+; RVD-NEXT: vle8.v v8, (a0)
+; RVD-NEXT: vzext.vf2 v12, v8
+; RVD-NEXT: vfwcvt.f.xu.v v16, v12
+; RVD-NEXT: vnsrl.wi v8, v16, 23
+; RVD-NEXT: vsetvli zero, zero, e8, m2, ta, ma
+; RVD-NEXT: vnsrl.wi v12, v8, 0
+; RVD-NEXT: li a1, 134
+; RVD-NEXT: vrsub.vx v8, v12, a1
+; RVD-NEXT: vse8.v v8, (a0)
+; RVD-NEXT: ret
;
; ZVBB-LABEL: ctlz_zero_undef_v32i8:
; ZVBB: # %bb.0:
@@ -1902,110 +1277,62 @@ define void @ctlz_zero_undef_v32i8(ptr %x, ptr %y) nounwind {
}
define void @ctlz_zero_undef_v16i16(ptr %x, ptr %y) nounwind {
-; LMULMAX2-LABEL: ctlz_zero_undef_v16i16:
-; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; LMULMAX2-NEXT: vle16.v v8, (a0)
-; LMULMAX2-NEXT: vsrl.vi v10, v8, 1
-; LMULMAX2-NEXT: vor.vv v8, v8, v10
-; LMULMAX2-NEXT: vsrl.vi v10, v8, 2
-; LMULMAX2-NEXT: vor.vv v8, v8, v10
-; LMULMAX2-NEXT: vsrl.vi v10, v8, 4
-; LMULMAX2-NEXT: vor.vv v8, v8, v10
-; LMULMAX2-NEXT: vsrl.vi v10, v8, 8
-; LMULMAX2-NEXT: vor.vv v8, v8, v10
-; LMULMAX2-NEXT: vnot.v v8, v8
-; LMULMAX2-NEXT: vsrl.vi v10, v8, 1
-; LMULMAX2-NEXT: lui a1, 5
-; LMULMAX2-NEXT: addi a1, a1, 1365
-; LMULMAX2-NEXT: vand.vx v10, v10, a1
-; LMULMAX2-NEXT: vsub.vv v8, v8, v10
-; LMULMAX2-NEXT: lui a1, 3
-; LMULMAX2-NEXT: addi a1, a1, 819
-; LMULMAX2-NEXT: vand.vx v10, v8, a1
-; LMULMAX2-NEXT: vsrl.vi v8, v8, 2
-; LMULMAX2-NEXT: vand.vx v8, v8, a1
-; LMULMAX2-NEXT: vadd.vv v8, v10, v8
-; LMULMAX2-NEXT: vsrl.vi v10, v8, 4
-; LMULMAX2-NEXT: vadd.vv v8, v8, v10
-; LMULMAX2-NEXT: lui a1, 1
-; LMULMAX2-NEXT: addi a1, a1, -241
-; LMULMAX2-NEXT: vand.vx v8, v8, a1
-; LMULMAX2-NEXT: li a1, 257
-; LMULMAX2-NEXT: vmul.vx v8, v8, a1
-; LMULMAX2-NEXT: vsrl.vi v8, v8, 8
-; LMULMAX2-NEXT: vse16.v v8, (a0)
-; LMULMAX2-NEXT: ret
-;
-; LMULMAX1-LABEL: ctlz_zero_undef_v16i16:
-; LMULMAX1: # %bb.0:
-; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; LMULMAX1-NEXT: addi a1, a0, 16
-; LMULMAX1-NEXT: vle16.v v8, (a1)
-; LMULMAX1-NEXT: vle16.v v9, (a0)
-; LMULMAX1-NEXT: vsrl.vi v10, v8, 1
-; LMULMAX1-NEXT: vor.vv v8, v8, v10
-; LMULMAX1-NEXT: vsrl.vi v10, v8, 2
-; LMULMAX1-NEXT: vor.vv v8, v8, v10
-; LMULMAX1-NEXT: vsrl.vi v10, v8, 4
-; LMULMAX1-NEXT: vor.vv v8, v8, v10
-; LMULMAX1-NEXT: vsrl.vi v10, v8, 8
-; LMULMAX1-NEXT: vor.vv v8, v8, v10
-; LMULMAX1-NEXT: vnot.v v8, v8
-; LMULMAX1-NEXT: vsrl.vi v10, v8, 1
-; LMULMAX1-NEXT: lui a2, 5
-; LMULMAX1-NEXT: addi a2, a2, 1365
-; LMULMAX1-NEXT: vand.vx v10, v10, a2
-; LMULMAX1-NEXT: vsub.vv v8, v8, v10
-; LMULMAX1-NEXT: lui a3, 3
-; LMULMAX1-NEXT: addi a3, a3, 819
-; LMULMAX1-NEXT: vand.vx v10, v8, a3
-; LMULMAX1-NEXT: vsrl.vi v8, v8, 2
-; LMULMAX1-NEXT: vand.vx v8, v8, a3
-; LMULMAX1-NEXT: vadd.vv v8, v10, v8
-; LMULMAX1-NEXT: vsrl.vi v10, v8, 4
-; LMULMAX1-NEXT: vadd.vv v8, v8, v10
-; LMULMAX1-NEXT: lui a4, 1
-; LMULMAX1-NEXT: addi a4, a4, -241
-; LMULMAX1-NEXT: vand.vx v8, v8, a4
-; LMULMAX1-NEXT: li a5, 257
-; LMULMAX1-NEXT: vmul.vx v8, v8, a5
-; LMULMAX1-NEXT: vsrl.vi v8, v8, 8
-; LMULMAX1-NEXT: vsrl.vi v10, v9, 1
-; LMULMAX1-NEXT: vor.vv v9, v9, v10
-; LMULMAX1-NEXT: vsrl.vi v10, v9, 2
-; LMULMAX1-NEXT: vor.vv v9, v9, v10
-; LMULMAX1-NEXT: vsrl.vi v10, v9, 4
-; LMULMAX1-NEXT: vor.vv v9, v9, v10
-; LMULMAX1-NEXT: vsrl.vi v10, v9, 8
-; LMULMAX1-NEXT: vor.vv v9, v9, v10
-; LMULMAX1-NEXT: vnot.v v9, v9
-; LMULMAX1-NEXT: vsrl.vi v10, v9, 1
-; LMULMAX1-NEXT: vand.vx v10, v10, a2
-; LMULMAX1-NEXT: vsub.vv v9, v9, v10
-; LMULMAX1-NEXT: vand.vx v10, v9, a3
-; LMULMAX1-NEXT: vsrl.vi v9, v9, 2
-; LMULMAX1-NEXT: vand.vx v9, v9, a3
-; LMULMAX1-NEXT: vadd.vv v9, v10, v9
-; LMULMAX1-NEXT: vsrl.vi v10, v9, 4
-; LMULMAX1-NEXT: vadd.vv v9, v9, v10
-; LMULMAX1-NEXT: vand.vx v9, v9, a4
-; LMULMAX1-NEXT: vmul.vx v9, v9, a5
-; LMULMAX1-NEXT: vsrl.vi v9, v9, 8
-; LMULMAX1-NEXT: vse16.v v9, (a0)
-; LMULMAX1-NEXT: vse16.v v8, (a1)
-; LMULMAX1-NEXT: ret
-;
-; LMULMAX8-LABEL: ctlz_zero_undef_v16i16:
-; LMULMAX8: # %bb.0:
-; LMULMAX8-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; LMULMAX8-NEXT: vle16.v v8, (a0)
-; LMULMAX8-NEXT: vfwcvt.f.xu.v v12, v8
-; LMULMAX8-NEXT: vnsrl.wi v8, v12, 23
-; LMULMAX8-NEXT: li a1, 142
-; LMULMAX8-NEXT: vrsub.vx v8, v8, a1
-; LMULMAX8-NEXT: vse16.v v8, (a0)
-; LMULMAX8-NEXT: ret
+; RVI-LABEL: ctlz_zero_undef_v16i16:
+; RVI: # %bb.0:
+; RVI-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RVI-NEXT: vle16.v v8, (a0)
+; RVI-NEXT: vsrl.vi v10, v8, 1
+; RVI-NEXT: vor.vv v8, v8, v10
+; RVI-NEXT: vsrl.vi v10, v8, 2
+; RVI-NEXT: vor.vv v8, v8, v10
+; RVI-NEXT: vsrl.vi v10, v8, 4
+; RVI-NEXT: vor.vv v8, v8, v10
+; RVI-NEXT: vsrl.vi v10, v8, 8
+; RVI-NEXT: vor.vv v8, v8, v10
+; RVI-NEXT: vnot.v v8, v8
+; RVI-NEXT: vsrl.vi v10, v8, 1
+; RVI-NEXT: lui a1, 5
+; RVI-NEXT: addi a1, a1, 1365
+; RVI-NEXT: vand.vx v10, v10, a1
+; RVI-NEXT: vsub.vv v8, v8, v10
+; RVI-NEXT: lui a1, 3
+; RVI-NEXT: addi a1, a1, 819
+; RVI-NEXT: vand.vx v10, v8, a1
+; RVI-NEXT: vsrl.vi v8, v8, 2
+; RVI-NEXT: vand.vx v8, v8, a1
+; RVI-NEXT: vadd.vv v8, v10, v8
+; RVI-NEXT: vsrl.vi v10, v8, 4
+; RVI-NEXT: vadd.vv v8, v8, v10
+; RVI-NEXT: lui a1, 1
+; RVI-NEXT: addi a1, a1, -241
+; RVI-NEXT: vand.vx v8, v8, a1
+; RVI-NEXT: li a1, 257
+; RVI-NEXT: vmul.vx v8, v8, a1
+; RVI-NEXT: vsrl.vi v8, v8, 8
+; RVI-NEXT: vse16.v v8, (a0)
+; RVI-NEXT: ret
+;
+; RVF-LABEL: ctlz_zero_undef_v16i16:
+; RVF: # %bb.0:
+; RVF-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RVF-NEXT: vle16.v v8, (a0)
+; RVF-NEXT: vfwcvt.f.xu.v v12, v8
+; RVF-NEXT: vnsrl.wi v8, v12, 23
+; RVF-NEXT: li a1, 142
+; RVF-NEXT: vrsub.vx v8, v8, a1
+; RVF-NEXT: vse16.v v8, (a0)
+; RVF-NEXT: ret
+;
+; RVD-LABEL: ctlz_zero_undef_v16i16:
+; RVD: # %bb.0:
+; RVD-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RVD-NEXT: vle16.v v8, (a0)
+; RVD-NEXT: vfwcvt.f.xu.v v12, v8
+; RVD-NEXT: vnsrl.wi v8, v12, 23
+; RVD-NEXT: li a1, 142
+; RVD-NEXT: vrsub.vx v8, v8, a1
+; RVD-NEXT: vse16.v v8, (a0)
+; RVD-NEXT: ret
;
; ZVBB-LABEL: ctlz_zero_undef_v16i16:
; ZVBB: # %bb.0:
@@ -2022,145 +1349,68 @@ define void @ctlz_zero_undef_v16i16(ptr %x, ptr %y) nounwind {
}
define void @ctlz_zero_undef_v8i32(ptr %x, ptr %y) nounwind {
-; LMULMAX2-RV32I-LABEL: ctlz_zero_undef_v8i32:
-; LMULMAX2-RV32I: # %bb.0:
-; LMULMAX2-RV32I-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; LMULMAX2-RV32I-NEXT: vle32.v v8, (a0)
-; LMULMAX2-RV32I-NEXT: vsrl.vi v10, v8, 1
-; LMULMAX2-RV32I-NEXT: vor.vv v8, v8, v10
-; LMULMAX2-RV32I-NEXT: vsrl.vi v10, v8, 2
-; LMULMAX2-RV32I-NEXT: vor.vv v8, v8, v10
-; LMULMAX2-RV32I-NEXT: vsrl.vi v10, v8, 4
-; LMULMAX2-RV32I-NEXT: vor.vv v8, v8, v10
-; LMULMAX2-RV32I-NEXT: vsrl.vi v10, v8, 8
-; LMULMAX2-RV32I-NEXT: vor.vv v8, v8, v10
-; LMULMAX2-RV32I-NEXT: vsrl.vi v10, v8, 16
-; LMULMAX2-RV32I-NEXT: vor.vv v8, v8, v10
-; LMULMAX2-RV32I-NEXT: vnot.v v8, v8
-; LMULMAX2-RV32I-NEXT: vsrl.vi v10, v8, 1
-; LMULMAX2-RV32I-NEXT: lui a1, 349525
-; LMULMAX2-RV32I-NEXT: addi a1, a1, 1365
-; LMULMAX2-RV32I-NEXT: vand.vx v10, v10, a1
-; LMULMAX2-RV32I-NEXT: vsub.vv v8, v8, v10
-; LMULMAX2-RV32I-NEXT: lui a1, 209715
-; LMULMAX2-RV32I-NEXT: addi a1, a1, 819
-; LMULMAX2-RV32I-NEXT: vand.vx v10, v8, a1
-; LMULMAX2-RV32I-NEXT: vsrl.vi v8, v8, 2
-; LMULMAX2-RV32I-NEXT: vand.vx v8, v8, a1
-; LMULMAX2-RV32I-NEXT: vadd.vv v8, v10, v8
-; LMULMAX2-RV32I-NEXT: vsrl.vi v10, v8, 4
-; LMULMAX2-RV32I-NEXT: vadd.vv v8, v8, v10
-; LMULMAX2-RV32I-NEXT: lui a1, 61681
-; LMULMAX2-RV32I-NEXT: addi a1, a1, -241
-; LMULMAX2-RV32I-NEXT: vand.vx v8, v8, a1
-; LMULMAX2-RV32I-NEXT: lui a1, 4112
-; LMULMAX2-RV32I-NEXT: addi a1, a1, 257
-; LMULMAX2-RV32I-NEXT: vmul.vx v8, v8, a1
-; LMULMAX2-RV32I-NEXT: vsrl.vi v8, v8, 24
-; LMULMAX2-RV32I-NEXT: vse32.v v8, (a0)
-; LMULMAX2-RV32I-NEXT: ret
-;
-; LMULMAX2-RV64I-LABEL: ctlz_zero_undef_v8i32:
-; LMULMAX2-RV64I: # %bb.0:
-; LMULMAX2-RV64I-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; LMULMAX2-RV64I-NEXT: vle32.v v8, (a0)
-; LMULMAX2-RV64I-NEXT: vsrl.vi v10, v8, 1
-; LMULMAX2-RV64I-NEXT: vor.vv v8, v8, v10
-; LMULMAX2-RV64I-NEXT: vsrl.vi v10, v8, 2
-; LMULMAX2-RV64I-NEXT: vor.vv v8, v8, v10
-; LMULMAX2-RV64I-NEXT: vsrl.vi v10, v8, 4
-; LMULMAX2-RV64I-NEXT: vor.vv v8, v8, v10
-; LMULMAX2-RV64I-NEXT: vsrl.vi v10, v8, 8
-; LMULMAX2-RV64I-NEXT: vor.vv v8, v8, v10
-; LMULMAX2-RV64I-NEXT: vsrl.vi v10, v8, 16
-; LMULMAX2-RV64I-NEXT: vor.vv v8, v8, v10
-; LMULMAX2-RV64I-NEXT: vnot.v v8, v8
-; LMULMAX2-RV64I-NEXT: vsrl.vi v10, v8, 1
-; LMULMAX2-RV64I-NEXT: lui a1, 349525
-; LMULMAX2-RV64I-NEXT: addi a1, a1, 1365
-; LMULMAX2-RV64I-NEXT: vand.vx v10, v10, a1
-; LMULMAX2-RV64I-NEXT: vsub.vv v8, v8, v10
-; LMULMAX2-RV64I-NEXT: lui a1, 209715
-; LMULMAX2-RV64I-NEXT: addi a1, a1, 819
-; LMULMAX2-RV64I-NEXT: vand.vx v10, v8, a1
-; LMULMAX2-RV64I-NEXT: vsrl.vi v8, v8, 2
-; LMULMAX2-RV64I-NEXT: vand.vx v8, v8, a1
-; LMULMAX2-RV64I-NEXT: vadd.vv v8, v10, v8
-; LMULMAX2-RV64I-NEXT: vsrl.vi v10, v8, 4
-; LMULMAX2-RV64I-NEXT: vadd.vv v8, v8, v10
-; LMULMAX2-RV64I-NEXT: lui a1, 61681
-; LMULMAX2-RV64I-NEXT: addi a1, a1, -241
-; LMULMAX2-RV64I-NEXT: vand.vx v8, v8, a1
-; LMULMAX2-RV64I-NEXT: lui a1, 4112
-; LMULMAX2-RV64I-NEXT: addi a1, a1, 257
-; LMULMAX2-RV64I-NEXT: vmul.vx v8, v8, a1
-; LMULMAX2-RV64I-NEXT: vsrl.vi v8, v8, 24
-; LMULMAX2-RV64I-NEXT: vse32.v v8, (a0)
-; LMULMAX2-RV64I-NEXT: ret
-;
-; LMULMAX2-RV32F-LABEL: ctlz_zero_undef_v8i32:
-; LMULMAX2-RV32F: # %bb.0:
-; LMULMAX2-RV32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; LMULMAX2-RV32F-NEXT: vle32.v v8, (a0)
-; LMULMAX2-RV32F-NEXT: fsrmi a1, 1
-; LMULMAX2-RV32F-NEXT: vfcvt.f.xu.v v8, v8
-; LMULMAX2-RV32F-NEXT: fsrm a1
-; LMULMAX2-RV32F-NEXT: vsrl.vi v8, v8, 23
-; LMULMAX2-RV32F-NEXT: li a1, 158
-; LMULMAX2-RV32F-NEXT: vrsub.vx v8, v8, a1
-; LMULMAX2-RV32F-NEXT: vse32.v v8, (a0)
-; LMULMAX2-RV32F-NEXT: ret
-;
-; LMULMAX2-RV64F-LABEL: ctlz_zero_undef_v8i32:
-; LMULMAX2-RV64F: # %bb.0:
-; LMULMAX2-RV64F-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; LMULMAX2-RV64F-NEXT: vle32.v v8, (a0)
-; LMULMAX2-RV64F-NEXT: fsrmi a1, 1
-; LMULMAX2-RV64F-NEXT: vfcvt.f.xu.v v8, v8
-; LMULMAX2-RV64F-NEXT: fsrm a1
-; LMULMAX2-RV64F-NEXT: vsrl.vi v8, v8, 23
-; LMULMAX2-RV64F-NEXT: li a1, 158
-; LMULMAX2-RV64F-NEXT: vrsub.vx v8, v8, a1
-; LMULMAX2-RV64F-NEXT: vse32.v v8, (a0)
-; LMULMAX2-RV64F-NEXT: ret
-;
-; LMULMAX2-RV32D-LABEL: ctlz_zero_undef_v8i32:
-; LMULMAX2-RV32D: # %bb.0:
-; LMULMAX2-RV32D-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; LMULMAX2-RV32D-NEXT: vle32.v v8, (a0)
-; LMULMAX2-RV32D-NEXT: fsrmi a1, 1
-; LMULMAX2-RV32D-NEXT: vfcvt.f.xu.v v8, v8
-; LMULMAX2-RV32D-NEXT: fsrm a1
-; LMULMAX2-RV32D-NEXT: vsrl.vi v8, v8, 23
-; LMULMAX2-RV32D-NEXT: li a1, 158
-; LMULMAX2-RV32D-NEXT: vrsub.vx v8, v8, a1
-; LMULMAX2-RV32D-NEXT: vse32.v v8, (a0)
-; LMULMAX2-RV32D-NEXT: ret
-;
-; LMULMAX2-RV64D-LABEL: ctlz_zero_undef_v8i32:
-; LMULMAX2-RV64D: # %bb.0:
-; LMULMAX2-RV64D-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; LMULMAX2-RV64D-NEXT: vle32.v v8, (a0)
-; LMULMAX2-RV64D-NEXT: fsrmi a1, 1
-; LMULMAX2-RV64D-NEXT: vfcvt.f.xu.v v8, v8
-; LMULMAX2-RV64D-NEXT: fsrm a1
-; LMULMAX2-RV64D-NEXT: vsrl.vi v8, v8, 23
-; LMULMAX2-RV64D-NEXT: li a1, 158
-; LMULMAX2-RV64D-NEXT: vrsub.vx v8, v8, a1
-; LMULMAX2-RV64D-NEXT: vse32.v v8, (a0)
-; LMULMAX2-RV64D-NEXT: ret
-;
-; LMULMAX8-LABEL: ctlz_zero_undef_v8i32:
-; LMULMAX8: # %bb.0:
-; LMULMAX8-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; LMULMAX8-NEXT: vle32.v v8, (a0)
-; LMULMAX8-NEXT: vfwcvt.f.xu.v v12, v8
-; LMULMAX8-NEXT: li a1, 52
-; LMULMAX8-NEXT: vnsrl.wx v8, v12, a1
-; LMULMAX8-NEXT: li a1, 1054
-; LMULMAX8-NEXT: vrsub.vx v8, v8, a1
-; LMULMAX8-NEXT: vse32.v v8, (a0)
-; LMULMAX8-NEXT: ret
+; RVI-LABEL: ctlz_zero_undef_v8i32:
+; RVI: # %bb.0:
+; RVI-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; RVI-NEXT: vle32.v v8, (a0)
+; RVI-NEXT: vsrl.vi v10, v8, 1
+; RVI-NEXT: vor.vv v8, v8, v10
+; RVI-NEXT: vsrl.vi v10, v8, 2
+; RVI-NEXT: vor.vv v8, v8, v10
+; RVI-NEXT: vsrl.vi v10, v8, 4
+; RVI-NEXT: vor.vv v8, v8, v10
+; RVI-NEXT: vsrl.vi v10, v8, 8
+; RVI-NEXT: vor.vv v8, v8, v10
+; RVI-NEXT: vsrl.vi v10, v8, 16
+; RVI-NEXT: vor.vv v8, v8, v10
+; RVI-NEXT: vnot.v v8, v8
+; RVI-NEXT: vsrl.vi v10, v8, 1
+; RVI-NEXT: lui a1, 349525
+; RVI-NEXT: addi a1, a1, 1365
+; RVI-NEXT: vand.vx v10, v10, a1
+; RVI-NEXT: vsub.vv v8, v8, v10
+; RVI-NEXT: lui a1, 209715
+; RVI-NEXT: addi a1, a1, 819
+; RVI-NEXT: vand.vx v10, v8, a1
+; RVI-NEXT: vsrl.vi v8, v8, 2
+; RVI-NEXT: vand.vx v8, v8, a1
+; RVI-NEXT: vadd.vv v8, v10, v8
+; RVI-NEXT: vsrl.vi v10, v8, 4
+; RVI-NEXT: vadd.vv v8, v8, v10
+; RVI-NEXT: lui a1, 61681
+; RVI-NEXT: addi a1, a1, -241
+; RVI-NEXT: vand.vx v8, v8, a1
+; RVI-NEXT: lui a1, 4112
+; RVI-NEXT: addi a1, a1, 257
+; RVI-NEXT: vmul.vx v8, v8, a1
+; RVI-NEXT: vsrl.vi v8, v8, 24
+; RVI-NEXT: vse32.v v8, (a0)
+; RVI-NEXT: ret
+;
+; RVF-LABEL: ctlz_zero_undef_v8i32:
+; RVF: # %bb.0:
+; RVF-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; RVF-NEXT: vle32.v v8, (a0)
+; RVF-NEXT: fsrmi a1, 1
+; RVF-NEXT: vfcvt.f.xu.v v8, v8
+; RVF-NEXT: fsrm a1
+; RVF-NEXT: vsrl.vi v8, v8, 23
+; RVF-NEXT: li a1, 158
+; RVF-NEXT: vrsub.vx v8, v8, a1
+; RVF-NEXT: vse32.v v8, (a0)
+; RVF-NEXT: ret
+;
+; RVD-LABEL: ctlz_zero_undef_v8i32:
+; RVD: # %bb.0:
+; RVD-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; RVD-NEXT: vle32.v v8, (a0)
+; RVD-NEXT: vfwcvt.f.xu.v v12, v8
+; RVD-NEXT: li a1, 52
+; RVD-NEXT: vnsrl.wx v8, v12, a1
+; RVD-NEXT: li a1, 1054
+; RVD-NEXT: vrsub.vx v8, v8, a1
+; RVD-NEXT: vse32.v v8, (a0)
+; RVD-NEXT: ret
;
; ZVBB-LABEL: ctlz_zero_undef_v8i32:
; ZVBB: # %bb.0:
@@ -2177,180 +1427,152 @@ define void @ctlz_zero_undef_v8i32(ptr %x, ptr %y) nounwind {
}
define void @ctlz_zero_undef_v4i64(ptr %x, ptr %y) nounwind {
-; LMULMAX2-RV32I-LABEL: ctlz_zero_undef_v4i64:
-; LMULMAX2-RV32I: # %bb.0:
-; LMULMAX2-RV32I-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; LMULMAX2-RV32I-NEXT: vle64.v v8, (a0)
-; LMULMAX2-RV32I-NEXT: vsrl.vi v10, v8, 1
-; LMULMAX2-RV32I-NEXT: vor.vv v8, v8, v10
-; LMULMAX2-RV32I-NEXT: vsrl.vi v10, v8, 2
-; LMULMAX2-RV32I-NEXT: vor.vv v8, v8, v10
-; LMULMAX2-RV32I-NEXT: vsrl.vi v10, v8, 4
-; LMULMAX2-RV32I-NEXT: vor.vv v8, v8, v10
-; LMULMAX2-RV32I-NEXT: vsrl.vi v10, v8, 8
-; LMULMAX2-RV32I-NEXT: vor.vv v8, v8, v10
-; LMULMAX2-RV32I-NEXT: vsrl.vi v10, v8, 16
-; LMULMAX2-RV32I-NEXT: vor.vv v8, v8, v10
-; LMULMAX2-RV32I-NEXT: li a1, 32
-; LMULMAX2-RV32I-NEXT: vsrl.vx v10, v8, a1
-; LMULMAX2-RV32I-NEXT: vor.vv v8, v8, v10
-; LMULMAX2-RV32I-NEXT: vnot.v v8, v8
-; LMULMAX2-RV32I-NEXT: vsrl.vi v10, v8, 1
-; LMULMAX2-RV32I-NEXT: lui a1, 349525
-; LMULMAX2-RV32I-NEXT: addi a1, a1, 1365
-; LMULMAX2-RV32I-NEXT: vsetvli a2, zero, e32, m2, ta, ma
-; LMULMAX2-RV32I-NEXT: vmv.v.x v12, a1
-; LMULMAX2-RV32I-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; LMULMAX2-RV32I-NEXT: vand.vv v10, v10, v12
-; LMULMAX2-RV32I-NEXT: vsub.vv v8, v8, v10
-; LMULMAX2-RV32I-NEXT: lui a1, 209715
-; LMULMAX2-RV32I-NEXT: addi a1, a1, 819
-; LMULMAX2-RV32I-NEXT: vsetvli a2, zero, e32, m2, ta, ma
-; LMULMAX2-RV32I-NEXT: vmv.v.x v10, a1
-; LMULMAX2-RV32I-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; LMULMAX2-RV32I-NEXT: vand.vv v12, v8, v10
-; LMULMAX2-RV32I-NEXT: vsrl.vi v8, v8, 2
-; LMULMAX2-RV32I-NEXT: vand.vv v8, v8, v10
-; LMULMAX2-RV32I-NEXT: vadd.vv v8, v12, v8
-; LMULMAX2-RV32I-NEXT: vsrl.vi v10, v8, 4
-; LMULMAX2-RV32I-NEXT: vadd.vv v8, v8, v10
-; LMULMAX2-RV32I-NEXT: lui a1, 61681
-; LMULMAX2-RV32I-NEXT: addi a1, a1, -241
-; LMULMAX2-RV32I-NEXT: vsetvli a2, zero, e32, m2, ta, ma
-; LMULMAX2-RV32I-NEXT: vmv.v.x v10, a1
-; LMULMAX2-RV32I-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; LMULMAX2-RV32I-NEXT: vand.vv v8, v8, v10
-; LMULMAX2-RV32I-NEXT: lui a1, 4112
-; LMULMAX2-RV32I-NEXT: addi a1, a1, 257
-; LMULMAX2-RV32I-NEXT: vsetvli a2, zero, e32, m2, ta, ma
-; LMULMAX2-RV32I-NEXT: vmv.v.x v10, a1
-; LMULMAX2-RV32I-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; LMULMAX2-RV32I-NEXT: vmul.vv v8, v8, v10
-; LMULMAX2-RV32I-NEXT: li a1, 56
-; LMULMAX2-RV32I-NEXT: vsrl.vx v8, v8, a1
-; LMULMAX2-RV32I-NEXT: vse64.v v8, (a0)
-; LMULMAX2-RV32I-NEXT: ret
-;
-; LMULMAX2-RV64I-LABEL: ctlz_zero_undef_v4i64:
-; LMULMAX2-RV64I: # %bb.0:
-; LMULMAX2-RV64I-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; LMULMAX2-RV64I-NEXT: vle64.v v8, (a0)
-; LMULMAX2-RV64I-NEXT: vsrl.vi v10, v8, 1
-; LMULMAX2-RV64I-NEXT: vor.vv v8, v8, v10
-; LMULMAX2-RV64I-NEXT: vsrl.vi v10, v8, 2
-; LMULMAX2-RV64I-NEXT: vor.vv v8, v8, v10
-; LMULMAX2-RV64I-NEXT: vsrl.vi v10, v8, 4
-; LMULMAX2-RV64I-NEXT: vor.vv v8, v8, v10
-; LMULMAX2-RV64I-NEXT: vsrl.vi v10, v8, 8
-; LMULMAX2-RV64I-NEXT: vor.vv v8, v8, v10
-; LMULMAX2-RV64I-NEXT: vsrl.vi v10, v8, 16
-; LMULMAX2-RV64I-NEXT: vor.vv v8, v8, v10
-; LMULMAX2-RV64I-NEXT: li a1, 32
-; LMULMAX2-RV64I-NEXT: vsrl.vx v10, v8, a1
-; LMULMAX2-RV64I-NEXT: vor.vv v8, v8, v10
-; LMULMAX2-RV64I-NEXT: vnot.v v8, v8
-; LMULMAX2-RV64I-NEXT: vsrl.vi v10, v8, 1
-; LMULMAX2-RV64I-NEXT: lui a1, 349525
-; LMULMAX2-RV64I-NEXT: addiw a1, a1, 1365
-; LMULMAX2-RV64I-NEXT: slli a2, a1, 32
-; LMULMAX2-RV64I-NEXT: add a1, a1, a2
-; LMULMAX2-RV64I-NEXT: vand.vx v10, v10, a1
-; LMULMAX2-RV64I-NEXT: vsub.vv v8, v8, v10
-; LMULMAX2-RV64I-NEXT: lui a1, 209715
-; LMULMAX2-RV64I-NEXT: addiw a1, a1, 819
-; LMULMAX2-RV64I-NEXT: slli a2, a1, 32
-; LMULMAX2-RV64I-NEXT: add a1, a1, a2
-; LMULMAX2-RV64I-NEXT: vand.vx v10, v8, a1
-; LMULMAX2-RV64I-NEXT: vsrl.vi v8, v8, 2
-; LMULMAX2-RV64I-NEXT: vand.vx v8, v8, a1
-; LMULMAX2-RV64I-NEXT: vadd.vv v8, v10, v8
-; LMULMAX2-RV64I-NEXT: vsrl.vi v10, v8, 4
-; LMULMAX2-RV64I-NEXT: vadd.vv v8, v8, v10
-; LMULMAX2-RV64I-NEXT: lui a1, 61681
-; LMULMAX2-RV64I-NEXT: addiw a1, a1, -241
-; LMULMAX2-RV64I-NEXT: slli a2, a1, 32
-; LMULMAX2-RV64I-NEXT: add a1, a1, a2
-; LMULMAX2-RV64I-NEXT: vand.vx v8, v8, a1
-; LMULMAX2-RV64I-NEXT: lui a1, 4112
-; LMULMAX2-RV64I-NEXT: addiw a1, a1, 257
-; LMULMAX2-RV64I-NEXT: slli a2, a1, 32
-; LMULMAX2-RV64I-NEXT: add a1, a1, a2
-; LMULMAX2-RV64I-NEXT: vmul.vx v8, v8, a1
-; LMULMAX2-RV64I-NEXT: li a1, 56
-; LMULMAX2-RV64I-NEXT: vsrl.vx v8, v8, a1
-; LMULMAX2-RV64I-NEXT: vse64.v v8, (a0)
-; LMULMAX2-RV64I-NEXT: ret
-;
-; LMULMAX2-RV32F-LABEL: ctlz_zero_undef_v4i64:
-; LMULMAX2-RV32F: # %bb.0:
-; LMULMAX2-RV32F-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; LMULMAX2-RV32F-NEXT: vle64.v v8, (a0)
-; LMULMAX2-RV32F-NEXT: li a1, 190
-; LMULMAX2-RV32F-NEXT: vmv.v.x v10, a1
-; LMULMAX2-RV32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma
-; LMULMAX2-RV32F-NEXT: fsrmi a1, 1
-; LMULMAX2-RV32F-NEXT: vfncvt.f.xu.w v12, v8
-; LMULMAX2-RV32F-NEXT: fsrm a1
-; LMULMAX2-RV32F-NEXT: vsrl.vi v8, v12, 23
-; LMULMAX2-RV32F-NEXT: vwsubu.wv v10, v10, v8
-; LMULMAX2-RV32F-NEXT: vse64.v v10, (a0)
-; LMULMAX2-RV32F-NEXT: ret
-;
-; LMULMAX2-RV64F-LABEL: ctlz_zero_undef_v4i64:
-; LMULMAX2-RV64F: # %bb.0:
-; LMULMAX2-RV64F-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; LMULMAX2-RV64F-NEXT: vle64.v v8, (a0)
-; LMULMAX2-RV64F-NEXT: li a1, 190
-; LMULMAX2-RV64F-NEXT: vmv.v.x v10, a1
-; LMULMAX2-RV64F-NEXT: fsrmi a1, 1
-; LMULMAX2-RV64F-NEXT: vfncvt.f.xu.w v11, v8
-; LMULMAX2-RV64F-NEXT: fsrm a1
-; LMULMAX2-RV64F-NEXT: vsrl.vi v8, v11, 23
-; LMULMAX2-RV64F-NEXT: vwsubu.vv v12, v10, v8
-; LMULMAX2-RV64F-NEXT: vse64.v v12, (a0)
-; LMULMAX2-RV64F-NEXT: ret
-;
-; LMULMAX2-RV32D-LABEL: ctlz_zero_undef_v4i64:
-; LMULMAX2-RV32D: # %bb.0:
-; LMULMAX2-RV32D-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; LMULMAX2-RV32D-NEXT: vle64.v v8, (a0)
-; LMULMAX2-RV32D-NEXT: fsrmi a1, 1
-; LMULMAX2-RV32D-NEXT: vfcvt.f.xu.v v8, v8
-; LMULMAX2-RV32D-NEXT: fsrm a1
-; LMULMAX2-RV32D-NEXT: li a1, 52
-; LMULMAX2-RV32D-NEXT: vsrl.vx v8, v8, a1
-; LMULMAX2-RV32D-NEXT: li a1, 1086
-; LMULMAX2-RV32D-NEXT: vrsub.vx v8, v8, a1
-; LMULMAX2-RV32D-NEXT: vse64.v v8, (a0)
-; LMULMAX2-RV32D-NEXT: ret
-;
-; LMULMAX2-RV64D-LABEL: ctlz_zero_undef_v4i64:
-; LMULMAX2-RV64D: # %bb.0:
-; LMULMAX2-RV64D-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; LMULMAX2-RV64D-NEXT: vle64.v v8, (a0)
-; LMULMAX2-RV64D-NEXT: fsrmi a1, 1
-; LMULMAX2-RV64D-NEXT: vfcvt.f.xu.v v8, v8
-; LMULMAX2-RV64D-NEXT: fsrm a1
-; LMULMAX2-RV64D-NEXT: li a1, 52
-; LMULMAX2-RV64D-NEXT: vsrl.vx v8, v8, a1
-; LMULMAX2-RV64D-NEXT: li a1, 1086
-; LMULMAX2-RV64D-NEXT: vrsub.vx v8, v8, a1
-; LMULMAX2-RV64D-NEXT: vse64.v v8, (a0)
-; LMULMAX2-RV64D-NEXT: ret
-;
-; LMULMAX8-LABEL: ctlz_zero_undef_v4i64:
-; LMULMAX8: # %bb.0:
-; LMULMAX8-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; LMULMAX8-NEXT: vle64.v v8, (a0)
-; LMULMAX8-NEXT: fsrmi a1, 1
-; LMULMAX8-NEXT: vfcvt.f.xu.v v8, v8
-; LMULMAX8-NEXT: fsrm a1
-; LMULMAX8-NEXT: li a1, 52
-; LMULMAX8-NEXT: vsrl.vx v8, v8, a1
-; LMULMAX8-NEXT: li a1, 1086
-; LMULMAX8-NEXT: vrsub.vx v8, v8, a1
-; LMULMAX8-NEXT: vse64.v v8, (a0)
-; LMULMAX8-NEXT: ret
+; RV32I-LABEL: ctlz_zero_undef_v4i64:
+; RV32I: # %bb.0:
+; RV32I-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; RV32I-NEXT: vle64.v v8, (a0)
+; RV32I-NEXT: vsrl.vi v10, v8, 1
+; RV32I-NEXT: vor.vv v8, v8, v10
+; RV32I-NEXT: vsrl.vi v10, v8, 2
+; RV32I-NEXT: vor.vv v8, v8, v10
+; RV32I-NEXT: vsrl.vi v10, v8, 4
+; RV32I-NEXT: vor.vv v8, v8, v10
+; RV32I-NEXT: vsrl.vi v10, v8, 8
+; RV32I-NEXT: vor.vv v8, v8, v10
+; RV32I-NEXT: vsrl.vi v10, v8, 16
+; RV32I-NEXT: vor.vv v8, v8, v10
+; RV32I-NEXT: li a1, 32
+; RV32I-NEXT: vsrl.vx v10, v8, a1
+; RV32I-NEXT: vor.vv v8, v8, v10
+; RV32I-NEXT: vnot.v v8, v8
+; RV32I-NEXT: vsrl.vi v10, v8, 1
+; RV32I-NEXT: lui a1, 349525
+; RV32I-NEXT: addi a1, a1, 1365
+; RV32I-NEXT: vsetvli a2, zero, e32, m2, ta, ma
+; RV32I-NEXT: vmv.v.x v12, a1
+; RV32I-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; RV32I-NEXT: vand.vv v10, v10, v12
+; RV32I-NEXT: vsub.vv v8, v8, v10
+; RV32I-NEXT: lui a1, 209715
+; RV32I-NEXT: addi a1, a1, 819
+; RV32I-NEXT: vsetvli a2, zero, e32, m2, ta, ma
+; RV32I-NEXT: vmv.v.x v10, a1
+; RV32I-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; RV32I-NEXT: vand.vv v12, v8, v10
+; RV32I-NEXT: vsrl.vi v8, v8, 2
+; RV32I-NEXT: vand.vv v8, v8, v10
+; RV32I-NEXT: vadd.vv v8, v12, v8
+; RV32I-NEXT: vsrl.vi v10, v8, 4
+; RV32I-NEXT: vadd.vv v8, v8, v10
+; RV32I-NEXT: lui a1, 61681
+; RV32I-NEXT: addi a1, a1, -241
+; RV32I-NEXT: vsetvli a2, zero, e32, m2, ta, ma
+; RV32I-NEXT: vmv.v.x v10, a1
+; RV32I-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; RV32I-NEXT: vand.vv v8, v8, v10
+; RV32I-NEXT: lui a1, 4112
+; RV32I-NEXT: addi a1, a1, 257
+; RV32I-NEXT: vsetvli a2, zero, e32, m2, ta, ma
+; RV32I-NEXT: vmv.v.x v10, a1
+; RV32I-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; RV32I-NEXT: vmul.vv v8, v8, v10
+; RV32I-NEXT: li a1, 56
+; RV32I-NEXT: vsrl.vx v8, v8, a1
+; RV32I-NEXT: vse64.v v8, (a0)
+; RV32I-NEXT: ret
+;
+; RV64I-LABEL: ctlz_zero_undef_v4i64:
+; RV64I: # %bb.0:
+; RV64I-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; RV64I-NEXT: vle64.v v8, (a0)
+; RV64I-NEXT: vsrl.vi v10, v8, 1
+; RV64I-NEXT: vor.vv v8, v8, v10
+; RV64I-NEXT: vsrl.vi v10, v8, 2
+; RV64I-NEXT: vor.vv v8, v8, v10
+; RV64I-NEXT: vsrl.vi v10, v8, 4
+; RV64I-NEXT: vor.vv v8, v8, v10
+; RV64I-NEXT: vsrl.vi v10, v8, 8
+; RV64I-NEXT: vor.vv v8, v8, v10
+; RV64I-NEXT: vsrl.vi v10, v8, 16
+; RV64I-NEXT: vor.vv v8, v8, v10
+; RV64I-NEXT: li a1, 32
+; RV64I-NEXT: vsrl.vx v10, v8, a1
+; RV64I-NEXT: vor.vv v8, v8, v10
+; RV64I-NEXT: vnot.v v8, v8
+; RV64I-NEXT: vsrl.vi v10, v8, 1
+; RV64I-NEXT: lui a1, 349525
+; RV64I-NEXT: addiw a1, a1, 1365
+; RV64I-NEXT: slli a2, a1, 32
+; RV64I-NEXT: add a1, a1, a2
+; RV64I-NEXT: vand.vx v10, v10, a1
+; RV64I-NEXT: vsub.vv v8, v8, v10
+; RV64I-NEXT: lui a1, 209715
+; RV64I-NEXT: addiw a1, a1, 819
+; RV64I-NEXT: slli a2, a1, 32
+; RV64I-NEXT: add a1, a1, a2
+; RV64I-NEXT: vand.vx v10, v8, a1
+; RV64I-NEXT: vsrl.vi v8, v8, 2
+; RV64I-NEXT: vand.vx v8, v8, a1
+; RV64I-NEXT: vadd.vv v8, v10, v8
+; RV64I-NEXT: vsrl.vi v10, v8, 4
+; RV64I-NEXT: vadd.vv v8, v8, v10
+; RV64I-NEXT: lui a1, 61681
+; RV64I-NEXT: addiw a1, a1, -241
+; RV64I-NEXT: slli a2, a1, 32
+; RV64I-NEXT: add a1, a1, a2
+; RV64I-NEXT: vand.vx v8, v8, a1
+; RV64I-NEXT: lui a1, 4112
+; RV64I-NEXT: addiw a1, a1, 257
+; RV64I-NEXT: slli a2, a1, 32
+; RV64I-NEXT: add a1, a1, a2
+; RV64I-NEXT: vmul.vx v8, v8, a1
+; RV64I-NEXT: li a1, 56
+; RV64I-NEXT: vsrl.vx v8, v8, a1
+; RV64I-NEXT: vse64.v v8, (a0)
+; RV64I-NEXT: ret
+;
+; RV32F-LABEL: ctlz_zero_undef_v4i64:
+; RV32F: # %bb.0:
+; RV32F-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; RV32F-NEXT: vle64.v v8, (a0)
+; RV32F-NEXT: li a1, 190
+; RV32F-NEXT: vmv.v.x v10, a1
+; RV32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; RV32F-NEXT: fsrmi a1, 1
+; RV32F-NEXT: vfncvt.f.xu.w v12, v8
+; RV32F-NEXT: fsrm a1
+; RV32F-NEXT: vsrl.vi v8, v12, 23
+; RV32F-NEXT: vwsubu.wv v10, v10, v8
+; RV32F-NEXT: vse64.v v10, (a0)
+; RV32F-NEXT: ret
+;
+; RV64F-LABEL: ctlz_zero_undef_v4i64:
+; RV64F: # %bb.0:
+; RV64F-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; RV64F-NEXT: vle64.v v8, (a0)
+; RV64F-NEXT: li a1, 190
+; RV64F-NEXT: vmv.v.x v10, a1
+; RV64F-NEXT: fsrmi a1, 1
+; RV64F-NEXT: vfncvt.f.xu.w v11, v8
+; RV64F-NEXT: fsrm a1
+; RV64F-NEXT: vsrl.vi v8, v11, 23
+; RV64F-NEXT: vwsubu.vv v12, v10, v8
+; RV64F-NEXT: vse64.v v12, (a0)
+; RV64F-NEXT: ret
+;
+; RVD-LABEL: ctlz_zero_undef_v4i64:
+; RVD: # %bb.0:
+; RVD-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; RVD-NEXT: vle64.v v8, (a0)
+; RVD-NEXT: fsrmi a1, 1
+; RVD-NEXT: vfcvt.f.xu.v v8, v8
+; RVD-NEXT: fsrm a1
+; RVD-NEXT: li a1, 52
+; RVD-NEXT: vsrl.vx v8, v8, a1
+; RVD-NEXT: li a1, 1086
+; RVD-NEXT: vrsub.vx v8, v8, a1
+; RVD-NEXT: vse64.v v8, (a0)
+; RVD-NEXT: ret
;
; ZVBB-LABEL: ctlz_zero_undef_v4i64:
; ZVBB: # %bb.0:
@@ -2366,7 +1588,5 @@ define void @ctlz_zero_undef_v4i64(ptr %x, ptr %y) nounwind {
ret void
}
;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
-; LMULMAX1-RV32: {{.*}}
-; LMULMAX1-RV64: {{.*}}
-; LMULMAX2-RV32: {{.*}}
-; LMULMAX2-RV64: {{.*}}
+; RV32D: {{.*}}
+; RV64D: {{.*}}
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ctpop.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ctpop.ll
index 5e0c99f..147f560 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ctpop.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ctpop.ll
@@ -1,8 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+m,+v -riscv-v-fixed-length-vector-lmul-max=2 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,LMULMAX2,LMULMAX2-RV32
-; RUN: llc -mtriple=riscv64 -mattr=+m,+v -riscv-v-fixed-length-vector-lmul-max=2 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,LMULMAX2,LMULMAX2-RV64
-; RUN: llc -mtriple=riscv32 -mattr=+m,+v -riscv-v-fixed-length-vector-lmul-max=1 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,LMULMAX1,LMULMAX1-RV32
-; RUN: llc -mtriple=riscv64 -mattr=+m,+v -riscv-v-fixed-length-vector-lmul-max=1 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,LMULMAX1,LMULMAX1-RV64
+; RUN: llc -mtriple=riscv32 -mattr=+m,+v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32
+; RUN: llc -mtriple=riscv64 -mattr=+m,+v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64
; RUN: llc -mtriple=riscv32 -mattr=+v,+zvbb -verify-machineinstrs < %s | FileCheck %s --check-prefixes=ZVBB
; RUN: llc -mtriple=riscv64 -mattr=+v,+zvbb -verify-machineinstrs < %s | FileCheck %s --check-prefixes=ZVBB
@@ -127,157 +125,81 @@ define void @ctpop_v4i32(ptr %x, ptr %y) {
declare <4 x i32> @llvm.ctpop.v4i32(<4 x i32>)
define void @ctpop_v2i64(ptr %x, ptr %y) {
-; LMULMAX2-RV32-LABEL: ctpop_v2i64:
-; LMULMAX2-RV32: # %bb.0:
-; LMULMAX2-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; LMULMAX2-RV32-NEXT: vle64.v v8, (a0)
-; LMULMAX2-RV32-NEXT: lui a1, 349525
-; LMULMAX2-RV32-NEXT: addi a1, a1, 1365
-; LMULMAX2-RV32-NEXT: vsetvli a2, zero, e32, m1, ta, ma
-; LMULMAX2-RV32-NEXT: vmv.v.x v9, a1
-; LMULMAX2-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; LMULMAX2-RV32-NEXT: vsrl.vi v10, v8, 1
-; LMULMAX2-RV32-NEXT: vand.vv v9, v10, v9
-; LMULMAX2-RV32-NEXT: vsub.vv v8, v8, v9
-; LMULMAX2-RV32-NEXT: lui a1, 209715
-; LMULMAX2-RV32-NEXT: addi a1, a1, 819
-; LMULMAX2-RV32-NEXT: vsetvli a2, zero, e32, m1, ta, ma
-; LMULMAX2-RV32-NEXT: vmv.v.x v9, a1
-; LMULMAX2-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; LMULMAX2-RV32-NEXT: vand.vv v10, v8, v9
-; LMULMAX2-RV32-NEXT: vsrl.vi v8, v8, 2
-; LMULMAX2-RV32-NEXT: vand.vv v8, v8, v9
-; LMULMAX2-RV32-NEXT: vadd.vv v8, v10, v8
-; LMULMAX2-RV32-NEXT: vsrl.vi v9, v8, 4
-; LMULMAX2-RV32-NEXT: vadd.vv v8, v8, v9
-; LMULMAX2-RV32-NEXT: lui a1, 61681
-; LMULMAX2-RV32-NEXT: addi a1, a1, -241
-; LMULMAX2-RV32-NEXT: vsetvli a2, zero, e32, m1, ta, ma
-; LMULMAX2-RV32-NEXT: vmv.v.x v9, a1
-; LMULMAX2-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; LMULMAX2-RV32-NEXT: vand.vv v8, v8, v9
-; LMULMAX2-RV32-NEXT: lui a1, 4112
-; LMULMAX2-RV32-NEXT: addi a1, a1, 257
-; LMULMAX2-RV32-NEXT: vsetvli a2, zero, e32, m1, ta, ma
-; LMULMAX2-RV32-NEXT: vmv.v.x v9, a1
-; LMULMAX2-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; LMULMAX2-RV32-NEXT: vmul.vv v8, v8, v9
-; LMULMAX2-RV32-NEXT: li a1, 56
-; LMULMAX2-RV32-NEXT: vsrl.vx v8, v8, a1
-; LMULMAX2-RV32-NEXT: vse64.v v8, (a0)
-; LMULMAX2-RV32-NEXT: ret
-;
-; LMULMAX2-RV64-LABEL: ctpop_v2i64:
-; LMULMAX2-RV64: # %bb.0:
-; LMULMAX2-RV64-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; LMULMAX2-RV64-NEXT: vle64.v v8, (a0)
-; LMULMAX2-RV64-NEXT: vsrl.vi v9, v8, 1
-; LMULMAX2-RV64-NEXT: lui a1, 349525
-; LMULMAX2-RV64-NEXT: addiw a1, a1, 1365
-; LMULMAX2-RV64-NEXT: slli a2, a1, 32
-; LMULMAX2-RV64-NEXT: add a1, a1, a2
-; LMULMAX2-RV64-NEXT: vand.vx v9, v9, a1
-; LMULMAX2-RV64-NEXT: vsub.vv v8, v8, v9
-; LMULMAX2-RV64-NEXT: lui a1, 209715
-; LMULMAX2-RV64-NEXT: addiw a1, a1, 819
-; LMULMAX2-RV64-NEXT: slli a2, a1, 32
-; LMULMAX2-RV64-NEXT: add a1, a1, a2
-; LMULMAX2-RV64-NEXT: vand.vx v9, v8, a1
-; LMULMAX2-RV64-NEXT: vsrl.vi v8, v8, 2
-; LMULMAX2-RV64-NEXT: vand.vx v8, v8, a1
-; LMULMAX2-RV64-NEXT: vadd.vv v8, v9, v8
-; LMULMAX2-RV64-NEXT: vsrl.vi v9, v8, 4
-; LMULMAX2-RV64-NEXT: vadd.vv v8, v8, v9
-; LMULMAX2-RV64-NEXT: lui a1, 61681
-; LMULMAX2-RV64-NEXT: addiw a1, a1, -241
-; LMULMAX2-RV64-NEXT: slli a2, a1, 32
-; LMULMAX2-RV64-NEXT: add a1, a1, a2
-; LMULMAX2-RV64-NEXT: vand.vx v8, v8, a1
-; LMULMAX2-RV64-NEXT: lui a1, 4112
-; LMULMAX2-RV64-NEXT: addiw a1, a1, 257
-; LMULMAX2-RV64-NEXT: slli a2, a1, 32
-; LMULMAX2-RV64-NEXT: add a1, a1, a2
-; LMULMAX2-RV64-NEXT: vmul.vx v8, v8, a1
-; LMULMAX2-RV64-NEXT: li a1, 56
-; LMULMAX2-RV64-NEXT: vsrl.vx v8, v8, a1
-; LMULMAX2-RV64-NEXT: vse64.v v8, (a0)
-; LMULMAX2-RV64-NEXT: ret
-;
-; LMULMAX1-RV32-LABEL: ctpop_v2i64:
-; LMULMAX1-RV32: # %bb.0:
-; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; LMULMAX1-RV32-NEXT: vle64.v v8, (a0)
-; LMULMAX1-RV32-NEXT: lui a1, 349525
-; LMULMAX1-RV32-NEXT: addi a1, a1, 1365
-; LMULMAX1-RV32-NEXT: vsetvli a2, zero, e32, m1, ta, ma
-; LMULMAX1-RV32-NEXT: vmv.v.x v9, a1
-; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; LMULMAX1-RV32-NEXT: vsrl.vi v10, v8, 1
-; LMULMAX1-RV32-NEXT: vand.vv v9, v10, v9
-; LMULMAX1-RV32-NEXT: vsub.vv v8, v8, v9
-; LMULMAX1-RV32-NEXT: lui a1, 209715
-; LMULMAX1-RV32-NEXT: addi a1, a1, 819
-; LMULMAX1-RV32-NEXT: vsetvli a2, zero, e32, m1, ta, ma
-; LMULMAX1-RV32-NEXT: vmv.v.x v9, a1
-; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; LMULMAX1-RV32-NEXT: vand.vv v10, v8, v9
-; LMULMAX1-RV32-NEXT: vsrl.vi v8, v8, 2
-; LMULMAX1-RV32-NEXT: vand.vv v8, v8, v9
-; LMULMAX1-RV32-NEXT: vadd.vv v8, v10, v8
-; LMULMAX1-RV32-NEXT: vsrl.vi v9, v8, 4
-; LMULMAX1-RV32-NEXT: vadd.vv v8, v8, v9
-; LMULMAX1-RV32-NEXT: lui a1, 61681
-; LMULMAX1-RV32-NEXT: addi a1, a1, -241
-; LMULMAX1-RV32-NEXT: vsetvli a2, zero, e32, m1, ta, ma
-; LMULMAX1-RV32-NEXT: vmv.v.x v9, a1
-; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; LMULMAX1-RV32-NEXT: vand.vv v8, v8, v9
-; LMULMAX1-RV32-NEXT: lui a1, 4112
-; LMULMAX1-RV32-NEXT: addi a1, a1, 257
-; LMULMAX1-RV32-NEXT: vsetvli a2, zero, e32, m1, ta, ma
-; LMULMAX1-RV32-NEXT: vmv.v.x v9, a1
-; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; LMULMAX1-RV32-NEXT: vmul.vv v8, v8, v9
-; LMULMAX1-RV32-NEXT: li a1, 56
-; LMULMAX1-RV32-NEXT: vsrl.vx v8, v8, a1
-; LMULMAX1-RV32-NEXT: vse64.v v8, (a0)
-; LMULMAX1-RV32-NEXT: ret
-;
-; LMULMAX1-RV64-LABEL: ctpop_v2i64:
-; LMULMAX1-RV64: # %bb.0:
-; LMULMAX1-RV64-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; LMULMAX1-RV64-NEXT: vle64.v v8, (a0)
-; LMULMAX1-RV64-NEXT: vsrl.vi v9, v8, 1
-; LMULMAX1-RV64-NEXT: lui a1, 349525
-; LMULMAX1-RV64-NEXT: addiw a1, a1, 1365
-; LMULMAX1-RV64-NEXT: slli a2, a1, 32
-; LMULMAX1-RV64-NEXT: add a1, a1, a2
-; LMULMAX1-RV64-NEXT: vand.vx v9, v9, a1
-; LMULMAX1-RV64-NEXT: vsub.vv v8, v8, v9
-; LMULMAX1-RV64-NEXT: lui a1, 209715
-; LMULMAX1-RV64-NEXT: addiw a1, a1, 819
-; LMULMAX1-RV64-NEXT: slli a2, a1, 32
-; LMULMAX1-RV64-NEXT: add a1, a1, a2
-; LMULMAX1-RV64-NEXT: vand.vx v9, v8, a1
-; LMULMAX1-RV64-NEXT: vsrl.vi v8, v8, 2
-; LMULMAX1-RV64-NEXT: vand.vx v8, v8, a1
-; LMULMAX1-RV64-NEXT: vadd.vv v8, v9, v8
-; LMULMAX1-RV64-NEXT: vsrl.vi v9, v8, 4
-; LMULMAX1-RV64-NEXT: vadd.vv v8, v8, v9
-; LMULMAX1-RV64-NEXT: lui a1, 61681
-; LMULMAX1-RV64-NEXT: addiw a1, a1, -241
-; LMULMAX1-RV64-NEXT: slli a2, a1, 32
-; LMULMAX1-RV64-NEXT: add a1, a1, a2
-; LMULMAX1-RV64-NEXT: vand.vx v8, v8, a1
-; LMULMAX1-RV64-NEXT: lui a1, 4112
-; LMULMAX1-RV64-NEXT: addiw a1, a1, 257
-; LMULMAX1-RV64-NEXT: slli a2, a1, 32
-; LMULMAX1-RV64-NEXT: add a1, a1, a2
-; LMULMAX1-RV64-NEXT: vmul.vx v8, v8, a1
-; LMULMAX1-RV64-NEXT: li a1, 56
-; LMULMAX1-RV64-NEXT: vsrl.vx v8, v8, a1
-; LMULMAX1-RV64-NEXT: vse64.v v8, (a0)
-; LMULMAX1-RV64-NEXT: ret
+; RV32-LABEL: ctpop_v2i64:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; RV32-NEXT: vle64.v v8, (a0)
+; RV32-NEXT: lui a1, 349525
+; RV32-NEXT: addi a1, a1, 1365
+; RV32-NEXT: vsetvli a2, zero, e32, m1, ta, ma
+; RV32-NEXT: vmv.v.x v9, a1
+; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; RV32-NEXT: vsrl.vi v10, v8, 1
+; RV32-NEXT: vand.vv v9, v10, v9
+; RV32-NEXT: vsub.vv v8, v8, v9
+; RV32-NEXT: lui a1, 209715
+; RV32-NEXT: addi a1, a1, 819
+; RV32-NEXT: vsetvli a2, zero, e32, m1, ta, ma
+; RV32-NEXT: vmv.v.x v9, a1
+; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; RV32-NEXT: vand.vv v10, v8, v9
+; RV32-NEXT: vsrl.vi v8, v8, 2
+; RV32-NEXT: vand.vv v8, v8, v9
+; RV32-NEXT: vadd.vv v8, v10, v8
+; RV32-NEXT: vsrl.vi v9, v8, 4
+; RV32-NEXT: vadd.vv v8, v8, v9
+; RV32-NEXT: lui a1, 61681
+; RV32-NEXT: addi a1, a1, -241
+; RV32-NEXT: vsetvli a2, zero, e32, m1, ta, ma
+; RV32-NEXT: vmv.v.x v9, a1
+; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; RV32-NEXT: vand.vv v8, v8, v9
+; RV32-NEXT: lui a1, 4112
+; RV32-NEXT: addi a1, a1, 257
+; RV32-NEXT: vsetvli a2, zero, e32, m1, ta, ma
+; RV32-NEXT: vmv.v.x v9, a1
+; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; RV32-NEXT: vmul.vv v8, v8, v9
+; RV32-NEXT: li a1, 56
+; RV32-NEXT: vsrl.vx v8, v8, a1
+; RV32-NEXT: vse64.v v8, (a0)
+; RV32-NEXT: ret
+;
+; RV64-LABEL: ctpop_v2i64:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; RV64-NEXT: vle64.v v8, (a0)
+; RV64-NEXT: vsrl.vi v9, v8, 1
+; RV64-NEXT: lui a1, 349525
+; RV64-NEXT: addiw a1, a1, 1365
+; RV64-NEXT: slli a2, a1, 32
+; RV64-NEXT: add a1, a1, a2
+; RV64-NEXT: vand.vx v9, v9, a1
+; RV64-NEXT: vsub.vv v8, v8, v9
+; RV64-NEXT: lui a1, 209715
+; RV64-NEXT: addiw a1, a1, 819
+; RV64-NEXT: slli a2, a1, 32
+; RV64-NEXT: add a1, a1, a2
+; RV64-NEXT: vand.vx v9, v8, a1
+; RV64-NEXT: vsrl.vi v8, v8, 2
+; RV64-NEXT: vand.vx v8, v8, a1
+; RV64-NEXT: vadd.vv v8, v9, v8
+; RV64-NEXT: vsrl.vi v9, v8, 4
+; RV64-NEXT: vadd.vv v8, v8, v9
+; RV64-NEXT: lui a1, 61681
+; RV64-NEXT: addiw a1, a1, -241
+; RV64-NEXT: slli a2, a1, 32
+; RV64-NEXT: add a1, a1, a2
+; RV64-NEXT: vand.vx v8, v8, a1
+; RV64-NEXT: lui a1, 4112
+; RV64-NEXT: addiw a1, a1, 257
+; RV64-NEXT: slli a2, a1, 32
+; RV64-NEXT: add a1, a1, a2
+; RV64-NEXT: vmul.vx v8, v8, a1
+; RV64-NEXT: li a1, 56
+; RV64-NEXT: vsrl.vx v8, v8, a1
+; RV64-NEXT: vse64.v v8, (a0)
+; RV64-NEXT: ret
;
; ZVBB-LABEL: ctpop_v2i64:
; ZVBB: # %bb.0:
@@ -295,57 +217,25 @@ define void @ctpop_v2i64(ptr %x, ptr %y) {
declare <2 x i64> @llvm.ctpop.v2i64(<2 x i64>)
define void @ctpop_v32i8(ptr %x, ptr %y) {
-; LMULMAX2-LABEL: ctpop_v32i8:
-; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: li a1, 32
-; LMULMAX2-NEXT: vsetvli zero, a1, e8, m2, ta, ma
-; LMULMAX2-NEXT: vle8.v v8, (a0)
-; LMULMAX2-NEXT: vsrl.vi v10, v8, 1
-; LMULMAX2-NEXT: li a1, 85
-; LMULMAX2-NEXT: vand.vx v10, v10, a1
-; LMULMAX2-NEXT: vsub.vv v8, v8, v10
-; LMULMAX2-NEXT: li a1, 51
-; LMULMAX2-NEXT: vand.vx v10, v8, a1
-; LMULMAX2-NEXT: vsrl.vi v8, v8, 2
-; LMULMAX2-NEXT: vand.vx v8, v8, a1
-; LMULMAX2-NEXT: vadd.vv v8, v10, v8
-; LMULMAX2-NEXT: vsrl.vi v10, v8, 4
-; LMULMAX2-NEXT: vadd.vv v8, v8, v10
-; LMULMAX2-NEXT: vand.vi v8, v8, 15
-; LMULMAX2-NEXT: vse8.v v8, (a0)
-; LMULMAX2-NEXT: ret
-;
-; LMULMAX1-LABEL: ctpop_v32i8:
-; LMULMAX1: # %bb.0:
-; LMULMAX1-NEXT: vsetivli zero, 16, e8, m1, ta, ma
-; LMULMAX1-NEXT: addi a1, a0, 16
-; LMULMAX1-NEXT: vle8.v v8, (a1)
-; LMULMAX1-NEXT: vle8.v v9, (a0)
-; LMULMAX1-NEXT: vsrl.vi v10, v8, 1
-; LMULMAX1-NEXT: li a2, 85
-; LMULMAX1-NEXT: vand.vx v10, v10, a2
-; LMULMAX1-NEXT: vsub.vv v8, v8, v10
-; LMULMAX1-NEXT: li a3, 51
-; LMULMAX1-NEXT: vand.vx v10, v8, a3
-; LMULMAX1-NEXT: vsrl.vi v8, v8, 2
-; LMULMAX1-NEXT: vand.vx v8, v8, a3
-; LMULMAX1-NEXT: vadd.vv v8, v10, v8
-; LMULMAX1-NEXT: vsrl.vi v10, v8, 4
-; LMULMAX1-NEXT: vadd.vv v8, v8, v10
-; LMULMAX1-NEXT: vand.vi v8, v8, 15
-; LMULMAX1-NEXT: vsrl.vi v10, v9, 1
-; LMULMAX1-NEXT: vand.vx v10, v10, a2
-; LMULMAX1-NEXT: vsub.vv v9, v9, v10
-; LMULMAX1-NEXT: vand.vx v10, v9, a3
-; LMULMAX1-NEXT: vsrl.vi v9, v9, 2
-; LMULMAX1-NEXT: vand.vx v9, v9, a3
-; LMULMAX1-NEXT: vadd.vv v9, v10, v9
-; LMULMAX1-NEXT: vsrl.vi v10, v9, 4
-; LMULMAX1-NEXT: vadd.vv v9, v9, v10
-; LMULMAX1-NEXT: vand.vi v9, v9, 15
-; LMULMAX1-NEXT: vse8.v v9, (a0)
-; LMULMAX1-NEXT: vse8.v v8, (a1)
-; LMULMAX1-NEXT: ret
+; CHECK-LABEL: ctpop_v32i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a1, 32
+; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
+; CHECK-NEXT: vle8.v v8, (a0)
+; CHECK-NEXT: vsrl.vi v10, v8, 1
+; CHECK-NEXT: li a1, 85
+; CHECK-NEXT: vand.vx v10, v10, a1
+; CHECK-NEXT: vsub.vv v8, v8, v10
+; CHECK-NEXT: li a1, 51
+; CHECK-NEXT: vand.vx v10, v8, a1
+; CHECK-NEXT: vsrl.vi v8, v8, 2
+; CHECK-NEXT: vand.vx v8, v8, a1
+; CHECK-NEXT: vadd.vv v8, v10, v8
+; CHECK-NEXT: vsrl.vi v10, v8, 4
+; CHECK-NEXT: vadd.vv v8, v8, v10
+; CHECK-NEXT: vand.vi v8, v8, 15
+; CHECK-NEXT: vse8.v v8, (a0)
+; CHECK-NEXT: ret
;
; ZVBB-LABEL: ctpop_v32i8:
; ZVBB: # %bb.0:
@@ -364,72 +254,31 @@ define void @ctpop_v32i8(ptr %x, ptr %y) {
declare <32 x i8> @llvm.ctpop.v32i8(<32 x i8>)
define void @ctpop_v16i16(ptr %x, ptr %y) {
-; LMULMAX2-LABEL: ctpop_v16i16:
-; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; LMULMAX2-NEXT: vle16.v v8, (a0)
-; LMULMAX2-NEXT: vsrl.vi v10, v8, 1
-; LMULMAX2-NEXT: lui a1, 5
-; LMULMAX2-NEXT: addi a1, a1, 1365
-; LMULMAX2-NEXT: vand.vx v10, v10, a1
-; LMULMAX2-NEXT: vsub.vv v8, v8, v10
-; LMULMAX2-NEXT: lui a1, 3
-; LMULMAX2-NEXT: addi a1, a1, 819
-; LMULMAX2-NEXT: vand.vx v10, v8, a1
-; LMULMAX2-NEXT: vsrl.vi v8, v8, 2
-; LMULMAX2-NEXT: vand.vx v8, v8, a1
-; LMULMAX2-NEXT: vadd.vv v8, v10, v8
-; LMULMAX2-NEXT: vsrl.vi v10, v8, 4
-; LMULMAX2-NEXT: vadd.vv v8, v8, v10
-; LMULMAX2-NEXT: lui a1, 1
-; LMULMAX2-NEXT: addi a1, a1, -241
-; LMULMAX2-NEXT: vand.vx v8, v8, a1
-; LMULMAX2-NEXT: li a1, 257
-; LMULMAX2-NEXT: vmul.vx v8, v8, a1
-; LMULMAX2-NEXT: vsrl.vi v8, v8, 8
-; LMULMAX2-NEXT: vse16.v v8, (a0)
-; LMULMAX2-NEXT: ret
-;
-; LMULMAX1-LABEL: ctpop_v16i16:
-; LMULMAX1: # %bb.0:
-; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; LMULMAX1-NEXT: addi a1, a0, 16
-; LMULMAX1-NEXT: vle16.v v8, (a1)
-; LMULMAX1-NEXT: vle16.v v9, (a0)
-; LMULMAX1-NEXT: vsrl.vi v10, v8, 1
-; LMULMAX1-NEXT: lui a2, 5
-; LMULMAX1-NEXT: addi a2, a2, 1365
-; LMULMAX1-NEXT: vand.vx v10, v10, a2
-; LMULMAX1-NEXT: vsub.vv v8, v8, v10
-; LMULMAX1-NEXT: lui a3, 3
-; LMULMAX1-NEXT: addi a3, a3, 819
-; LMULMAX1-NEXT: vand.vx v10, v8, a3
-; LMULMAX1-NEXT: vsrl.vi v8, v8, 2
-; LMULMAX1-NEXT: vand.vx v8, v8, a3
-; LMULMAX1-NEXT: vadd.vv v8, v10, v8
-; LMULMAX1-NEXT: vsrl.vi v10, v8, 4
-; LMULMAX1-NEXT: vadd.vv v8, v8, v10
-; LMULMAX1-NEXT: lui a4, 1
-; LMULMAX1-NEXT: addi a4, a4, -241
-; LMULMAX1-NEXT: vand.vx v8, v8, a4
-; LMULMAX1-NEXT: li a5, 257
-; LMULMAX1-NEXT: vmul.vx v8, v8, a5
-; LMULMAX1-NEXT: vsrl.vi v8, v8, 8
-; LMULMAX1-NEXT: vsrl.vi v10, v9, 1
-; LMULMAX1-NEXT: vand.vx v10, v10, a2
-; LMULMAX1-NEXT: vsub.vv v9, v9, v10
-; LMULMAX1-NEXT: vand.vx v10, v9, a3
-; LMULMAX1-NEXT: vsrl.vi v9, v9, 2
-; LMULMAX1-NEXT: vand.vx v9, v9, a3
-; LMULMAX1-NEXT: vadd.vv v9, v10, v9
-; LMULMAX1-NEXT: vsrl.vi v10, v9, 4
-; LMULMAX1-NEXT: vadd.vv v9, v9, v10
-; LMULMAX1-NEXT: vand.vx v9, v9, a4
-; LMULMAX1-NEXT: vmul.vx v9, v9, a5
-; LMULMAX1-NEXT: vsrl.vi v9, v9, 8
-; LMULMAX1-NEXT: vse16.v v9, (a0)
-; LMULMAX1-NEXT: vse16.v v8, (a1)
-; LMULMAX1-NEXT: ret
+; CHECK-LABEL: ctpop_v16i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; CHECK-NEXT: vle16.v v8, (a0)
+; CHECK-NEXT: vsrl.vi v10, v8, 1
+; CHECK-NEXT: lui a1, 5
+; CHECK-NEXT: addi a1, a1, 1365
+; CHECK-NEXT: vand.vx v10, v10, a1
+; CHECK-NEXT: vsub.vv v8, v8, v10
+; CHECK-NEXT: lui a1, 3
+; CHECK-NEXT: addi a1, a1, 819
+; CHECK-NEXT: vand.vx v10, v8, a1
+; CHECK-NEXT: vsrl.vi v8, v8, 2
+; CHECK-NEXT: vand.vx v8, v8, a1
+; CHECK-NEXT: vadd.vv v8, v10, v8
+; CHECK-NEXT: vsrl.vi v10, v8, 4
+; CHECK-NEXT: vadd.vv v8, v8, v10
+; CHECK-NEXT: lui a1, 1
+; CHECK-NEXT: addi a1, a1, -241
+; CHECK-NEXT: vand.vx v8, v8, a1
+; CHECK-NEXT: li a1, 257
+; CHECK-NEXT: vmul.vx v8, v8, a1
+; CHECK-NEXT: vsrl.vi v8, v8, 8
+; CHECK-NEXT: vse16.v v8, (a0)
+; CHECK-NEXT: ret
;
; ZVBB-LABEL: ctpop_v16i16:
; ZVBB: # %bb.0:
@@ -447,74 +296,32 @@ define void @ctpop_v16i16(ptr %x, ptr %y) {
declare <16 x i16> @llvm.ctpop.v16i16(<16 x i16>)
define void @ctpop_v8i32(ptr %x, ptr %y) {
-; LMULMAX2-LABEL: ctpop_v8i32:
-; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; LMULMAX2-NEXT: vle32.v v8, (a0)
-; LMULMAX2-NEXT: vsrl.vi v10, v8, 1
-; LMULMAX2-NEXT: lui a1, 349525
-; LMULMAX2-NEXT: addi a1, a1, 1365
-; LMULMAX2-NEXT: vand.vx v10, v10, a1
-; LMULMAX2-NEXT: vsub.vv v8, v8, v10
-; LMULMAX2-NEXT: lui a1, 209715
-; LMULMAX2-NEXT: addi a1, a1, 819
-; LMULMAX2-NEXT: vand.vx v10, v8, a1
-; LMULMAX2-NEXT: vsrl.vi v8, v8, 2
-; LMULMAX2-NEXT: vand.vx v8, v8, a1
-; LMULMAX2-NEXT: vadd.vv v8, v10, v8
-; LMULMAX2-NEXT: vsrl.vi v10, v8, 4
-; LMULMAX2-NEXT: vadd.vv v8, v8, v10
-; LMULMAX2-NEXT: lui a1, 61681
-; LMULMAX2-NEXT: addi a1, a1, -241
-; LMULMAX2-NEXT: vand.vx v8, v8, a1
-; LMULMAX2-NEXT: lui a1, 4112
-; LMULMAX2-NEXT: addi a1, a1, 257
-; LMULMAX2-NEXT: vmul.vx v8, v8, a1
-; LMULMAX2-NEXT: vsrl.vi v8, v8, 24
-; LMULMAX2-NEXT: vse32.v v8, (a0)
-; LMULMAX2-NEXT: ret
-;
-; LMULMAX1-LABEL: ctpop_v8i32:
-; LMULMAX1: # %bb.0:
-; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; LMULMAX1-NEXT: addi a1, a0, 16
-; LMULMAX1-NEXT: vle32.v v8, (a1)
-; LMULMAX1-NEXT: vle32.v v9, (a0)
-; LMULMAX1-NEXT: vsrl.vi v10, v8, 1
-; LMULMAX1-NEXT: lui a2, 349525
-; LMULMAX1-NEXT: addi a2, a2, 1365
-; LMULMAX1-NEXT: vand.vx v10, v10, a2
-; LMULMAX1-NEXT: vsub.vv v8, v8, v10
-; LMULMAX1-NEXT: lui a3, 209715
-; LMULMAX1-NEXT: addi a3, a3, 819
-; LMULMAX1-NEXT: vand.vx v10, v8, a3
-; LMULMAX1-NEXT: vsrl.vi v8, v8, 2
-; LMULMAX1-NEXT: vand.vx v8, v8, a3
-; LMULMAX1-NEXT: vadd.vv v8, v10, v8
-; LMULMAX1-NEXT: vsrl.vi v10, v8, 4
-; LMULMAX1-NEXT: vadd.vv v8, v8, v10
-; LMULMAX1-NEXT: lui a4, 61681
-; LMULMAX1-NEXT: addi a4, a4, -241
-; LMULMAX1-NEXT: vand.vx v8, v8, a4
-; LMULMAX1-NEXT: lui a5, 4112
-; LMULMAX1-NEXT: addi a5, a5, 257
-; LMULMAX1-NEXT: vmul.vx v8, v8, a5
-; LMULMAX1-NEXT: vsrl.vi v8, v8, 24
-; LMULMAX1-NEXT: vsrl.vi v10, v9, 1
-; LMULMAX1-NEXT: vand.vx v10, v10, a2
-; LMULMAX1-NEXT: vsub.vv v9, v9, v10
-; LMULMAX1-NEXT: vand.vx v10, v9, a3
-; LMULMAX1-NEXT: vsrl.vi v9, v9, 2
-; LMULMAX1-NEXT: vand.vx v9, v9, a3
-; LMULMAX1-NEXT: vadd.vv v9, v10, v9
-; LMULMAX1-NEXT: vsrl.vi v10, v9, 4
-; LMULMAX1-NEXT: vadd.vv v9, v9, v10
-; LMULMAX1-NEXT: vand.vx v9, v9, a4
-; LMULMAX1-NEXT: vmul.vx v9, v9, a5
-; LMULMAX1-NEXT: vsrl.vi v9, v9, 24
-; LMULMAX1-NEXT: vse32.v v9, (a0)
-; LMULMAX1-NEXT: vse32.v v8, (a1)
-; LMULMAX1-NEXT: ret
+; CHECK-LABEL: ctpop_v8i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; CHECK-NEXT: vle32.v v8, (a0)
+; CHECK-NEXT: vsrl.vi v10, v8, 1
+; CHECK-NEXT: lui a1, 349525
+; CHECK-NEXT: addi a1, a1, 1365
+; CHECK-NEXT: vand.vx v10, v10, a1
+; CHECK-NEXT: vsub.vv v8, v8, v10
+; CHECK-NEXT: lui a1, 209715
+; CHECK-NEXT: addi a1, a1, 819
+; CHECK-NEXT: vand.vx v10, v8, a1
+; CHECK-NEXT: vsrl.vi v8, v8, 2
+; CHECK-NEXT: vand.vx v8, v8, a1
+; CHECK-NEXT: vadd.vv v8, v10, v8
+; CHECK-NEXT: vsrl.vi v10, v8, 4
+; CHECK-NEXT: vadd.vv v8, v8, v10
+; CHECK-NEXT: lui a1, 61681
+; CHECK-NEXT: addi a1, a1, -241
+; CHECK-NEXT: vand.vx v8, v8, a1
+; CHECK-NEXT: lui a1, 4112
+; CHECK-NEXT: addi a1, a1, 257
+; CHECK-NEXT: vmul.vx v8, v8, a1
+; CHECK-NEXT: vsrl.vi v8, v8, 24
+; CHECK-NEXT: vse32.v v8, (a0)
+; CHECK-NEXT: ret
;
; ZVBB-LABEL: ctpop_v8i32:
; ZVBB: # %bb.0:
@@ -530,38 +337,14 @@ define void @ctpop_v8i32(ptr %x, ptr %y) {
ret void
}
define <8 x i1> @ctpop_v8i32_ult_two(ptr %x, ptr %y) {
-; LMULMAX2-LABEL: ctpop_v8i32_ult_two:
-; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; LMULMAX2-NEXT: vle32.v v8, (a0)
-; LMULMAX2-NEXT: vadd.vi v10, v8, -1
-; LMULMAX2-NEXT: vand.vv v8, v8, v10
-; LMULMAX2-NEXT: vmseq.vi v0, v8, 0
-; LMULMAX2-NEXT: ret
-;
-; LMULMAX1-LABEL: ctpop_v8i32_ult_two:
-; LMULMAX1: # %bb.0:
-; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; LMULMAX1-NEXT: vle32.v v8, (a0)
-; LMULMAX1-NEXT: addi a0, a0, 16
-; LMULMAX1-NEXT: vle32.v v9, (a0)
-; LMULMAX1-NEXT: vadd.vi v10, v8, -1
-; LMULMAX1-NEXT: vand.vv v8, v8, v10
-; LMULMAX1-NEXT: vmseq.vi v0, v8, 0
-; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
-; LMULMAX1-NEXT: vmv.v.i v8, 0
-; LMULMAX1-NEXT: vmerge.vim v8, v8, 1, v0
-; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; LMULMAX1-NEXT: vadd.vi v10, v9, -1
-; LMULMAX1-NEXT: vand.vv v9, v9, v10
-; LMULMAX1-NEXT: vmseq.vi v0, v9, 0
-; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf4, ta, ma
-; LMULMAX1-NEXT: vmv.v.i v9, 0
-; LMULMAX1-NEXT: vmerge.vim v9, v9, 1, v0
-; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
-; LMULMAX1-NEXT: vslideup.vi v8, v9, 4
-; LMULMAX1-NEXT: vmsne.vi v0, v8, 0
-; LMULMAX1-NEXT: ret
+; CHECK-LABEL: ctpop_v8i32_ult_two:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; CHECK-NEXT: vle32.v v8, (a0)
+; CHECK-NEXT: vadd.vi v10, v8, -1
+; CHECK-NEXT: vand.vv v8, v8, v10
+; CHECK-NEXT: vmseq.vi v0, v8, 0
+; CHECK-NEXT: ret
;
; ZVBB-LABEL: ctpop_v8i32_ult_two:
; ZVBB: # %bb.0:
@@ -577,38 +360,14 @@ define <8 x i1> @ctpop_v8i32_ult_two(ptr %x, ptr %y) {
ret <8 x i1> %cmp
}
define <8 x i1> @ctpop_v8i32_ugt_one(ptr %x, ptr %y) {
-; LMULMAX2-LABEL: ctpop_v8i32_ugt_one:
-; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; LMULMAX2-NEXT: vle32.v v8, (a0)
-; LMULMAX2-NEXT: vadd.vi v10, v8, -1
-; LMULMAX2-NEXT: vand.vv v8, v8, v10
-; LMULMAX2-NEXT: vmsne.vi v0, v8, 0
-; LMULMAX2-NEXT: ret
-;
-; LMULMAX1-LABEL: ctpop_v8i32_ugt_one:
-; LMULMAX1: # %bb.0:
-; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; LMULMAX1-NEXT: vle32.v v8, (a0)
-; LMULMAX1-NEXT: addi a0, a0, 16
-; LMULMAX1-NEXT: vle32.v v9, (a0)
-; LMULMAX1-NEXT: vadd.vi v10, v8, -1
-; LMULMAX1-NEXT: vand.vv v8, v8, v10
-; LMULMAX1-NEXT: vmsne.vi v0, v8, 0
-; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
-; LMULMAX1-NEXT: vmv.v.i v8, 0
-; LMULMAX1-NEXT: vmerge.vim v8, v8, 1, v0
-; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; LMULMAX1-NEXT: vadd.vi v10, v9, -1
-; LMULMAX1-NEXT: vand.vv v9, v9, v10
-; LMULMAX1-NEXT: vmsne.vi v0, v9, 0
-; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf4, ta, ma
-; LMULMAX1-NEXT: vmv.v.i v9, 0
-; LMULMAX1-NEXT: vmerge.vim v9, v9, 1, v0
-; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
-; LMULMAX1-NEXT: vslideup.vi v8, v9, 4
-; LMULMAX1-NEXT: vmsne.vi v0, v8, 0
-; LMULMAX1-NEXT: ret
+; CHECK-LABEL: ctpop_v8i32_ugt_one:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; CHECK-NEXT: vle32.v v8, (a0)
+; CHECK-NEXT: vadd.vi v10, v8, -1
+; CHECK-NEXT: vand.vv v8, v8, v10
+; CHECK-NEXT: vmsne.vi v0, v8, 0
+; CHECK-NEXT: ret
;
; ZVBB-LABEL: ctpop_v8i32_ugt_one:
; ZVBB: # %bb.0:
@@ -624,38 +383,14 @@ define <8 x i1> @ctpop_v8i32_ugt_one(ptr %x, ptr %y) {
ret <8 x i1> %cmp
}
define <8 x i1> @ctpop_v8i32_eq_one(ptr %x, ptr %y) {
-; LMULMAX2-LABEL: ctpop_v8i32_eq_one:
-; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; LMULMAX2-NEXT: vle32.v v8, (a0)
-; LMULMAX2-NEXT: vadd.vi v10, v8, -1
-; LMULMAX2-NEXT: vxor.vv v8, v8, v10
-; LMULMAX2-NEXT: vmsltu.vv v0, v10, v8
-; LMULMAX2-NEXT: ret
-;
-; LMULMAX1-LABEL: ctpop_v8i32_eq_one:
-; LMULMAX1: # %bb.0:
-; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; LMULMAX1-NEXT: vle32.v v8, (a0)
-; LMULMAX1-NEXT: addi a0, a0, 16
-; LMULMAX1-NEXT: vle32.v v9, (a0)
-; LMULMAX1-NEXT: vadd.vi v10, v8, -1
-; LMULMAX1-NEXT: vxor.vv v8, v8, v10
-; LMULMAX1-NEXT: vmsltu.vv v0, v10, v8
-; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
-; LMULMAX1-NEXT: vmv.v.i v8, 0
-; LMULMAX1-NEXT: vmerge.vim v8, v8, 1, v0
-; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; LMULMAX1-NEXT: vadd.vi v10, v9, -1
-; LMULMAX1-NEXT: vxor.vv v9, v9, v10
-; LMULMAX1-NEXT: vmsltu.vv v0, v10, v9
-; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf4, ta, ma
-; LMULMAX1-NEXT: vmv.v.i v9, 0
-; LMULMAX1-NEXT: vmerge.vim v9, v9, 1, v0
-; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
-; LMULMAX1-NEXT: vslideup.vi v8, v9, 4
-; LMULMAX1-NEXT: vmsne.vi v0, v8, 0
-; LMULMAX1-NEXT: ret
+; CHECK-LABEL: ctpop_v8i32_eq_one:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; CHECK-NEXT: vle32.v v8, (a0)
+; CHECK-NEXT: vadd.vi v10, v8, -1
+; CHECK-NEXT: vxor.vv v8, v8, v10
+; CHECK-NEXT: vmsltu.vv v0, v10, v8
+; CHECK-NEXT: ret
;
; ZVBB-LABEL: ctpop_v8i32_eq_one:
; ZVBB: # %bb.0:
@@ -671,38 +406,14 @@ define <8 x i1> @ctpop_v8i32_eq_one(ptr %x, ptr %y) {
ret <8 x i1> %cmp
}
define <8 x i1> @ctpop_v8i32_ne_one(ptr %x, ptr %y) {
-; LMULMAX2-LABEL: ctpop_v8i32_ne_one:
-; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; LMULMAX2-NEXT: vle32.v v8, (a0)
-; LMULMAX2-NEXT: vadd.vi v10, v8, -1
-; LMULMAX2-NEXT: vxor.vv v8, v8, v10
-; LMULMAX2-NEXT: vmsleu.vv v0, v8, v10
-; LMULMAX2-NEXT: ret
-;
-; LMULMAX1-LABEL: ctpop_v8i32_ne_one:
-; LMULMAX1: # %bb.0:
-; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; LMULMAX1-NEXT: vle32.v v8, (a0)
-; LMULMAX1-NEXT: addi a0, a0, 16
-; LMULMAX1-NEXT: vle32.v v9, (a0)
-; LMULMAX1-NEXT: vadd.vi v10, v8, -1
-; LMULMAX1-NEXT: vxor.vv v8, v8, v10
-; LMULMAX1-NEXT: vmsleu.vv v0, v8, v10
-; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
-; LMULMAX1-NEXT: vmv.v.i v8, 0
-; LMULMAX1-NEXT: vmerge.vim v8, v8, 1, v0
-; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; LMULMAX1-NEXT: vadd.vi v10, v9, -1
-; LMULMAX1-NEXT: vxor.vv v9, v9, v10
-; LMULMAX1-NEXT: vmsleu.vv v0, v9, v10
-; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf4, ta, ma
-; LMULMAX1-NEXT: vmv.v.i v9, 0
-; LMULMAX1-NEXT: vmerge.vim v9, v9, 1, v0
-; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
-; LMULMAX1-NEXT: vslideup.vi v8, v9, 4
-; LMULMAX1-NEXT: vmsne.vi v0, v8, 0
-; LMULMAX1-NEXT: ret
+; CHECK-LABEL: ctpop_v8i32_ne_one:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; CHECK-NEXT: vle32.v v8, (a0)
+; CHECK-NEXT: vadd.vi v10, v8, -1
+; CHECK-NEXT: vxor.vv v8, v8, v10
+; CHECK-NEXT: vmsleu.vv v0, v8, v10
+; CHECK-NEXT: ret
;
; ZVBB-LABEL: ctpop_v8i32_ne_one:
; ZVBB: # %bb.0:
@@ -720,187 +431,81 @@ define <8 x i1> @ctpop_v8i32_ne_one(ptr %x, ptr %y) {
declare <8 x i32> @llvm.ctpop.v8i32(<8 x i32>)
define void @ctpop_v4i64(ptr %x, ptr %y) {
-; LMULMAX2-RV32-LABEL: ctpop_v4i64:
-; LMULMAX2-RV32: # %bb.0:
-; LMULMAX2-RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; LMULMAX2-RV32-NEXT: vle64.v v8, (a0)
-; LMULMAX2-RV32-NEXT: lui a1, 349525
-; LMULMAX2-RV32-NEXT: addi a1, a1, 1365
-; LMULMAX2-RV32-NEXT: vsetvli a2, zero, e32, m2, ta, ma
-; LMULMAX2-RV32-NEXT: vmv.v.x v10, a1
-; LMULMAX2-RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; LMULMAX2-RV32-NEXT: vsrl.vi v12, v8, 1
-; LMULMAX2-RV32-NEXT: vand.vv v10, v12, v10
-; LMULMAX2-RV32-NEXT: vsub.vv v8, v8, v10
-; LMULMAX2-RV32-NEXT: lui a1, 209715
-; LMULMAX2-RV32-NEXT: addi a1, a1, 819
-; LMULMAX2-RV32-NEXT: vsetvli a2, zero, e32, m2, ta, ma
-; LMULMAX2-RV32-NEXT: vmv.v.x v10, a1
-; LMULMAX2-RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; LMULMAX2-RV32-NEXT: vand.vv v12, v8, v10
-; LMULMAX2-RV32-NEXT: vsrl.vi v8, v8, 2
-; LMULMAX2-RV32-NEXT: vand.vv v8, v8, v10
-; LMULMAX2-RV32-NEXT: vadd.vv v8, v12, v8
-; LMULMAX2-RV32-NEXT: vsrl.vi v10, v8, 4
-; LMULMAX2-RV32-NEXT: vadd.vv v8, v8, v10
-; LMULMAX2-RV32-NEXT: lui a1, 61681
-; LMULMAX2-RV32-NEXT: addi a1, a1, -241
-; LMULMAX2-RV32-NEXT: vsetvli a2, zero, e32, m2, ta, ma
-; LMULMAX2-RV32-NEXT: vmv.v.x v10, a1
-; LMULMAX2-RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; LMULMAX2-RV32-NEXT: vand.vv v8, v8, v10
-; LMULMAX2-RV32-NEXT: lui a1, 4112
-; LMULMAX2-RV32-NEXT: addi a1, a1, 257
-; LMULMAX2-RV32-NEXT: vsetvli a2, zero, e32, m2, ta, ma
-; LMULMAX2-RV32-NEXT: vmv.v.x v10, a1
-; LMULMAX2-RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; LMULMAX2-RV32-NEXT: vmul.vv v8, v8, v10
-; LMULMAX2-RV32-NEXT: li a1, 56
-; LMULMAX2-RV32-NEXT: vsrl.vx v8, v8, a1
-; LMULMAX2-RV32-NEXT: vse64.v v8, (a0)
-; LMULMAX2-RV32-NEXT: ret
-;
-; LMULMAX2-RV64-LABEL: ctpop_v4i64:
-; LMULMAX2-RV64: # %bb.0:
-; LMULMAX2-RV64-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; LMULMAX2-RV64-NEXT: vle64.v v8, (a0)
-; LMULMAX2-RV64-NEXT: vsrl.vi v10, v8, 1
-; LMULMAX2-RV64-NEXT: lui a1, 349525
-; LMULMAX2-RV64-NEXT: addiw a1, a1, 1365
-; LMULMAX2-RV64-NEXT: slli a2, a1, 32
-; LMULMAX2-RV64-NEXT: add a1, a1, a2
-; LMULMAX2-RV64-NEXT: vand.vx v10, v10, a1
-; LMULMAX2-RV64-NEXT: vsub.vv v8, v8, v10
-; LMULMAX2-RV64-NEXT: lui a1, 209715
-; LMULMAX2-RV64-NEXT: addiw a1, a1, 819
-; LMULMAX2-RV64-NEXT: slli a2, a1, 32
-; LMULMAX2-RV64-NEXT: add a1, a1, a2
-; LMULMAX2-RV64-NEXT: vand.vx v10, v8, a1
-; LMULMAX2-RV64-NEXT: vsrl.vi v8, v8, 2
-; LMULMAX2-RV64-NEXT: vand.vx v8, v8, a1
-; LMULMAX2-RV64-NEXT: vadd.vv v8, v10, v8
-; LMULMAX2-RV64-NEXT: vsrl.vi v10, v8, 4
-; LMULMAX2-RV64-NEXT: vadd.vv v8, v8, v10
-; LMULMAX2-RV64-NEXT: lui a1, 61681
-; LMULMAX2-RV64-NEXT: addiw a1, a1, -241
-; LMULMAX2-RV64-NEXT: slli a2, a1, 32
-; LMULMAX2-RV64-NEXT: add a1, a1, a2
-; LMULMAX2-RV64-NEXT: vand.vx v8, v8, a1
-; LMULMAX2-RV64-NEXT: lui a1, 4112
-; LMULMAX2-RV64-NEXT: addiw a1, a1, 257
-; LMULMAX2-RV64-NEXT: slli a2, a1, 32
-; LMULMAX2-RV64-NEXT: add a1, a1, a2
-; LMULMAX2-RV64-NEXT: vmul.vx v8, v8, a1
-; LMULMAX2-RV64-NEXT: li a1, 56
-; LMULMAX2-RV64-NEXT: vsrl.vx v8, v8, a1
-; LMULMAX2-RV64-NEXT: vse64.v v8, (a0)
-; LMULMAX2-RV64-NEXT: ret
-;
-; LMULMAX1-RV32-LABEL: ctpop_v4i64:
-; LMULMAX1-RV32: # %bb.0:
-; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; LMULMAX1-RV32-NEXT: vle64.v v8, (a0)
-; LMULMAX1-RV32-NEXT: addi a1, a0, 16
-; LMULMAX1-RV32-NEXT: vle64.v v9, (a1)
-; LMULMAX1-RV32-NEXT: lui a2, 349525
-; LMULMAX1-RV32-NEXT: addi a2, a2, 1365
-; LMULMAX1-RV32-NEXT: vsetvli a3, zero, e32, m1, ta, ma
-; LMULMAX1-RV32-NEXT: vmv.v.x v10, a2
-; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; LMULMAX1-RV32-NEXT: vsrl.vi v11, v9, 1
-; LMULMAX1-RV32-NEXT: vand.vv v11, v11, v10
-; LMULMAX1-RV32-NEXT: vsub.vv v9, v9, v11
-; LMULMAX1-RV32-NEXT: lui a2, 209715
-; LMULMAX1-RV32-NEXT: addi a2, a2, 819
-; LMULMAX1-RV32-NEXT: vsetvli a3, zero, e32, m1, ta, ma
-; LMULMAX1-RV32-NEXT: vmv.v.x v11, a2
-; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; LMULMAX1-RV32-NEXT: vand.vv v12, v9, v11
-; LMULMAX1-RV32-NEXT: vsrl.vi v9, v9, 2
-; LMULMAX1-RV32-NEXT: vand.vv v9, v9, v11
-; LMULMAX1-RV32-NEXT: vadd.vv v9, v12, v9
-; LMULMAX1-RV32-NEXT: vsrl.vi v12, v9, 4
-; LMULMAX1-RV32-NEXT: vadd.vv v9, v9, v12
-; LMULMAX1-RV32-NEXT: lui a2, 61681
-; LMULMAX1-RV32-NEXT: addi a2, a2, -241
-; LMULMAX1-RV32-NEXT: vsetvli a3, zero, e32, m1, ta, ma
-; LMULMAX1-RV32-NEXT: vmv.v.x v12, a2
-; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; LMULMAX1-RV32-NEXT: vand.vv v9, v9, v12
-; LMULMAX1-RV32-NEXT: lui a2, 4112
-; LMULMAX1-RV32-NEXT: addi a2, a2, 257
-; LMULMAX1-RV32-NEXT: vsetvli a3, zero, e32, m1, ta, ma
-; LMULMAX1-RV32-NEXT: vmv.v.x v13, a2
-; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; LMULMAX1-RV32-NEXT: vmul.vv v9, v9, v13
-; LMULMAX1-RV32-NEXT: li a2, 56
-; LMULMAX1-RV32-NEXT: vsrl.vx v9, v9, a2
-; LMULMAX1-RV32-NEXT: vsrl.vi v14, v8, 1
-; LMULMAX1-RV32-NEXT: vand.vv v10, v14, v10
-; LMULMAX1-RV32-NEXT: vsub.vv v8, v8, v10
-; LMULMAX1-RV32-NEXT: vand.vv v10, v8, v11
-; LMULMAX1-RV32-NEXT: vsrl.vi v8, v8, 2
-; LMULMAX1-RV32-NEXT: vand.vv v8, v8, v11
-; LMULMAX1-RV32-NEXT: vadd.vv v8, v10, v8
-; LMULMAX1-RV32-NEXT: vsrl.vi v10, v8, 4
-; LMULMAX1-RV32-NEXT: vadd.vv v8, v8, v10
-; LMULMAX1-RV32-NEXT: vand.vv v8, v8, v12
-; LMULMAX1-RV32-NEXT: vmul.vv v8, v8, v13
-; LMULMAX1-RV32-NEXT: vsrl.vx v8, v8, a2
-; LMULMAX1-RV32-NEXT: vse64.v v8, (a0)
-; LMULMAX1-RV32-NEXT: vse64.v v9, (a1)
-; LMULMAX1-RV32-NEXT: ret
-;
-; LMULMAX1-RV64-LABEL: ctpop_v4i64:
-; LMULMAX1-RV64: # %bb.0:
-; LMULMAX1-RV64-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; LMULMAX1-RV64-NEXT: addi a1, a0, 16
-; LMULMAX1-RV64-NEXT: vle64.v v8, (a1)
-; LMULMAX1-RV64-NEXT: vle64.v v9, (a0)
-; LMULMAX1-RV64-NEXT: vsrl.vi v10, v8, 1
-; LMULMAX1-RV64-NEXT: lui a2, 349525
-; LMULMAX1-RV64-NEXT: addiw a2, a2, 1365
-; LMULMAX1-RV64-NEXT: slli a3, a2, 32
-; LMULMAX1-RV64-NEXT: add a2, a2, a3
-; LMULMAX1-RV64-NEXT: vand.vx v10, v10, a2
-; LMULMAX1-RV64-NEXT: vsub.vv v8, v8, v10
-; LMULMAX1-RV64-NEXT: lui a3, 209715
-; LMULMAX1-RV64-NEXT: addiw a3, a3, 819
-; LMULMAX1-RV64-NEXT: slli a4, a3, 32
-; LMULMAX1-RV64-NEXT: add a3, a3, a4
-; LMULMAX1-RV64-NEXT: vand.vx v10, v8, a3
-; LMULMAX1-RV64-NEXT: vsrl.vi v8, v8, 2
-; LMULMAX1-RV64-NEXT: vand.vx v8, v8, a3
-; LMULMAX1-RV64-NEXT: vadd.vv v8, v10, v8
-; LMULMAX1-RV64-NEXT: vsrl.vi v10, v8, 4
-; LMULMAX1-RV64-NEXT: vadd.vv v8, v8, v10
-; LMULMAX1-RV64-NEXT: lui a4, 61681
-; LMULMAX1-RV64-NEXT: addiw a4, a4, -241
-; LMULMAX1-RV64-NEXT: slli a5, a4, 32
-; LMULMAX1-RV64-NEXT: add a4, a4, a5
-; LMULMAX1-RV64-NEXT: vand.vx v8, v8, a4
-; LMULMAX1-RV64-NEXT: lui a5, 4112
-; LMULMAX1-RV64-NEXT: addiw a5, a5, 257
-; LMULMAX1-RV64-NEXT: slli a6, a5, 32
-; LMULMAX1-RV64-NEXT: add a5, a5, a6
-; LMULMAX1-RV64-NEXT: vmul.vx v8, v8, a5
-; LMULMAX1-RV64-NEXT: li a6, 56
-; LMULMAX1-RV64-NEXT: vsrl.vx v8, v8, a6
-; LMULMAX1-RV64-NEXT: vsrl.vi v10, v9, 1
-; LMULMAX1-RV64-NEXT: vand.vx v10, v10, a2
-; LMULMAX1-RV64-NEXT: vsub.vv v9, v9, v10
-; LMULMAX1-RV64-NEXT: vand.vx v10, v9, a3
-; LMULMAX1-RV64-NEXT: vsrl.vi v9, v9, 2
-; LMULMAX1-RV64-NEXT: vand.vx v9, v9, a3
-; LMULMAX1-RV64-NEXT: vadd.vv v9, v10, v9
-; LMULMAX1-RV64-NEXT: vsrl.vi v10, v9, 4
-; LMULMAX1-RV64-NEXT: vadd.vv v9, v9, v10
-; LMULMAX1-RV64-NEXT: vand.vx v9, v9, a4
-; LMULMAX1-RV64-NEXT: vmul.vx v9, v9, a5
-; LMULMAX1-RV64-NEXT: vsrl.vx v9, v9, a6
-; LMULMAX1-RV64-NEXT: vse64.v v9, (a0)
-; LMULMAX1-RV64-NEXT: vse64.v v8, (a1)
-; LMULMAX1-RV64-NEXT: ret
+; RV32-LABEL: ctpop_v4i64:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; RV32-NEXT: vle64.v v8, (a0)
+; RV32-NEXT: lui a1, 349525
+; RV32-NEXT: addi a1, a1, 1365
+; RV32-NEXT: vsetvli a2, zero, e32, m2, ta, ma
+; RV32-NEXT: vmv.v.x v10, a1
+; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; RV32-NEXT: vsrl.vi v12, v8, 1
+; RV32-NEXT: vand.vv v10, v12, v10
+; RV32-NEXT: vsub.vv v8, v8, v10
+; RV32-NEXT: lui a1, 209715
+; RV32-NEXT: addi a1, a1, 819
+; RV32-NEXT: vsetvli a2, zero, e32, m2, ta, ma
+; RV32-NEXT: vmv.v.x v10, a1
+; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; RV32-NEXT: vand.vv v12, v8, v10
+; RV32-NEXT: vsrl.vi v8, v8, 2
+; RV32-NEXT: vand.vv v8, v8, v10
+; RV32-NEXT: vadd.vv v8, v12, v8
+; RV32-NEXT: vsrl.vi v10, v8, 4
+; RV32-NEXT: vadd.vv v8, v8, v10
+; RV32-NEXT: lui a1, 61681
+; RV32-NEXT: addi a1, a1, -241
+; RV32-NEXT: vsetvli a2, zero, e32, m2, ta, ma
+; RV32-NEXT: vmv.v.x v10, a1
+; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; RV32-NEXT: vand.vv v8, v8, v10
+; RV32-NEXT: lui a1, 4112
+; RV32-NEXT: addi a1, a1, 257
+; RV32-NEXT: vsetvli a2, zero, e32, m2, ta, ma
+; RV32-NEXT: vmv.v.x v10, a1
+; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; RV32-NEXT: vmul.vv v8, v8, v10
+; RV32-NEXT: li a1, 56
+; RV32-NEXT: vsrl.vx v8, v8, a1
+; RV32-NEXT: vse64.v v8, (a0)
+; RV32-NEXT: ret
+;
+; RV64-LABEL: ctpop_v4i64:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; RV64-NEXT: vle64.v v8, (a0)
+; RV64-NEXT: vsrl.vi v10, v8, 1
+; RV64-NEXT: lui a1, 349525
+; RV64-NEXT: addiw a1, a1, 1365
+; RV64-NEXT: slli a2, a1, 32
+; RV64-NEXT: add a1, a1, a2
+; RV64-NEXT: vand.vx v10, v10, a1
+; RV64-NEXT: vsub.vv v8, v8, v10
+; RV64-NEXT: lui a1, 209715
+; RV64-NEXT: addiw a1, a1, 819
+; RV64-NEXT: slli a2, a1, 32
+; RV64-NEXT: add a1, a1, a2
+; RV64-NEXT: vand.vx v10, v8, a1
+; RV64-NEXT: vsrl.vi v8, v8, 2
+; RV64-NEXT: vand.vx v8, v8, a1
+; RV64-NEXT: vadd.vv v8, v10, v8
+; RV64-NEXT: vsrl.vi v10, v8, 4
+; RV64-NEXT: vadd.vv v8, v8, v10
+; RV64-NEXT: lui a1, 61681
+; RV64-NEXT: addiw a1, a1, -241
+; RV64-NEXT: slli a2, a1, 32
+; RV64-NEXT: add a1, a1, a2
+; RV64-NEXT: vand.vx v8, v8, a1
+; RV64-NEXT: lui a1, 4112
+; RV64-NEXT: addiw a1, a1, 257
+; RV64-NEXT: slli a2, a1, 32
+; RV64-NEXT: add a1, a1, a2
+; RV64-NEXT: vmul.vx v8, v8, a1
+; RV64-NEXT: li a1, 56
+; RV64-NEXT: vsrl.vx v8, v8, a1
+; RV64-NEXT: vse64.v v8, (a0)
+; RV64-NEXT: ret
;
; ZVBB-LABEL: ctpop_v4i64:
; ZVBB: # %bb.0:
@@ -916,68 +521,14 @@ define void @ctpop_v4i64(ptr %x, ptr %y) {
ret void
}
define <4 x i1> @ctpop_v4i64_ult_two(ptr %x, ptr %y) {
-; LMULMAX2-LABEL: ctpop_v4i64_ult_two:
-; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; LMULMAX2-NEXT: vle64.v v8, (a0)
-; LMULMAX2-NEXT: vadd.vi v10, v8, -1
-; LMULMAX2-NEXT: vand.vv v8, v8, v10
-; LMULMAX2-NEXT: vmseq.vi v0, v8, 0
-; LMULMAX2-NEXT: ret
-;
-; LMULMAX1-RV32-LABEL: ctpop_v4i64_ult_two:
-; LMULMAX1-RV32: # %bb.0:
-; LMULMAX1-RV32-NEXT: addi a1, a0, 16
-; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; LMULMAX1-RV32-NEXT: vle64.v v8, (a1)
-; LMULMAX1-RV32-NEXT: vle64.v v9, (a0)
-; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; LMULMAX1-RV32-NEXT: vmv.v.i v10, -1
-; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; LMULMAX1-RV32-NEXT: vadd.vv v11, v9, v10
-; LMULMAX1-RV32-NEXT: vand.vv v9, v9, v11
-; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; LMULMAX1-RV32-NEXT: vmv.v.i v11, 0
-; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; LMULMAX1-RV32-NEXT: vmseq.vv v0, v9, v11
-; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
-; LMULMAX1-RV32-NEXT: vmv.v.i v9, 0
-; LMULMAX1-RV32-NEXT: vmerge.vim v9, v9, 1, v0
-; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; LMULMAX1-RV32-NEXT: vadd.vv v10, v8, v10
-; LMULMAX1-RV32-NEXT: vand.vv v8, v8, v10
-; LMULMAX1-RV32-NEXT: vmseq.vv v0, v8, v11
-; LMULMAX1-RV32-NEXT: vsetvli zero, zero, e8, mf8, ta, ma
-; LMULMAX1-RV32-NEXT: vmv.v.i v8, 0
-; LMULMAX1-RV32-NEXT: vmerge.vim v8, v8, 1, v0
-; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
-; LMULMAX1-RV32-NEXT: vslideup.vi v9, v8, 2
-; LMULMAX1-RV32-NEXT: vmsne.vi v0, v9, 0
-; LMULMAX1-RV32-NEXT: ret
-;
-; LMULMAX1-RV64-LABEL: ctpop_v4i64_ult_two:
-; LMULMAX1-RV64: # %bb.0:
-; LMULMAX1-RV64-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; LMULMAX1-RV64-NEXT: vle64.v v8, (a0)
-; LMULMAX1-RV64-NEXT: addi a0, a0, 16
-; LMULMAX1-RV64-NEXT: vle64.v v9, (a0)
-; LMULMAX1-RV64-NEXT: vadd.vi v10, v8, -1
-; LMULMAX1-RV64-NEXT: vand.vv v8, v8, v10
-; LMULMAX1-RV64-NEXT: vmseq.vi v0, v8, 0
-; LMULMAX1-RV64-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
-; LMULMAX1-RV64-NEXT: vmv.v.i v8, 0
-; LMULMAX1-RV64-NEXT: vmerge.vim v8, v8, 1, v0
-; LMULMAX1-RV64-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; LMULMAX1-RV64-NEXT: vadd.vi v10, v9, -1
-; LMULMAX1-RV64-NEXT: vand.vv v9, v9, v10
-; LMULMAX1-RV64-NEXT: vmseq.vi v0, v9, 0
-; LMULMAX1-RV64-NEXT: vsetvli zero, zero, e8, mf8, ta, ma
-; LMULMAX1-RV64-NEXT: vmv.v.i v9, 0
-; LMULMAX1-RV64-NEXT: vmerge.vim v9, v9, 1, v0
-; LMULMAX1-RV64-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
-; LMULMAX1-RV64-NEXT: vslideup.vi v8, v9, 2
-; LMULMAX1-RV64-NEXT: vmsne.vi v0, v8, 0
-; LMULMAX1-RV64-NEXT: ret
+; CHECK-LABEL: ctpop_v4i64_ult_two:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; CHECK-NEXT: vle64.v v8, (a0)
+; CHECK-NEXT: vadd.vi v10, v8, -1
+; CHECK-NEXT: vand.vv v8, v8, v10
+; CHECK-NEXT: vmseq.vi v0, v8, 0
+; CHECK-NEXT: ret
;
; ZVBB-LABEL: ctpop_v4i64_ult_two:
; ZVBB: # %bb.0:
@@ -993,68 +544,14 @@ define <4 x i1> @ctpop_v4i64_ult_two(ptr %x, ptr %y) {
ret <4 x i1> %cmp
}
define <4 x i1> @ctpop_v4i64_ugt_one(ptr %x, ptr %y) {
-; LMULMAX2-LABEL: ctpop_v4i64_ugt_one:
-; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; LMULMAX2-NEXT: vle64.v v8, (a0)
-; LMULMAX2-NEXT: vadd.vi v10, v8, -1
-; LMULMAX2-NEXT: vand.vv v8, v8, v10
-; LMULMAX2-NEXT: vmsne.vi v0, v8, 0
-; LMULMAX2-NEXT: ret
-;
-; LMULMAX1-RV32-LABEL: ctpop_v4i64_ugt_one:
-; LMULMAX1-RV32: # %bb.0:
-; LMULMAX1-RV32-NEXT: addi a1, a0, 16
-; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; LMULMAX1-RV32-NEXT: vle64.v v8, (a1)
-; LMULMAX1-RV32-NEXT: vle64.v v9, (a0)
-; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; LMULMAX1-RV32-NEXT: vmv.v.i v10, -1
-; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; LMULMAX1-RV32-NEXT: vadd.vv v11, v9, v10
-; LMULMAX1-RV32-NEXT: vand.vv v9, v9, v11
-; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; LMULMAX1-RV32-NEXT: vmv.v.i v11, 0
-; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; LMULMAX1-RV32-NEXT: vmsne.vv v0, v9, v11
-; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
-; LMULMAX1-RV32-NEXT: vmv.v.i v9, 0
-; LMULMAX1-RV32-NEXT: vmerge.vim v9, v9, 1, v0
-; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; LMULMAX1-RV32-NEXT: vadd.vv v10, v8, v10
-; LMULMAX1-RV32-NEXT: vand.vv v8, v8, v10
-; LMULMAX1-RV32-NEXT: vmsne.vv v0, v8, v11
-; LMULMAX1-RV32-NEXT: vsetvli zero, zero, e8, mf8, ta, ma
-; LMULMAX1-RV32-NEXT: vmv.v.i v8, 0
-; LMULMAX1-RV32-NEXT: vmerge.vim v8, v8, 1, v0
-; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
-; LMULMAX1-RV32-NEXT: vslideup.vi v9, v8, 2
-; LMULMAX1-RV32-NEXT: vmsne.vi v0, v9, 0
-; LMULMAX1-RV32-NEXT: ret
-;
-; LMULMAX1-RV64-LABEL: ctpop_v4i64_ugt_one:
-; LMULMAX1-RV64: # %bb.0:
-; LMULMAX1-RV64-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; LMULMAX1-RV64-NEXT: vle64.v v8, (a0)
-; LMULMAX1-RV64-NEXT: addi a0, a0, 16
-; LMULMAX1-RV64-NEXT: vle64.v v9, (a0)
-; LMULMAX1-RV64-NEXT: vadd.vi v10, v8, -1
-; LMULMAX1-RV64-NEXT: vand.vv v8, v8, v10
-; LMULMAX1-RV64-NEXT: vmsne.vi v0, v8, 0
-; LMULMAX1-RV64-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
-; LMULMAX1-RV64-NEXT: vmv.v.i v8, 0
-; LMULMAX1-RV64-NEXT: vmerge.vim v8, v8, 1, v0
-; LMULMAX1-RV64-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; LMULMAX1-RV64-NEXT: vadd.vi v10, v9, -1
-; LMULMAX1-RV64-NEXT: vand.vv v9, v9, v10
-; LMULMAX1-RV64-NEXT: vmsne.vi v0, v9, 0
-; LMULMAX1-RV64-NEXT: vsetvli zero, zero, e8, mf8, ta, ma
-; LMULMAX1-RV64-NEXT: vmv.v.i v9, 0
-; LMULMAX1-RV64-NEXT: vmerge.vim v9, v9, 1, v0
-; LMULMAX1-RV64-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
-; LMULMAX1-RV64-NEXT: vslideup.vi v8, v9, 2
-; LMULMAX1-RV64-NEXT: vmsne.vi v0, v8, 0
-; LMULMAX1-RV64-NEXT: ret
+; CHECK-LABEL: ctpop_v4i64_ugt_one:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; CHECK-NEXT: vle64.v v8, (a0)
+; CHECK-NEXT: vadd.vi v10, v8, -1
+; CHECK-NEXT: vand.vv v8, v8, v10
+; CHECK-NEXT: vmsne.vi v0, v8, 0
+; CHECK-NEXT: ret
;
; ZVBB-LABEL: ctpop_v4i64_ugt_one:
; ZVBB: # %bb.0:
@@ -1070,65 +567,14 @@ define <4 x i1> @ctpop_v4i64_ugt_one(ptr %x, ptr %y) {
ret <4 x i1> %cmp
}
define <4 x i1> @ctpop_v4i64_eq_one(ptr %x, ptr %y) {
-; LMULMAX2-LABEL: ctpop_v4i64_eq_one:
-; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; LMULMAX2-NEXT: vle64.v v8, (a0)
-; LMULMAX2-NEXT: vadd.vi v10, v8, -1
-; LMULMAX2-NEXT: vxor.vv v8, v8, v10
-; LMULMAX2-NEXT: vmsltu.vv v0, v10, v8
-; LMULMAX2-NEXT: ret
-;
-; LMULMAX1-RV32-LABEL: ctpop_v4i64_eq_one:
-; LMULMAX1-RV32: # %bb.0:
-; LMULMAX1-RV32-NEXT: addi a1, a0, 16
-; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; LMULMAX1-RV32-NEXT: vle64.v v8, (a1)
-; LMULMAX1-RV32-NEXT: vle64.v v9, (a0)
-; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; LMULMAX1-RV32-NEXT: vmv.v.i v10, -1
-; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; LMULMAX1-RV32-NEXT: vadd.vv v11, v9, v10
-; LMULMAX1-RV32-NEXT: vxor.vv v9, v9, v11
-; LMULMAX1-RV32-NEXT: vmsltu.vv v0, v11, v9
-; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
-; LMULMAX1-RV32-NEXT: vmv.v.i v9, 0
-; LMULMAX1-RV32-NEXT: vmerge.vim v9, v9, 1, v0
-; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; LMULMAX1-RV32-NEXT: vadd.vv v10, v8, v10
-; LMULMAX1-RV32-NEXT: vxor.vv v8, v8, v10
-; LMULMAX1-RV32-NEXT: vmsltu.vv v0, v10, v8
-; LMULMAX1-RV32-NEXT: vsetvli zero, zero, e8, mf8, ta, ma
-; LMULMAX1-RV32-NEXT: vmv.v.i v8, 0
-; LMULMAX1-RV32-NEXT: vmerge.vim v8, v8, 1, v0
-; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
-; LMULMAX1-RV32-NEXT: vslideup.vi v9, v8, 2
-; LMULMAX1-RV32-NEXT: vmsne.vi v0, v9, 0
-; LMULMAX1-RV32-NEXT: ret
-;
-; LMULMAX1-RV64-LABEL: ctpop_v4i64_eq_one:
-; LMULMAX1-RV64: # %bb.0:
-; LMULMAX1-RV64-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; LMULMAX1-RV64-NEXT: vle64.v v8, (a0)
-; LMULMAX1-RV64-NEXT: addi a0, a0, 16
-; LMULMAX1-RV64-NEXT: vle64.v v9, (a0)
-; LMULMAX1-RV64-NEXT: vadd.vi v10, v8, -1
-; LMULMAX1-RV64-NEXT: vxor.vv v8, v8, v10
-; LMULMAX1-RV64-NEXT: vmsltu.vv v0, v10, v8
-; LMULMAX1-RV64-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
-; LMULMAX1-RV64-NEXT: vmv.v.i v8, 0
-; LMULMAX1-RV64-NEXT: vmerge.vim v8, v8, 1, v0
-; LMULMAX1-RV64-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; LMULMAX1-RV64-NEXT: vadd.vi v10, v9, -1
-; LMULMAX1-RV64-NEXT: vxor.vv v9, v9, v10
-; LMULMAX1-RV64-NEXT: vmsltu.vv v0, v10, v9
-; LMULMAX1-RV64-NEXT: vsetvli zero, zero, e8, mf8, ta, ma
-; LMULMAX1-RV64-NEXT: vmv.v.i v9, 0
-; LMULMAX1-RV64-NEXT: vmerge.vim v9, v9, 1, v0
-; LMULMAX1-RV64-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
-; LMULMAX1-RV64-NEXT: vslideup.vi v8, v9, 2
-; LMULMAX1-RV64-NEXT: vmsne.vi v0, v8, 0
-; LMULMAX1-RV64-NEXT: ret
+; CHECK-LABEL: ctpop_v4i64_eq_one:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; CHECK-NEXT: vle64.v v8, (a0)
+; CHECK-NEXT: vadd.vi v10, v8, -1
+; CHECK-NEXT: vxor.vv v8, v8, v10
+; CHECK-NEXT: vmsltu.vv v0, v10, v8
+; CHECK-NEXT: ret
;
; ZVBB-LABEL: ctpop_v4i64_eq_one:
; ZVBB: # %bb.0:
@@ -1144,65 +590,14 @@ define <4 x i1> @ctpop_v4i64_eq_one(ptr %x, ptr %y) {
ret <4 x i1> %cmp
}
define <4 x i1> @ctpop_v4i64_ne_one(ptr %x, ptr %y) {
-; LMULMAX2-LABEL: ctpop_v4i64_ne_one:
-; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; LMULMAX2-NEXT: vle64.v v8, (a0)
-; LMULMAX2-NEXT: vadd.vi v10, v8, -1
-; LMULMAX2-NEXT: vxor.vv v8, v8, v10
-; LMULMAX2-NEXT: vmsleu.vv v0, v8, v10
-; LMULMAX2-NEXT: ret
-;
-; LMULMAX1-RV32-LABEL: ctpop_v4i64_ne_one:
-; LMULMAX1-RV32: # %bb.0:
-; LMULMAX1-RV32-NEXT: addi a1, a0, 16
-; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; LMULMAX1-RV32-NEXT: vle64.v v8, (a1)
-; LMULMAX1-RV32-NEXT: vle64.v v9, (a0)
-; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; LMULMAX1-RV32-NEXT: vmv.v.i v10, -1
-; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; LMULMAX1-RV32-NEXT: vadd.vv v11, v9, v10
-; LMULMAX1-RV32-NEXT: vxor.vv v9, v9, v11
-; LMULMAX1-RV32-NEXT: vmsleu.vv v0, v9, v11
-; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
-; LMULMAX1-RV32-NEXT: vmv.v.i v9, 0
-; LMULMAX1-RV32-NEXT: vmerge.vim v9, v9, 1, v0
-; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; LMULMAX1-RV32-NEXT: vadd.vv v10, v8, v10
-; LMULMAX1-RV32-NEXT: vxor.vv v8, v8, v10
-; LMULMAX1-RV32-NEXT: vmsleu.vv v0, v8, v10
-; LMULMAX1-RV32-NEXT: vsetvli zero, zero, e8, mf8, ta, ma
-; LMULMAX1-RV32-NEXT: vmv.v.i v8, 0
-; LMULMAX1-RV32-NEXT: vmerge.vim v8, v8, 1, v0
-; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
-; LMULMAX1-RV32-NEXT: vslideup.vi v9, v8, 2
-; LMULMAX1-RV32-NEXT: vmsne.vi v0, v9, 0
-; LMULMAX1-RV32-NEXT: ret
-;
-; LMULMAX1-RV64-LABEL: ctpop_v4i64_ne_one:
-; LMULMAX1-RV64: # %bb.0:
-; LMULMAX1-RV64-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; LMULMAX1-RV64-NEXT: vle64.v v8, (a0)
-; LMULMAX1-RV64-NEXT: addi a0, a0, 16
-; LMULMAX1-RV64-NEXT: vle64.v v9, (a0)
-; LMULMAX1-RV64-NEXT: vadd.vi v10, v8, -1
-; LMULMAX1-RV64-NEXT: vxor.vv v8, v8, v10
-; LMULMAX1-RV64-NEXT: vmsleu.vv v0, v8, v10
-; LMULMAX1-RV64-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
-; LMULMAX1-RV64-NEXT: vmv.v.i v8, 0
-; LMULMAX1-RV64-NEXT: vmerge.vim v8, v8, 1, v0
-; LMULMAX1-RV64-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; LMULMAX1-RV64-NEXT: vadd.vi v10, v9, -1
-; LMULMAX1-RV64-NEXT: vxor.vv v9, v9, v10
-; LMULMAX1-RV64-NEXT: vmsleu.vv v0, v9, v10
-; LMULMAX1-RV64-NEXT: vsetvli zero, zero, e8, mf8, ta, ma
-; LMULMAX1-RV64-NEXT: vmv.v.i v9, 0
-; LMULMAX1-RV64-NEXT: vmerge.vim v9, v9, 1, v0
-; LMULMAX1-RV64-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
-; LMULMAX1-RV64-NEXT: vslideup.vi v8, v9, 2
-; LMULMAX1-RV64-NEXT: vmsne.vi v0, v8, 0
-; LMULMAX1-RV64-NEXT: ret
+; CHECK-LABEL: ctpop_v4i64_ne_one:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; CHECK-NEXT: vle64.v v8, (a0)
+; CHECK-NEXT: vadd.vi v10, v8, -1
+; CHECK-NEXT: vxor.vv v8, v8, v10
+; CHECK-NEXT: vmsleu.vv v0, v8, v10
+; CHECK-NEXT: ret
;
; ZVBB-LABEL: ctpop_v4i64_ne_one:
; ZVBB: # %bb.0:
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-cttz.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-cttz.ll
index 5afd935..5802fba 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-cttz.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-cttz.ll
@@ -1,61 +1,74 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+m,+zve64x,+zvl128b -riscv-v-fixed-length-vector-lmul-max=2 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,LMULMAX2,LMULMAX2-RV32,LMULMAX2-RV32I
-; RUN: llc -mtriple=riscv64 -mattr=+m,+zve64x,+zvl128b -riscv-v-fixed-length-vector-lmul-max=2 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,LMULMAX2,LMULMAX2-RV64,LMULMAX2-RV64I
-; RUN: llc -mtriple=riscv32 -mattr=+m,+zve64x,+zvl128b -riscv-v-fixed-length-vector-lmul-max=1 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,LMULMAX1,LMULMAX1-RV32
-; RUN: llc -mtriple=riscv64 -mattr=+m,+zve64x,+zvl128b -riscv-v-fixed-length-vector-lmul-max=1 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,LMULMAX1,LMULMAX1-RV64
-; RUN: llc -mtriple=riscv32 -mattr=+m,+zve64f,+zvl128b,+f -riscv-v-fixed-length-vector-lmul-max=2 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,LMULMAX2,LMULMAX2-RV32,LMULMAX2-RV32F
-; RUN: llc -mtriple=riscv64 -mattr=+m,+zve64f,+zvl128b,+f -riscv-v-fixed-length-vector-lmul-max=2 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,LMULMAX2,LMULMAX2-RV64,LMULMAX2-RV64F
-; RUN: llc -mtriple=riscv32 -mattr=+m,+v,+d -riscv-v-fixed-length-vector-lmul-max=2 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,LMULMAX2,LMULMAX2-RV32,LMULMAX2-RV32D
-; RUN: llc -mtriple=riscv64 -mattr=+m,+v,+d -riscv-v-fixed-length-vector-lmul-max=2 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,LMULMAX2,LMULMAX2-RV64,LMULMAX2-RV64D
-; RUN: llc -mtriple=riscv32 -mattr=+m,+v,+d -riscv-v-fixed-length-vector-lmul-max=1 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,LMULMAX1,LMULMAX1-RV32
-; RUN: llc -mtriple=riscv64 -mattr=+m,+v,+d -riscv-v-fixed-length-vector-lmul-max=1 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,LMULMAX1,LMULMAX1-RV64
-; RUN: llc -mtriple=riscv32 -mattr=+m,+v,+d -riscv-v-fixed-length-vector-lmul-max=8 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=LMULMAX8
-; RUN: llc -mtriple=riscv64 -mattr=+m,+v,+d -riscv-v-fixed-length-vector-lmul-max=8 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=LMULMAX8
+; RUN: llc -mtriple=riscv32 -mattr=+m,+zve64x,+zvl128b -verify-machineinstrs < %s | FileCheck %s --check-prefixes=RVI,RV32I
+; RUN: llc -mtriple=riscv64 -mattr=+m,+zve64x,+zvl128b -verify-machineinstrs < %s | FileCheck %s --check-prefixes=RVI,RV64I
+; RUN: llc -mtriple=riscv32 -mattr=+m,+zve64f,+zvl128b,+f -verify-machineinstrs < %s | FileCheck %s --check-prefixes=RVF,RV32F
+; RUN: llc -mtriple=riscv64 -mattr=+m,+zve64f,+zvl128b,+f -verify-machineinstrs < %s | FileCheck %s --check-prefixes=RVF,RV64F
+; RUN: llc -mtriple=riscv32 -mattr=+m,+v,+d -verify-machineinstrs < %s | FileCheck %s --check-prefixes=RVD,RV32D
+; RUN: llc -mtriple=riscv64 -mattr=+m,+v,+d -verify-machineinstrs < %s | FileCheck %s --check-prefixes=RVD,RV64D
; RUN: llc -mtriple=riscv32 -mattr=+v,+zvbb -verify-machineinstrs < %s | FileCheck %s --check-prefixes=ZVBB
; RUN: llc -mtriple=riscv64 -mattr=+v,+zvbb -verify-machineinstrs < %s | FileCheck %s --check-prefixes=ZVBB
define void @cttz_v16i8(ptr %x, ptr %y) nounwind {
-; CHECK-LABEL: cttz_v16i8:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma
-; CHECK-NEXT: vle8.v v8, (a0)
-; CHECK-NEXT: li a1, 1
-; CHECK-NEXT: vsub.vx v9, v8, a1
-; CHECK-NEXT: vnot.v v8, v8
-; CHECK-NEXT: vand.vv v8, v8, v9
-; CHECK-NEXT: vsrl.vi v9, v8, 1
-; CHECK-NEXT: li a1, 85
-; CHECK-NEXT: vand.vx v9, v9, a1
-; CHECK-NEXT: vsub.vv v8, v8, v9
-; CHECK-NEXT: li a1, 51
-; CHECK-NEXT: vand.vx v9, v8, a1
-; CHECK-NEXT: vsrl.vi v8, v8, 2
-; CHECK-NEXT: vand.vx v8, v8, a1
-; CHECK-NEXT: vadd.vv v8, v9, v8
-; CHECK-NEXT: vsrl.vi v9, v8, 4
-; CHECK-NEXT: vadd.vv v8, v8, v9
-; CHECK-NEXT: vand.vi v8, v8, 15
-; CHECK-NEXT: vse8.v v8, (a0)
-; CHECK-NEXT: ret
-;
-; LMULMAX8-LABEL: cttz_v16i8:
-; LMULMAX8: # %bb.0:
-; LMULMAX8-NEXT: vsetivli zero, 16, e8, m1, ta, ma
-; LMULMAX8-NEXT: vle8.v v8, (a0)
-; LMULMAX8-NEXT: vrsub.vi v9, v8, 0
-; LMULMAX8-NEXT: vand.vv v9, v8, v9
-; LMULMAX8-NEXT: vsetvli zero, zero, e16, m2, ta, ma
-; LMULMAX8-NEXT: vzext.vf2 v10, v9
-; LMULMAX8-NEXT: vfwcvt.f.xu.v v12, v10
-; LMULMAX8-NEXT: vnsrl.wi v10, v12, 23
-; LMULMAX8-NEXT: vsetvli zero, zero, e8, m1, ta, ma
-; LMULMAX8-NEXT: vnsrl.wi v9, v10, 0
-; LMULMAX8-NEXT: li a1, 127
-; LMULMAX8-NEXT: vmseq.vi v0, v8, 0
-; LMULMAX8-NEXT: vsub.vx v8, v9, a1
-; LMULMAX8-NEXT: vmerge.vim v8, v8, 8, v0
-; LMULMAX8-NEXT: vse8.v v8, (a0)
-; LMULMAX8-NEXT: ret
+; RVI-LABEL: cttz_v16i8:
+; RVI: # %bb.0:
+; RVI-NEXT: vsetivli zero, 16, e8, m1, ta, ma
+; RVI-NEXT: vle8.v v8, (a0)
+; RVI-NEXT: li a1, 1
+; RVI-NEXT: vsub.vx v9, v8, a1
+; RVI-NEXT: vnot.v v8, v8
+; RVI-NEXT: vand.vv v8, v8, v9
+; RVI-NEXT: vsrl.vi v9, v8, 1
+; RVI-NEXT: li a1, 85
+; RVI-NEXT: vand.vx v9, v9, a1
+; RVI-NEXT: vsub.vv v8, v8, v9
+; RVI-NEXT: li a1, 51
+; RVI-NEXT: vand.vx v9, v8, a1
+; RVI-NEXT: vsrl.vi v8, v8, 2
+; RVI-NEXT: vand.vx v8, v8, a1
+; RVI-NEXT: vadd.vv v8, v9, v8
+; RVI-NEXT: vsrl.vi v9, v8, 4
+; RVI-NEXT: vadd.vv v8, v8, v9
+; RVI-NEXT: vand.vi v8, v8, 15
+; RVI-NEXT: vse8.v v8, (a0)
+; RVI-NEXT: ret
+;
+; RVF-LABEL: cttz_v16i8:
+; RVF: # %bb.0:
+; RVF-NEXT: vsetivli zero, 16, e8, m1, ta, ma
+; RVF-NEXT: vle8.v v8, (a0)
+; RVF-NEXT: vrsub.vi v9, v8, 0
+; RVF-NEXT: vand.vv v9, v8, v9
+; RVF-NEXT: vsetvli zero, zero, e16, m2, ta, ma
+; RVF-NEXT: vzext.vf2 v10, v9
+; RVF-NEXT: vfwcvt.f.xu.v v12, v10
+; RVF-NEXT: vnsrl.wi v10, v12, 23
+; RVF-NEXT: vsetvli zero, zero, e8, m1, ta, ma
+; RVF-NEXT: vnsrl.wi v9, v10, 0
+; RVF-NEXT: li a1, 127
+; RVF-NEXT: vmseq.vi v0, v8, 0
+; RVF-NEXT: vsub.vx v8, v9, a1
+; RVF-NEXT: vmerge.vim v8, v8, 8, v0
+; RVF-NEXT: vse8.v v8, (a0)
+; RVF-NEXT: ret
+;
+; RVD-LABEL: cttz_v16i8:
+; RVD: # %bb.0:
+; RVD-NEXT: vsetivli zero, 16, e8, m1, ta, ma
+; RVD-NEXT: vle8.v v8, (a0)
+; RVD-NEXT: vrsub.vi v9, v8, 0
+; RVD-NEXT: vand.vv v9, v8, v9
+; RVD-NEXT: vsetvli zero, zero, e16, m2, ta, ma
+; RVD-NEXT: vzext.vf2 v10, v9
+; RVD-NEXT: vfwcvt.f.xu.v v12, v10
+; RVD-NEXT: vnsrl.wi v10, v12, 23
+; RVD-NEXT: vsetvli zero, zero, e8, m1, ta, ma
+; RVD-NEXT: vnsrl.wi v9, v10, 0
+; RVD-NEXT: li a1, 127
+; RVD-NEXT: vmseq.vi v0, v8, 0
+; RVD-NEXT: vsub.vx v8, v9, a1
+; RVD-NEXT: vmerge.vim v8, v8, 8, v0
+; RVD-NEXT: vse8.v v8, (a0)
+; RVD-NEXT: ret
;
; ZVBB-LABEL: cttz_v16i8:
; ZVBB: # %bb.0:
@@ -73,175 +86,67 @@ define void @cttz_v16i8(ptr %x, ptr %y) nounwind {
declare <16 x i8> @llvm.cttz.v16i8(<16 x i8>, i1)
define void @cttz_v8i16(ptr %x, ptr %y) nounwind {
-; LMULMAX2-RV32I-LABEL: cttz_v8i16:
-; LMULMAX2-RV32I: # %bb.0:
-; LMULMAX2-RV32I-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; LMULMAX2-RV32I-NEXT: vle16.v v8, (a0)
-; LMULMAX2-RV32I-NEXT: li a1, 1
-; LMULMAX2-RV32I-NEXT: vsub.vx v9, v8, a1
-; LMULMAX2-RV32I-NEXT: vnot.v v8, v8
-; LMULMAX2-RV32I-NEXT: vand.vv v8, v8, v9
-; LMULMAX2-RV32I-NEXT: vsrl.vi v9, v8, 1
-; LMULMAX2-RV32I-NEXT: lui a1, 5
-; LMULMAX2-RV32I-NEXT: addi a1, a1, 1365
-; LMULMAX2-RV32I-NEXT: vand.vx v9, v9, a1
-; LMULMAX2-RV32I-NEXT: vsub.vv v8, v8, v9
-; LMULMAX2-RV32I-NEXT: lui a1, 3
-; LMULMAX2-RV32I-NEXT: addi a1, a1, 819
-; LMULMAX2-RV32I-NEXT: vand.vx v9, v8, a1
-; LMULMAX2-RV32I-NEXT: vsrl.vi v8, v8, 2
-; LMULMAX2-RV32I-NEXT: vand.vx v8, v8, a1
-; LMULMAX2-RV32I-NEXT: vadd.vv v8, v9, v8
-; LMULMAX2-RV32I-NEXT: vsrl.vi v9, v8, 4
-; LMULMAX2-RV32I-NEXT: vadd.vv v8, v8, v9
-; LMULMAX2-RV32I-NEXT: lui a1, 1
-; LMULMAX2-RV32I-NEXT: addi a1, a1, -241
-; LMULMAX2-RV32I-NEXT: vand.vx v8, v8, a1
-; LMULMAX2-RV32I-NEXT: li a1, 257
-; LMULMAX2-RV32I-NEXT: vmul.vx v8, v8, a1
-; LMULMAX2-RV32I-NEXT: vsrl.vi v8, v8, 8
-; LMULMAX2-RV32I-NEXT: vse16.v v8, (a0)
-; LMULMAX2-RV32I-NEXT: ret
-;
-; LMULMAX2-RV64I-LABEL: cttz_v8i16:
-; LMULMAX2-RV64I: # %bb.0:
-; LMULMAX2-RV64I-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; LMULMAX2-RV64I-NEXT: vle16.v v8, (a0)
-; LMULMAX2-RV64I-NEXT: li a1, 1
-; LMULMAX2-RV64I-NEXT: vsub.vx v9, v8, a1
-; LMULMAX2-RV64I-NEXT: vnot.v v8, v8
-; LMULMAX2-RV64I-NEXT: vand.vv v8, v8, v9
-; LMULMAX2-RV64I-NEXT: vsrl.vi v9, v8, 1
-; LMULMAX2-RV64I-NEXT: lui a1, 5
-; LMULMAX2-RV64I-NEXT: addi a1, a1, 1365
-; LMULMAX2-RV64I-NEXT: vand.vx v9, v9, a1
-; LMULMAX2-RV64I-NEXT: vsub.vv v8, v8, v9
-; LMULMAX2-RV64I-NEXT: lui a1, 3
-; LMULMAX2-RV64I-NEXT: addi a1, a1, 819
-; LMULMAX2-RV64I-NEXT: vand.vx v9, v8, a1
-; LMULMAX2-RV64I-NEXT: vsrl.vi v8, v8, 2
-; LMULMAX2-RV64I-NEXT: vand.vx v8, v8, a1
-; LMULMAX2-RV64I-NEXT: vadd.vv v8, v9, v8
-; LMULMAX2-RV64I-NEXT: vsrl.vi v9, v8, 4
-; LMULMAX2-RV64I-NEXT: vadd.vv v8, v8, v9
-; LMULMAX2-RV64I-NEXT: lui a1, 1
-; LMULMAX2-RV64I-NEXT: addi a1, a1, -241
-; LMULMAX2-RV64I-NEXT: vand.vx v8, v8, a1
-; LMULMAX2-RV64I-NEXT: li a1, 257
-; LMULMAX2-RV64I-NEXT: vmul.vx v8, v8, a1
-; LMULMAX2-RV64I-NEXT: vsrl.vi v8, v8, 8
-; LMULMAX2-RV64I-NEXT: vse16.v v8, (a0)
-; LMULMAX2-RV64I-NEXT: ret
-;
-; LMULMAX1-LABEL: cttz_v8i16:
-; LMULMAX1: # %bb.0:
-; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; LMULMAX1-NEXT: vle16.v v8, (a0)
-; LMULMAX1-NEXT: li a1, 1
-; LMULMAX1-NEXT: vsub.vx v9, v8, a1
-; LMULMAX1-NEXT: vnot.v v8, v8
-; LMULMAX1-NEXT: vand.vv v8, v8, v9
-; LMULMAX1-NEXT: vsrl.vi v9, v8, 1
-; LMULMAX1-NEXT: lui a1, 5
-; LMULMAX1-NEXT: addi a1, a1, 1365
-; LMULMAX1-NEXT: vand.vx v9, v9, a1
-; LMULMAX1-NEXT: vsub.vv v8, v8, v9
-; LMULMAX1-NEXT: lui a1, 3
-; LMULMAX1-NEXT: addi a1, a1, 819
-; LMULMAX1-NEXT: vand.vx v9, v8, a1
-; LMULMAX1-NEXT: vsrl.vi v8, v8, 2
-; LMULMAX1-NEXT: vand.vx v8, v8, a1
-; LMULMAX1-NEXT: vadd.vv v8, v9, v8
-; LMULMAX1-NEXT: vsrl.vi v9, v8, 4
-; LMULMAX1-NEXT: vadd.vv v8, v8, v9
-; LMULMAX1-NEXT: lui a1, 1
-; LMULMAX1-NEXT: addi a1, a1, -241
-; LMULMAX1-NEXT: vand.vx v8, v8, a1
-; LMULMAX1-NEXT: li a1, 257
-; LMULMAX1-NEXT: vmul.vx v8, v8, a1
-; LMULMAX1-NEXT: vsrl.vi v8, v8, 8
-; LMULMAX1-NEXT: vse16.v v8, (a0)
-; LMULMAX1-NEXT: ret
-;
-; LMULMAX2-RV32F-LABEL: cttz_v8i16:
-; LMULMAX2-RV32F: # %bb.0:
-; LMULMAX2-RV32F-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; LMULMAX2-RV32F-NEXT: vle16.v v8, (a0)
-; LMULMAX2-RV32F-NEXT: vrsub.vi v9, v8, 0
-; LMULMAX2-RV32F-NEXT: vand.vv v9, v8, v9
-; LMULMAX2-RV32F-NEXT: vfwcvt.f.xu.v v10, v9
-; LMULMAX2-RV32F-NEXT: vnsrl.wi v9, v10, 23
-; LMULMAX2-RV32F-NEXT: li a1, 127
-; LMULMAX2-RV32F-NEXT: vsub.vx v9, v9, a1
-; LMULMAX2-RV32F-NEXT: vmseq.vi v0, v8, 0
-; LMULMAX2-RV32F-NEXT: li a1, 16
-; LMULMAX2-RV32F-NEXT: vmerge.vxm v8, v9, a1, v0
-; LMULMAX2-RV32F-NEXT: vse16.v v8, (a0)
-; LMULMAX2-RV32F-NEXT: ret
-;
-; LMULMAX2-RV64F-LABEL: cttz_v8i16:
-; LMULMAX2-RV64F: # %bb.0:
-; LMULMAX2-RV64F-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; LMULMAX2-RV64F-NEXT: vle16.v v8, (a0)
-; LMULMAX2-RV64F-NEXT: vrsub.vi v9, v8, 0
-; LMULMAX2-RV64F-NEXT: vand.vv v9, v8, v9
-; LMULMAX2-RV64F-NEXT: vfwcvt.f.xu.v v10, v9
-; LMULMAX2-RV64F-NEXT: vnsrl.wi v9, v10, 23
-; LMULMAX2-RV64F-NEXT: li a1, 127
-; LMULMAX2-RV64F-NEXT: vsub.vx v9, v9, a1
-; LMULMAX2-RV64F-NEXT: vmseq.vi v0, v8, 0
-; LMULMAX2-RV64F-NEXT: li a1, 16
-; LMULMAX2-RV64F-NEXT: vmerge.vxm v8, v9, a1, v0
-; LMULMAX2-RV64F-NEXT: vse16.v v8, (a0)
-; LMULMAX2-RV64F-NEXT: ret
-;
-; LMULMAX2-RV32D-LABEL: cttz_v8i16:
-; LMULMAX2-RV32D: # %bb.0:
-; LMULMAX2-RV32D-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; LMULMAX2-RV32D-NEXT: vle16.v v8, (a0)
-; LMULMAX2-RV32D-NEXT: vrsub.vi v9, v8, 0
-; LMULMAX2-RV32D-NEXT: vand.vv v9, v8, v9
-; LMULMAX2-RV32D-NEXT: vfwcvt.f.xu.v v10, v9
-; LMULMAX2-RV32D-NEXT: vnsrl.wi v9, v10, 23
-; LMULMAX2-RV32D-NEXT: li a1, 127
-; LMULMAX2-RV32D-NEXT: vsub.vx v9, v9, a1
-; LMULMAX2-RV32D-NEXT: vmseq.vi v0, v8, 0
-; LMULMAX2-RV32D-NEXT: li a1, 16
-; LMULMAX2-RV32D-NEXT: vmerge.vxm v8, v9, a1, v0
-; LMULMAX2-RV32D-NEXT: vse16.v v8, (a0)
-; LMULMAX2-RV32D-NEXT: ret
-;
-; LMULMAX2-RV64D-LABEL: cttz_v8i16:
-; LMULMAX2-RV64D: # %bb.0:
-; LMULMAX2-RV64D-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; LMULMAX2-RV64D-NEXT: vle16.v v8, (a0)
-; LMULMAX2-RV64D-NEXT: vrsub.vi v9, v8, 0
-; LMULMAX2-RV64D-NEXT: vand.vv v9, v8, v9
-; LMULMAX2-RV64D-NEXT: vfwcvt.f.xu.v v10, v9
-; LMULMAX2-RV64D-NEXT: vnsrl.wi v9, v10, 23
-; LMULMAX2-RV64D-NEXT: li a1, 127
-; LMULMAX2-RV64D-NEXT: vsub.vx v9, v9, a1
-; LMULMAX2-RV64D-NEXT: vmseq.vi v0, v8, 0
-; LMULMAX2-RV64D-NEXT: li a1, 16
-; LMULMAX2-RV64D-NEXT: vmerge.vxm v8, v9, a1, v0
-; LMULMAX2-RV64D-NEXT: vse16.v v8, (a0)
-; LMULMAX2-RV64D-NEXT: ret
-;
-; LMULMAX8-LABEL: cttz_v8i16:
-; LMULMAX8: # %bb.0:
-; LMULMAX8-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; LMULMAX8-NEXT: vle16.v v8, (a0)
-; LMULMAX8-NEXT: vrsub.vi v9, v8, 0
-; LMULMAX8-NEXT: vand.vv v9, v8, v9
-; LMULMAX8-NEXT: vfwcvt.f.xu.v v10, v9
-; LMULMAX8-NEXT: vnsrl.wi v9, v10, 23
-; LMULMAX8-NEXT: li a1, 127
-; LMULMAX8-NEXT: vsub.vx v9, v9, a1
-; LMULMAX8-NEXT: vmseq.vi v0, v8, 0
-; LMULMAX8-NEXT: li a1, 16
-; LMULMAX8-NEXT: vmerge.vxm v8, v9, a1, v0
-; LMULMAX8-NEXT: vse16.v v8, (a0)
-; LMULMAX8-NEXT: ret
+; RVI-LABEL: cttz_v8i16:
+; RVI: # %bb.0:
+; RVI-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RVI-NEXT: vle16.v v8, (a0)
+; RVI-NEXT: li a1, 1
+; RVI-NEXT: vsub.vx v9, v8, a1
+; RVI-NEXT: vnot.v v8, v8
+; RVI-NEXT: vand.vv v8, v8, v9
+; RVI-NEXT: vsrl.vi v9, v8, 1
+; RVI-NEXT: lui a1, 5
+; RVI-NEXT: addi a1, a1, 1365
+; RVI-NEXT: vand.vx v9, v9, a1
+; RVI-NEXT: vsub.vv v8, v8, v9
+; RVI-NEXT: lui a1, 3
+; RVI-NEXT: addi a1, a1, 819
+; RVI-NEXT: vand.vx v9, v8, a1
+; RVI-NEXT: vsrl.vi v8, v8, 2
+; RVI-NEXT: vand.vx v8, v8, a1
+; RVI-NEXT: vadd.vv v8, v9, v8
+; RVI-NEXT: vsrl.vi v9, v8, 4
+; RVI-NEXT: vadd.vv v8, v8, v9
+; RVI-NEXT: lui a1, 1
+; RVI-NEXT: addi a1, a1, -241
+; RVI-NEXT: vand.vx v8, v8, a1
+; RVI-NEXT: li a1, 257
+; RVI-NEXT: vmul.vx v8, v8, a1
+; RVI-NEXT: vsrl.vi v8, v8, 8
+; RVI-NEXT: vse16.v v8, (a0)
+; RVI-NEXT: ret
+;
+; RVF-LABEL: cttz_v8i16:
+; RVF: # %bb.0:
+; RVF-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RVF-NEXT: vle16.v v8, (a0)
+; RVF-NEXT: vrsub.vi v9, v8, 0
+; RVF-NEXT: vand.vv v9, v8, v9
+; RVF-NEXT: vfwcvt.f.xu.v v10, v9
+; RVF-NEXT: vnsrl.wi v9, v10, 23
+; RVF-NEXT: li a1, 127
+; RVF-NEXT: vsub.vx v9, v9, a1
+; RVF-NEXT: vmseq.vi v0, v8, 0
+; RVF-NEXT: li a1, 16
+; RVF-NEXT: vmerge.vxm v8, v9, a1, v0
+; RVF-NEXT: vse16.v v8, (a0)
+; RVF-NEXT: ret
+;
+; RVD-LABEL: cttz_v8i16:
+; RVD: # %bb.0:
+; RVD-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RVD-NEXT: vle16.v v8, (a0)
+; RVD-NEXT: vrsub.vi v9, v8, 0
+; RVD-NEXT: vand.vv v9, v8, v9
+; RVD-NEXT: vfwcvt.f.xu.v v10, v9
+; RVD-NEXT: vnsrl.wi v9, v10, 23
+; RVD-NEXT: li a1, 127
+; RVD-NEXT: vsub.vx v9, v9, a1
+; RVD-NEXT: vmseq.vi v0, v8, 0
+; RVD-NEXT: li a1, 16
+; RVD-NEXT: vmerge.vxm v8, v9, a1, v0
+; RVD-NEXT: vse16.v v8, (a0)
+; RVD-NEXT: ret
;
; ZVBB-LABEL: cttz_v8i16:
; ZVBB: # %bb.0:
@@ -259,154 +164,71 @@ define void @cttz_v8i16(ptr %x, ptr %y) nounwind {
declare <8 x i16> @llvm.cttz.v8i16(<8 x i16>, i1)
define void @cttz_v4i32(ptr %x, ptr %y) nounwind {
-; LMULMAX2-RV32I-LABEL: cttz_v4i32:
-; LMULMAX2-RV32I: # %bb.0:
-; LMULMAX2-RV32I-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; LMULMAX2-RV32I-NEXT: vle32.v v8, (a0)
-; LMULMAX2-RV32I-NEXT: li a1, 1
-; LMULMAX2-RV32I-NEXT: vsub.vx v9, v8, a1
-; LMULMAX2-RV32I-NEXT: vnot.v v8, v8
-; LMULMAX2-RV32I-NEXT: vand.vv v8, v8, v9
-; LMULMAX2-RV32I-NEXT: vsrl.vi v9, v8, 1
-; LMULMAX2-RV32I-NEXT: lui a1, 349525
-; LMULMAX2-RV32I-NEXT: addi a1, a1, 1365
-; LMULMAX2-RV32I-NEXT: vand.vx v9, v9, a1
-; LMULMAX2-RV32I-NEXT: vsub.vv v8, v8, v9
-; LMULMAX2-RV32I-NEXT: lui a1, 209715
-; LMULMAX2-RV32I-NEXT: addi a1, a1, 819
-; LMULMAX2-RV32I-NEXT: vand.vx v9, v8, a1
-; LMULMAX2-RV32I-NEXT: vsrl.vi v8, v8, 2
-; LMULMAX2-RV32I-NEXT: vand.vx v8, v8, a1
-; LMULMAX2-RV32I-NEXT: vadd.vv v8, v9, v8
-; LMULMAX2-RV32I-NEXT: vsrl.vi v9, v8, 4
-; LMULMAX2-RV32I-NEXT: vadd.vv v8, v8, v9
-; LMULMAX2-RV32I-NEXT: lui a1, 61681
-; LMULMAX2-RV32I-NEXT: addi a1, a1, -241
-; LMULMAX2-RV32I-NEXT: vand.vx v8, v8, a1
-; LMULMAX2-RV32I-NEXT: lui a1, 4112
-; LMULMAX2-RV32I-NEXT: addi a1, a1, 257
-; LMULMAX2-RV32I-NEXT: vmul.vx v8, v8, a1
-; LMULMAX2-RV32I-NEXT: vsrl.vi v8, v8, 24
-; LMULMAX2-RV32I-NEXT: vse32.v v8, (a0)
-; LMULMAX2-RV32I-NEXT: ret
-;
-; LMULMAX2-RV64I-LABEL: cttz_v4i32:
-; LMULMAX2-RV64I: # %bb.0:
-; LMULMAX2-RV64I-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; LMULMAX2-RV64I-NEXT: vle32.v v8, (a0)
-; LMULMAX2-RV64I-NEXT: li a1, 1
-; LMULMAX2-RV64I-NEXT: vsub.vx v9, v8, a1
-; LMULMAX2-RV64I-NEXT: vnot.v v8, v8
-; LMULMAX2-RV64I-NEXT: vand.vv v8, v8, v9
-; LMULMAX2-RV64I-NEXT: vsrl.vi v9, v8, 1
-; LMULMAX2-RV64I-NEXT: lui a1, 349525
-; LMULMAX2-RV64I-NEXT: addi a1, a1, 1365
-; LMULMAX2-RV64I-NEXT: vand.vx v9, v9, a1
-; LMULMAX2-RV64I-NEXT: vsub.vv v8, v8, v9
-; LMULMAX2-RV64I-NEXT: lui a1, 209715
-; LMULMAX2-RV64I-NEXT: addi a1, a1, 819
-; LMULMAX2-RV64I-NEXT: vand.vx v9, v8, a1
-; LMULMAX2-RV64I-NEXT: vsrl.vi v8, v8, 2
-; LMULMAX2-RV64I-NEXT: vand.vx v8, v8, a1
-; LMULMAX2-RV64I-NEXT: vadd.vv v8, v9, v8
-; LMULMAX2-RV64I-NEXT: vsrl.vi v9, v8, 4
-; LMULMAX2-RV64I-NEXT: vadd.vv v8, v8, v9
-; LMULMAX2-RV64I-NEXT: lui a1, 61681
-; LMULMAX2-RV64I-NEXT: addi a1, a1, -241
-; LMULMAX2-RV64I-NEXT: vand.vx v8, v8, a1
-; LMULMAX2-RV64I-NEXT: lui a1, 4112
-; LMULMAX2-RV64I-NEXT: addi a1, a1, 257
-; LMULMAX2-RV64I-NEXT: vmul.vx v8, v8, a1
-; LMULMAX2-RV64I-NEXT: vsrl.vi v8, v8, 24
-; LMULMAX2-RV64I-NEXT: vse32.v v8, (a0)
-; LMULMAX2-RV64I-NEXT: ret
-;
-; LMULMAX2-RV32F-LABEL: cttz_v4i32:
-; LMULMAX2-RV32F: # %bb.0:
-; LMULMAX2-RV32F-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; LMULMAX2-RV32F-NEXT: vle32.v v8, (a0)
-; LMULMAX2-RV32F-NEXT: vrsub.vi v9, v8, 0
-; LMULMAX2-RV32F-NEXT: vand.vv v9, v8, v9
-; LMULMAX2-RV32F-NEXT: fsrmi a1, 1
-; LMULMAX2-RV32F-NEXT: vfcvt.f.xu.v v9, v9
-; LMULMAX2-RV32F-NEXT: fsrm a1
-; LMULMAX2-RV32F-NEXT: vsrl.vi v9, v9, 23
-; LMULMAX2-RV32F-NEXT: li a1, 127
-; LMULMAX2-RV32F-NEXT: vsub.vx v9, v9, a1
-; LMULMAX2-RV32F-NEXT: vmseq.vi v0, v8, 0
-; LMULMAX2-RV32F-NEXT: li a1, 32
-; LMULMAX2-RV32F-NEXT: vmerge.vxm v8, v9, a1, v0
-; LMULMAX2-RV32F-NEXT: vse32.v v8, (a0)
-; LMULMAX2-RV32F-NEXT: ret
-;
-; LMULMAX2-RV64F-LABEL: cttz_v4i32:
-; LMULMAX2-RV64F: # %bb.0:
-; LMULMAX2-RV64F-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; LMULMAX2-RV64F-NEXT: vle32.v v8, (a0)
-; LMULMAX2-RV64F-NEXT: vrsub.vi v9, v8, 0
-; LMULMAX2-RV64F-NEXT: vand.vv v9, v8, v9
-; LMULMAX2-RV64F-NEXT: fsrmi a1, 1
-; LMULMAX2-RV64F-NEXT: vfcvt.f.xu.v v9, v9
-; LMULMAX2-RV64F-NEXT: fsrm a1
-; LMULMAX2-RV64F-NEXT: vsrl.vi v9, v9, 23
-; LMULMAX2-RV64F-NEXT: li a1, 127
-; LMULMAX2-RV64F-NEXT: vsub.vx v9, v9, a1
-; LMULMAX2-RV64F-NEXT: vmseq.vi v0, v8, 0
-; LMULMAX2-RV64F-NEXT: li a1, 32
-; LMULMAX2-RV64F-NEXT: vmerge.vxm v8, v9, a1, v0
-; LMULMAX2-RV64F-NEXT: vse32.v v8, (a0)
-; LMULMAX2-RV64F-NEXT: ret
-;
-; LMULMAX2-RV32D-LABEL: cttz_v4i32:
-; LMULMAX2-RV32D: # %bb.0:
-; LMULMAX2-RV32D-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; LMULMAX2-RV32D-NEXT: vle32.v v8, (a0)
-; LMULMAX2-RV32D-NEXT: vrsub.vi v9, v8, 0
-; LMULMAX2-RV32D-NEXT: vand.vv v9, v8, v9
-; LMULMAX2-RV32D-NEXT: vfwcvt.f.xu.v v10, v9
-; LMULMAX2-RV32D-NEXT: li a1, 52
-; LMULMAX2-RV32D-NEXT: vnsrl.wx v9, v10, a1
-; LMULMAX2-RV32D-NEXT: li a1, 1023
-; LMULMAX2-RV32D-NEXT: vsub.vx v9, v9, a1
-; LMULMAX2-RV32D-NEXT: vmseq.vi v0, v8, 0
-; LMULMAX2-RV32D-NEXT: li a1, 32
-; LMULMAX2-RV32D-NEXT: vmerge.vxm v8, v9, a1, v0
-; LMULMAX2-RV32D-NEXT: vse32.v v8, (a0)
-; LMULMAX2-RV32D-NEXT: ret
-;
-; LMULMAX2-RV64D-LABEL: cttz_v4i32:
-; LMULMAX2-RV64D: # %bb.0:
-; LMULMAX2-RV64D-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; LMULMAX2-RV64D-NEXT: vle32.v v8, (a0)
-; LMULMAX2-RV64D-NEXT: vrsub.vi v9, v8, 0
-; LMULMAX2-RV64D-NEXT: vand.vv v9, v8, v9
-; LMULMAX2-RV64D-NEXT: vfwcvt.f.xu.v v10, v9
-; LMULMAX2-RV64D-NEXT: li a1, 52
-; LMULMAX2-RV64D-NEXT: vnsrl.wx v9, v10, a1
-; LMULMAX2-RV64D-NEXT: li a1, 1023
-; LMULMAX2-RV64D-NEXT: vsub.vx v9, v9, a1
-; LMULMAX2-RV64D-NEXT: vmseq.vi v0, v8, 0
-; LMULMAX2-RV64D-NEXT: li a1, 32
-; LMULMAX2-RV64D-NEXT: vmerge.vxm v8, v9, a1, v0
-; LMULMAX2-RV64D-NEXT: vse32.v v8, (a0)
-; LMULMAX2-RV64D-NEXT: ret
-;
-; LMULMAX8-LABEL: cttz_v4i32:
-; LMULMAX8: # %bb.0:
-; LMULMAX8-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; LMULMAX8-NEXT: vle32.v v8, (a0)
-; LMULMAX8-NEXT: vrsub.vi v9, v8, 0
-; LMULMAX8-NEXT: vand.vv v9, v8, v9
-; LMULMAX8-NEXT: vfwcvt.f.xu.v v10, v9
-; LMULMAX8-NEXT: li a1, 52
-; LMULMAX8-NEXT: vnsrl.wx v9, v10, a1
-; LMULMAX8-NEXT: li a1, 1023
-; LMULMAX8-NEXT: vsub.vx v9, v9, a1
-; LMULMAX8-NEXT: vmseq.vi v0, v8, 0
-; LMULMAX8-NEXT: li a1, 32
-; LMULMAX8-NEXT: vmerge.vxm v8, v9, a1, v0
-; LMULMAX8-NEXT: vse32.v v8, (a0)
-; LMULMAX8-NEXT: ret
+; RVI-LABEL: cttz_v4i32:
+; RVI: # %bb.0:
+; RVI-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; RVI-NEXT: vle32.v v8, (a0)
+; RVI-NEXT: li a1, 1
+; RVI-NEXT: vsub.vx v9, v8, a1
+; RVI-NEXT: vnot.v v8, v8
+; RVI-NEXT: vand.vv v8, v8, v9
+; RVI-NEXT: vsrl.vi v9, v8, 1
+; RVI-NEXT: lui a1, 349525
+; RVI-NEXT: addi a1, a1, 1365
+; RVI-NEXT: vand.vx v9, v9, a1
+; RVI-NEXT: vsub.vv v8, v8, v9
+; RVI-NEXT: lui a1, 209715
+; RVI-NEXT: addi a1, a1, 819
+; RVI-NEXT: vand.vx v9, v8, a1
+; RVI-NEXT: vsrl.vi v8, v8, 2
+; RVI-NEXT: vand.vx v8, v8, a1
+; RVI-NEXT: vadd.vv v8, v9, v8
+; RVI-NEXT: vsrl.vi v9, v8, 4
+; RVI-NEXT: vadd.vv v8, v8, v9
+; RVI-NEXT: lui a1, 61681
+; RVI-NEXT: addi a1, a1, -241
+; RVI-NEXT: vand.vx v8, v8, a1
+; RVI-NEXT: lui a1, 4112
+; RVI-NEXT: addi a1, a1, 257
+; RVI-NEXT: vmul.vx v8, v8, a1
+; RVI-NEXT: vsrl.vi v8, v8, 24
+; RVI-NEXT: vse32.v v8, (a0)
+; RVI-NEXT: ret
+;
+; RVF-LABEL: cttz_v4i32:
+; RVF: # %bb.0:
+; RVF-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; RVF-NEXT: vle32.v v8, (a0)
+; RVF-NEXT: vrsub.vi v9, v8, 0
+; RVF-NEXT: vand.vv v9, v8, v9
+; RVF-NEXT: fsrmi a1, 1
+; RVF-NEXT: vfcvt.f.xu.v v9, v9
+; RVF-NEXT: fsrm a1
+; RVF-NEXT: vsrl.vi v9, v9, 23
+; RVF-NEXT: li a1, 127
+; RVF-NEXT: vsub.vx v9, v9, a1
+; RVF-NEXT: vmseq.vi v0, v8, 0
+; RVF-NEXT: li a1, 32
+; RVF-NEXT: vmerge.vxm v8, v9, a1, v0
+; RVF-NEXT: vse32.v v8, (a0)
+; RVF-NEXT: ret
+;
+; RVD-LABEL: cttz_v4i32:
+; RVD: # %bb.0:
+; RVD-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; RVD-NEXT: vle32.v v8, (a0)
+; RVD-NEXT: vrsub.vi v9, v8, 0
+; RVD-NEXT: vand.vv v9, v8, v9
+; RVD-NEXT: vfwcvt.f.xu.v v10, v9
+; RVD-NEXT: li a1, 52
+; RVD-NEXT: vnsrl.wx v9, v10, a1
+; RVD-NEXT: li a1, 1023
+; RVD-NEXT: vsub.vx v9, v9, a1
+; RVD-NEXT: vmseq.vi v0, v8, 0
+; RVD-NEXT: li a1, 32
+; RVD-NEXT: vmerge.vxm v8, v9, a1, v0
+; RVD-NEXT: vse32.v v8, (a0)
+; RVD-NEXT: ret
;
; ZVBB-LABEL: cttz_v4i32:
; ZVBB: # %bb.0:
@@ -424,187 +246,149 @@ define void @cttz_v4i32(ptr %x, ptr %y) nounwind {
declare <4 x i32> @llvm.cttz.v4i32(<4 x i32>, i1)
define void @cttz_v2i64(ptr %x, ptr %y) nounwind {
-; LMULMAX2-RV32I-LABEL: cttz_v2i64:
-; LMULMAX2-RV32I: # %bb.0:
-; LMULMAX2-RV32I-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; LMULMAX2-RV32I-NEXT: vle64.v v8, (a0)
-; LMULMAX2-RV32I-NEXT: li a1, 1
-; LMULMAX2-RV32I-NEXT: vsub.vx v9, v8, a1
-; LMULMAX2-RV32I-NEXT: vnot.v v8, v8
-; LMULMAX2-RV32I-NEXT: vand.vv v8, v8, v9
-; LMULMAX2-RV32I-NEXT: vsrl.vi v9, v8, 1
-; LMULMAX2-RV32I-NEXT: lui a1, 349525
-; LMULMAX2-RV32I-NEXT: addi a1, a1, 1365
-; LMULMAX2-RV32I-NEXT: vsetvli a2, zero, e32, m1, ta, ma
-; LMULMAX2-RV32I-NEXT: vmv.v.x v10, a1
-; LMULMAX2-RV32I-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; LMULMAX2-RV32I-NEXT: vand.vv v9, v9, v10
-; LMULMAX2-RV32I-NEXT: vsub.vv v8, v8, v9
-; LMULMAX2-RV32I-NEXT: lui a1, 209715
-; LMULMAX2-RV32I-NEXT: addi a1, a1, 819
-; LMULMAX2-RV32I-NEXT: vsetvli a2, zero, e32, m1, ta, ma
-; LMULMAX2-RV32I-NEXT: vmv.v.x v9, a1
-; LMULMAX2-RV32I-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; LMULMAX2-RV32I-NEXT: vand.vv v10, v8, v9
-; LMULMAX2-RV32I-NEXT: vsrl.vi v8, v8, 2
-; LMULMAX2-RV32I-NEXT: vand.vv v8, v8, v9
-; LMULMAX2-RV32I-NEXT: vadd.vv v8, v10, v8
-; LMULMAX2-RV32I-NEXT: vsrl.vi v9, v8, 4
-; LMULMAX2-RV32I-NEXT: vadd.vv v8, v8, v9
-; LMULMAX2-RV32I-NEXT: lui a1, 61681
-; LMULMAX2-RV32I-NEXT: addi a1, a1, -241
-; LMULMAX2-RV32I-NEXT: vsetvli a2, zero, e32, m1, ta, ma
-; LMULMAX2-RV32I-NEXT: vmv.v.x v9, a1
-; LMULMAX2-RV32I-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; LMULMAX2-RV32I-NEXT: vand.vv v8, v8, v9
-; LMULMAX2-RV32I-NEXT: lui a1, 4112
-; LMULMAX2-RV32I-NEXT: addi a1, a1, 257
-; LMULMAX2-RV32I-NEXT: vsetvli a2, zero, e32, m1, ta, ma
-; LMULMAX2-RV32I-NEXT: vmv.v.x v9, a1
-; LMULMAX2-RV32I-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; LMULMAX2-RV32I-NEXT: vmul.vv v8, v8, v9
-; LMULMAX2-RV32I-NEXT: li a1, 56
-; LMULMAX2-RV32I-NEXT: vsrl.vx v8, v8, a1
-; LMULMAX2-RV32I-NEXT: vse64.v v8, (a0)
-; LMULMAX2-RV32I-NEXT: ret
-;
-; LMULMAX2-RV64I-LABEL: cttz_v2i64:
-; LMULMAX2-RV64I: # %bb.0:
-; LMULMAX2-RV64I-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; LMULMAX2-RV64I-NEXT: vle64.v v8, (a0)
-; LMULMAX2-RV64I-NEXT: li a1, 1
-; LMULMAX2-RV64I-NEXT: vsub.vx v9, v8, a1
-; LMULMAX2-RV64I-NEXT: vnot.v v8, v8
-; LMULMAX2-RV64I-NEXT: vand.vv v8, v8, v9
-; LMULMAX2-RV64I-NEXT: vsrl.vi v9, v8, 1
-; LMULMAX2-RV64I-NEXT: lui a1, 349525
-; LMULMAX2-RV64I-NEXT: addiw a1, a1, 1365
-; LMULMAX2-RV64I-NEXT: slli a2, a1, 32
-; LMULMAX2-RV64I-NEXT: add a1, a1, a2
-; LMULMAX2-RV64I-NEXT: vand.vx v9, v9, a1
-; LMULMAX2-RV64I-NEXT: vsub.vv v8, v8, v9
-; LMULMAX2-RV64I-NEXT: lui a1, 209715
-; LMULMAX2-RV64I-NEXT: addiw a1, a1, 819
-; LMULMAX2-RV64I-NEXT: slli a2, a1, 32
-; LMULMAX2-RV64I-NEXT: add a1, a1, a2
-; LMULMAX2-RV64I-NEXT: vand.vx v9, v8, a1
-; LMULMAX2-RV64I-NEXT: vsrl.vi v8, v8, 2
-; LMULMAX2-RV64I-NEXT: vand.vx v8, v8, a1
-; LMULMAX2-RV64I-NEXT: vadd.vv v8, v9, v8
-; LMULMAX2-RV64I-NEXT: vsrl.vi v9, v8, 4
-; LMULMAX2-RV64I-NEXT: vadd.vv v8, v8, v9
-; LMULMAX2-RV64I-NEXT: lui a1, 61681
-; LMULMAX2-RV64I-NEXT: addiw a1, a1, -241
-; LMULMAX2-RV64I-NEXT: slli a2, a1, 32
-; LMULMAX2-RV64I-NEXT: add a1, a1, a2
-; LMULMAX2-RV64I-NEXT: vand.vx v8, v8, a1
-; LMULMAX2-RV64I-NEXT: lui a1, 4112
-; LMULMAX2-RV64I-NEXT: addiw a1, a1, 257
-; LMULMAX2-RV64I-NEXT: slli a2, a1, 32
-; LMULMAX2-RV64I-NEXT: add a1, a1, a2
-; LMULMAX2-RV64I-NEXT: vmul.vx v8, v8, a1
-; LMULMAX2-RV64I-NEXT: li a1, 56
-; LMULMAX2-RV64I-NEXT: vsrl.vx v8, v8, a1
-; LMULMAX2-RV64I-NEXT: vse64.v v8, (a0)
-; LMULMAX2-RV64I-NEXT: ret
-;
-; LMULMAX2-RV32F-LABEL: cttz_v2i64:
-; LMULMAX2-RV32F: # %bb.0:
-; LMULMAX2-RV32F-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; LMULMAX2-RV32F-NEXT: vle64.v v8, (a0)
-; LMULMAX2-RV32F-NEXT: vrsub.vi v9, v8, 0
-; LMULMAX2-RV32F-NEXT: vand.vv v9, v8, v9
-; LMULMAX2-RV32F-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
-; LMULMAX2-RV32F-NEXT: fsrmi a1, 1
-; LMULMAX2-RV32F-NEXT: vfncvt.f.xu.w v10, v9
-; LMULMAX2-RV32F-NEXT: fsrm a1
-; LMULMAX2-RV32F-NEXT: vsrl.vi v9, v10, 23
-; LMULMAX2-RV32F-NEXT: vsetvli zero, zero, e64, m1, ta, ma
-; LMULMAX2-RV32F-NEXT: vzext.vf2 v10, v9
-; LMULMAX2-RV32F-NEXT: li a1, 127
-; LMULMAX2-RV32F-NEXT: vsub.vx v9, v10, a1
-; LMULMAX2-RV32F-NEXT: vmseq.vi v0, v8, 0
-; LMULMAX2-RV32F-NEXT: li a1, 64
-; LMULMAX2-RV32F-NEXT: vmerge.vxm v8, v9, a1, v0
-; LMULMAX2-RV32F-NEXT: vse64.v v8, (a0)
-; LMULMAX2-RV32F-NEXT: ret
-;
-; LMULMAX2-RV64F-LABEL: cttz_v2i64:
-; LMULMAX2-RV64F: # %bb.0:
-; LMULMAX2-RV64F-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; LMULMAX2-RV64F-NEXT: vle64.v v8, (a0)
-; LMULMAX2-RV64F-NEXT: vrsub.vi v9, v8, 0
-; LMULMAX2-RV64F-NEXT: vand.vv v9, v8, v9
-; LMULMAX2-RV64F-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
-; LMULMAX2-RV64F-NEXT: fsrmi a1, 1
-; LMULMAX2-RV64F-NEXT: vfncvt.f.xu.w v10, v9
-; LMULMAX2-RV64F-NEXT: fsrm a1
-; LMULMAX2-RV64F-NEXT: vsrl.vi v9, v10, 23
-; LMULMAX2-RV64F-NEXT: li a1, 127
-; LMULMAX2-RV64F-NEXT: vwsubu.vx v10, v9, a1
-; LMULMAX2-RV64F-NEXT: vsetvli zero, zero, e64, m1, ta, ma
-; LMULMAX2-RV64F-NEXT: vmseq.vi v0, v8, 0
-; LMULMAX2-RV64F-NEXT: li a1, 64
-; LMULMAX2-RV64F-NEXT: vmerge.vxm v8, v10, a1, v0
-; LMULMAX2-RV64F-NEXT: vse64.v v8, (a0)
-; LMULMAX2-RV64F-NEXT: ret
-;
-; LMULMAX2-RV32D-LABEL: cttz_v2i64:
-; LMULMAX2-RV32D: # %bb.0:
-; LMULMAX2-RV32D-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; LMULMAX2-RV32D-NEXT: vle64.v v8, (a0)
-; LMULMAX2-RV32D-NEXT: vrsub.vi v9, v8, 0
-; LMULMAX2-RV32D-NEXT: vand.vv v9, v8, v9
-; LMULMAX2-RV32D-NEXT: fsrmi a1, 1
-; LMULMAX2-RV32D-NEXT: vfcvt.f.xu.v v9, v9
-; LMULMAX2-RV32D-NEXT: fsrm a1
-; LMULMAX2-RV32D-NEXT: li a1, 52
-; LMULMAX2-RV32D-NEXT: vsrl.vx v9, v9, a1
-; LMULMAX2-RV32D-NEXT: li a1, 1023
-; LMULMAX2-RV32D-NEXT: vsub.vx v9, v9, a1
-; LMULMAX2-RV32D-NEXT: vmseq.vi v0, v8, 0
-; LMULMAX2-RV32D-NEXT: li a1, 64
-; LMULMAX2-RV32D-NEXT: vmerge.vxm v8, v9, a1, v0
-; LMULMAX2-RV32D-NEXT: vse64.v v8, (a0)
-; LMULMAX2-RV32D-NEXT: ret
-;
-; LMULMAX2-RV64D-LABEL: cttz_v2i64:
-; LMULMAX2-RV64D: # %bb.0:
-; LMULMAX2-RV64D-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; LMULMAX2-RV64D-NEXT: vle64.v v8, (a0)
-; LMULMAX2-RV64D-NEXT: vrsub.vi v9, v8, 0
-; LMULMAX2-RV64D-NEXT: vand.vv v9, v8, v9
-; LMULMAX2-RV64D-NEXT: fsrmi a1, 1
-; LMULMAX2-RV64D-NEXT: vfcvt.f.xu.v v9, v9
-; LMULMAX2-RV64D-NEXT: fsrm a1
-; LMULMAX2-RV64D-NEXT: li a1, 52
-; LMULMAX2-RV64D-NEXT: vsrl.vx v9, v9, a1
-; LMULMAX2-RV64D-NEXT: li a1, 1023
-; LMULMAX2-RV64D-NEXT: vsub.vx v9, v9, a1
-; LMULMAX2-RV64D-NEXT: vmseq.vi v0, v8, 0
-; LMULMAX2-RV64D-NEXT: li a1, 64
-; LMULMAX2-RV64D-NEXT: vmerge.vxm v8, v9, a1, v0
-; LMULMAX2-RV64D-NEXT: vse64.v v8, (a0)
-; LMULMAX2-RV64D-NEXT: ret
-;
-; LMULMAX8-LABEL: cttz_v2i64:
-; LMULMAX8: # %bb.0:
-; LMULMAX8-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; LMULMAX8-NEXT: vle64.v v8, (a0)
-; LMULMAX8-NEXT: vrsub.vi v9, v8, 0
-; LMULMAX8-NEXT: vand.vv v9, v8, v9
-; LMULMAX8-NEXT: fsrmi a1, 1
-; LMULMAX8-NEXT: vfcvt.f.xu.v v9, v9
-; LMULMAX8-NEXT: fsrm a1
-; LMULMAX8-NEXT: li a1, 52
-; LMULMAX8-NEXT: vsrl.vx v9, v9, a1
-; LMULMAX8-NEXT: li a1, 1023
-; LMULMAX8-NEXT: vsub.vx v9, v9, a1
-; LMULMAX8-NEXT: vmseq.vi v0, v8, 0
-; LMULMAX8-NEXT: li a1, 64
-; LMULMAX8-NEXT: vmerge.vxm v8, v9, a1, v0
-; LMULMAX8-NEXT: vse64.v v8, (a0)
-; LMULMAX8-NEXT: ret
+; RV32I-LABEL: cttz_v2i64:
+; RV32I: # %bb.0:
+; RV32I-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; RV32I-NEXT: vle64.v v8, (a0)
+; RV32I-NEXT: li a1, 1
+; RV32I-NEXT: vsub.vx v9, v8, a1
+; RV32I-NEXT: vnot.v v8, v8
+; RV32I-NEXT: vand.vv v8, v8, v9
+; RV32I-NEXT: vsrl.vi v9, v8, 1
+; RV32I-NEXT: lui a1, 349525
+; RV32I-NEXT: addi a1, a1, 1365
+; RV32I-NEXT: vsetvli a2, zero, e32, m1, ta, ma
+; RV32I-NEXT: vmv.v.x v10, a1
+; RV32I-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; RV32I-NEXT: vand.vv v9, v9, v10
+; RV32I-NEXT: vsub.vv v8, v8, v9
+; RV32I-NEXT: lui a1, 209715
+; RV32I-NEXT: addi a1, a1, 819
+; RV32I-NEXT: vsetvli a2, zero, e32, m1, ta, ma
+; RV32I-NEXT: vmv.v.x v9, a1
+; RV32I-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; RV32I-NEXT: vand.vv v10, v8, v9
+; RV32I-NEXT: vsrl.vi v8, v8, 2
+; RV32I-NEXT: vand.vv v8, v8, v9
+; RV32I-NEXT: vadd.vv v8, v10, v8
+; RV32I-NEXT: vsrl.vi v9, v8, 4
+; RV32I-NEXT: vadd.vv v8, v8, v9
+; RV32I-NEXT: lui a1, 61681
+; RV32I-NEXT: addi a1, a1, -241
+; RV32I-NEXT: vsetvli a2, zero, e32, m1, ta, ma
+; RV32I-NEXT: vmv.v.x v9, a1
+; RV32I-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; RV32I-NEXT: vand.vv v8, v8, v9
+; RV32I-NEXT: lui a1, 4112
+; RV32I-NEXT: addi a1, a1, 257
+; RV32I-NEXT: vsetvli a2, zero, e32, m1, ta, ma
+; RV32I-NEXT: vmv.v.x v9, a1
+; RV32I-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; RV32I-NEXT: vmul.vv v8, v8, v9
+; RV32I-NEXT: li a1, 56
+; RV32I-NEXT: vsrl.vx v8, v8, a1
+; RV32I-NEXT: vse64.v v8, (a0)
+; RV32I-NEXT: ret
+;
+; RV64I-LABEL: cttz_v2i64:
+; RV64I: # %bb.0:
+; RV64I-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; RV64I-NEXT: vle64.v v8, (a0)
+; RV64I-NEXT: li a1, 1
+; RV64I-NEXT: vsub.vx v9, v8, a1
+; RV64I-NEXT: vnot.v v8, v8
+; RV64I-NEXT: vand.vv v8, v8, v9
+; RV64I-NEXT: vsrl.vi v9, v8, 1
+; RV64I-NEXT: lui a1, 349525
+; RV64I-NEXT: addiw a1, a1, 1365
+; RV64I-NEXT: slli a2, a1, 32
+; RV64I-NEXT: add a1, a1, a2
+; RV64I-NEXT: vand.vx v9, v9, a1
+; RV64I-NEXT: vsub.vv v8, v8, v9
+; RV64I-NEXT: lui a1, 209715
+; RV64I-NEXT: addiw a1, a1, 819
+; RV64I-NEXT: slli a2, a1, 32
+; RV64I-NEXT: add a1, a1, a2
+; RV64I-NEXT: vand.vx v9, v8, a1
+; RV64I-NEXT: vsrl.vi v8, v8, 2
+; RV64I-NEXT: vand.vx v8, v8, a1
+; RV64I-NEXT: vadd.vv v8, v9, v8
+; RV64I-NEXT: vsrl.vi v9, v8, 4
+; RV64I-NEXT: vadd.vv v8, v8, v9
+; RV64I-NEXT: lui a1, 61681
+; RV64I-NEXT: addiw a1, a1, -241
+; RV64I-NEXT: slli a2, a1, 32
+; RV64I-NEXT: add a1, a1, a2
+; RV64I-NEXT: vand.vx v8, v8, a1
+; RV64I-NEXT: lui a1, 4112
+; RV64I-NEXT: addiw a1, a1, 257
+; RV64I-NEXT: slli a2, a1, 32
+; RV64I-NEXT: add a1, a1, a2
+; RV64I-NEXT: vmul.vx v8, v8, a1
+; RV64I-NEXT: li a1, 56
+; RV64I-NEXT: vsrl.vx v8, v8, a1
+; RV64I-NEXT: vse64.v v8, (a0)
+; RV64I-NEXT: ret
+;
+; RV32F-LABEL: cttz_v2i64:
+; RV32F: # %bb.0:
+; RV32F-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; RV32F-NEXT: vle64.v v8, (a0)
+; RV32F-NEXT: vrsub.vi v9, v8, 0
+; RV32F-NEXT: vand.vv v9, v8, v9
+; RV32F-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
+; RV32F-NEXT: fsrmi a1, 1
+; RV32F-NEXT: vfncvt.f.xu.w v10, v9
+; RV32F-NEXT: fsrm a1
+; RV32F-NEXT: vsrl.vi v9, v10, 23
+; RV32F-NEXT: vsetvli zero, zero, e64, m1, ta, ma
+; RV32F-NEXT: vzext.vf2 v10, v9
+; RV32F-NEXT: li a1, 127
+; RV32F-NEXT: vsub.vx v9, v10, a1
+; RV32F-NEXT: vmseq.vi v0, v8, 0
+; RV32F-NEXT: li a1, 64
+; RV32F-NEXT: vmerge.vxm v8, v9, a1, v0
+; RV32F-NEXT: vse64.v v8, (a0)
+; RV32F-NEXT: ret
+;
+; RV64F-LABEL: cttz_v2i64:
+; RV64F: # %bb.0:
+; RV64F-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; RV64F-NEXT: vle64.v v8, (a0)
+; RV64F-NEXT: vrsub.vi v9, v8, 0
+; RV64F-NEXT: vand.vv v9, v8, v9
+; RV64F-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
+; RV64F-NEXT: fsrmi a1, 1
+; RV64F-NEXT: vfncvt.f.xu.w v10, v9
+; RV64F-NEXT: fsrm a1
+; RV64F-NEXT: vsrl.vi v9, v10, 23
+; RV64F-NEXT: li a1, 127
+; RV64F-NEXT: vwsubu.vx v10, v9, a1
+; RV64F-NEXT: vsetvli zero, zero, e64, m1, ta, ma
+; RV64F-NEXT: vmseq.vi v0, v8, 0
+; RV64F-NEXT: li a1, 64
+; RV64F-NEXT: vmerge.vxm v8, v10, a1, v0
+; RV64F-NEXT: vse64.v v8, (a0)
+; RV64F-NEXT: ret
+;
+; RVD-LABEL: cttz_v2i64:
+; RVD: # %bb.0:
+; RVD-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; RVD-NEXT: vle64.v v8, (a0)
+; RVD-NEXT: vrsub.vi v9, v8, 0
+; RVD-NEXT: vand.vv v9, v8, v9
+; RVD-NEXT: fsrmi a1, 1
+; RVD-NEXT: vfcvt.f.xu.v v9, v9
+; RVD-NEXT: fsrm a1
+; RVD-NEXT: li a1, 52
+; RVD-NEXT: vsrl.vx v9, v9, a1
+; RVD-NEXT: li a1, 1023
+; RVD-NEXT: vsub.vx v9, v9, a1
+; RVD-NEXT: vmseq.vi v0, v8, 0
+; RVD-NEXT: li a1, 64
+; RVD-NEXT: vmerge.vxm v8, v9, a1, v0
+; RVD-NEXT: vse64.v v8, (a0)
+; RVD-NEXT: ret
;
; ZVBB-LABEL: cttz_v2i64:
; ZVBB: # %bb.0:
@@ -622,88 +406,69 @@ define void @cttz_v2i64(ptr %x, ptr %y) nounwind {
declare <2 x i64> @llvm.cttz.v2i64(<2 x i64>, i1)
define void @cttz_v32i8(ptr %x, ptr %y) nounwind {
-; LMULMAX2-LABEL: cttz_v32i8:
-; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: li a1, 32
-; LMULMAX2-NEXT: vsetvli zero, a1, e8, m2, ta, ma
-; LMULMAX2-NEXT: vle8.v v8, (a0)
-; LMULMAX2-NEXT: li a1, 1
-; LMULMAX2-NEXT: vsub.vx v10, v8, a1
-; LMULMAX2-NEXT: vnot.v v8, v8
-; LMULMAX2-NEXT: vand.vv v8, v8, v10
-; LMULMAX2-NEXT: vsrl.vi v10, v8, 1
-; LMULMAX2-NEXT: li a1, 85
-; LMULMAX2-NEXT: vand.vx v10, v10, a1
-; LMULMAX2-NEXT: vsub.vv v8, v8, v10
-; LMULMAX2-NEXT: li a1, 51
-; LMULMAX2-NEXT: vand.vx v10, v8, a1
-; LMULMAX2-NEXT: vsrl.vi v8, v8, 2
-; LMULMAX2-NEXT: vand.vx v8, v8, a1
-; LMULMAX2-NEXT: vadd.vv v8, v10, v8
-; LMULMAX2-NEXT: vsrl.vi v10, v8, 4
-; LMULMAX2-NEXT: vadd.vv v8, v8, v10
-; LMULMAX2-NEXT: vand.vi v8, v8, 15
-; LMULMAX2-NEXT: vse8.v v8, (a0)
-; LMULMAX2-NEXT: ret
-;
-; LMULMAX1-LABEL: cttz_v32i8:
-; LMULMAX1: # %bb.0:
-; LMULMAX1-NEXT: vsetivli zero, 16, e8, m1, ta, ma
-; LMULMAX1-NEXT: addi a1, a0, 16
-; LMULMAX1-NEXT: vle8.v v8, (a1)
-; LMULMAX1-NEXT: vle8.v v9, (a0)
-; LMULMAX1-NEXT: li a2, 1
-; LMULMAX1-NEXT: vsub.vx v10, v8, a2
-; LMULMAX1-NEXT: vnot.v v8, v8
-; LMULMAX1-NEXT: vand.vv v8, v8, v10
-; LMULMAX1-NEXT: vsrl.vi v10, v8, 1
-; LMULMAX1-NEXT: li a3, 85
-; LMULMAX1-NEXT: vand.vx v10, v10, a3
-; LMULMAX1-NEXT: vsub.vv v8, v8, v10
-; LMULMAX1-NEXT: li a4, 51
-; LMULMAX1-NEXT: vand.vx v10, v8, a4
-; LMULMAX1-NEXT: vsrl.vi v8, v8, 2
-; LMULMAX1-NEXT: vand.vx v8, v8, a4
-; LMULMAX1-NEXT: vadd.vv v8, v10, v8
-; LMULMAX1-NEXT: vsrl.vi v10, v8, 4
-; LMULMAX1-NEXT: vadd.vv v8, v8, v10
-; LMULMAX1-NEXT: vand.vi v8, v8, 15
-; LMULMAX1-NEXT: vsub.vx v10, v9, a2
-; LMULMAX1-NEXT: vnot.v v9, v9
-; LMULMAX1-NEXT: vand.vv v9, v9, v10
-; LMULMAX1-NEXT: vsrl.vi v10, v9, 1
-; LMULMAX1-NEXT: vand.vx v10, v10, a3
-; LMULMAX1-NEXT: vsub.vv v9, v9, v10
-; LMULMAX1-NEXT: vand.vx v10, v9, a4
-; LMULMAX1-NEXT: vsrl.vi v9, v9, 2
-; LMULMAX1-NEXT: vand.vx v9, v9, a4
-; LMULMAX1-NEXT: vadd.vv v9, v10, v9
-; LMULMAX1-NEXT: vsrl.vi v10, v9, 4
-; LMULMAX1-NEXT: vadd.vv v9, v9, v10
-; LMULMAX1-NEXT: vand.vi v9, v9, 15
-; LMULMAX1-NEXT: vse8.v v9, (a0)
-; LMULMAX1-NEXT: vse8.v v8, (a1)
-; LMULMAX1-NEXT: ret
-;
-; LMULMAX8-LABEL: cttz_v32i8:
-; LMULMAX8: # %bb.0:
-; LMULMAX8-NEXT: li a1, 32
-; LMULMAX8-NEXT: vsetvli zero, a1, e8, m2, ta, ma
-; LMULMAX8-NEXT: vle8.v v8, (a0)
-; LMULMAX8-NEXT: vrsub.vi v10, v8, 0
-; LMULMAX8-NEXT: vand.vv v10, v8, v10
-; LMULMAX8-NEXT: vsetvli zero, zero, e16, m4, ta, ma
-; LMULMAX8-NEXT: vzext.vf2 v12, v10
-; LMULMAX8-NEXT: vfwcvt.f.xu.v v16, v12
-; LMULMAX8-NEXT: vnsrl.wi v12, v16, 23
-; LMULMAX8-NEXT: vsetvli zero, zero, e8, m2, ta, ma
-; LMULMAX8-NEXT: vnsrl.wi v10, v12, 0
-; LMULMAX8-NEXT: li a1, 127
-; LMULMAX8-NEXT: vmseq.vi v0, v8, 0
-; LMULMAX8-NEXT: vsub.vx v8, v10, a1
-; LMULMAX8-NEXT: vmerge.vim v8, v8, 8, v0
-; LMULMAX8-NEXT: vse8.v v8, (a0)
-; LMULMAX8-NEXT: ret
+; RVI-LABEL: cttz_v32i8:
+; RVI: # %bb.0:
+; RVI-NEXT: li a1, 32
+; RVI-NEXT: vsetvli zero, a1, e8, m2, ta, ma
+; RVI-NEXT: vle8.v v8, (a0)
+; RVI-NEXT: li a1, 1
+; RVI-NEXT: vsub.vx v10, v8, a1
+; RVI-NEXT: vnot.v v8, v8
+; RVI-NEXT: vand.vv v8, v8, v10
+; RVI-NEXT: vsrl.vi v10, v8, 1
+; RVI-NEXT: li a1, 85
+; RVI-NEXT: vand.vx v10, v10, a1
+; RVI-NEXT: vsub.vv v8, v8, v10
+; RVI-NEXT: li a1, 51
+; RVI-NEXT: vand.vx v10, v8, a1
+; RVI-NEXT: vsrl.vi v8, v8, 2
+; RVI-NEXT: vand.vx v8, v8, a1
+; RVI-NEXT: vadd.vv v8, v10, v8
+; RVI-NEXT: vsrl.vi v10, v8, 4
+; RVI-NEXT: vadd.vv v8, v8, v10
+; RVI-NEXT: vand.vi v8, v8, 15
+; RVI-NEXT: vse8.v v8, (a0)
+; RVI-NEXT: ret
+;
+; RVF-LABEL: cttz_v32i8:
+; RVF: # %bb.0:
+; RVF-NEXT: li a1, 32
+; RVF-NEXT: vsetvli zero, a1, e8, m2, ta, ma
+; RVF-NEXT: vle8.v v8, (a0)
+; RVF-NEXT: vrsub.vi v10, v8, 0
+; RVF-NEXT: vand.vv v10, v8, v10
+; RVF-NEXT: vsetvli zero, zero, e16, m4, ta, ma
+; RVF-NEXT: vzext.vf2 v12, v10
+; RVF-NEXT: vfwcvt.f.xu.v v16, v12
+; RVF-NEXT: vnsrl.wi v12, v16, 23
+; RVF-NEXT: vsetvli zero, zero, e8, m2, ta, ma
+; RVF-NEXT: vnsrl.wi v10, v12, 0
+; RVF-NEXT: li a1, 127
+; RVF-NEXT: vmseq.vi v0, v8, 0
+; RVF-NEXT: vsub.vx v8, v10, a1
+; RVF-NEXT: vmerge.vim v8, v8, 8, v0
+; RVF-NEXT: vse8.v v8, (a0)
+; RVF-NEXT: ret
+;
+; RVD-LABEL: cttz_v32i8:
+; RVD: # %bb.0:
+; RVD-NEXT: li a1, 32
+; RVD-NEXT: vsetvli zero, a1, e8, m2, ta, ma
+; RVD-NEXT: vle8.v v8, (a0)
+; RVD-NEXT: vrsub.vi v10, v8, 0
+; RVD-NEXT: vand.vv v10, v8, v10
+; RVD-NEXT: vsetvli zero, zero, e16, m4, ta, ma
+; RVD-NEXT: vzext.vf2 v12, v10
+; RVD-NEXT: vfwcvt.f.xu.v v16, v12
+; RVD-NEXT: vnsrl.wi v12, v16, 23
+; RVD-NEXT: vsetvli zero, zero, e8, m2, ta, ma
+; RVD-NEXT: vnsrl.wi v10, v12, 0
+; RVD-NEXT: li a1, 127
+; RVD-NEXT: vmseq.vi v0, v8, 0
+; RVD-NEXT: vsub.vx v8, v10, a1
+; RVD-NEXT: vmerge.vim v8, v8, 8, v0
+; RVD-NEXT: vse8.v v8, (a0)
+; RVD-NEXT: ret
;
; ZVBB-LABEL: cttz_v32i8:
; ZVBB: # %bb.0:
@@ -722,99 +487,67 @@ define void @cttz_v32i8(ptr %x, ptr %y) nounwind {
declare <32 x i8> @llvm.cttz.v32i8(<32 x i8>, i1)
define void @cttz_v16i16(ptr %x, ptr %y) nounwind {
-; LMULMAX2-LABEL: cttz_v16i16:
-; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; LMULMAX2-NEXT: vle16.v v8, (a0)
-; LMULMAX2-NEXT: li a1, 1
-; LMULMAX2-NEXT: vsub.vx v10, v8, a1
-; LMULMAX2-NEXT: vnot.v v8, v8
-; LMULMAX2-NEXT: vand.vv v8, v8, v10
-; LMULMAX2-NEXT: vsrl.vi v10, v8, 1
-; LMULMAX2-NEXT: lui a1, 5
-; LMULMAX2-NEXT: addi a1, a1, 1365
-; LMULMAX2-NEXT: vand.vx v10, v10, a1
-; LMULMAX2-NEXT: vsub.vv v8, v8, v10
-; LMULMAX2-NEXT: lui a1, 3
-; LMULMAX2-NEXT: addi a1, a1, 819
-; LMULMAX2-NEXT: vand.vx v10, v8, a1
-; LMULMAX2-NEXT: vsrl.vi v8, v8, 2
-; LMULMAX2-NEXT: vand.vx v8, v8, a1
-; LMULMAX2-NEXT: vadd.vv v8, v10, v8
-; LMULMAX2-NEXT: vsrl.vi v10, v8, 4
-; LMULMAX2-NEXT: vadd.vv v8, v8, v10
-; LMULMAX2-NEXT: lui a1, 1
-; LMULMAX2-NEXT: addi a1, a1, -241
-; LMULMAX2-NEXT: vand.vx v8, v8, a1
-; LMULMAX2-NEXT: li a1, 257
-; LMULMAX2-NEXT: vmul.vx v8, v8, a1
-; LMULMAX2-NEXT: vsrl.vi v8, v8, 8
-; LMULMAX2-NEXT: vse16.v v8, (a0)
-; LMULMAX2-NEXT: ret
-;
-; LMULMAX1-LABEL: cttz_v16i16:
-; LMULMAX1: # %bb.0:
-; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; LMULMAX1-NEXT: addi a1, a0, 16
-; LMULMAX1-NEXT: vle16.v v8, (a1)
-; LMULMAX1-NEXT: vle16.v v9, (a0)
-; LMULMAX1-NEXT: li a2, 1
-; LMULMAX1-NEXT: vsub.vx v10, v8, a2
-; LMULMAX1-NEXT: vnot.v v8, v8
-; LMULMAX1-NEXT: vand.vv v8, v8, v10
-; LMULMAX1-NEXT: vsrl.vi v10, v8, 1
-; LMULMAX1-NEXT: lui a3, 5
-; LMULMAX1-NEXT: addi a3, a3, 1365
-; LMULMAX1-NEXT: vand.vx v10, v10, a3
-; LMULMAX1-NEXT: vsub.vv v8, v8, v10
-; LMULMAX1-NEXT: lui a4, 3
-; LMULMAX1-NEXT: addi a4, a4, 819
-; LMULMAX1-NEXT: vand.vx v10, v8, a4
-; LMULMAX1-NEXT: vsrl.vi v8, v8, 2
-; LMULMAX1-NEXT: vand.vx v8, v8, a4
-; LMULMAX1-NEXT: vadd.vv v8, v10, v8
-; LMULMAX1-NEXT: vsrl.vi v10, v8, 4
-; LMULMAX1-NEXT: vadd.vv v8, v8, v10
-; LMULMAX1-NEXT: lui a5, 1
-; LMULMAX1-NEXT: addi a5, a5, -241
-; LMULMAX1-NEXT: vand.vx v8, v8, a5
-; LMULMAX1-NEXT: li a6, 257
-; LMULMAX1-NEXT: vmul.vx v8, v8, a6
-; LMULMAX1-NEXT: vsrl.vi v8, v8, 8
-; LMULMAX1-NEXT: vsub.vx v10, v9, a2
-; LMULMAX1-NEXT: vnot.v v9, v9
-; LMULMAX1-NEXT: vand.vv v9, v9, v10
-; LMULMAX1-NEXT: vsrl.vi v10, v9, 1
-; LMULMAX1-NEXT: vand.vx v10, v10, a3
-; LMULMAX1-NEXT: vsub.vv v9, v9, v10
-; LMULMAX1-NEXT: vand.vx v10, v9, a4
-; LMULMAX1-NEXT: vsrl.vi v9, v9, 2
-; LMULMAX1-NEXT: vand.vx v9, v9, a4
-; LMULMAX1-NEXT: vadd.vv v9, v10, v9
-; LMULMAX1-NEXT: vsrl.vi v10, v9, 4
-; LMULMAX1-NEXT: vadd.vv v9, v9, v10
-; LMULMAX1-NEXT: vand.vx v9, v9, a5
-; LMULMAX1-NEXT: vmul.vx v9, v9, a6
-; LMULMAX1-NEXT: vsrl.vi v9, v9, 8
-; LMULMAX1-NEXT: vse16.v v9, (a0)
-; LMULMAX1-NEXT: vse16.v v8, (a1)
-; LMULMAX1-NEXT: ret
-;
-; LMULMAX8-LABEL: cttz_v16i16:
-; LMULMAX8: # %bb.0:
-; LMULMAX8-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; LMULMAX8-NEXT: vle16.v v8, (a0)
-; LMULMAX8-NEXT: vrsub.vi v10, v8, 0
-; LMULMAX8-NEXT: vand.vv v10, v8, v10
-; LMULMAX8-NEXT: vfwcvt.f.xu.v v12, v10
-; LMULMAX8-NEXT: vnsrl.wi v10, v12, 23
-; LMULMAX8-NEXT: li a1, 127
-; LMULMAX8-NEXT: vsub.vx v10, v10, a1
-; LMULMAX8-NEXT: vmseq.vi v0, v8, 0
-; LMULMAX8-NEXT: li a1, 16
-; LMULMAX8-NEXT: vmerge.vxm v8, v10, a1, v0
-; LMULMAX8-NEXT: vse16.v v8, (a0)
-; LMULMAX8-NEXT: ret
+; RVI-LABEL: cttz_v16i16:
+; RVI: # %bb.0:
+; RVI-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RVI-NEXT: vle16.v v8, (a0)
+; RVI-NEXT: li a1, 1
+; RVI-NEXT: vsub.vx v10, v8, a1
+; RVI-NEXT: vnot.v v8, v8
+; RVI-NEXT: vand.vv v8, v8, v10
+; RVI-NEXT: vsrl.vi v10, v8, 1
+; RVI-NEXT: lui a1, 5
+; RVI-NEXT: addi a1, a1, 1365
+; RVI-NEXT: vand.vx v10, v10, a1
+; RVI-NEXT: vsub.vv v8, v8, v10
+; RVI-NEXT: lui a1, 3
+; RVI-NEXT: addi a1, a1, 819
+; RVI-NEXT: vand.vx v10, v8, a1
+; RVI-NEXT: vsrl.vi v8, v8, 2
+; RVI-NEXT: vand.vx v8, v8, a1
+; RVI-NEXT: vadd.vv v8, v10, v8
+; RVI-NEXT: vsrl.vi v10, v8, 4
+; RVI-NEXT: vadd.vv v8, v8, v10
+; RVI-NEXT: lui a1, 1
+; RVI-NEXT: addi a1, a1, -241
+; RVI-NEXT: vand.vx v8, v8, a1
+; RVI-NEXT: li a1, 257
+; RVI-NEXT: vmul.vx v8, v8, a1
+; RVI-NEXT: vsrl.vi v8, v8, 8
+; RVI-NEXT: vse16.v v8, (a0)
+; RVI-NEXT: ret
+;
+; RVF-LABEL: cttz_v16i16:
+; RVF: # %bb.0:
+; RVF-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RVF-NEXT: vle16.v v8, (a0)
+; RVF-NEXT: vrsub.vi v10, v8, 0
+; RVF-NEXT: vand.vv v10, v8, v10
+; RVF-NEXT: vfwcvt.f.xu.v v12, v10
+; RVF-NEXT: vnsrl.wi v10, v12, 23
+; RVF-NEXT: li a1, 127
+; RVF-NEXT: vsub.vx v10, v10, a1
+; RVF-NEXT: vmseq.vi v0, v8, 0
+; RVF-NEXT: li a1, 16
+; RVF-NEXT: vmerge.vxm v8, v10, a1, v0
+; RVF-NEXT: vse16.v v8, (a0)
+; RVF-NEXT: ret
+;
+; RVD-LABEL: cttz_v16i16:
+; RVD: # %bb.0:
+; RVD-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RVD-NEXT: vle16.v v8, (a0)
+; RVD-NEXT: vrsub.vi v10, v8, 0
+; RVD-NEXT: vand.vv v10, v8, v10
+; RVD-NEXT: vfwcvt.f.xu.v v12, v10
+; RVD-NEXT: vnsrl.wi v10, v12, 23
+; RVD-NEXT: li a1, 127
+; RVD-NEXT: vsub.vx v10, v10, a1
+; RVD-NEXT: vmseq.vi v0, v8, 0
+; RVD-NEXT: li a1, 16
+; RVD-NEXT: vmerge.vxm v8, v10, a1, v0
+; RVD-NEXT: vse16.v v8, (a0)
+; RVD-NEXT: ret
;
; ZVBB-LABEL: cttz_v16i16:
; ZVBB: # %bb.0:
@@ -832,156 +565,71 @@ define void @cttz_v16i16(ptr %x, ptr %y) nounwind {
declare <16 x i16> @llvm.cttz.v16i16(<16 x i16>, i1)
define void @cttz_v8i32(ptr %x, ptr %y) nounwind {
-; LMULMAX2-RV32I-LABEL: cttz_v8i32:
-; LMULMAX2-RV32I: # %bb.0:
-; LMULMAX2-RV32I-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; LMULMAX2-RV32I-NEXT: vle32.v v8, (a0)
-; LMULMAX2-RV32I-NEXT: li a1, 1
-; LMULMAX2-RV32I-NEXT: vsub.vx v10, v8, a1
-; LMULMAX2-RV32I-NEXT: vnot.v v8, v8
-; LMULMAX2-RV32I-NEXT: vand.vv v8, v8, v10
-; LMULMAX2-RV32I-NEXT: vsrl.vi v10, v8, 1
-; LMULMAX2-RV32I-NEXT: lui a1, 349525
-; LMULMAX2-RV32I-NEXT: addi a1, a1, 1365
-; LMULMAX2-RV32I-NEXT: vand.vx v10, v10, a1
-; LMULMAX2-RV32I-NEXT: vsub.vv v8, v8, v10
-; LMULMAX2-RV32I-NEXT: lui a1, 209715
-; LMULMAX2-RV32I-NEXT: addi a1, a1, 819
-; LMULMAX2-RV32I-NEXT: vand.vx v10, v8, a1
-; LMULMAX2-RV32I-NEXT: vsrl.vi v8, v8, 2
-; LMULMAX2-RV32I-NEXT: vand.vx v8, v8, a1
-; LMULMAX2-RV32I-NEXT: vadd.vv v8, v10, v8
-; LMULMAX2-RV32I-NEXT: vsrl.vi v10, v8, 4
-; LMULMAX2-RV32I-NEXT: vadd.vv v8, v8, v10
-; LMULMAX2-RV32I-NEXT: lui a1, 61681
-; LMULMAX2-RV32I-NEXT: addi a1, a1, -241
-; LMULMAX2-RV32I-NEXT: vand.vx v8, v8, a1
-; LMULMAX2-RV32I-NEXT: lui a1, 4112
-; LMULMAX2-RV32I-NEXT: addi a1, a1, 257
-; LMULMAX2-RV32I-NEXT: vmul.vx v8, v8, a1
-; LMULMAX2-RV32I-NEXT: vsrl.vi v8, v8, 24
-; LMULMAX2-RV32I-NEXT: vse32.v v8, (a0)
-; LMULMAX2-RV32I-NEXT: ret
-;
-; LMULMAX2-RV64I-LABEL: cttz_v8i32:
-; LMULMAX2-RV64I: # %bb.0:
-; LMULMAX2-RV64I-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; LMULMAX2-RV64I-NEXT: vle32.v v8, (a0)
-; LMULMAX2-RV64I-NEXT: li a1, 1
-; LMULMAX2-RV64I-NEXT: vsub.vx v10, v8, a1
-; LMULMAX2-RV64I-NEXT: vnot.v v8, v8
-; LMULMAX2-RV64I-NEXT: vand.vv v8, v8, v10
-; LMULMAX2-RV64I-NEXT: vsrl.vi v10, v8, 1
-; LMULMAX2-RV64I-NEXT: lui a1, 349525
-; LMULMAX2-RV64I-NEXT: addi a1, a1, 1365
-; LMULMAX2-RV64I-NEXT: vand.vx v10, v10, a1
-; LMULMAX2-RV64I-NEXT: vsub.vv v8, v8, v10
-; LMULMAX2-RV64I-NEXT: lui a1, 209715
-; LMULMAX2-RV64I-NEXT: addi a1, a1, 819
-; LMULMAX2-RV64I-NEXT: vand.vx v10, v8, a1
-; LMULMAX2-RV64I-NEXT: vsrl.vi v8, v8, 2
-; LMULMAX2-RV64I-NEXT: vand.vx v8, v8, a1
-; LMULMAX2-RV64I-NEXT: vadd.vv v8, v10, v8
-; LMULMAX2-RV64I-NEXT: vsrl.vi v10, v8, 4
-; LMULMAX2-RV64I-NEXT: vadd.vv v8, v8, v10
-; LMULMAX2-RV64I-NEXT: lui a1, 61681
-; LMULMAX2-RV64I-NEXT: addi a1, a1, -241
-; LMULMAX2-RV64I-NEXT: vand.vx v8, v8, a1
-; LMULMAX2-RV64I-NEXT: lui a1, 4112
-; LMULMAX2-RV64I-NEXT: addi a1, a1, 257
-; LMULMAX2-RV64I-NEXT: vmul.vx v8, v8, a1
-; LMULMAX2-RV64I-NEXT: vsrl.vi v8, v8, 24
-; LMULMAX2-RV64I-NEXT: vse32.v v8, (a0)
-; LMULMAX2-RV64I-NEXT: ret
-;
-; LMULMAX2-RV32F-LABEL: cttz_v8i32:
-; LMULMAX2-RV32F: # %bb.0:
-; LMULMAX2-RV32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; LMULMAX2-RV32F-NEXT: vle32.v v8, (a0)
-; LMULMAX2-RV32F-NEXT: vrsub.vi v10, v8, 0
-; LMULMAX2-RV32F-NEXT: vand.vv v10, v8, v10
-; LMULMAX2-RV32F-NEXT: fsrmi a1, 1
-; LMULMAX2-RV32F-NEXT: vfcvt.f.xu.v v10, v10
-; LMULMAX2-RV32F-NEXT: fsrm a1
-; LMULMAX2-RV32F-NEXT: vsrl.vi v10, v10, 23
-; LMULMAX2-RV32F-NEXT: li a1, 127
-; LMULMAX2-RV32F-NEXT: vsub.vx v10, v10, a1
-; LMULMAX2-RV32F-NEXT: vmseq.vi v0, v8, 0
-; LMULMAX2-RV32F-NEXT: li a1, 32
-; LMULMAX2-RV32F-NEXT: vmerge.vxm v8, v10, a1, v0
-; LMULMAX2-RV32F-NEXT: vse32.v v8, (a0)
-; LMULMAX2-RV32F-NEXT: ret
-;
-; LMULMAX2-RV64F-LABEL: cttz_v8i32:
-; LMULMAX2-RV64F: # %bb.0:
-; LMULMAX2-RV64F-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; LMULMAX2-RV64F-NEXT: vle32.v v8, (a0)
-; LMULMAX2-RV64F-NEXT: vrsub.vi v10, v8, 0
-; LMULMAX2-RV64F-NEXT: vand.vv v10, v8, v10
-; LMULMAX2-RV64F-NEXT: fsrmi a1, 1
-; LMULMAX2-RV64F-NEXT: vfcvt.f.xu.v v10, v10
-; LMULMAX2-RV64F-NEXT: fsrm a1
-; LMULMAX2-RV64F-NEXT: vsrl.vi v10, v10, 23
-; LMULMAX2-RV64F-NEXT: li a1, 127
-; LMULMAX2-RV64F-NEXT: vsub.vx v10, v10, a1
-; LMULMAX2-RV64F-NEXT: vmseq.vi v0, v8, 0
-; LMULMAX2-RV64F-NEXT: li a1, 32
-; LMULMAX2-RV64F-NEXT: vmerge.vxm v8, v10, a1, v0
-; LMULMAX2-RV64F-NEXT: vse32.v v8, (a0)
-; LMULMAX2-RV64F-NEXT: ret
-;
-; LMULMAX2-RV32D-LABEL: cttz_v8i32:
-; LMULMAX2-RV32D: # %bb.0:
-; LMULMAX2-RV32D-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; LMULMAX2-RV32D-NEXT: vle32.v v8, (a0)
-; LMULMAX2-RV32D-NEXT: vrsub.vi v10, v8, 0
-; LMULMAX2-RV32D-NEXT: vand.vv v10, v8, v10
-; LMULMAX2-RV32D-NEXT: fsrmi a1, 1
-; LMULMAX2-RV32D-NEXT: vfcvt.f.xu.v v10, v10
-; LMULMAX2-RV32D-NEXT: fsrm a1
-; LMULMAX2-RV32D-NEXT: vsrl.vi v10, v10, 23
-; LMULMAX2-RV32D-NEXT: li a1, 127
-; LMULMAX2-RV32D-NEXT: vsub.vx v10, v10, a1
-; LMULMAX2-RV32D-NEXT: vmseq.vi v0, v8, 0
-; LMULMAX2-RV32D-NEXT: li a1, 32
-; LMULMAX2-RV32D-NEXT: vmerge.vxm v8, v10, a1, v0
-; LMULMAX2-RV32D-NEXT: vse32.v v8, (a0)
-; LMULMAX2-RV32D-NEXT: ret
-;
-; LMULMAX2-RV64D-LABEL: cttz_v8i32:
-; LMULMAX2-RV64D: # %bb.0:
-; LMULMAX2-RV64D-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; LMULMAX2-RV64D-NEXT: vle32.v v8, (a0)
-; LMULMAX2-RV64D-NEXT: vrsub.vi v10, v8, 0
-; LMULMAX2-RV64D-NEXT: vand.vv v10, v8, v10
-; LMULMAX2-RV64D-NEXT: fsrmi a1, 1
-; LMULMAX2-RV64D-NEXT: vfcvt.f.xu.v v10, v10
-; LMULMAX2-RV64D-NEXT: fsrm a1
-; LMULMAX2-RV64D-NEXT: vsrl.vi v10, v10, 23
-; LMULMAX2-RV64D-NEXT: li a1, 127
-; LMULMAX2-RV64D-NEXT: vsub.vx v10, v10, a1
-; LMULMAX2-RV64D-NEXT: vmseq.vi v0, v8, 0
-; LMULMAX2-RV64D-NEXT: li a1, 32
-; LMULMAX2-RV64D-NEXT: vmerge.vxm v8, v10, a1, v0
-; LMULMAX2-RV64D-NEXT: vse32.v v8, (a0)
-; LMULMAX2-RV64D-NEXT: ret
-;
-; LMULMAX8-LABEL: cttz_v8i32:
-; LMULMAX8: # %bb.0:
-; LMULMAX8-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; LMULMAX8-NEXT: vle32.v v8, (a0)
-; LMULMAX8-NEXT: vrsub.vi v10, v8, 0
-; LMULMAX8-NEXT: vand.vv v10, v8, v10
-; LMULMAX8-NEXT: vfwcvt.f.xu.v v12, v10
-; LMULMAX8-NEXT: li a1, 52
-; LMULMAX8-NEXT: vnsrl.wx v10, v12, a1
-; LMULMAX8-NEXT: li a1, 1023
-; LMULMAX8-NEXT: vsub.vx v10, v10, a1
-; LMULMAX8-NEXT: vmseq.vi v0, v8, 0
-; LMULMAX8-NEXT: li a1, 32
-; LMULMAX8-NEXT: vmerge.vxm v8, v10, a1, v0
-; LMULMAX8-NEXT: vse32.v v8, (a0)
-; LMULMAX8-NEXT: ret
+; RVI-LABEL: cttz_v8i32:
+; RVI: # %bb.0:
+; RVI-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; RVI-NEXT: vle32.v v8, (a0)
+; RVI-NEXT: li a1, 1
+; RVI-NEXT: vsub.vx v10, v8, a1
+; RVI-NEXT: vnot.v v8, v8
+; RVI-NEXT: vand.vv v8, v8, v10
+; RVI-NEXT: vsrl.vi v10, v8, 1
+; RVI-NEXT: lui a1, 349525
+; RVI-NEXT: addi a1, a1, 1365
+; RVI-NEXT: vand.vx v10, v10, a1
+; RVI-NEXT: vsub.vv v8, v8, v10
+; RVI-NEXT: lui a1, 209715
+; RVI-NEXT: addi a1, a1, 819
+; RVI-NEXT: vand.vx v10, v8, a1
+; RVI-NEXT: vsrl.vi v8, v8, 2
+; RVI-NEXT: vand.vx v8, v8, a1
+; RVI-NEXT: vadd.vv v8, v10, v8
+; RVI-NEXT: vsrl.vi v10, v8, 4
+; RVI-NEXT: vadd.vv v8, v8, v10
+; RVI-NEXT: lui a1, 61681
+; RVI-NEXT: addi a1, a1, -241
+; RVI-NEXT: vand.vx v8, v8, a1
+; RVI-NEXT: lui a1, 4112
+; RVI-NEXT: addi a1, a1, 257
+; RVI-NEXT: vmul.vx v8, v8, a1
+; RVI-NEXT: vsrl.vi v8, v8, 24
+; RVI-NEXT: vse32.v v8, (a0)
+; RVI-NEXT: ret
+;
+; RVF-LABEL: cttz_v8i32:
+; RVF: # %bb.0:
+; RVF-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; RVF-NEXT: vle32.v v8, (a0)
+; RVF-NEXT: vrsub.vi v10, v8, 0
+; RVF-NEXT: vand.vv v10, v8, v10
+; RVF-NEXT: fsrmi a1, 1
+; RVF-NEXT: vfcvt.f.xu.v v10, v10
+; RVF-NEXT: fsrm a1
+; RVF-NEXT: vsrl.vi v10, v10, 23
+; RVF-NEXT: li a1, 127
+; RVF-NEXT: vsub.vx v10, v10, a1
+; RVF-NEXT: vmseq.vi v0, v8, 0
+; RVF-NEXT: li a1, 32
+; RVF-NEXT: vmerge.vxm v8, v10, a1, v0
+; RVF-NEXT: vse32.v v8, (a0)
+; RVF-NEXT: ret
+;
+; RVD-LABEL: cttz_v8i32:
+; RVD: # %bb.0:
+; RVD-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; RVD-NEXT: vle32.v v8, (a0)
+; RVD-NEXT: vrsub.vi v10, v8, 0
+; RVD-NEXT: vand.vv v10, v8, v10
+; RVD-NEXT: vfwcvt.f.xu.v v12, v10
+; RVD-NEXT: li a1, 52
+; RVD-NEXT: vnsrl.wx v10, v12, a1
+; RVD-NEXT: li a1, 1023
+; RVD-NEXT: vsub.vx v10, v10, a1
+; RVD-NEXT: vmseq.vi v0, v8, 0
+; RVD-NEXT: li a1, 32
+; RVD-NEXT: vmerge.vxm v8, v10, a1, v0
+; RVD-NEXT: vse32.v v8, (a0)
+; RVD-NEXT: ret
;
; ZVBB-LABEL: cttz_v8i32:
; ZVBB: # %bb.0:
@@ -999,187 +647,149 @@ define void @cttz_v8i32(ptr %x, ptr %y) nounwind {
declare <8 x i32> @llvm.cttz.v8i32(<8 x i32>, i1)
define void @cttz_v4i64(ptr %x, ptr %y) nounwind {
-; LMULMAX2-RV32I-LABEL: cttz_v4i64:
-; LMULMAX2-RV32I: # %bb.0:
-; LMULMAX2-RV32I-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; LMULMAX2-RV32I-NEXT: vle64.v v8, (a0)
-; LMULMAX2-RV32I-NEXT: li a1, 1
-; LMULMAX2-RV32I-NEXT: vsub.vx v10, v8, a1
-; LMULMAX2-RV32I-NEXT: vnot.v v8, v8
-; LMULMAX2-RV32I-NEXT: vand.vv v8, v8, v10
-; LMULMAX2-RV32I-NEXT: vsrl.vi v10, v8, 1
-; LMULMAX2-RV32I-NEXT: lui a1, 349525
-; LMULMAX2-RV32I-NEXT: addi a1, a1, 1365
-; LMULMAX2-RV32I-NEXT: vsetvli a2, zero, e32, m2, ta, ma
-; LMULMAX2-RV32I-NEXT: vmv.v.x v12, a1
-; LMULMAX2-RV32I-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; LMULMAX2-RV32I-NEXT: vand.vv v10, v10, v12
-; LMULMAX2-RV32I-NEXT: vsub.vv v8, v8, v10
-; LMULMAX2-RV32I-NEXT: lui a1, 209715
-; LMULMAX2-RV32I-NEXT: addi a1, a1, 819
-; LMULMAX2-RV32I-NEXT: vsetvli a2, zero, e32, m2, ta, ma
-; LMULMAX2-RV32I-NEXT: vmv.v.x v10, a1
-; LMULMAX2-RV32I-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; LMULMAX2-RV32I-NEXT: vand.vv v12, v8, v10
-; LMULMAX2-RV32I-NEXT: vsrl.vi v8, v8, 2
-; LMULMAX2-RV32I-NEXT: vand.vv v8, v8, v10
-; LMULMAX2-RV32I-NEXT: vadd.vv v8, v12, v8
-; LMULMAX2-RV32I-NEXT: vsrl.vi v10, v8, 4
-; LMULMAX2-RV32I-NEXT: vadd.vv v8, v8, v10
-; LMULMAX2-RV32I-NEXT: lui a1, 61681
-; LMULMAX2-RV32I-NEXT: addi a1, a1, -241
-; LMULMAX2-RV32I-NEXT: vsetvli a2, zero, e32, m2, ta, ma
-; LMULMAX2-RV32I-NEXT: vmv.v.x v10, a1
-; LMULMAX2-RV32I-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; LMULMAX2-RV32I-NEXT: vand.vv v8, v8, v10
-; LMULMAX2-RV32I-NEXT: lui a1, 4112
-; LMULMAX2-RV32I-NEXT: addi a1, a1, 257
-; LMULMAX2-RV32I-NEXT: vsetvli a2, zero, e32, m2, ta, ma
-; LMULMAX2-RV32I-NEXT: vmv.v.x v10, a1
-; LMULMAX2-RV32I-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; LMULMAX2-RV32I-NEXT: vmul.vv v8, v8, v10
-; LMULMAX2-RV32I-NEXT: li a1, 56
-; LMULMAX2-RV32I-NEXT: vsrl.vx v8, v8, a1
-; LMULMAX2-RV32I-NEXT: vse64.v v8, (a0)
-; LMULMAX2-RV32I-NEXT: ret
-;
-; LMULMAX2-RV64I-LABEL: cttz_v4i64:
-; LMULMAX2-RV64I: # %bb.0:
-; LMULMAX2-RV64I-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; LMULMAX2-RV64I-NEXT: vle64.v v8, (a0)
-; LMULMAX2-RV64I-NEXT: li a1, 1
-; LMULMAX2-RV64I-NEXT: vsub.vx v10, v8, a1
-; LMULMAX2-RV64I-NEXT: vnot.v v8, v8
-; LMULMAX2-RV64I-NEXT: vand.vv v8, v8, v10
-; LMULMAX2-RV64I-NEXT: vsrl.vi v10, v8, 1
-; LMULMAX2-RV64I-NEXT: lui a1, 349525
-; LMULMAX2-RV64I-NEXT: addiw a1, a1, 1365
-; LMULMAX2-RV64I-NEXT: slli a2, a1, 32
-; LMULMAX2-RV64I-NEXT: add a1, a1, a2
-; LMULMAX2-RV64I-NEXT: vand.vx v10, v10, a1
-; LMULMAX2-RV64I-NEXT: vsub.vv v8, v8, v10
-; LMULMAX2-RV64I-NEXT: lui a1, 209715
-; LMULMAX2-RV64I-NEXT: addiw a1, a1, 819
-; LMULMAX2-RV64I-NEXT: slli a2, a1, 32
-; LMULMAX2-RV64I-NEXT: add a1, a1, a2
-; LMULMAX2-RV64I-NEXT: vand.vx v10, v8, a1
-; LMULMAX2-RV64I-NEXT: vsrl.vi v8, v8, 2
-; LMULMAX2-RV64I-NEXT: vand.vx v8, v8, a1
-; LMULMAX2-RV64I-NEXT: vadd.vv v8, v10, v8
-; LMULMAX2-RV64I-NEXT: vsrl.vi v10, v8, 4
-; LMULMAX2-RV64I-NEXT: vadd.vv v8, v8, v10
-; LMULMAX2-RV64I-NEXT: lui a1, 61681
-; LMULMAX2-RV64I-NEXT: addiw a1, a1, -241
-; LMULMAX2-RV64I-NEXT: slli a2, a1, 32
-; LMULMAX2-RV64I-NEXT: add a1, a1, a2
-; LMULMAX2-RV64I-NEXT: vand.vx v8, v8, a1
-; LMULMAX2-RV64I-NEXT: lui a1, 4112
-; LMULMAX2-RV64I-NEXT: addiw a1, a1, 257
-; LMULMAX2-RV64I-NEXT: slli a2, a1, 32
-; LMULMAX2-RV64I-NEXT: add a1, a1, a2
-; LMULMAX2-RV64I-NEXT: vmul.vx v8, v8, a1
-; LMULMAX2-RV64I-NEXT: li a1, 56
-; LMULMAX2-RV64I-NEXT: vsrl.vx v8, v8, a1
-; LMULMAX2-RV64I-NEXT: vse64.v v8, (a0)
-; LMULMAX2-RV64I-NEXT: ret
-;
-; LMULMAX2-RV32F-LABEL: cttz_v4i64:
-; LMULMAX2-RV32F: # %bb.0:
-; LMULMAX2-RV32F-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; LMULMAX2-RV32F-NEXT: vle64.v v8, (a0)
-; LMULMAX2-RV32F-NEXT: vrsub.vi v10, v8, 0
-; LMULMAX2-RV32F-NEXT: vand.vv v10, v8, v10
-; LMULMAX2-RV32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma
-; LMULMAX2-RV32F-NEXT: fsrmi a1, 1
-; LMULMAX2-RV32F-NEXT: vfncvt.f.xu.w v12, v10
-; LMULMAX2-RV32F-NEXT: fsrm a1
-; LMULMAX2-RV32F-NEXT: vsrl.vi v10, v12, 23
-; LMULMAX2-RV32F-NEXT: vsetvli zero, zero, e64, m2, ta, ma
-; LMULMAX2-RV32F-NEXT: vzext.vf2 v12, v10
-; LMULMAX2-RV32F-NEXT: li a1, 127
-; LMULMAX2-RV32F-NEXT: vsub.vx v10, v12, a1
-; LMULMAX2-RV32F-NEXT: vmseq.vi v0, v8, 0
-; LMULMAX2-RV32F-NEXT: li a1, 64
-; LMULMAX2-RV32F-NEXT: vmerge.vxm v8, v10, a1, v0
-; LMULMAX2-RV32F-NEXT: vse64.v v8, (a0)
-; LMULMAX2-RV32F-NEXT: ret
-;
-; LMULMAX2-RV64F-LABEL: cttz_v4i64:
-; LMULMAX2-RV64F: # %bb.0:
-; LMULMAX2-RV64F-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; LMULMAX2-RV64F-NEXT: vle64.v v8, (a0)
-; LMULMAX2-RV64F-NEXT: vrsub.vi v10, v8, 0
-; LMULMAX2-RV64F-NEXT: vand.vv v10, v8, v10
-; LMULMAX2-RV64F-NEXT: vsetvli zero, zero, e32, m1, ta, ma
-; LMULMAX2-RV64F-NEXT: fsrmi a1, 1
-; LMULMAX2-RV64F-NEXT: vfncvt.f.xu.w v12, v10
-; LMULMAX2-RV64F-NEXT: fsrm a1
-; LMULMAX2-RV64F-NEXT: vsrl.vi v10, v12, 23
-; LMULMAX2-RV64F-NEXT: li a1, 127
-; LMULMAX2-RV64F-NEXT: vwsubu.vx v12, v10, a1
-; LMULMAX2-RV64F-NEXT: vsetvli zero, zero, e64, m2, ta, ma
-; LMULMAX2-RV64F-NEXT: vmseq.vi v0, v8, 0
-; LMULMAX2-RV64F-NEXT: li a1, 64
-; LMULMAX2-RV64F-NEXT: vmerge.vxm v8, v12, a1, v0
-; LMULMAX2-RV64F-NEXT: vse64.v v8, (a0)
-; LMULMAX2-RV64F-NEXT: ret
-;
-; LMULMAX2-RV32D-LABEL: cttz_v4i64:
-; LMULMAX2-RV32D: # %bb.0:
-; LMULMAX2-RV32D-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; LMULMAX2-RV32D-NEXT: vle64.v v8, (a0)
-; LMULMAX2-RV32D-NEXT: vrsub.vi v10, v8, 0
-; LMULMAX2-RV32D-NEXT: vand.vv v10, v8, v10
-; LMULMAX2-RV32D-NEXT: fsrmi a1, 1
-; LMULMAX2-RV32D-NEXT: vfcvt.f.xu.v v10, v10
-; LMULMAX2-RV32D-NEXT: fsrm a1
-; LMULMAX2-RV32D-NEXT: li a1, 52
-; LMULMAX2-RV32D-NEXT: vsrl.vx v10, v10, a1
-; LMULMAX2-RV32D-NEXT: li a1, 1023
-; LMULMAX2-RV32D-NEXT: vsub.vx v10, v10, a1
-; LMULMAX2-RV32D-NEXT: vmseq.vi v0, v8, 0
-; LMULMAX2-RV32D-NEXT: li a1, 64
-; LMULMAX2-RV32D-NEXT: vmerge.vxm v8, v10, a1, v0
-; LMULMAX2-RV32D-NEXT: vse64.v v8, (a0)
-; LMULMAX2-RV32D-NEXT: ret
-;
-; LMULMAX2-RV64D-LABEL: cttz_v4i64:
-; LMULMAX2-RV64D: # %bb.0:
-; LMULMAX2-RV64D-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; LMULMAX2-RV64D-NEXT: vle64.v v8, (a0)
-; LMULMAX2-RV64D-NEXT: vrsub.vi v10, v8, 0
-; LMULMAX2-RV64D-NEXT: vand.vv v10, v8, v10
-; LMULMAX2-RV64D-NEXT: fsrmi a1, 1
-; LMULMAX2-RV64D-NEXT: vfcvt.f.xu.v v10, v10
-; LMULMAX2-RV64D-NEXT: fsrm a1
-; LMULMAX2-RV64D-NEXT: li a1, 52
-; LMULMAX2-RV64D-NEXT: vsrl.vx v10, v10, a1
-; LMULMAX2-RV64D-NEXT: li a1, 1023
-; LMULMAX2-RV64D-NEXT: vsub.vx v10, v10, a1
-; LMULMAX2-RV64D-NEXT: vmseq.vi v0, v8, 0
-; LMULMAX2-RV64D-NEXT: li a1, 64
-; LMULMAX2-RV64D-NEXT: vmerge.vxm v8, v10, a1, v0
-; LMULMAX2-RV64D-NEXT: vse64.v v8, (a0)
-; LMULMAX2-RV64D-NEXT: ret
-;
-; LMULMAX8-LABEL: cttz_v4i64:
-; LMULMAX8: # %bb.0:
-; LMULMAX8-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; LMULMAX8-NEXT: vle64.v v8, (a0)
-; LMULMAX8-NEXT: vrsub.vi v10, v8, 0
-; LMULMAX8-NEXT: vand.vv v10, v8, v10
-; LMULMAX8-NEXT: fsrmi a1, 1
-; LMULMAX8-NEXT: vfcvt.f.xu.v v10, v10
-; LMULMAX8-NEXT: fsrm a1
-; LMULMAX8-NEXT: li a1, 52
-; LMULMAX8-NEXT: vsrl.vx v10, v10, a1
-; LMULMAX8-NEXT: li a1, 1023
-; LMULMAX8-NEXT: vsub.vx v10, v10, a1
-; LMULMAX8-NEXT: vmseq.vi v0, v8, 0
-; LMULMAX8-NEXT: li a1, 64
-; LMULMAX8-NEXT: vmerge.vxm v8, v10, a1, v0
-; LMULMAX8-NEXT: vse64.v v8, (a0)
-; LMULMAX8-NEXT: ret
+; RV32I-LABEL: cttz_v4i64:
+; RV32I: # %bb.0:
+; RV32I-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; RV32I-NEXT: vle64.v v8, (a0)
+; RV32I-NEXT: li a1, 1
+; RV32I-NEXT: vsub.vx v10, v8, a1
+; RV32I-NEXT: vnot.v v8, v8
+; RV32I-NEXT: vand.vv v8, v8, v10
+; RV32I-NEXT: vsrl.vi v10, v8, 1
+; RV32I-NEXT: lui a1, 349525
+; RV32I-NEXT: addi a1, a1, 1365
+; RV32I-NEXT: vsetvli a2, zero, e32, m2, ta, ma
+; RV32I-NEXT: vmv.v.x v12, a1
+; RV32I-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; RV32I-NEXT: vand.vv v10, v10, v12
+; RV32I-NEXT: vsub.vv v8, v8, v10
+; RV32I-NEXT: lui a1, 209715
+; RV32I-NEXT: addi a1, a1, 819
+; RV32I-NEXT: vsetvli a2, zero, e32, m2, ta, ma
+; RV32I-NEXT: vmv.v.x v10, a1
+; RV32I-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; RV32I-NEXT: vand.vv v12, v8, v10
+; RV32I-NEXT: vsrl.vi v8, v8, 2
+; RV32I-NEXT: vand.vv v8, v8, v10
+; RV32I-NEXT: vadd.vv v8, v12, v8
+; RV32I-NEXT: vsrl.vi v10, v8, 4
+; RV32I-NEXT: vadd.vv v8, v8, v10
+; RV32I-NEXT: lui a1, 61681
+; RV32I-NEXT: addi a1, a1, -241
+; RV32I-NEXT: vsetvli a2, zero, e32, m2, ta, ma
+; RV32I-NEXT: vmv.v.x v10, a1
+; RV32I-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; RV32I-NEXT: vand.vv v8, v8, v10
+; RV32I-NEXT: lui a1, 4112
+; RV32I-NEXT: addi a1, a1, 257
+; RV32I-NEXT: vsetvli a2, zero, e32, m2, ta, ma
+; RV32I-NEXT: vmv.v.x v10, a1
+; RV32I-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; RV32I-NEXT: vmul.vv v8, v8, v10
+; RV32I-NEXT: li a1, 56
+; RV32I-NEXT: vsrl.vx v8, v8, a1
+; RV32I-NEXT: vse64.v v8, (a0)
+; RV32I-NEXT: ret
+;
+; RV64I-LABEL: cttz_v4i64:
+; RV64I: # %bb.0:
+; RV64I-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; RV64I-NEXT: vle64.v v8, (a0)
+; RV64I-NEXT: li a1, 1
+; RV64I-NEXT: vsub.vx v10, v8, a1
+; RV64I-NEXT: vnot.v v8, v8
+; RV64I-NEXT: vand.vv v8, v8, v10
+; RV64I-NEXT: vsrl.vi v10, v8, 1
+; RV64I-NEXT: lui a1, 349525
+; RV64I-NEXT: addiw a1, a1, 1365
+; RV64I-NEXT: slli a2, a1, 32
+; RV64I-NEXT: add a1, a1, a2
+; RV64I-NEXT: vand.vx v10, v10, a1
+; RV64I-NEXT: vsub.vv v8, v8, v10
+; RV64I-NEXT: lui a1, 209715
+; RV64I-NEXT: addiw a1, a1, 819
+; RV64I-NEXT: slli a2, a1, 32
+; RV64I-NEXT: add a1, a1, a2
+; RV64I-NEXT: vand.vx v10, v8, a1
+; RV64I-NEXT: vsrl.vi v8, v8, 2
+; RV64I-NEXT: vand.vx v8, v8, a1
+; RV64I-NEXT: vadd.vv v8, v10, v8
+; RV64I-NEXT: vsrl.vi v10, v8, 4
+; RV64I-NEXT: vadd.vv v8, v8, v10
+; RV64I-NEXT: lui a1, 61681
+; RV64I-NEXT: addiw a1, a1, -241
+; RV64I-NEXT: slli a2, a1, 32
+; RV64I-NEXT: add a1, a1, a2
+; RV64I-NEXT: vand.vx v8, v8, a1
+; RV64I-NEXT: lui a1, 4112
+; RV64I-NEXT: addiw a1, a1, 257
+; RV64I-NEXT: slli a2, a1, 32
+; RV64I-NEXT: add a1, a1, a2
+; RV64I-NEXT: vmul.vx v8, v8, a1
+; RV64I-NEXT: li a1, 56
+; RV64I-NEXT: vsrl.vx v8, v8, a1
+; RV64I-NEXT: vse64.v v8, (a0)
+; RV64I-NEXT: ret
+;
+; RV32F-LABEL: cttz_v4i64:
+; RV32F: # %bb.0:
+; RV32F-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; RV32F-NEXT: vle64.v v8, (a0)
+; RV32F-NEXT: vrsub.vi v10, v8, 0
+; RV32F-NEXT: vand.vv v10, v8, v10
+; RV32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; RV32F-NEXT: fsrmi a1, 1
+; RV32F-NEXT: vfncvt.f.xu.w v12, v10
+; RV32F-NEXT: fsrm a1
+; RV32F-NEXT: vsrl.vi v10, v12, 23
+; RV32F-NEXT: vsetvli zero, zero, e64, m2, ta, ma
+; RV32F-NEXT: vzext.vf2 v12, v10
+; RV32F-NEXT: li a1, 127
+; RV32F-NEXT: vsub.vx v10, v12, a1
+; RV32F-NEXT: vmseq.vi v0, v8, 0
+; RV32F-NEXT: li a1, 64
+; RV32F-NEXT: vmerge.vxm v8, v10, a1, v0
+; RV32F-NEXT: vse64.v v8, (a0)
+; RV32F-NEXT: ret
+;
+; RV64F-LABEL: cttz_v4i64:
+; RV64F: # %bb.0:
+; RV64F-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; RV64F-NEXT: vle64.v v8, (a0)
+; RV64F-NEXT: vrsub.vi v10, v8, 0
+; RV64F-NEXT: vand.vv v10, v8, v10
+; RV64F-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; RV64F-NEXT: fsrmi a1, 1
+; RV64F-NEXT: vfncvt.f.xu.w v12, v10
+; RV64F-NEXT: fsrm a1
+; RV64F-NEXT: vsrl.vi v10, v12, 23
+; RV64F-NEXT: li a1, 127
+; RV64F-NEXT: vwsubu.vx v12, v10, a1
+; RV64F-NEXT: vsetvli zero, zero, e64, m2, ta, ma
+; RV64F-NEXT: vmseq.vi v0, v8, 0
+; RV64F-NEXT: li a1, 64
+; RV64F-NEXT: vmerge.vxm v8, v12, a1, v0
+; RV64F-NEXT: vse64.v v8, (a0)
+; RV64F-NEXT: ret
+;
+; RVD-LABEL: cttz_v4i64:
+; RVD: # %bb.0:
+; RVD-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; RVD-NEXT: vle64.v v8, (a0)
+; RVD-NEXT: vrsub.vi v10, v8, 0
+; RVD-NEXT: vand.vv v10, v8, v10
+; RVD-NEXT: fsrmi a1, 1
+; RVD-NEXT: vfcvt.f.xu.v v10, v10
+; RVD-NEXT: fsrm a1
+; RVD-NEXT: li a1, 52
+; RVD-NEXT: vsrl.vx v10, v10, a1
+; RVD-NEXT: li a1, 1023
+; RVD-NEXT: vsub.vx v10, v10, a1
+; RVD-NEXT: vmseq.vi v0, v8, 0
+; RVD-NEXT: li a1, 64
+; RVD-NEXT: vmerge.vxm v8, v10, a1, v0
+; RVD-NEXT: vse64.v v8, (a0)
+; RVD-NEXT: ret
;
; ZVBB-LABEL: cttz_v4i64:
; ZVBB: # %bb.0:
@@ -1197,45 +807,62 @@ define void @cttz_v4i64(ptr %x, ptr %y) nounwind {
declare <4 x i64> @llvm.cttz.v4i64(<4 x i64>, i1)
define void @cttz_zero_undef_v16i8(ptr %x, ptr %y) nounwind {
-; CHECK-LABEL: cttz_zero_undef_v16i8:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma
-; CHECK-NEXT: vle8.v v8, (a0)
-; CHECK-NEXT: li a1, 1
-; CHECK-NEXT: vsub.vx v9, v8, a1
-; CHECK-NEXT: vnot.v v8, v8
-; CHECK-NEXT: vand.vv v8, v8, v9
-; CHECK-NEXT: vsrl.vi v9, v8, 1
-; CHECK-NEXT: li a1, 85
-; CHECK-NEXT: vand.vx v9, v9, a1
-; CHECK-NEXT: vsub.vv v8, v8, v9
-; CHECK-NEXT: li a1, 51
-; CHECK-NEXT: vand.vx v9, v8, a1
-; CHECK-NEXT: vsrl.vi v8, v8, 2
-; CHECK-NEXT: vand.vx v8, v8, a1
-; CHECK-NEXT: vadd.vv v8, v9, v8
-; CHECK-NEXT: vsrl.vi v9, v8, 4
-; CHECK-NEXT: vadd.vv v8, v8, v9
-; CHECK-NEXT: vand.vi v8, v8, 15
-; CHECK-NEXT: vse8.v v8, (a0)
-; CHECK-NEXT: ret
-;
-; LMULMAX8-LABEL: cttz_zero_undef_v16i8:
-; LMULMAX8: # %bb.0:
-; LMULMAX8-NEXT: vsetivli zero, 16, e8, m1, ta, ma
-; LMULMAX8-NEXT: vle8.v v8, (a0)
-; LMULMAX8-NEXT: vrsub.vi v9, v8, 0
-; LMULMAX8-NEXT: vand.vv v8, v8, v9
-; LMULMAX8-NEXT: vsetvli zero, zero, e16, m2, ta, ma
-; LMULMAX8-NEXT: vzext.vf2 v10, v8
-; LMULMAX8-NEXT: vfwcvt.f.xu.v v12, v10
-; LMULMAX8-NEXT: vnsrl.wi v8, v12, 23
-; LMULMAX8-NEXT: vsetvli zero, zero, e8, m1, ta, ma
-; LMULMAX8-NEXT: vnsrl.wi v10, v8, 0
-; LMULMAX8-NEXT: li a1, 127
-; LMULMAX8-NEXT: vsub.vx v8, v10, a1
-; LMULMAX8-NEXT: vse8.v v8, (a0)
-; LMULMAX8-NEXT: ret
+; RVI-LABEL: cttz_zero_undef_v16i8:
+; RVI: # %bb.0:
+; RVI-NEXT: vsetivli zero, 16, e8, m1, ta, ma
+; RVI-NEXT: vle8.v v8, (a0)
+; RVI-NEXT: li a1, 1
+; RVI-NEXT: vsub.vx v9, v8, a1
+; RVI-NEXT: vnot.v v8, v8
+; RVI-NEXT: vand.vv v8, v8, v9
+; RVI-NEXT: vsrl.vi v9, v8, 1
+; RVI-NEXT: li a1, 85
+; RVI-NEXT: vand.vx v9, v9, a1
+; RVI-NEXT: vsub.vv v8, v8, v9
+; RVI-NEXT: li a1, 51
+; RVI-NEXT: vand.vx v9, v8, a1
+; RVI-NEXT: vsrl.vi v8, v8, 2
+; RVI-NEXT: vand.vx v8, v8, a1
+; RVI-NEXT: vadd.vv v8, v9, v8
+; RVI-NEXT: vsrl.vi v9, v8, 4
+; RVI-NEXT: vadd.vv v8, v8, v9
+; RVI-NEXT: vand.vi v8, v8, 15
+; RVI-NEXT: vse8.v v8, (a0)
+; RVI-NEXT: ret
+;
+; RVF-LABEL: cttz_zero_undef_v16i8:
+; RVF: # %bb.0:
+; RVF-NEXT: vsetivli zero, 16, e8, m1, ta, ma
+; RVF-NEXT: vle8.v v8, (a0)
+; RVF-NEXT: vrsub.vi v9, v8, 0
+; RVF-NEXT: vand.vv v8, v8, v9
+; RVF-NEXT: vsetvli zero, zero, e16, m2, ta, ma
+; RVF-NEXT: vzext.vf2 v10, v8
+; RVF-NEXT: vfwcvt.f.xu.v v12, v10
+; RVF-NEXT: vnsrl.wi v8, v12, 23
+; RVF-NEXT: vsetvli zero, zero, e8, m1, ta, ma
+; RVF-NEXT: vnsrl.wi v10, v8, 0
+; RVF-NEXT: li a1, 127
+; RVF-NEXT: vsub.vx v8, v10, a1
+; RVF-NEXT: vse8.v v8, (a0)
+; RVF-NEXT: ret
+;
+; RVD-LABEL: cttz_zero_undef_v16i8:
+; RVD: # %bb.0:
+; RVD-NEXT: vsetivli zero, 16, e8, m1, ta, ma
+; RVD-NEXT: vle8.v v8, (a0)
+; RVD-NEXT: vrsub.vi v9, v8, 0
+; RVD-NEXT: vand.vv v8, v8, v9
+; RVD-NEXT: vsetvli zero, zero, e16, m2, ta, ma
+; RVD-NEXT: vzext.vf2 v10, v8
+; RVD-NEXT: vfwcvt.f.xu.v v12, v10
+; RVD-NEXT: vnsrl.wi v8, v12, 23
+; RVD-NEXT: vsetvli zero, zero, e8, m1, ta, ma
+; RVD-NEXT: vnsrl.wi v10, v8, 0
+; RVD-NEXT: li a1, 127
+; RVD-NEXT: vsub.vx v8, v10, a1
+; RVD-NEXT: vse8.v v8, (a0)
+; RVD-NEXT: ret
;
; ZVBB-LABEL: cttz_zero_undef_v16i8:
; ZVBB: # %bb.0:
@@ -1252,160 +879,61 @@ define void @cttz_zero_undef_v16i8(ptr %x, ptr %y) nounwind {
}
define void @cttz_zero_undef_v8i16(ptr %x, ptr %y) nounwind {
-; LMULMAX2-RV32I-LABEL: cttz_zero_undef_v8i16:
-; LMULMAX2-RV32I: # %bb.0:
-; LMULMAX2-RV32I-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; LMULMAX2-RV32I-NEXT: vle16.v v8, (a0)
-; LMULMAX2-RV32I-NEXT: li a1, 1
-; LMULMAX2-RV32I-NEXT: vsub.vx v9, v8, a1
-; LMULMAX2-RV32I-NEXT: vnot.v v8, v8
-; LMULMAX2-RV32I-NEXT: vand.vv v8, v8, v9
-; LMULMAX2-RV32I-NEXT: vsrl.vi v9, v8, 1
-; LMULMAX2-RV32I-NEXT: lui a1, 5
-; LMULMAX2-RV32I-NEXT: addi a1, a1, 1365
-; LMULMAX2-RV32I-NEXT: vand.vx v9, v9, a1
-; LMULMAX2-RV32I-NEXT: vsub.vv v8, v8, v9
-; LMULMAX2-RV32I-NEXT: lui a1, 3
-; LMULMAX2-RV32I-NEXT: addi a1, a1, 819
-; LMULMAX2-RV32I-NEXT: vand.vx v9, v8, a1
-; LMULMAX2-RV32I-NEXT: vsrl.vi v8, v8, 2
-; LMULMAX2-RV32I-NEXT: vand.vx v8, v8, a1
-; LMULMAX2-RV32I-NEXT: vadd.vv v8, v9, v8
-; LMULMAX2-RV32I-NEXT: vsrl.vi v9, v8, 4
-; LMULMAX2-RV32I-NEXT: vadd.vv v8, v8, v9
-; LMULMAX2-RV32I-NEXT: lui a1, 1
-; LMULMAX2-RV32I-NEXT: addi a1, a1, -241
-; LMULMAX2-RV32I-NEXT: vand.vx v8, v8, a1
-; LMULMAX2-RV32I-NEXT: li a1, 257
-; LMULMAX2-RV32I-NEXT: vmul.vx v8, v8, a1
-; LMULMAX2-RV32I-NEXT: vsrl.vi v8, v8, 8
-; LMULMAX2-RV32I-NEXT: vse16.v v8, (a0)
-; LMULMAX2-RV32I-NEXT: ret
-;
-; LMULMAX2-RV64I-LABEL: cttz_zero_undef_v8i16:
-; LMULMAX2-RV64I: # %bb.0:
-; LMULMAX2-RV64I-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; LMULMAX2-RV64I-NEXT: vle16.v v8, (a0)
-; LMULMAX2-RV64I-NEXT: li a1, 1
-; LMULMAX2-RV64I-NEXT: vsub.vx v9, v8, a1
-; LMULMAX2-RV64I-NEXT: vnot.v v8, v8
-; LMULMAX2-RV64I-NEXT: vand.vv v8, v8, v9
-; LMULMAX2-RV64I-NEXT: vsrl.vi v9, v8, 1
-; LMULMAX2-RV64I-NEXT: lui a1, 5
-; LMULMAX2-RV64I-NEXT: addi a1, a1, 1365
-; LMULMAX2-RV64I-NEXT: vand.vx v9, v9, a1
-; LMULMAX2-RV64I-NEXT: vsub.vv v8, v8, v9
-; LMULMAX2-RV64I-NEXT: lui a1, 3
-; LMULMAX2-RV64I-NEXT: addi a1, a1, 819
-; LMULMAX2-RV64I-NEXT: vand.vx v9, v8, a1
-; LMULMAX2-RV64I-NEXT: vsrl.vi v8, v8, 2
-; LMULMAX2-RV64I-NEXT: vand.vx v8, v8, a1
-; LMULMAX2-RV64I-NEXT: vadd.vv v8, v9, v8
-; LMULMAX2-RV64I-NEXT: vsrl.vi v9, v8, 4
-; LMULMAX2-RV64I-NEXT: vadd.vv v8, v8, v9
-; LMULMAX2-RV64I-NEXT: lui a1, 1
-; LMULMAX2-RV64I-NEXT: addi a1, a1, -241
-; LMULMAX2-RV64I-NEXT: vand.vx v8, v8, a1
-; LMULMAX2-RV64I-NEXT: li a1, 257
-; LMULMAX2-RV64I-NEXT: vmul.vx v8, v8, a1
-; LMULMAX2-RV64I-NEXT: vsrl.vi v8, v8, 8
-; LMULMAX2-RV64I-NEXT: vse16.v v8, (a0)
-; LMULMAX2-RV64I-NEXT: ret
-;
-; LMULMAX1-LABEL: cttz_zero_undef_v8i16:
-; LMULMAX1: # %bb.0:
-; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; LMULMAX1-NEXT: vle16.v v8, (a0)
-; LMULMAX1-NEXT: li a1, 1
-; LMULMAX1-NEXT: vsub.vx v9, v8, a1
-; LMULMAX1-NEXT: vnot.v v8, v8
-; LMULMAX1-NEXT: vand.vv v8, v8, v9
-; LMULMAX1-NEXT: vsrl.vi v9, v8, 1
-; LMULMAX1-NEXT: lui a1, 5
-; LMULMAX1-NEXT: addi a1, a1, 1365
-; LMULMAX1-NEXT: vand.vx v9, v9, a1
-; LMULMAX1-NEXT: vsub.vv v8, v8, v9
-; LMULMAX1-NEXT: lui a1, 3
-; LMULMAX1-NEXT: addi a1, a1, 819
-; LMULMAX1-NEXT: vand.vx v9, v8, a1
-; LMULMAX1-NEXT: vsrl.vi v8, v8, 2
-; LMULMAX1-NEXT: vand.vx v8, v8, a1
-; LMULMAX1-NEXT: vadd.vv v8, v9, v8
-; LMULMAX1-NEXT: vsrl.vi v9, v8, 4
-; LMULMAX1-NEXT: vadd.vv v8, v8, v9
-; LMULMAX1-NEXT: lui a1, 1
-; LMULMAX1-NEXT: addi a1, a1, -241
-; LMULMAX1-NEXT: vand.vx v8, v8, a1
-; LMULMAX1-NEXT: li a1, 257
-; LMULMAX1-NEXT: vmul.vx v8, v8, a1
-; LMULMAX1-NEXT: vsrl.vi v8, v8, 8
-; LMULMAX1-NEXT: vse16.v v8, (a0)
-; LMULMAX1-NEXT: ret
-;
-; LMULMAX2-RV32F-LABEL: cttz_zero_undef_v8i16:
-; LMULMAX2-RV32F: # %bb.0:
-; LMULMAX2-RV32F-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; LMULMAX2-RV32F-NEXT: vle16.v v8, (a0)
-; LMULMAX2-RV32F-NEXT: vrsub.vi v9, v8, 0
-; LMULMAX2-RV32F-NEXT: vand.vv v8, v8, v9
-; LMULMAX2-RV32F-NEXT: vfwcvt.f.xu.v v10, v8
-; LMULMAX2-RV32F-NEXT: vnsrl.wi v8, v10, 23
-; LMULMAX2-RV32F-NEXT: li a1, 127
-; LMULMAX2-RV32F-NEXT: vsub.vx v8, v8, a1
-; LMULMAX2-RV32F-NEXT: vse16.v v8, (a0)
-; LMULMAX2-RV32F-NEXT: ret
-;
-; LMULMAX2-RV64F-LABEL: cttz_zero_undef_v8i16:
-; LMULMAX2-RV64F: # %bb.0:
-; LMULMAX2-RV64F-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; LMULMAX2-RV64F-NEXT: vle16.v v8, (a0)
-; LMULMAX2-RV64F-NEXT: vrsub.vi v9, v8, 0
-; LMULMAX2-RV64F-NEXT: vand.vv v8, v8, v9
-; LMULMAX2-RV64F-NEXT: vfwcvt.f.xu.v v10, v8
-; LMULMAX2-RV64F-NEXT: vnsrl.wi v8, v10, 23
-; LMULMAX2-RV64F-NEXT: li a1, 127
-; LMULMAX2-RV64F-NEXT: vsub.vx v8, v8, a1
-; LMULMAX2-RV64F-NEXT: vse16.v v8, (a0)
-; LMULMAX2-RV64F-NEXT: ret
-;
-; LMULMAX2-RV32D-LABEL: cttz_zero_undef_v8i16:
-; LMULMAX2-RV32D: # %bb.0:
-; LMULMAX2-RV32D-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; LMULMAX2-RV32D-NEXT: vle16.v v8, (a0)
-; LMULMAX2-RV32D-NEXT: vrsub.vi v9, v8, 0
-; LMULMAX2-RV32D-NEXT: vand.vv v8, v8, v9
-; LMULMAX2-RV32D-NEXT: vfwcvt.f.xu.v v10, v8
-; LMULMAX2-RV32D-NEXT: vnsrl.wi v8, v10, 23
-; LMULMAX2-RV32D-NEXT: li a1, 127
-; LMULMAX2-RV32D-NEXT: vsub.vx v8, v8, a1
-; LMULMAX2-RV32D-NEXT: vse16.v v8, (a0)
-; LMULMAX2-RV32D-NEXT: ret
-;
-; LMULMAX2-RV64D-LABEL: cttz_zero_undef_v8i16:
-; LMULMAX2-RV64D: # %bb.0:
-; LMULMAX2-RV64D-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; LMULMAX2-RV64D-NEXT: vle16.v v8, (a0)
-; LMULMAX2-RV64D-NEXT: vrsub.vi v9, v8, 0
-; LMULMAX2-RV64D-NEXT: vand.vv v8, v8, v9
-; LMULMAX2-RV64D-NEXT: vfwcvt.f.xu.v v10, v8
-; LMULMAX2-RV64D-NEXT: vnsrl.wi v8, v10, 23
-; LMULMAX2-RV64D-NEXT: li a1, 127
-; LMULMAX2-RV64D-NEXT: vsub.vx v8, v8, a1
-; LMULMAX2-RV64D-NEXT: vse16.v v8, (a0)
-; LMULMAX2-RV64D-NEXT: ret
-;
-; LMULMAX8-LABEL: cttz_zero_undef_v8i16:
-; LMULMAX8: # %bb.0:
-; LMULMAX8-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; LMULMAX8-NEXT: vle16.v v8, (a0)
-; LMULMAX8-NEXT: vrsub.vi v9, v8, 0
-; LMULMAX8-NEXT: vand.vv v8, v8, v9
-; LMULMAX8-NEXT: vfwcvt.f.xu.v v10, v8
-; LMULMAX8-NEXT: vnsrl.wi v8, v10, 23
-; LMULMAX8-NEXT: li a1, 127
-; LMULMAX8-NEXT: vsub.vx v8, v8, a1
-; LMULMAX8-NEXT: vse16.v v8, (a0)
-; LMULMAX8-NEXT: ret
+; RVI-LABEL: cttz_zero_undef_v8i16:
+; RVI: # %bb.0:
+; RVI-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RVI-NEXT: vle16.v v8, (a0)
+; RVI-NEXT: li a1, 1
+; RVI-NEXT: vsub.vx v9, v8, a1
+; RVI-NEXT: vnot.v v8, v8
+; RVI-NEXT: vand.vv v8, v8, v9
+; RVI-NEXT: vsrl.vi v9, v8, 1
+; RVI-NEXT: lui a1, 5
+; RVI-NEXT: addi a1, a1, 1365
+; RVI-NEXT: vand.vx v9, v9, a1
+; RVI-NEXT: vsub.vv v8, v8, v9
+; RVI-NEXT: lui a1, 3
+; RVI-NEXT: addi a1, a1, 819
+; RVI-NEXT: vand.vx v9, v8, a1
+; RVI-NEXT: vsrl.vi v8, v8, 2
+; RVI-NEXT: vand.vx v8, v8, a1
+; RVI-NEXT: vadd.vv v8, v9, v8
+; RVI-NEXT: vsrl.vi v9, v8, 4
+; RVI-NEXT: vadd.vv v8, v8, v9
+; RVI-NEXT: lui a1, 1
+; RVI-NEXT: addi a1, a1, -241
+; RVI-NEXT: vand.vx v8, v8, a1
+; RVI-NEXT: li a1, 257
+; RVI-NEXT: vmul.vx v8, v8, a1
+; RVI-NEXT: vsrl.vi v8, v8, 8
+; RVI-NEXT: vse16.v v8, (a0)
+; RVI-NEXT: ret
+;
+; RVF-LABEL: cttz_zero_undef_v8i16:
+; RVF: # %bb.0:
+; RVF-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RVF-NEXT: vle16.v v8, (a0)
+; RVF-NEXT: vrsub.vi v9, v8, 0
+; RVF-NEXT: vand.vv v8, v8, v9
+; RVF-NEXT: vfwcvt.f.xu.v v10, v8
+; RVF-NEXT: vnsrl.wi v8, v10, 23
+; RVF-NEXT: li a1, 127
+; RVF-NEXT: vsub.vx v8, v8, a1
+; RVF-NEXT: vse16.v v8, (a0)
+; RVF-NEXT: ret
+;
+; RVD-LABEL: cttz_zero_undef_v8i16:
+; RVD: # %bb.0:
+; RVD-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RVD-NEXT: vle16.v v8, (a0)
+; RVD-NEXT: vrsub.vi v9, v8, 0
+; RVD-NEXT: vand.vv v8, v8, v9
+; RVD-NEXT: vfwcvt.f.xu.v v10, v8
+; RVD-NEXT: vnsrl.wi v8, v10, 23
+; RVD-NEXT: li a1, 127
+; RVD-NEXT: vsub.vx v8, v8, a1
+; RVD-NEXT: vse16.v v8, (a0)
+; RVD-NEXT: ret
;
; ZVBB-LABEL: cttz_zero_undef_v8i16:
; ZVBB: # %bb.0:
@@ -1422,139 +950,65 @@ define void @cttz_zero_undef_v8i16(ptr %x, ptr %y) nounwind {
}
define void @cttz_zero_undef_v4i32(ptr %x, ptr %y) nounwind {
-; LMULMAX2-RV32I-LABEL: cttz_zero_undef_v4i32:
-; LMULMAX2-RV32I: # %bb.0:
-; LMULMAX2-RV32I-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; LMULMAX2-RV32I-NEXT: vle32.v v8, (a0)
-; LMULMAX2-RV32I-NEXT: li a1, 1
-; LMULMAX2-RV32I-NEXT: vsub.vx v9, v8, a1
-; LMULMAX2-RV32I-NEXT: vnot.v v8, v8
-; LMULMAX2-RV32I-NEXT: vand.vv v8, v8, v9
-; LMULMAX2-RV32I-NEXT: vsrl.vi v9, v8, 1
-; LMULMAX2-RV32I-NEXT: lui a1, 349525
-; LMULMAX2-RV32I-NEXT: addi a1, a1, 1365
-; LMULMAX2-RV32I-NEXT: vand.vx v9, v9, a1
-; LMULMAX2-RV32I-NEXT: vsub.vv v8, v8, v9
-; LMULMAX2-RV32I-NEXT: lui a1, 209715
-; LMULMAX2-RV32I-NEXT: addi a1, a1, 819
-; LMULMAX2-RV32I-NEXT: vand.vx v9, v8, a1
-; LMULMAX2-RV32I-NEXT: vsrl.vi v8, v8, 2
-; LMULMAX2-RV32I-NEXT: vand.vx v8, v8, a1
-; LMULMAX2-RV32I-NEXT: vadd.vv v8, v9, v8
-; LMULMAX2-RV32I-NEXT: vsrl.vi v9, v8, 4
-; LMULMAX2-RV32I-NEXT: vadd.vv v8, v8, v9
-; LMULMAX2-RV32I-NEXT: lui a1, 61681
-; LMULMAX2-RV32I-NEXT: addi a1, a1, -241
-; LMULMAX2-RV32I-NEXT: vand.vx v8, v8, a1
-; LMULMAX2-RV32I-NEXT: lui a1, 4112
-; LMULMAX2-RV32I-NEXT: addi a1, a1, 257
-; LMULMAX2-RV32I-NEXT: vmul.vx v8, v8, a1
-; LMULMAX2-RV32I-NEXT: vsrl.vi v8, v8, 24
-; LMULMAX2-RV32I-NEXT: vse32.v v8, (a0)
-; LMULMAX2-RV32I-NEXT: ret
-;
-; LMULMAX2-RV64I-LABEL: cttz_zero_undef_v4i32:
-; LMULMAX2-RV64I: # %bb.0:
-; LMULMAX2-RV64I-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; LMULMAX2-RV64I-NEXT: vle32.v v8, (a0)
-; LMULMAX2-RV64I-NEXT: li a1, 1
-; LMULMAX2-RV64I-NEXT: vsub.vx v9, v8, a1
-; LMULMAX2-RV64I-NEXT: vnot.v v8, v8
-; LMULMAX2-RV64I-NEXT: vand.vv v8, v8, v9
-; LMULMAX2-RV64I-NEXT: vsrl.vi v9, v8, 1
-; LMULMAX2-RV64I-NEXT: lui a1, 349525
-; LMULMAX2-RV64I-NEXT: addi a1, a1, 1365
-; LMULMAX2-RV64I-NEXT: vand.vx v9, v9, a1
-; LMULMAX2-RV64I-NEXT: vsub.vv v8, v8, v9
-; LMULMAX2-RV64I-NEXT: lui a1, 209715
-; LMULMAX2-RV64I-NEXT: addi a1, a1, 819
-; LMULMAX2-RV64I-NEXT: vand.vx v9, v8, a1
-; LMULMAX2-RV64I-NEXT: vsrl.vi v8, v8, 2
-; LMULMAX2-RV64I-NEXT: vand.vx v8, v8, a1
-; LMULMAX2-RV64I-NEXT: vadd.vv v8, v9, v8
-; LMULMAX2-RV64I-NEXT: vsrl.vi v9, v8, 4
-; LMULMAX2-RV64I-NEXT: vadd.vv v8, v8, v9
-; LMULMAX2-RV64I-NEXT: lui a1, 61681
-; LMULMAX2-RV64I-NEXT: addi a1, a1, -241
-; LMULMAX2-RV64I-NEXT: vand.vx v8, v8, a1
-; LMULMAX2-RV64I-NEXT: lui a1, 4112
-; LMULMAX2-RV64I-NEXT: addi a1, a1, 257
-; LMULMAX2-RV64I-NEXT: vmul.vx v8, v8, a1
-; LMULMAX2-RV64I-NEXT: vsrl.vi v8, v8, 24
-; LMULMAX2-RV64I-NEXT: vse32.v v8, (a0)
-; LMULMAX2-RV64I-NEXT: ret
-;
-; LMULMAX2-RV32F-LABEL: cttz_zero_undef_v4i32:
-; LMULMAX2-RV32F: # %bb.0:
-; LMULMAX2-RV32F-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; LMULMAX2-RV32F-NEXT: vle32.v v8, (a0)
-; LMULMAX2-RV32F-NEXT: vrsub.vi v9, v8, 0
-; LMULMAX2-RV32F-NEXT: vand.vv v8, v8, v9
-; LMULMAX2-RV32F-NEXT: fsrmi a1, 1
-; LMULMAX2-RV32F-NEXT: vfcvt.f.xu.v v8, v8
-; LMULMAX2-RV32F-NEXT: fsrm a1
-; LMULMAX2-RV32F-NEXT: vsrl.vi v8, v8, 23
-; LMULMAX2-RV32F-NEXT: li a1, 127
-; LMULMAX2-RV32F-NEXT: vsub.vx v8, v8, a1
-; LMULMAX2-RV32F-NEXT: vse32.v v8, (a0)
-; LMULMAX2-RV32F-NEXT: ret
-;
-; LMULMAX2-RV64F-LABEL: cttz_zero_undef_v4i32:
-; LMULMAX2-RV64F: # %bb.0:
-; LMULMAX2-RV64F-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; LMULMAX2-RV64F-NEXT: vle32.v v8, (a0)
-; LMULMAX2-RV64F-NEXT: vrsub.vi v9, v8, 0
-; LMULMAX2-RV64F-NEXT: vand.vv v8, v8, v9
-; LMULMAX2-RV64F-NEXT: fsrmi a1, 1
-; LMULMAX2-RV64F-NEXT: vfcvt.f.xu.v v8, v8
-; LMULMAX2-RV64F-NEXT: fsrm a1
-; LMULMAX2-RV64F-NEXT: vsrl.vi v8, v8, 23
-; LMULMAX2-RV64F-NEXT: li a1, 127
-; LMULMAX2-RV64F-NEXT: vsub.vx v8, v8, a1
-; LMULMAX2-RV64F-NEXT: vse32.v v8, (a0)
-; LMULMAX2-RV64F-NEXT: ret
-;
-; LMULMAX2-RV32D-LABEL: cttz_zero_undef_v4i32:
-; LMULMAX2-RV32D: # %bb.0:
-; LMULMAX2-RV32D-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; LMULMAX2-RV32D-NEXT: vle32.v v8, (a0)
-; LMULMAX2-RV32D-NEXT: vrsub.vi v9, v8, 0
-; LMULMAX2-RV32D-NEXT: vand.vv v8, v8, v9
-; LMULMAX2-RV32D-NEXT: vfwcvt.f.xu.v v10, v8
-; LMULMAX2-RV32D-NEXT: li a1, 52
-; LMULMAX2-RV32D-NEXT: vnsrl.wx v8, v10, a1
-; LMULMAX2-RV32D-NEXT: li a1, 1023
-; LMULMAX2-RV32D-NEXT: vsub.vx v8, v8, a1
-; LMULMAX2-RV32D-NEXT: vse32.v v8, (a0)
-; LMULMAX2-RV32D-NEXT: ret
-;
-; LMULMAX2-RV64D-LABEL: cttz_zero_undef_v4i32:
-; LMULMAX2-RV64D: # %bb.0:
-; LMULMAX2-RV64D-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; LMULMAX2-RV64D-NEXT: vle32.v v8, (a0)
-; LMULMAX2-RV64D-NEXT: vrsub.vi v9, v8, 0
-; LMULMAX2-RV64D-NEXT: vand.vv v8, v8, v9
-; LMULMAX2-RV64D-NEXT: vfwcvt.f.xu.v v10, v8
-; LMULMAX2-RV64D-NEXT: li a1, 52
-; LMULMAX2-RV64D-NEXT: vnsrl.wx v8, v10, a1
-; LMULMAX2-RV64D-NEXT: li a1, 1023
-; LMULMAX2-RV64D-NEXT: vsub.vx v8, v8, a1
-; LMULMAX2-RV64D-NEXT: vse32.v v8, (a0)
-; LMULMAX2-RV64D-NEXT: ret
-;
-; LMULMAX8-LABEL: cttz_zero_undef_v4i32:
-; LMULMAX8: # %bb.0:
-; LMULMAX8-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; LMULMAX8-NEXT: vle32.v v8, (a0)
-; LMULMAX8-NEXT: vrsub.vi v9, v8, 0
-; LMULMAX8-NEXT: vand.vv v8, v8, v9
-; LMULMAX8-NEXT: vfwcvt.f.xu.v v10, v8
-; LMULMAX8-NEXT: li a1, 52
-; LMULMAX8-NEXT: vnsrl.wx v8, v10, a1
-; LMULMAX8-NEXT: li a1, 1023
-; LMULMAX8-NEXT: vsub.vx v8, v8, a1
-; LMULMAX8-NEXT: vse32.v v8, (a0)
-; LMULMAX8-NEXT: ret
+; RVI-LABEL: cttz_zero_undef_v4i32:
+; RVI: # %bb.0:
+; RVI-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; RVI-NEXT: vle32.v v8, (a0)
+; RVI-NEXT: li a1, 1
+; RVI-NEXT: vsub.vx v9, v8, a1
+; RVI-NEXT: vnot.v v8, v8
+; RVI-NEXT: vand.vv v8, v8, v9
+; RVI-NEXT: vsrl.vi v9, v8, 1
+; RVI-NEXT: lui a1, 349525
+; RVI-NEXT: addi a1, a1, 1365
+; RVI-NEXT: vand.vx v9, v9, a1
+; RVI-NEXT: vsub.vv v8, v8, v9
+; RVI-NEXT: lui a1, 209715
+; RVI-NEXT: addi a1, a1, 819
+; RVI-NEXT: vand.vx v9, v8, a1
+; RVI-NEXT: vsrl.vi v8, v8, 2
+; RVI-NEXT: vand.vx v8, v8, a1
+; RVI-NEXT: vadd.vv v8, v9, v8
+; RVI-NEXT: vsrl.vi v9, v8, 4
+; RVI-NEXT: vadd.vv v8, v8, v9
+; RVI-NEXT: lui a1, 61681
+; RVI-NEXT: addi a1, a1, -241
+; RVI-NEXT: vand.vx v8, v8, a1
+; RVI-NEXT: lui a1, 4112
+; RVI-NEXT: addi a1, a1, 257
+; RVI-NEXT: vmul.vx v8, v8, a1
+; RVI-NEXT: vsrl.vi v8, v8, 24
+; RVI-NEXT: vse32.v v8, (a0)
+; RVI-NEXT: ret
+;
+; RVF-LABEL: cttz_zero_undef_v4i32:
+; RVF: # %bb.0:
+; RVF-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; RVF-NEXT: vle32.v v8, (a0)
+; RVF-NEXT: vrsub.vi v9, v8, 0
+; RVF-NEXT: vand.vv v8, v8, v9
+; RVF-NEXT: fsrmi a1, 1
+; RVF-NEXT: vfcvt.f.xu.v v8, v8
+; RVF-NEXT: fsrm a1
+; RVF-NEXT: vsrl.vi v8, v8, 23
+; RVF-NEXT: li a1, 127
+; RVF-NEXT: vsub.vx v8, v8, a1
+; RVF-NEXT: vse32.v v8, (a0)
+; RVF-NEXT: ret
+;
+; RVD-LABEL: cttz_zero_undef_v4i32:
+; RVD: # %bb.0:
+; RVD-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; RVD-NEXT: vle32.v v8, (a0)
+; RVD-NEXT: vrsub.vi v9, v8, 0
+; RVD-NEXT: vand.vv v8, v8, v9
+; RVD-NEXT: vfwcvt.f.xu.v v10, v8
+; RVD-NEXT: li a1, 52
+; RVD-NEXT: vnsrl.wx v8, v10, a1
+; RVD-NEXT: li a1, 1023
+; RVD-NEXT: vsub.vx v8, v8, a1
+; RVD-NEXT: vse32.v v8, (a0)
+; RVD-NEXT: ret
;
; ZVBB-LABEL: cttz_zero_undef_v4i32:
; ZVBB: # %bb.0:
@@ -1571,171 +1025,139 @@ define void @cttz_zero_undef_v4i32(ptr %x, ptr %y) nounwind {
}
define void @cttz_zero_undef_v2i64(ptr %x, ptr %y) nounwind {
-; LMULMAX2-RV32I-LABEL: cttz_zero_undef_v2i64:
-; LMULMAX2-RV32I: # %bb.0:
-; LMULMAX2-RV32I-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; LMULMAX2-RV32I-NEXT: vle64.v v8, (a0)
-; LMULMAX2-RV32I-NEXT: li a1, 1
-; LMULMAX2-RV32I-NEXT: vsub.vx v9, v8, a1
-; LMULMAX2-RV32I-NEXT: vnot.v v8, v8
-; LMULMAX2-RV32I-NEXT: vand.vv v8, v8, v9
-; LMULMAX2-RV32I-NEXT: vsrl.vi v9, v8, 1
-; LMULMAX2-RV32I-NEXT: lui a1, 349525
-; LMULMAX2-RV32I-NEXT: addi a1, a1, 1365
-; LMULMAX2-RV32I-NEXT: vsetvli a2, zero, e32, m1, ta, ma
-; LMULMAX2-RV32I-NEXT: vmv.v.x v10, a1
-; LMULMAX2-RV32I-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; LMULMAX2-RV32I-NEXT: vand.vv v9, v9, v10
-; LMULMAX2-RV32I-NEXT: vsub.vv v8, v8, v9
-; LMULMAX2-RV32I-NEXT: lui a1, 209715
-; LMULMAX2-RV32I-NEXT: addi a1, a1, 819
-; LMULMAX2-RV32I-NEXT: vsetvli a2, zero, e32, m1, ta, ma
-; LMULMAX2-RV32I-NEXT: vmv.v.x v9, a1
-; LMULMAX2-RV32I-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; LMULMAX2-RV32I-NEXT: vand.vv v10, v8, v9
-; LMULMAX2-RV32I-NEXT: vsrl.vi v8, v8, 2
-; LMULMAX2-RV32I-NEXT: vand.vv v8, v8, v9
-; LMULMAX2-RV32I-NEXT: vadd.vv v8, v10, v8
-; LMULMAX2-RV32I-NEXT: vsrl.vi v9, v8, 4
-; LMULMAX2-RV32I-NEXT: vadd.vv v8, v8, v9
-; LMULMAX2-RV32I-NEXT: lui a1, 61681
-; LMULMAX2-RV32I-NEXT: addi a1, a1, -241
-; LMULMAX2-RV32I-NEXT: vsetvli a2, zero, e32, m1, ta, ma
-; LMULMAX2-RV32I-NEXT: vmv.v.x v9, a1
-; LMULMAX2-RV32I-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; LMULMAX2-RV32I-NEXT: vand.vv v8, v8, v9
-; LMULMAX2-RV32I-NEXT: lui a1, 4112
-; LMULMAX2-RV32I-NEXT: addi a1, a1, 257
-; LMULMAX2-RV32I-NEXT: vsetvli a2, zero, e32, m1, ta, ma
-; LMULMAX2-RV32I-NEXT: vmv.v.x v9, a1
-; LMULMAX2-RV32I-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; LMULMAX2-RV32I-NEXT: vmul.vv v8, v8, v9
-; LMULMAX2-RV32I-NEXT: li a1, 56
-; LMULMAX2-RV32I-NEXT: vsrl.vx v8, v8, a1
-; LMULMAX2-RV32I-NEXT: vse64.v v8, (a0)
-; LMULMAX2-RV32I-NEXT: ret
-;
-; LMULMAX2-RV64I-LABEL: cttz_zero_undef_v2i64:
-; LMULMAX2-RV64I: # %bb.0:
-; LMULMAX2-RV64I-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; LMULMAX2-RV64I-NEXT: vle64.v v8, (a0)
-; LMULMAX2-RV64I-NEXT: li a1, 1
-; LMULMAX2-RV64I-NEXT: vsub.vx v9, v8, a1
-; LMULMAX2-RV64I-NEXT: vnot.v v8, v8
-; LMULMAX2-RV64I-NEXT: vand.vv v8, v8, v9
-; LMULMAX2-RV64I-NEXT: vsrl.vi v9, v8, 1
-; LMULMAX2-RV64I-NEXT: lui a1, 349525
-; LMULMAX2-RV64I-NEXT: addiw a1, a1, 1365
-; LMULMAX2-RV64I-NEXT: slli a2, a1, 32
-; LMULMAX2-RV64I-NEXT: add a1, a1, a2
-; LMULMAX2-RV64I-NEXT: vand.vx v9, v9, a1
-; LMULMAX2-RV64I-NEXT: vsub.vv v8, v8, v9
-; LMULMAX2-RV64I-NEXT: lui a1, 209715
-; LMULMAX2-RV64I-NEXT: addiw a1, a1, 819
-; LMULMAX2-RV64I-NEXT: slli a2, a1, 32
-; LMULMAX2-RV64I-NEXT: add a1, a1, a2
-; LMULMAX2-RV64I-NEXT: vand.vx v9, v8, a1
-; LMULMAX2-RV64I-NEXT: vsrl.vi v8, v8, 2
-; LMULMAX2-RV64I-NEXT: vand.vx v8, v8, a1
-; LMULMAX2-RV64I-NEXT: vadd.vv v8, v9, v8
-; LMULMAX2-RV64I-NEXT: vsrl.vi v9, v8, 4
-; LMULMAX2-RV64I-NEXT: vadd.vv v8, v8, v9
-; LMULMAX2-RV64I-NEXT: lui a1, 61681
-; LMULMAX2-RV64I-NEXT: addiw a1, a1, -241
-; LMULMAX2-RV64I-NEXT: slli a2, a1, 32
-; LMULMAX2-RV64I-NEXT: add a1, a1, a2
-; LMULMAX2-RV64I-NEXT: vand.vx v8, v8, a1
-; LMULMAX2-RV64I-NEXT: lui a1, 4112
-; LMULMAX2-RV64I-NEXT: addiw a1, a1, 257
-; LMULMAX2-RV64I-NEXT: slli a2, a1, 32
-; LMULMAX2-RV64I-NEXT: add a1, a1, a2
-; LMULMAX2-RV64I-NEXT: vmul.vx v8, v8, a1
-; LMULMAX2-RV64I-NEXT: li a1, 56
-; LMULMAX2-RV64I-NEXT: vsrl.vx v8, v8, a1
-; LMULMAX2-RV64I-NEXT: vse64.v v8, (a0)
-; LMULMAX2-RV64I-NEXT: ret
-;
-; LMULMAX2-RV32F-LABEL: cttz_zero_undef_v2i64:
-; LMULMAX2-RV32F: # %bb.0:
-; LMULMAX2-RV32F-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; LMULMAX2-RV32F-NEXT: vle64.v v8, (a0)
-; LMULMAX2-RV32F-NEXT: vrsub.vi v9, v8, 0
-; LMULMAX2-RV32F-NEXT: vand.vv v8, v8, v9
-; LMULMAX2-RV32F-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
-; LMULMAX2-RV32F-NEXT: fsrmi a1, 1
-; LMULMAX2-RV32F-NEXT: vfncvt.f.xu.w v9, v8
-; LMULMAX2-RV32F-NEXT: fsrm a1
-; LMULMAX2-RV32F-NEXT: vsrl.vi v8, v9, 23
-; LMULMAX2-RV32F-NEXT: vsetvli zero, zero, e64, m1, ta, ma
-; LMULMAX2-RV32F-NEXT: vzext.vf2 v9, v8
-; LMULMAX2-RV32F-NEXT: li a1, 127
-; LMULMAX2-RV32F-NEXT: vsub.vx v8, v9, a1
-; LMULMAX2-RV32F-NEXT: vse64.v v8, (a0)
-; LMULMAX2-RV32F-NEXT: ret
-;
-; LMULMAX2-RV64F-LABEL: cttz_zero_undef_v2i64:
-; LMULMAX2-RV64F: # %bb.0:
-; LMULMAX2-RV64F-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; LMULMAX2-RV64F-NEXT: vle64.v v8, (a0)
-; LMULMAX2-RV64F-NEXT: vrsub.vi v9, v8, 0
-; LMULMAX2-RV64F-NEXT: vand.vv v8, v8, v9
-; LMULMAX2-RV64F-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
-; LMULMAX2-RV64F-NEXT: fsrmi a1, 1
-; LMULMAX2-RV64F-NEXT: vfncvt.f.xu.w v9, v8
-; LMULMAX2-RV64F-NEXT: fsrm a1
-; LMULMAX2-RV64F-NEXT: vsrl.vi v8, v9, 23
-; LMULMAX2-RV64F-NEXT: li a1, 127
-; LMULMAX2-RV64F-NEXT: vwsubu.vx v9, v8, a1
-; LMULMAX2-RV64F-NEXT: vse64.v v9, (a0)
-; LMULMAX2-RV64F-NEXT: ret
-;
-; LMULMAX2-RV32D-LABEL: cttz_zero_undef_v2i64:
-; LMULMAX2-RV32D: # %bb.0:
-; LMULMAX2-RV32D-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; LMULMAX2-RV32D-NEXT: vle64.v v8, (a0)
-; LMULMAX2-RV32D-NEXT: vrsub.vi v9, v8, 0
-; LMULMAX2-RV32D-NEXT: vand.vv v8, v8, v9
-; LMULMAX2-RV32D-NEXT: fsrmi a1, 1
-; LMULMAX2-RV32D-NEXT: vfcvt.f.xu.v v8, v8
-; LMULMAX2-RV32D-NEXT: fsrm a1
-; LMULMAX2-RV32D-NEXT: li a1, 52
-; LMULMAX2-RV32D-NEXT: vsrl.vx v8, v8, a1
-; LMULMAX2-RV32D-NEXT: li a1, 1023
-; LMULMAX2-RV32D-NEXT: vsub.vx v8, v8, a1
-; LMULMAX2-RV32D-NEXT: vse64.v v8, (a0)
-; LMULMAX2-RV32D-NEXT: ret
-;
-; LMULMAX2-RV64D-LABEL: cttz_zero_undef_v2i64:
-; LMULMAX2-RV64D: # %bb.0:
-; LMULMAX2-RV64D-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; LMULMAX2-RV64D-NEXT: vle64.v v8, (a0)
-; LMULMAX2-RV64D-NEXT: vrsub.vi v9, v8, 0
-; LMULMAX2-RV64D-NEXT: vand.vv v8, v8, v9
-; LMULMAX2-RV64D-NEXT: fsrmi a1, 1
-; LMULMAX2-RV64D-NEXT: vfcvt.f.xu.v v8, v8
-; LMULMAX2-RV64D-NEXT: fsrm a1
-; LMULMAX2-RV64D-NEXT: li a1, 52
-; LMULMAX2-RV64D-NEXT: vsrl.vx v8, v8, a1
-; LMULMAX2-RV64D-NEXT: li a1, 1023
-; LMULMAX2-RV64D-NEXT: vsub.vx v8, v8, a1
-; LMULMAX2-RV64D-NEXT: vse64.v v8, (a0)
-; LMULMAX2-RV64D-NEXT: ret
-;
-; LMULMAX8-LABEL: cttz_zero_undef_v2i64:
-; LMULMAX8: # %bb.0:
-; LMULMAX8-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; LMULMAX8-NEXT: vle64.v v8, (a0)
-; LMULMAX8-NEXT: vrsub.vi v9, v8, 0
-; LMULMAX8-NEXT: vand.vv v8, v8, v9
-; LMULMAX8-NEXT: fsrmi a1, 1
-; LMULMAX8-NEXT: vfcvt.f.xu.v v8, v8
-; LMULMAX8-NEXT: fsrm a1
-; LMULMAX8-NEXT: li a1, 52
-; LMULMAX8-NEXT: vsrl.vx v8, v8, a1
-; LMULMAX8-NEXT: li a1, 1023
-; LMULMAX8-NEXT: vsub.vx v8, v8, a1
-; LMULMAX8-NEXT: vse64.v v8, (a0)
-; LMULMAX8-NEXT: ret
+; RV32I-LABEL: cttz_zero_undef_v2i64:
+; RV32I: # %bb.0:
+; RV32I-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; RV32I-NEXT: vle64.v v8, (a0)
+; RV32I-NEXT: li a1, 1
+; RV32I-NEXT: vsub.vx v9, v8, a1
+; RV32I-NEXT: vnot.v v8, v8
+; RV32I-NEXT: vand.vv v8, v8, v9
+; RV32I-NEXT: vsrl.vi v9, v8, 1
+; RV32I-NEXT: lui a1, 349525
+; RV32I-NEXT: addi a1, a1, 1365
+; RV32I-NEXT: vsetvli a2, zero, e32, m1, ta, ma
+; RV32I-NEXT: vmv.v.x v10, a1
+; RV32I-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; RV32I-NEXT: vand.vv v9, v9, v10
+; RV32I-NEXT: vsub.vv v8, v8, v9
+; RV32I-NEXT: lui a1, 209715
+; RV32I-NEXT: addi a1, a1, 819
+; RV32I-NEXT: vsetvli a2, zero, e32, m1, ta, ma
+; RV32I-NEXT: vmv.v.x v9, a1
+; RV32I-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; RV32I-NEXT: vand.vv v10, v8, v9
+; RV32I-NEXT: vsrl.vi v8, v8, 2
+; RV32I-NEXT: vand.vv v8, v8, v9
+; RV32I-NEXT: vadd.vv v8, v10, v8
+; RV32I-NEXT: vsrl.vi v9, v8, 4
+; RV32I-NEXT: vadd.vv v8, v8, v9
+; RV32I-NEXT: lui a1, 61681
+; RV32I-NEXT: addi a1, a1, -241
+; RV32I-NEXT: vsetvli a2, zero, e32, m1, ta, ma
+; RV32I-NEXT: vmv.v.x v9, a1
+; RV32I-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; RV32I-NEXT: vand.vv v8, v8, v9
+; RV32I-NEXT: lui a1, 4112
+; RV32I-NEXT: addi a1, a1, 257
+; RV32I-NEXT: vsetvli a2, zero, e32, m1, ta, ma
+; RV32I-NEXT: vmv.v.x v9, a1
+; RV32I-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; RV32I-NEXT: vmul.vv v8, v8, v9
+; RV32I-NEXT: li a1, 56
+; RV32I-NEXT: vsrl.vx v8, v8, a1
+; RV32I-NEXT: vse64.v v8, (a0)
+; RV32I-NEXT: ret
+;
+; RV64I-LABEL: cttz_zero_undef_v2i64:
+; RV64I: # %bb.0:
+; RV64I-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; RV64I-NEXT: vle64.v v8, (a0)
+; RV64I-NEXT: li a1, 1
+; RV64I-NEXT: vsub.vx v9, v8, a1
+; RV64I-NEXT: vnot.v v8, v8
+; RV64I-NEXT: vand.vv v8, v8, v9
+; RV64I-NEXT: vsrl.vi v9, v8, 1
+; RV64I-NEXT: lui a1, 349525
+; RV64I-NEXT: addiw a1, a1, 1365
+; RV64I-NEXT: slli a2, a1, 32
+; RV64I-NEXT: add a1, a1, a2
+; RV64I-NEXT: vand.vx v9, v9, a1
+; RV64I-NEXT: vsub.vv v8, v8, v9
+; RV64I-NEXT: lui a1, 209715
+; RV64I-NEXT: addiw a1, a1, 819
+; RV64I-NEXT: slli a2, a1, 32
+; RV64I-NEXT: add a1, a1, a2
+; RV64I-NEXT: vand.vx v9, v8, a1
+; RV64I-NEXT: vsrl.vi v8, v8, 2
+; RV64I-NEXT: vand.vx v8, v8, a1
+; RV64I-NEXT: vadd.vv v8, v9, v8
+; RV64I-NEXT: vsrl.vi v9, v8, 4
+; RV64I-NEXT: vadd.vv v8, v8, v9
+; RV64I-NEXT: lui a1, 61681
+; RV64I-NEXT: addiw a1, a1, -241
+; RV64I-NEXT: slli a2, a1, 32
+; RV64I-NEXT: add a1, a1, a2
+; RV64I-NEXT: vand.vx v8, v8, a1
+; RV64I-NEXT: lui a1, 4112
+; RV64I-NEXT: addiw a1, a1, 257
+; RV64I-NEXT: slli a2, a1, 32
+; RV64I-NEXT: add a1, a1, a2
+; RV64I-NEXT: vmul.vx v8, v8, a1
+; RV64I-NEXT: li a1, 56
+; RV64I-NEXT: vsrl.vx v8, v8, a1
+; RV64I-NEXT: vse64.v v8, (a0)
+; RV64I-NEXT: ret
+;
+; RV32F-LABEL: cttz_zero_undef_v2i64:
+; RV32F: # %bb.0:
+; RV32F-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; RV32F-NEXT: vle64.v v8, (a0)
+; RV32F-NEXT: vrsub.vi v9, v8, 0
+; RV32F-NEXT: vand.vv v8, v8, v9
+; RV32F-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
+; RV32F-NEXT: fsrmi a1, 1
+; RV32F-NEXT: vfncvt.f.xu.w v9, v8
+; RV32F-NEXT: fsrm a1
+; RV32F-NEXT: vsrl.vi v8, v9, 23
+; RV32F-NEXT: vsetvli zero, zero, e64, m1, ta, ma
+; RV32F-NEXT: vzext.vf2 v9, v8
+; RV32F-NEXT: li a1, 127
+; RV32F-NEXT: vsub.vx v8, v9, a1
+; RV32F-NEXT: vse64.v v8, (a0)
+; RV32F-NEXT: ret
+;
+; RV64F-LABEL: cttz_zero_undef_v2i64:
+; RV64F: # %bb.0:
+; RV64F-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; RV64F-NEXT: vle64.v v8, (a0)
+; RV64F-NEXT: vrsub.vi v9, v8, 0
+; RV64F-NEXT: vand.vv v8, v8, v9
+; RV64F-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
+; RV64F-NEXT: fsrmi a1, 1
+; RV64F-NEXT: vfncvt.f.xu.w v9, v8
+; RV64F-NEXT: fsrm a1
+; RV64F-NEXT: vsrl.vi v8, v9, 23
+; RV64F-NEXT: li a1, 127
+; RV64F-NEXT: vwsubu.vx v9, v8, a1
+; RV64F-NEXT: vse64.v v9, (a0)
+; RV64F-NEXT: ret
+;
+; RVD-LABEL: cttz_zero_undef_v2i64:
+; RVD: # %bb.0:
+; RVD-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; RVD-NEXT: vle64.v v8, (a0)
+; RVD-NEXT: vrsub.vi v9, v8, 0
+; RVD-NEXT: vand.vv v8, v8, v9
+; RVD-NEXT: fsrmi a1, 1
+; RVD-NEXT: vfcvt.f.xu.v v8, v8
+; RVD-NEXT: fsrm a1
+; RVD-NEXT: li a1, 52
+; RVD-NEXT: vsrl.vx v8, v8, a1
+; RVD-NEXT: li a1, 1023
+; RVD-NEXT: vsub.vx v8, v8, a1
+; RVD-NEXT: vse64.v v8, (a0)
+; RVD-NEXT: ret
;
; ZVBB-LABEL: cttz_zero_undef_v2i64:
; ZVBB: # %bb.0:
@@ -1752,86 +1174,65 @@ define void @cttz_zero_undef_v2i64(ptr %x, ptr %y) nounwind {
}
define void @cttz_zero_undef_v32i8(ptr %x, ptr %y) nounwind {
-; LMULMAX2-LABEL: cttz_zero_undef_v32i8:
-; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: li a1, 32
-; LMULMAX2-NEXT: vsetvli zero, a1, e8, m2, ta, ma
-; LMULMAX2-NEXT: vle8.v v8, (a0)
-; LMULMAX2-NEXT: li a1, 1
-; LMULMAX2-NEXT: vsub.vx v10, v8, a1
-; LMULMAX2-NEXT: vnot.v v8, v8
-; LMULMAX2-NEXT: vand.vv v8, v8, v10
-; LMULMAX2-NEXT: vsrl.vi v10, v8, 1
-; LMULMAX2-NEXT: li a1, 85
-; LMULMAX2-NEXT: vand.vx v10, v10, a1
-; LMULMAX2-NEXT: vsub.vv v8, v8, v10
-; LMULMAX2-NEXT: li a1, 51
-; LMULMAX2-NEXT: vand.vx v10, v8, a1
-; LMULMAX2-NEXT: vsrl.vi v8, v8, 2
-; LMULMAX2-NEXT: vand.vx v8, v8, a1
-; LMULMAX2-NEXT: vadd.vv v8, v10, v8
-; LMULMAX2-NEXT: vsrl.vi v10, v8, 4
-; LMULMAX2-NEXT: vadd.vv v8, v8, v10
-; LMULMAX2-NEXT: vand.vi v8, v8, 15
-; LMULMAX2-NEXT: vse8.v v8, (a0)
-; LMULMAX2-NEXT: ret
-;
-; LMULMAX1-LABEL: cttz_zero_undef_v32i8:
-; LMULMAX1: # %bb.0:
-; LMULMAX1-NEXT: vsetivli zero, 16, e8, m1, ta, ma
-; LMULMAX1-NEXT: addi a1, a0, 16
-; LMULMAX1-NEXT: vle8.v v8, (a1)
-; LMULMAX1-NEXT: vle8.v v9, (a0)
-; LMULMAX1-NEXT: li a2, 1
-; LMULMAX1-NEXT: vsub.vx v10, v8, a2
-; LMULMAX1-NEXT: vnot.v v8, v8
-; LMULMAX1-NEXT: vand.vv v8, v8, v10
-; LMULMAX1-NEXT: vsrl.vi v10, v8, 1
-; LMULMAX1-NEXT: li a3, 85
-; LMULMAX1-NEXT: vand.vx v10, v10, a3
-; LMULMAX1-NEXT: vsub.vv v8, v8, v10
-; LMULMAX1-NEXT: li a4, 51
-; LMULMAX1-NEXT: vand.vx v10, v8, a4
-; LMULMAX1-NEXT: vsrl.vi v8, v8, 2
-; LMULMAX1-NEXT: vand.vx v8, v8, a4
-; LMULMAX1-NEXT: vadd.vv v8, v10, v8
-; LMULMAX1-NEXT: vsrl.vi v10, v8, 4
-; LMULMAX1-NEXT: vadd.vv v8, v8, v10
-; LMULMAX1-NEXT: vand.vi v8, v8, 15
-; LMULMAX1-NEXT: vsub.vx v10, v9, a2
-; LMULMAX1-NEXT: vnot.v v9, v9
-; LMULMAX1-NEXT: vand.vv v9, v9, v10
-; LMULMAX1-NEXT: vsrl.vi v10, v9, 1
-; LMULMAX1-NEXT: vand.vx v10, v10, a3
-; LMULMAX1-NEXT: vsub.vv v9, v9, v10
-; LMULMAX1-NEXT: vand.vx v10, v9, a4
-; LMULMAX1-NEXT: vsrl.vi v9, v9, 2
-; LMULMAX1-NEXT: vand.vx v9, v9, a4
-; LMULMAX1-NEXT: vadd.vv v9, v10, v9
-; LMULMAX1-NEXT: vsrl.vi v10, v9, 4
-; LMULMAX1-NEXT: vadd.vv v9, v9, v10
-; LMULMAX1-NEXT: vand.vi v9, v9, 15
-; LMULMAX1-NEXT: vse8.v v9, (a0)
-; LMULMAX1-NEXT: vse8.v v8, (a1)
-; LMULMAX1-NEXT: ret
-;
-; LMULMAX8-LABEL: cttz_zero_undef_v32i8:
-; LMULMAX8: # %bb.0:
-; LMULMAX8-NEXT: li a1, 32
-; LMULMAX8-NEXT: vsetvli zero, a1, e8, m2, ta, ma
-; LMULMAX8-NEXT: vle8.v v8, (a0)
-; LMULMAX8-NEXT: vrsub.vi v10, v8, 0
-; LMULMAX8-NEXT: vand.vv v8, v8, v10
-; LMULMAX8-NEXT: vsetvli zero, zero, e16, m4, ta, ma
-; LMULMAX8-NEXT: vzext.vf2 v12, v8
-; LMULMAX8-NEXT: vfwcvt.f.xu.v v16, v12
-; LMULMAX8-NEXT: vnsrl.wi v8, v16, 23
-; LMULMAX8-NEXT: vsetvli zero, zero, e8, m2, ta, ma
-; LMULMAX8-NEXT: vnsrl.wi v12, v8, 0
-; LMULMAX8-NEXT: li a1, 127
-; LMULMAX8-NEXT: vsub.vx v8, v12, a1
-; LMULMAX8-NEXT: vse8.v v8, (a0)
-; LMULMAX8-NEXT: ret
+; RVI-LABEL: cttz_zero_undef_v32i8:
+; RVI: # %bb.0:
+; RVI-NEXT: li a1, 32
+; RVI-NEXT: vsetvli zero, a1, e8, m2, ta, ma
+; RVI-NEXT: vle8.v v8, (a0)
+; RVI-NEXT: li a1, 1
+; RVI-NEXT: vsub.vx v10, v8, a1
+; RVI-NEXT: vnot.v v8, v8
+; RVI-NEXT: vand.vv v8, v8, v10
+; RVI-NEXT: vsrl.vi v10, v8, 1
+; RVI-NEXT: li a1, 85
+; RVI-NEXT: vand.vx v10, v10, a1
+; RVI-NEXT: vsub.vv v8, v8, v10
+; RVI-NEXT: li a1, 51
+; RVI-NEXT: vand.vx v10, v8, a1
+; RVI-NEXT: vsrl.vi v8, v8, 2
+; RVI-NEXT: vand.vx v8, v8, a1
+; RVI-NEXT: vadd.vv v8, v10, v8
+; RVI-NEXT: vsrl.vi v10, v8, 4
+; RVI-NEXT: vadd.vv v8, v8, v10
+; RVI-NEXT: vand.vi v8, v8, 15
+; RVI-NEXT: vse8.v v8, (a0)
+; RVI-NEXT: ret
+;
+; RVF-LABEL: cttz_zero_undef_v32i8:
+; RVF: # %bb.0:
+; RVF-NEXT: li a1, 32
+; RVF-NEXT: vsetvli zero, a1, e8, m2, ta, ma
+; RVF-NEXT: vle8.v v8, (a0)
+; RVF-NEXT: vrsub.vi v10, v8, 0
+; RVF-NEXT: vand.vv v8, v8, v10
+; RVF-NEXT: vsetvli zero, zero, e16, m4, ta, ma
+; RVF-NEXT: vzext.vf2 v12, v8
+; RVF-NEXT: vfwcvt.f.xu.v v16, v12
+; RVF-NEXT: vnsrl.wi v8, v16, 23
+; RVF-NEXT: vsetvli zero, zero, e8, m2, ta, ma
+; RVF-NEXT: vnsrl.wi v12, v8, 0
+; RVF-NEXT: li a1, 127
+; RVF-NEXT: vsub.vx v8, v12, a1
+; RVF-NEXT: vse8.v v8, (a0)
+; RVF-NEXT: ret
+;
+; RVD-LABEL: cttz_zero_undef_v32i8:
+; RVD: # %bb.0:
+; RVD-NEXT: li a1, 32
+; RVD-NEXT: vsetvli zero, a1, e8, m2, ta, ma
+; RVD-NEXT: vle8.v v8, (a0)
+; RVD-NEXT: vrsub.vi v10, v8, 0
+; RVD-NEXT: vand.vv v8, v8, v10
+; RVD-NEXT: vsetvli zero, zero, e16, m4, ta, ma
+; RVD-NEXT: vzext.vf2 v12, v8
+; RVD-NEXT: vfwcvt.f.xu.v v16, v12
+; RVD-NEXT: vnsrl.wi v8, v16, 23
+; RVD-NEXT: vsetvli zero, zero, e8, m2, ta, ma
+; RVD-NEXT: vnsrl.wi v12, v8, 0
+; RVD-NEXT: li a1, 127
+; RVD-NEXT: vsub.vx v8, v12, a1
+; RVD-NEXT: vse8.v v8, (a0)
+; RVD-NEXT: ret
;
; ZVBB-LABEL: cttz_zero_undef_v32i8:
; ZVBB: # %bb.0:
@@ -1849,96 +1250,61 @@ define void @cttz_zero_undef_v32i8(ptr %x, ptr %y) nounwind {
}
define void @cttz_zero_undef_v16i16(ptr %x, ptr %y) nounwind {
-; LMULMAX2-LABEL: cttz_zero_undef_v16i16:
-; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; LMULMAX2-NEXT: vle16.v v8, (a0)
-; LMULMAX2-NEXT: li a1, 1
-; LMULMAX2-NEXT: vsub.vx v10, v8, a1
-; LMULMAX2-NEXT: vnot.v v8, v8
-; LMULMAX2-NEXT: vand.vv v8, v8, v10
-; LMULMAX2-NEXT: vsrl.vi v10, v8, 1
-; LMULMAX2-NEXT: lui a1, 5
-; LMULMAX2-NEXT: addi a1, a1, 1365
-; LMULMAX2-NEXT: vand.vx v10, v10, a1
-; LMULMAX2-NEXT: vsub.vv v8, v8, v10
-; LMULMAX2-NEXT: lui a1, 3
-; LMULMAX2-NEXT: addi a1, a1, 819
-; LMULMAX2-NEXT: vand.vx v10, v8, a1
-; LMULMAX2-NEXT: vsrl.vi v8, v8, 2
-; LMULMAX2-NEXT: vand.vx v8, v8, a1
-; LMULMAX2-NEXT: vadd.vv v8, v10, v8
-; LMULMAX2-NEXT: vsrl.vi v10, v8, 4
-; LMULMAX2-NEXT: vadd.vv v8, v8, v10
-; LMULMAX2-NEXT: lui a1, 1
-; LMULMAX2-NEXT: addi a1, a1, -241
-; LMULMAX2-NEXT: vand.vx v8, v8, a1
-; LMULMAX2-NEXT: li a1, 257
-; LMULMAX2-NEXT: vmul.vx v8, v8, a1
-; LMULMAX2-NEXT: vsrl.vi v8, v8, 8
-; LMULMAX2-NEXT: vse16.v v8, (a0)
-; LMULMAX2-NEXT: ret
-;
-; LMULMAX1-LABEL: cttz_zero_undef_v16i16:
-; LMULMAX1: # %bb.0:
-; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; LMULMAX1-NEXT: addi a1, a0, 16
-; LMULMAX1-NEXT: vle16.v v8, (a1)
-; LMULMAX1-NEXT: vle16.v v9, (a0)
-; LMULMAX1-NEXT: li a2, 1
-; LMULMAX1-NEXT: vsub.vx v10, v8, a2
-; LMULMAX1-NEXT: vnot.v v8, v8
-; LMULMAX1-NEXT: vand.vv v8, v8, v10
-; LMULMAX1-NEXT: vsrl.vi v10, v8, 1
-; LMULMAX1-NEXT: lui a3, 5
-; LMULMAX1-NEXT: addi a3, a3, 1365
-; LMULMAX1-NEXT: vand.vx v10, v10, a3
-; LMULMAX1-NEXT: vsub.vv v8, v8, v10
-; LMULMAX1-NEXT: lui a4, 3
-; LMULMAX1-NEXT: addi a4, a4, 819
-; LMULMAX1-NEXT: vand.vx v10, v8, a4
-; LMULMAX1-NEXT: vsrl.vi v8, v8, 2
-; LMULMAX1-NEXT: vand.vx v8, v8, a4
-; LMULMAX1-NEXT: vadd.vv v8, v10, v8
-; LMULMAX1-NEXT: vsrl.vi v10, v8, 4
-; LMULMAX1-NEXT: vadd.vv v8, v8, v10
-; LMULMAX1-NEXT: lui a5, 1
-; LMULMAX1-NEXT: addi a5, a5, -241
-; LMULMAX1-NEXT: vand.vx v8, v8, a5
-; LMULMAX1-NEXT: li a6, 257
-; LMULMAX1-NEXT: vmul.vx v8, v8, a6
-; LMULMAX1-NEXT: vsrl.vi v8, v8, 8
-; LMULMAX1-NEXT: vsub.vx v10, v9, a2
-; LMULMAX1-NEXT: vnot.v v9, v9
-; LMULMAX1-NEXT: vand.vv v9, v9, v10
-; LMULMAX1-NEXT: vsrl.vi v10, v9, 1
-; LMULMAX1-NEXT: vand.vx v10, v10, a3
-; LMULMAX1-NEXT: vsub.vv v9, v9, v10
-; LMULMAX1-NEXT: vand.vx v10, v9, a4
-; LMULMAX1-NEXT: vsrl.vi v9, v9, 2
-; LMULMAX1-NEXT: vand.vx v9, v9, a4
-; LMULMAX1-NEXT: vadd.vv v9, v10, v9
-; LMULMAX1-NEXT: vsrl.vi v10, v9, 4
-; LMULMAX1-NEXT: vadd.vv v9, v9, v10
-; LMULMAX1-NEXT: vand.vx v9, v9, a5
-; LMULMAX1-NEXT: vmul.vx v9, v9, a6
-; LMULMAX1-NEXT: vsrl.vi v9, v9, 8
-; LMULMAX1-NEXT: vse16.v v9, (a0)
-; LMULMAX1-NEXT: vse16.v v8, (a1)
-; LMULMAX1-NEXT: ret
-;
-; LMULMAX8-LABEL: cttz_zero_undef_v16i16:
-; LMULMAX8: # %bb.0:
-; LMULMAX8-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; LMULMAX8-NEXT: vle16.v v8, (a0)
-; LMULMAX8-NEXT: vrsub.vi v10, v8, 0
-; LMULMAX8-NEXT: vand.vv v8, v8, v10
-; LMULMAX8-NEXT: vfwcvt.f.xu.v v12, v8
-; LMULMAX8-NEXT: vnsrl.wi v8, v12, 23
-; LMULMAX8-NEXT: li a1, 127
-; LMULMAX8-NEXT: vsub.vx v8, v8, a1
-; LMULMAX8-NEXT: vse16.v v8, (a0)
-; LMULMAX8-NEXT: ret
+; RVI-LABEL: cttz_zero_undef_v16i16:
+; RVI: # %bb.0:
+; RVI-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RVI-NEXT: vle16.v v8, (a0)
+; RVI-NEXT: li a1, 1
+; RVI-NEXT: vsub.vx v10, v8, a1
+; RVI-NEXT: vnot.v v8, v8
+; RVI-NEXT: vand.vv v8, v8, v10
+; RVI-NEXT: vsrl.vi v10, v8, 1
+; RVI-NEXT: lui a1, 5
+; RVI-NEXT: addi a1, a1, 1365
+; RVI-NEXT: vand.vx v10, v10, a1
+; RVI-NEXT: vsub.vv v8, v8, v10
+; RVI-NEXT: lui a1, 3
+; RVI-NEXT: addi a1, a1, 819
+; RVI-NEXT: vand.vx v10, v8, a1
+; RVI-NEXT: vsrl.vi v8, v8, 2
+; RVI-NEXT: vand.vx v8, v8, a1
+; RVI-NEXT: vadd.vv v8, v10, v8
+; RVI-NEXT: vsrl.vi v10, v8, 4
+; RVI-NEXT: vadd.vv v8, v8, v10
+; RVI-NEXT: lui a1, 1
+; RVI-NEXT: addi a1, a1, -241
+; RVI-NEXT: vand.vx v8, v8, a1
+; RVI-NEXT: li a1, 257
+; RVI-NEXT: vmul.vx v8, v8, a1
+; RVI-NEXT: vsrl.vi v8, v8, 8
+; RVI-NEXT: vse16.v v8, (a0)
+; RVI-NEXT: ret
+;
+; RVF-LABEL: cttz_zero_undef_v16i16:
+; RVF: # %bb.0:
+; RVF-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RVF-NEXT: vle16.v v8, (a0)
+; RVF-NEXT: vrsub.vi v10, v8, 0
+; RVF-NEXT: vand.vv v8, v8, v10
+; RVF-NEXT: vfwcvt.f.xu.v v12, v8
+; RVF-NEXT: vnsrl.wi v8, v12, 23
+; RVF-NEXT: li a1, 127
+; RVF-NEXT: vsub.vx v8, v8, a1
+; RVF-NEXT: vse16.v v8, (a0)
+; RVF-NEXT: ret
+;
+; RVD-LABEL: cttz_zero_undef_v16i16:
+; RVD: # %bb.0:
+; RVD-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RVD-NEXT: vle16.v v8, (a0)
+; RVD-NEXT: vrsub.vi v10, v8, 0
+; RVD-NEXT: vand.vv v8, v8, v10
+; RVD-NEXT: vfwcvt.f.xu.v v12, v8
+; RVD-NEXT: vnsrl.wi v8, v12, 23
+; RVD-NEXT: li a1, 127
+; RVD-NEXT: vsub.vx v8, v8, a1
+; RVD-NEXT: vse16.v v8, (a0)
+; RVD-NEXT: ret
;
; ZVBB-LABEL: cttz_zero_undef_v16i16:
; ZVBB: # %bb.0:
@@ -1955,141 +1321,65 @@ define void @cttz_zero_undef_v16i16(ptr %x, ptr %y) nounwind {
}
define void @cttz_zero_undef_v8i32(ptr %x, ptr %y) nounwind {
-; LMULMAX2-RV32I-LABEL: cttz_zero_undef_v8i32:
-; LMULMAX2-RV32I: # %bb.0:
-; LMULMAX2-RV32I-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; LMULMAX2-RV32I-NEXT: vle32.v v8, (a0)
-; LMULMAX2-RV32I-NEXT: li a1, 1
-; LMULMAX2-RV32I-NEXT: vsub.vx v10, v8, a1
-; LMULMAX2-RV32I-NEXT: vnot.v v8, v8
-; LMULMAX2-RV32I-NEXT: vand.vv v8, v8, v10
-; LMULMAX2-RV32I-NEXT: vsrl.vi v10, v8, 1
-; LMULMAX2-RV32I-NEXT: lui a1, 349525
-; LMULMAX2-RV32I-NEXT: addi a1, a1, 1365
-; LMULMAX2-RV32I-NEXT: vand.vx v10, v10, a1
-; LMULMAX2-RV32I-NEXT: vsub.vv v8, v8, v10
-; LMULMAX2-RV32I-NEXT: lui a1, 209715
-; LMULMAX2-RV32I-NEXT: addi a1, a1, 819
-; LMULMAX2-RV32I-NEXT: vand.vx v10, v8, a1
-; LMULMAX2-RV32I-NEXT: vsrl.vi v8, v8, 2
-; LMULMAX2-RV32I-NEXT: vand.vx v8, v8, a1
-; LMULMAX2-RV32I-NEXT: vadd.vv v8, v10, v8
-; LMULMAX2-RV32I-NEXT: vsrl.vi v10, v8, 4
-; LMULMAX2-RV32I-NEXT: vadd.vv v8, v8, v10
-; LMULMAX2-RV32I-NEXT: lui a1, 61681
-; LMULMAX2-RV32I-NEXT: addi a1, a1, -241
-; LMULMAX2-RV32I-NEXT: vand.vx v8, v8, a1
-; LMULMAX2-RV32I-NEXT: lui a1, 4112
-; LMULMAX2-RV32I-NEXT: addi a1, a1, 257
-; LMULMAX2-RV32I-NEXT: vmul.vx v8, v8, a1
-; LMULMAX2-RV32I-NEXT: vsrl.vi v8, v8, 24
-; LMULMAX2-RV32I-NEXT: vse32.v v8, (a0)
-; LMULMAX2-RV32I-NEXT: ret
-;
-; LMULMAX2-RV64I-LABEL: cttz_zero_undef_v8i32:
-; LMULMAX2-RV64I: # %bb.0:
-; LMULMAX2-RV64I-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; LMULMAX2-RV64I-NEXT: vle32.v v8, (a0)
-; LMULMAX2-RV64I-NEXT: li a1, 1
-; LMULMAX2-RV64I-NEXT: vsub.vx v10, v8, a1
-; LMULMAX2-RV64I-NEXT: vnot.v v8, v8
-; LMULMAX2-RV64I-NEXT: vand.vv v8, v8, v10
-; LMULMAX2-RV64I-NEXT: vsrl.vi v10, v8, 1
-; LMULMAX2-RV64I-NEXT: lui a1, 349525
-; LMULMAX2-RV64I-NEXT: addi a1, a1, 1365
-; LMULMAX2-RV64I-NEXT: vand.vx v10, v10, a1
-; LMULMAX2-RV64I-NEXT: vsub.vv v8, v8, v10
-; LMULMAX2-RV64I-NEXT: lui a1, 209715
-; LMULMAX2-RV64I-NEXT: addi a1, a1, 819
-; LMULMAX2-RV64I-NEXT: vand.vx v10, v8, a1
-; LMULMAX2-RV64I-NEXT: vsrl.vi v8, v8, 2
-; LMULMAX2-RV64I-NEXT: vand.vx v8, v8, a1
-; LMULMAX2-RV64I-NEXT: vadd.vv v8, v10, v8
-; LMULMAX2-RV64I-NEXT: vsrl.vi v10, v8, 4
-; LMULMAX2-RV64I-NEXT: vadd.vv v8, v8, v10
-; LMULMAX2-RV64I-NEXT: lui a1, 61681
-; LMULMAX2-RV64I-NEXT: addi a1, a1, -241
-; LMULMAX2-RV64I-NEXT: vand.vx v8, v8, a1
-; LMULMAX2-RV64I-NEXT: lui a1, 4112
-; LMULMAX2-RV64I-NEXT: addi a1, a1, 257
-; LMULMAX2-RV64I-NEXT: vmul.vx v8, v8, a1
-; LMULMAX2-RV64I-NEXT: vsrl.vi v8, v8, 24
-; LMULMAX2-RV64I-NEXT: vse32.v v8, (a0)
-; LMULMAX2-RV64I-NEXT: ret
-;
-; LMULMAX2-RV32F-LABEL: cttz_zero_undef_v8i32:
-; LMULMAX2-RV32F: # %bb.0:
-; LMULMAX2-RV32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; LMULMAX2-RV32F-NEXT: vle32.v v8, (a0)
-; LMULMAX2-RV32F-NEXT: vrsub.vi v10, v8, 0
-; LMULMAX2-RV32F-NEXT: vand.vv v8, v8, v10
-; LMULMAX2-RV32F-NEXT: fsrmi a1, 1
-; LMULMAX2-RV32F-NEXT: vfcvt.f.xu.v v8, v8
-; LMULMAX2-RV32F-NEXT: fsrm a1
-; LMULMAX2-RV32F-NEXT: vsrl.vi v8, v8, 23
-; LMULMAX2-RV32F-NEXT: li a1, 127
-; LMULMAX2-RV32F-NEXT: vsub.vx v8, v8, a1
-; LMULMAX2-RV32F-NEXT: vse32.v v8, (a0)
-; LMULMAX2-RV32F-NEXT: ret
-;
-; LMULMAX2-RV64F-LABEL: cttz_zero_undef_v8i32:
-; LMULMAX2-RV64F: # %bb.0:
-; LMULMAX2-RV64F-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; LMULMAX2-RV64F-NEXT: vle32.v v8, (a0)
-; LMULMAX2-RV64F-NEXT: vrsub.vi v10, v8, 0
-; LMULMAX2-RV64F-NEXT: vand.vv v8, v8, v10
-; LMULMAX2-RV64F-NEXT: fsrmi a1, 1
-; LMULMAX2-RV64F-NEXT: vfcvt.f.xu.v v8, v8
-; LMULMAX2-RV64F-NEXT: fsrm a1
-; LMULMAX2-RV64F-NEXT: vsrl.vi v8, v8, 23
-; LMULMAX2-RV64F-NEXT: li a1, 127
-; LMULMAX2-RV64F-NEXT: vsub.vx v8, v8, a1
-; LMULMAX2-RV64F-NEXT: vse32.v v8, (a0)
-; LMULMAX2-RV64F-NEXT: ret
-;
-; LMULMAX2-RV32D-LABEL: cttz_zero_undef_v8i32:
-; LMULMAX2-RV32D: # %bb.0:
-; LMULMAX2-RV32D-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; LMULMAX2-RV32D-NEXT: vle32.v v8, (a0)
-; LMULMAX2-RV32D-NEXT: vrsub.vi v10, v8, 0
-; LMULMAX2-RV32D-NEXT: vand.vv v8, v8, v10
-; LMULMAX2-RV32D-NEXT: fsrmi a1, 1
-; LMULMAX2-RV32D-NEXT: vfcvt.f.xu.v v8, v8
-; LMULMAX2-RV32D-NEXT: fsrm a1
-; LMULMAX2-RV32D-NEXT: vsrl.vi v8, v8, 23
-; LMULMAX2-RV32D-NEXT: li a1, 127
-; LMULMAX2-RV32D-NEXT: vsub.vx v8, v8, a1
-; LMULMAX2-RV32D-NEXT: vse32.v v8, (a0)
-; LMULMAX2-RV32D-NEXT: ret
-;
-; LMULMAX2-RV64D-LABEL: cttz_zero_undef_v8i32:
-; LMULMAX2-RV64D: # %bb.0:
-; LMULMAX2-RV64D-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; LMULMAX2-RV64D-NEXT: vle32.v v8, (a0)
-; LMULMAX2-RV64D-NEXT: vrsub.vi v10, v8, 0
-; LMULMAX2-RV64D-NEXT: vand.vv v8, v8, v10
-; LMULMAX2-RV64D-NEXT: fsrmi a1, 1
-; LMULMAX2-RV64D-NEXT: vfcvt.f.xu.v v8, v8
-; LMULMAX2-RV64D-NEXT: fsrm a1
-; LMULMAX2-RV64D-NEXT: vsrl.vi v8, v8, 23
-; LMULMAX2-RV64D-NEXT: li a1, 127
-; LMULMAX2-RV64D-NEXT: vsub.vx v8, v8, a1
-; LMULMAX2-RV64D-NEXT: vse32.v v8, (a0)
-; LMULMAX2-RV64D-NEXT: ret
-;
-; LMULMAX8-LABEL: cttz_zero_undef_v8i32:
-; LMULMAX8: # %bb.0:
-; LMULMAX8-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; LMULMAX8-NEXT: vle32.v v8, (a0)
-; LMULMAX8-NEXT: vrsub.vi v10, v8, 0
-; LMULMAX8-NEXT: vand.vv v8, v8, v10
-; LMULMAX8-NEXT: vfwcvt.f.xu.v v12, v8
-; LMULMAX8-NEXT: li a1, 52
-; LMULMAX8-NEXT: vnsrl.wx v8, v12, a1
-; LMULMAX8-NEXT: li a1, 1023
-; LMULMAX8-NEXT: vsub.vx v8, v8, a1
-; LMULMAX8-NEXT: vse32.v v8, (a0)
-; LMULMAX8-NEXT: ret
+; RVI-LABEL: cttz_zero_undef_v8i32:
+; RVI: # %bb.0:
+; RVI-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; RVI-NEXT: vle32.v v8, (a0)
+; RVI-NEXT: li a1, 1
+; RVI-NEXT: vsub.vx v10, v8, a1
+; RVI-NEXT: vnot.v v8, v8
+; RVI-NEXT: vand.vv v8, v8, v10
+; RVI-NEXT: vsrl.vi v10, v8, 1
+; RVI-NEXT: lui a1, 349525
+; RVI-NEXT: addi a1, a1, 1365
+; RVI-NEXT: vand.vx v10, v10, a1
+; RVI-NEXT: vsub.vv v8, v8, v10
+; RVI-NEXT: lui a1, 209715
+; RVI-NEXT: addi a1, a1, 819
+; RVI-NEXT: vand.vx v10, v8, a1
+; RVI-NEXT: vsrl.vi v8, v8, 2
+; RVI-NEXT: vand.vx v8, v8, a1
+; RVI-NEXT: vadd.vv v8, v10, v8
+; RVI-NEXT: vsrl.vi v10, v8, 4
+; RVI-NEXT: vadd.vv v8, v8, v10
+; RVI-NEXT: lui a1, 61681
+; RVI-NEXT: addi a1, a1, -241
+; RVI-NEXT: vand.vx v8, v8, a1
+; RVI-NEXT: lui a1, 4112
+; RVI-NEXT: addi a1, a1, 257
+; RVI-NEXT: vmul.vx v8, v8, a1
+; RVI-NEXT: vsrl.vi v8, v8, 24
+; RVI-NEXT: vse32.v v8, (a0)
+; RVI-NEXT: ret
+;
+; RVF-LABEL: cttz_zero_undef_v8i32:
+; RVF: # %bb.0:
+; RVF-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; RVF-NEXT: vle32.v v8, (a0)
+; RVF-NEXT: vrsub.vi v10, v8, 0
+; RVF-NEXT: vand.vv v8, v8, v10
+; RVF-NEXT: fsrmi a1, 1
+; RVF-NEXT: vfcvt.f.xu.v v8, v8
+; RVF-NEXT: fsrm a1
+; RVF-NEXT: vsrl.vi v8, v8, 23
+; RVF-NEXT: li a1, 127
+; RVF-NEXT: vsub.vx v8, v8, a1
+; RVF-NEXT: vse32.v v8, (a0)
+; RVF-NEXT: ret
+;
+; RVD-LABEL: cttz_zero_undef_v8i32:
+; RVD: # %bb.0:
+; RVD-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; RVD-NEXT: vle32.v v8, (a0)
+; RVD-NEXT: vrsub.vi v10, v8, 0
+; RVD-NEXT: vand.vv v8, v8, v10
+; RVD-NEXT: vfwcvt.f.xu.v v12, v8
+; RVD-NEXT: li a1, 52
+; RVD-NEXT: vnsrl.wx v8, v12, a1
+; RVD-NEXT: li a1, 1023
+; RVD-NEXT: vsub.vx v8, v8, a1
+; RVD-NEXT: vse32.v v8, (a0)
+; RVD-NEXT: ret
;
; ZVBB-LABEL: cttz_zero_undef_v8i32:
; ZVBB: # %bb.0:
@@ -2106,171 +1396,139 @@ define void @cttz_zero_undef_v8i32(ptr %x, ptr %y) nounwind {
}
define void @cttz_zero_undef_v4i64(ptr %x, ptr %y) nounwind {
-; LMULMAX2-RV32I-LABEL: cttz_zero_undef_v4i64:
-; LMULMAX2-RV32I: # %bb.0:
-; LMULMAX2-RV32I-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; LMULMAX2-RV32I-NEXT: vle64.v v8, (a0)
-; LMULMAX2-RV32I-NEXT: li a1, 1
-; LMULMAX2-RV32I-NEXT: vsub.vx v10, v8, a1
-; LMULMAX2-RV32I-NEXT: vnot.v v8, v8
-; LMULMAX2-RV32I-NEXT: vand.vv v8, v8, v10
-; LMULMAX2-RV32I-NEXT: vsrl.vi v10, v8, 1
-; LMULMAX2-RV32I-NEXT: lui a1, 349525
-; LMULMAX2-RV32I-NEXT: addi a1, a1, 1365
-; LMULMAX2-RV32I-NEXT: vsetvli a2, zero, e32, m2, ta, ma
-; LMULMAX2-RV32I-NEXT: vmv.v.x v12, a1
-; LMULMAX2-RV32I-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; LMULMAX2-RV32I-NEXT: vand.vv v10, v10, v12
-; LMULMAX2-RV32I-NEXT: vsub.vv v8, v8, v10
-; LMULMAX2-RV32I-NEXT: lui a1, 209715
-; LMULMAX2-RV32I-NEXT: addi a1, a1, 819
-; LMULMAX2-RV32I-NEXT: vsetvli a2, zero, e32, m2, ta, ma
-; LMULMAX2-RV32I-NEXT: vmv.v.x v10, a1
-; LMULMAX2-RV32I-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; LMULMAX2-RV32I-NEXT: vand.vv v12, v8, v10
-; LMULMAX2-RV32I-NEXT: vsrl.vi v8, v8, 2
-; LMULMAX2-RV32I-NEXT: vand.vv v8, v8, v10
-; LMULMAX2-RV32I-NEXT: vadd.vv v8, v12, v8
-; LMULMAX2-RV32I-NEXT: vsrl.vi v10, v8, 4
-; LMULMAX2-RV32I-NEXT: vadd.vv v8, v8, v10
-; LMULMAX2-RV32I-NEXT: lui a1, 61681
-; LMULMAX2-RV32I-NEXT: addi a1, a1, -241
-; LMULMAX2-RV32I-NEXT: vsetvli a2, zero, e32, m2, ta, ma
-; LMULMAX2-RV32I-NEXT: vmv.v.x v10, a1
-; LMULMAX2-RV32I-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; LMULMAX2-RV32I-NEXT: vand.vv v8, v8, v10
-; LMULMAX2-RV32I-NEXT: lui a1, 4112
-; LMULMAX2-RV32I-NEXT: addi a1, a1, 257
-; LMULMAX2-RV32I-NEXT: vsetvli a2, zero, e32, m2, ta, ma
-; LMULMAX2-RV32I-NEXT: vmv.v.x v10, a1
-; LMULMAX2-RV32I-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; LMULMAX2-RV32I-NEXT: vmul.vv v8, v8, v10
-; LMULMAX2-RV32I-NEXT: li a1, 56
-; LMULMAX2-RV32I-NEXT: vsrl.vx v8, v8, a1
-; LMULMAX2-RV32I-NEXT: vse64.v v8, (a0)
-; LMULMAX2-RV32I-NEXT: ret
-;
-; LMULMAX2-RV64I-LABEL: cttz_zero_undef_v4i64:
-; LMULMAX2-RV64I: # %bb.0:
-; LMULMAX2-RV64I-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; LMULMAX2-RV64I-NEXT: vle64.v v8, (a0)
-; LMULMAX2-RV64I-NEXT: li a1, 1
-; LMULMAX2-RV64I-NEXT: vsub.vx v10, v8, a1
-; LMULMAX2-RV64I-NEXT: vnot.v v8, v8
-; LMULMAX2-RV64I-NEXT: vand.vv v8, v8, v10
-; LMULMAX2-RV64I-NEXT: vsrl.vi v10, v8, 1
-; LMULMAX2-RV64I-NEXT: lui a1, 349525
-; LMULMAX2-RV64I-NEXT: addiw a1, a1, 1365
-; LMULMAX2-RV64I-NEXT: slli a2, a1, 32
-; LMULMAX2-RV64I-NEXT: add a1, a1, a2
-; LMULMAX2-RV64I-NEXT: vand.vx v10, v10, a1
-; LMULMAX2-RV64I-NEXT: vsub.vv v8, v8, v10
-; LMULMAX2-RV64I-NEXT: lui a1, 209715
-; LMULMAX2-RV64I-NEXT: addiw a1, a1, 819
-; LMULMAX2-RV64I-NEXT: slli a2, a1, 32
-; LMULMAX2-RV64I-NEXT: add a1, a1, a2
-; LMULMAX2-RV64I-NEXT: vand.vx v10, v8, a1
-; LMULMAX2-RV64I-NEXT: vsrl.vi v8, v8, 2
-; LMULMAX2-RV64I-NEXT: vand.vx v8, v8, a1
-; LMULMAX2-RV64I-NEXT: vadd.vv v8, v10, v8
-; LMULMAX2-RV64I-NEXT: vsrl.vi v10, v8, 4
-; LMULMAX2-RV64I-NEXT: vadd.vv v8, v8, v10
-; LMULMAX2-RV64I-NEXT: lui a1, 61681
-; LMULMAX2-RV64I-NEXT: addiw a1, a1, -241
-; LMULMAX2-RV64I-NEXT: slli a2, a1, 32
-; LMULMAX2-RV64I-NEXT: add a1, a1, a2
-; LMULMAX2-RV64I-NEXT: vand.vx v8, v8, a1
-; LMULMAX2-RV64I-NEXT: lui a1, 4112
-; LMULMAX2-RV64I-NEXT: addiw a1, a1, 257
-; LMULMAX2-RV64I-NEXT: slli a2, a1, 32
-; LMULMAX2-RV64I-NEXT: add a1, a1, a2
-; LMULMAX2-RV64I-NEXT: vmul.vx v8, v8, a1
-; LMULMAX2-RV64I-NEXT: li a1, 56
-; LMULMAX2-RV64I-NEXT: vsrl.vx v8, v8, a1
-; LMULMAX2-RV64I-NEXT: vse64.v v8, (a0)
-; LMULMAX2-RV64I-NEXT: ret
-;
-; LMULMAX2-RV32F-LABEL: cttz_zero_undef_v4i64:
-; LMULMAX2-RV32F: # %bb.0:
-; LMULMAX2-RV32F-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; LMULMAX2-RV32F-NEXT: vle64.v v8, (a0)
-; LMULMAX2-RV32F-NEXT: vrsub.vi v10, v8, 0
-; LMULMAX2-RV32F-NEXT: vand.vv v8, v8, v10
-; LMULMAX2-RV32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma
-; LMULMAX2-RV32F-NEXT: fsrmi a1, 1
-; LMULMAX2-RV32F-NEXT: vfncvt.f.xu.w v10, v8
-; LMULMAX2-RV32F-NEXT: fsrm a1
-; LMULMAX2-RV32F-NEXT: vsrl.vi v8, v10, 23
-; LMULMAX2-RV32F-NEXT: vsetvli zero, zero, e64, m2, ta, ma
-; LMULMAX2-RV32F-NEXT: vzext.vf2 v10, v8
-; LMULMAX2-RV32F-NEXT: li a1, 127
-; LMULMAX2-RV32F-NEXT: vsub.vx v8, v10, a1
-; LMULMAX2-RV32F-NEXT: vse64.v v8, (a0)
-; LMULMAX2-RV32F-NEXT: ret
-;
-; LMULMAX2-RV64F-LABEL: cttz_zero_undef_v4i64:
-; LMULMAX2-RV64F: # %bb.0:
-; LMULMAX2-RV64F-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; LMULMAX2-RV64F-NEXT: vle64.v v8, (a0)
-; LMULMAX2-RV64F-NEXT: vrsub.vi v10, v8, 0
-; LMULMAX2-RV64F-NEXT: vand.vv v8, v8, v10
-; LMULMAX2-RV64F-NEXT: vsetvli zero, zero, e32, m1, ta, ma
-; LMULMAX2-RV64F-NEXT: fsrmi a1, 1
-; LMULMAX2-RV64F-NEXT: vfncvt.f.xu.w v10, v8
-; LMULMAX2-RV64F-NEXT: fsrm a1
-; LMULMAX2-RV64F-NEXT: vsrl.vi v8, v10, 23
-; LMULMAX2-RV64F-NEXT: li a1, 127
-; LMULMAX2-RV64F-NEXT: vwsubu.vx v10, v8, a1
-; LMULMAX2-RV64F-NEXT: vse64.v v10, (a0)
-; LMULMAX2-RV64F-NEXT: ret
-;
-; LMULMAX2-RV32D-LABEL: cttz_zero_undef_v4i64:
-; LMULMAX2-RV32D: # %bb.0:
-; LMULMAX2-RV32D-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; LMULMAX2-RV32D-NEXT: vle64.v v8, (a0)
-; LMULMAX2-RV32D-NEXT: vrsub.vi v10, v8, 0
-; LMULMAX2-RV32D-NEXT: vand.vv v8, v8, v10
-; LMULMAX2-RV32D-NEXT: fsrmi a1, 1
-; LMULMAX2-RV32D-NEXT: vfcvt.f.xu.v v8, v8
-; LMULMAX2-RV32D-NEXT: fsrm a1
-; LMULMAX2-RV32D-NEXT: li a1, 52
-; LMULMAX2-RV32D-NEXT: vsrl.vx v8, v8, a1
-; LMULMAX2-RV32D-NEXT: li a1, 1023
-; LMULMAX2-RV32D-NEXT: vsub.vx v8, v8, a1
-; LMULMAX2-RV32D-NEXT: vse64.v v8, (a0)
-; LMULMAX2-RV32D-NEXT: ret
-;
-; LMULMAX2-RV64D-LABEL: cttz_zero_undef_v4i64:
-; LMULMAX2-RV64D: # %bb.0:
-; LMULMAX2-RV64D-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; LMULMAX2-RV64D-NEXT: vle64.v v8, (a0)
-; LMULMAX2-RV64D-NEXT: vrsub.vi v10, v8, 0
-; LMULMAX2-RV64D-NEXT: vand.vv v8, v8, v10
-; LMULMAX2-RV64D-NEXT: fsrmi a1, 1
-; LMULMAX2-RV64D-NEXT: vfcvt.f.xu.v v8, v8
-; LMULMAX2-RV64D-NEXT: fsrm a1
-; LMULMAX2-RV64D-NEXT: li a1, 52
-; LMULMAX2-RV64D-NEXT: vsrl.vx v8, v8, a1
-; LMULMAX2-RV64D-NEXT: li a1, 1023
-; LMULMAX2-RV64D-NEXT: vsub.vx v8, v8, a1
-; LMULMAX2-RV64D-NEXT: vse64.v v8, (a0)
-; LMULMAX2-RV64D-NEXT: ret
-;
-; LMULMAX8-LABEL: cttz_zero_undef_v4i64:
-; LMULMAX8: # %bb.0:
-; LMULMAX8-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; LMULMAX8-NEXT: vle64.v v8, (a0)
-; LMULMAX8-NEXT: vrsub.vi v10, v8, 0
-; LMULMAX8-NEXT: vand.vv v8, v8, v10
-; LMULMAX8-NEXT: fsrmi a1, 1
-; LMULMAX8-NEXT: vfcvt.f.xu.v v8, v8
-; LMULMAX8-NEXT: fsrm a1
-; LMULMAX8-NEXT: li a1, 52
-; LMULMAX8-NEXT: vsrl.vx v8, v8, a1
-; LMULMAX8-NEXT: li a1, 1023
-; LMULMAX8-NEXT: vsub.vx v8, v8, a1
-; LMULMAX8-NEXT: vse64.v v8, (a0)
-; LMULMAX8-NEXT: ret
+; RV32I-LABEL: cttz_zero_undef_v4i64:
+; RV32I: # %bb.0:
+; RV32I-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; RV32I-NEXT: vle64.v v8, (a0)
+; RV32I-NEXT: li a1, 1
+; RV32I-NEXT: vsub.vx v10, v8, a1
+; RV32I-NEXT: vnot.v v8, v8
+; RV32I-NEXT: vand.vv v8, v8, v10
+; RV32I-NEXT: vsrl.vi v10, v8, 1
+; RV32I-NEXT: lui a1, 349525
+; RV32I-NEXT: addi a1, a1, 1365
+; RV32I-NEXT: vsetvli a2, zero, e32, m2, ta, ma
+; RV32I-NEXT: vmv.v.x v12, a1
+; RV32I-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; RV32I-NEXT: vand.vv v10, v10, v12
+; RV32I-NEXT: vsub.vv v8, v8, v10
+; RV32I-NEXT: lui a1, 209715
+; RV32I-NEXT: addi a1, a1, 819
+; RV32I-NEXT: vsetvli a2, zero, e32, m2, ta, ma
+; RV32I-NEXT: vmv.v.x v10, a1
+; RV32I-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; RV32I-NEXT: vand.vv v12, v8, v10
+; RV32I-NEXT: vsrl.vi v8, v8, 2
+; RV32I-NEXT: vand.vv v8, v8, v10
+; RV32I-NEXT: vadd.vv v8, v12, v8
+; RV32I-NEXT: vsrl.vi v10, v8, 4
+; RV32I-NEXT: vadd.vv v8, v8, v10
+; RV32I-NEXT: lui a1, 61681
+; RV32I-NEXT: addi a1, a1, -241
+; RV32I-NEXT: vsetvli a2, zero, e32, m2, ta, ma
+; RV32I-NEXT: vmv.v.x v10, a1
+; RV32I-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; RV32I-NEXT: vand.vv v8, v8, v10
+; RV32I-NEXT: lui a1, 4112
+; RV32I-NEXT: addi a1, a1, 257
+; RV32I-NEXT: vsetvli a2, zero, e32, m2, ta, ma
+; RV32I-NEXT: vmv.v.x v10, a1
+; RV32I-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; RV32I-NEXT: vmul.vv v8, v8, v10
+; RV32I-NEXT: li a1, 56
+; RV32I-NEXT: vsrl.vx v8, v8, a1
+; RV32I-NEXT: vse64.v v8, (a0)
+; RV32I-NEXT: ret
+;
+; RV64I-LABEL: cttz_zero_undef_v4i64:
+; RV64I: # %bb.0:
+; RV64I-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; RV64I-NEXT: vle64.v v8, (a0)
+; RV64I-NEXT: li a1, 1
+; RV64I-NEXT: vsub.vx v10, v8, a1
+; RV64I-NEXT: vnot.v v8, v8
+; RV64I-NEXT: vand.vv v8, v8, v10
+; RV64I-NEXT: vsrl.vi v10, v8, 1
+; RV64I-NEXT: lui a1, 349525
+; RV64I-NEXT: addiw a1, a1, 1365
+; RV64I-NEXT: slli a2, a1, 32
+; RV64I-NEXT: add a1, a1, a2
+; RV64I-NEXT: vand.vx v10, v10, a1
+; RV64I-NEXT: vsub.vv v8, v8, v10
+; RV64I-NEXT: lui a1, 209715
+; RV64I-NEXT: addiw a1, a1, 819
+; RV64I-NEXT: slli a2, a1, 32
+; RV64I-NEXT: add a1, a1, a2
+; RV64I-NEXT: vand.vx v10, v8, a1
+; RV64I-NEXT: vsrl.vi v8, v8, 2
+; RV64I-NEXT: vand.vx v8, v8, a1
+; RV64I-NEXT: vadd.vv v8, v10, v8
+; RV64I-NEXT: vsrl.vi v10, v8, 4
+; RV64I-NEXT: vadd.vv v8, v8, v10
+; RV64I-NEXT: lui a1, 61681
+; RV64I-NEXT: addiw a1, a1, -241
+; RV64I-NEXT: slli a2, a1, 32
+; RV64I-NEXT: add a1, a1, a2
+; RV64I-NEXT: vand.vx v8, v8, a1
+; RV64I-NEXT: lui a1, 4112
+; RV64I-NEXT: addiw a1, a1, 257
+; RV64I-NEXT: slli a2, a1, 32
+; RV64I-NEXT: add a1, a1, a2
+; RV64I-NEXT: vmul.vx v8, v8, a1
+; RV64I-NEXT: li a1, 56
+; RV64I-NEXT: vsrl.vx v8, v8, a1
+; RV64I-NEXT: vse64.v v8, (a0)
+; RV64I-NEXT: ret
+;
+; RV32F-LABEL: cttz_zero_undef_v4i64:
+; RV32F: # %bb.0:
+; RV32F-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; RV32F-NEXT: vle64.v v8, (a0)
+; RV32F-NEXT: vrsub.vi v10, v8, 0
+; RV32F-NEXT: vand.vv v8, v8, v10
+; RV32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; RV32F-NEXT: fsrmi a1, 1
+; RV32F-NEXT: vfncvt.f.xu.w v10, v8
+; RV32F-NEXT: fsrm a1
+; RV32F-NEXT: vsrl.vi v8, v10, 23
+; RV32F-NEXT: vsetvli zero, zero, e64, m2, ta, ma
+; RV32F-NEXT: vzext.vf2 v10, v8
+; RV32F-NEXT: li a1, 127
+; RV32F-NEXT: vsub.vx v8, v10, a1
+; RV32F-NEXT: vse64.v v8, (a0)
+; RV32F-NEXT: ret
+;
+; RV64F-LABEL: cttz_zero_undef_v4i64:
+; RV64F: # %bb.0:
+; RV64F-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; RV64F-NEXT: vle64.v v8, (a0)
+; RV64F-NEXT: vrsub.vi v10, v8, 0
+; RV64F-NEXT: vand.vv v8, v8, v10
+; RV64F-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; RV64F-NEXT: fsrmi a1, 1
+; RV64F-NEXT: vfncvt.f.xu.w v10, v8
+; RV64F-NEXT: fsrm a1
+; RV64F-NEXT: vsrl.vi v8, v10, 23
+; RV64F-NEXT: li a1, 127
+; RV64F-NEXT: vwsubu.vx v10, v8, a1
+; RV64F-NEXT: vse64.v v10, (a0)
+; RV64F-NEXT: ret
+;
+; RVD-LABEL: cttz_zero_undef_v4i64:
+; RVD: # %bb.0:
+; RVD-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; RVD-NEXT: vle64.v v8, (a0)
+; RVD-NEXT: vrsub.vi v10, v8, 0
+; RVD-NEXT: vand.vv v8, v8, v10
+; RVD-NEXT: fsrmi a1, 1
+; RVD-NEXT: vfcvt.f.xu.v v8, v8
+; RVD-NEXT: fsrm a1
+; RVD-NEXT: li a1, 52
+; RVD-NEXT: vsrl.vx v8, v8, a1
+; RVD-NEXT: li a1, 1023
+; RVD-NEXT: vsub.vx v8, v8, a1
+; RVD-NEXT: vse64.v v8, (a0)
+; RVD-NEXT: ret
;
; ZVBB-LABEL: cttz_zero_undef_v4i64:
; ZVBB: # %bb.0:
@@ -2286,7 +1544,5 @@ define void @cttz_zero_undef_v4i64(ptr %x, ptr %y) nounwind {
ret void
}
;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
-; LMULMAX1-RV32: {{.*}}
-; LMULMAX1-RV64: {{.*}}
-; LMULMAX2-RV32: {{.*}}
-; LMULMAX2-RV64: {{.*}}
+; RV32D: {{.*}}
+; RV64D: {{.*}}
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extload-truncstore.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extload-truncstore.ll
index 4aaefb2..369f905 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extload-truncstore.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extload-truncstore.ll
@@ -1,8 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+zvfh,+v -riscv-v-fixed-length-vector-lmul-max=1 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,LMULMAX1
-; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+zvfh,+v -riscv-v-fixed-length-vector-lmul-max=4 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,LMULMAX4
-; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+zvfh,+v -riscv-v-fixed-length-vector-lmul-max=1 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,LMULMAX1
-; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+zvfh,+v -riscv-v-fixed-length-vector-lmul-max=4 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,LMULMAX4
+; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+zvfh,+v -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+zvfh,+v -verify-machineinstrs < %s | FileCheck %s
define <2 x i16> @sextload_v2i1_v2i16(ptr %x) {
; CHECK-LABEL: sextload_v2i1_v2i16:
@@ -138,46 +136,24 @@ define <4 x i32> @zextload_v4i8_v4i32(ptr %x) {
}
define <4 x i64> @sextload_v4i8_v4i64(ptr %x) {
-; LMULMAX1-LABEL: sextload_v4i8_v4i64:
-; LMULMAX1: # %bb.0:
-; LMULMAX1-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
-; LMULMAX1-NEXT: vle8.v v10, (a0)
-; LMULMAX1-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
-; LMULMAX1-NEXT: vslidedown.vi v8, v10, 2
-; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; LMULMAX1-NEXT: vsext.vf8 v9, v8
-; LMULMAX1-NEXT: vsext.vf8 v8, v10
-; LMULMAX1-NEXT: ret
-;
-; LMULMAX4-LABEL: sextload_v4i8_v4i64:
-; LMULMAX4: # %bb.0:
-; LMULMAX4-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; LMULMAX4-NEXT: vle8.v v10, (a0)
-; LMULMAX4-NEXT: vsext.vf8 v8, v10
-; LMULMAX4-NEXT: ret
+; CHECK-LABEL: sextload_v4i8_v4i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; CHECK-NEXT: vle8.v v10, (a0)
+; CHECK-NEXT: vsext.vf8 v8, v10
+; CHECK-NEXT: ret
%y = load <4 x i8>, ptr %x
%z = sext <4 x i8> %y to <4 x i64>
ret <4 x i64> %z
}
define <4 x i64> @zextload_v4i8_v4i64(ptr %x) {
-; LMULMAX1-LABEL: zextload_v4i8_v4i64:
-; LMULMAX1: # %bb.0:
-; LMULMAX1-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
-; LMULMAX1-NEXT: vle8.v v10, (a0)
-; LMULMAX1-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
-; LMULMAX1-NEXT: vslidedown.vi v8, v10, 2
-; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; LMULMAX1-NEXT: vzext.vf8 v9, v8
-; LMULMAX1-NEXT: vzext.vf8 v8, v10
-; LMULMAX1-NEXT: ret
-;
-; LMULMAX4-LABEL: zextload_v4i8_v4i64:
-; LMULMAX4: # %bb.0:
-; LMULMAX4-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; LMULMAX4-NEXT: vle8.v v10, (a0)
-; LMULMAX4-NEXT: vzext.vf8 v8, v10
-; LMULMAX4-NEXT: ret
+; CHECK-LABEL: zextload_v4i8_v4i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; CHECK-NEXT: vle8.v v10, (a0)
+; CHECK-NEXT: vzext.vf8 v8, v10
+; CHECK-NEXT: ret
%y = load <4 x i8>, ptr %x
%z = zext <4 x i8> %y to <4 x i64>
ret <4 x i64> %z
@@ -208,324 +184,120 @@ define <8 x i16> @zextload_v8i8_v8i16(ptr %x) {
}
define <8 x i32> @sextload_v8i8_v8i32(ptr %x) {
-; LMULMAX1-LABEL: sextload_v8i8_v8i32:
-; LMULMAX1: # %bb.0:
-; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
-; LMULMAX1-NEXT: vle8.v v10, (a0)
-; LMULMAX1-NEXT: vsetivli zero, 4, e8, mf2, ta, ma
-; LMULMAX1-NEXT: vslidedown.vi v8, v10, 4
-; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; LMULMAX1-NEXT: vsext.vf4 v9, v8
-; LMULMAX1-NEXT: vsext.vf4 v8, v10
-; LMULMAX1-NEXT: ret
-;
-; LMULMAX4-LABEL: sextload_v8i8_v8i32:
-; LMULMAX4: # %bb.0:
-; LMULMAX4-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; LMULMAX4-NEXT: vle8.v v10, (a0)
-; LMULMAX4-NEXT: vsext.vf4 v8, v10
-; LMULMAX4-NEXT: ret
+; CHECK-LABEL: sextload_v8i8_v8i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; CHECK-NEXT: vle8.v v10, (a0)
+; CHECK-NEXT: vsext.vf4 v8, v10
+; CHECK-NEXT: ret
%y = load <8 x i8>, ptr %x
%z = sext <8 x i8> %y to <8 x i32>
ret <8 x i32> %z
}
define <8 x i32> @zextload_v8i8_v8i32(ptr %x) {
-; LMULMAX1-LABEL: zextload_v8i8_v8i32:
-; LMULMAX1: # %bb.0:
-; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
-; LMULMAX1-NEXT: vle8.v v10, (a0)
-; LMULMAX1-NEXT: vsetivli zero, 4, e8, mf2, ta, ma
-; LMULMAX1-NEXT: vslidedown.vi v8, v10, 4
-; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; LMULMAX1-NEXT: vzext.vf4 v9, v8
-; LMULMAX1-NEXT: vzext.vf4 v8, v10
-; LMULMAX1-NEXT: ret
-;
-; LMULMAX4-LABEL: zextload_v8i8_v8i32:
-; LMULMAX4: # %bb.0:
-; LMULMAX4-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; LMULMAX4-NEXT: vle8.v v10, (a0)
-; LMULMAX4-NEXT: vzext.vf4 v8, v10
-; LMULMAX4-NEXT: ret
+; CHECK-LABEL: zextload_v8i8_v8i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; CHECK-NEXT: vle8.v v10, (a0)
+; CHECK-NEXT: vzext.vf4 v8, v10
+; CHECK-NEXT: ret
%y = load <8 x i8>, ptr %x
%z = zext <8 x i8> %y to <8 x i32>
ret <8 x i32> %z
}
define <8 x i64> @sextload_v8i8_v8i64(ptr %x) {
-; LMULMAX1-LABEL: sextload_v8i8_v8i64:
-; LMULMAX1: # %bb.0:
-; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
-; LMULMAX1-NEXT: vle8.v v9, (a0)
-; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; LMULMAX1-NEXT: vsext.vf8 v8, v9
-; LMULMAX1-NEXT: vsetivli zero, 4, e8, mf2, ta, ma
-; LMULMAX1-NEXT: vslidedown.vi v11, v9, 4
-; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; LMULMAX1-NEXT: vsext.vf8 v10, v11
-; LMULMAX1-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
-; LMULMAX1-NEXT: vslidedown.vi v12, v11, 2
-; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; LMULMAX1-NEXT: vsext.vf8 v11, v12
-; LMULMAX1-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
-; LMULMAX1-NEXT: vslidedown.vi v12, v9, 2
-; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; LMULMAX1-NEXT: vsext.vf8 v9, v12
-; LMULMAX1-NEXT: ret
-;
-; LMULMAX4-LABEL: sextload_v8i8_v8i64:
-; LMULMAX4: # %bb.0:
-; LMULMAX4-NEXT: vsetivli zero, 8, e64, m4, ta, ma
-; LMULMAX4-NEXT: vle8.v v12, (a0)
-; LMULMAX4-NEXT: vsext.vf8 v8, v12
-; LMULMAX4-NEXT: ret
+; CHECK-LABEL: sextload_v8i8_v8i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, ma
+; CHECK-NEXT: vle8.v v12, (a0)
+; CHECK-NEXT: vsext.vf8 v8, v12
+; CHECK-NEXT: ret
%y = load <8 x i8>, ptr %x
%z = sext <8 x i8> %y to <8 x i64>
ret <8 x i64> %z
}
define <8 x i64> @zextload_v8i8_v8i64(ptr %x) {
-; LMULMAX1-LABEL: zextload_v8i8_v8i64:
-; LMULMAX1: # %bb.0:
-; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
-; LMULMAX1-NEXT: vle8.v v9, (a0)
-; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; LMULMAX1-NEXT: vzext.vf8 v8, v9
-; LMULMAX1-NEXT: vsetivli zero, 4, e8, mf2, ta, ma
-; LMULMAX1-NEXT: vslidedown.vi v11, v9, 4
-; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; LMULMAX1-NEXT: vzext.vf8 v10, v11
-; LMULMAX1-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
-; LMULMAX1-NEXT: vslidedown.vi v12, v11, 2
-; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; LMULMAX1-NEXT: vzext.vf8 v11, v12
-; LMULMAX1-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
-; LMULMAX1-NEXT: vslidedown.vi v12, v9, 2
-; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; LMULMAX1-NEXT: vzext.vf8 v9, v12
-; LMULMAX1-NEXT: ret
-;
-; LMULMAX4-LABEL: zextload_v8i8_v8i64:
-; LMULMAX4: # %bb.0:
-; LMULMAX4-NEXT: vsetivli zero, 8, e64, m4, ta, ma
-; LMULMAX4-NEXT: vle8.v v12, (a0)
-; LMULMAX4-NEXT: vzext.vf8 v8, v12
-; LMULMAX4-NEXT: ret
+; CHECK-LABEL: zextload_v8i8_v8i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, ma
+; CHECK-NEXT: vle8.v v12, (a0)
+; CHECK-NEXT: vzext.vf8 v8, v12
+; CHECK-NEXT: ret
%y = load <8 x i8>, ptr %x
%z = zext <8 x i8> %y to <8 x i64>
ret <8 x i64> %z
}
define <16 x i16> @sextload_v16i8_v16i16(ptr %x) {
-; LMULMAX1-LABEL: sextload_v16i8_v16i16:
-; LMULMAX1: # %bb.0:
-; LMULMAX1-NEXT: vsetivli zero, 16, e8, m1, ta, ma
-; LMULMAX1-NEXT: vle8.v v10, (a0)
-; LMULMAX1-NEXT: vsetivli zero, 8, e8, m1, ta, ma
-; LMULMAX1-NEXT: vslidedown.vi v8, v10, 8
-; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; LMULMAX1-NEXT: vsext.vf2 v9, v8
-; LMULMAX1-NEXT: vsext.vf2 v8, v10
-; LMULMAX1-NEXT: ret
-;
-; LMULMAX4-LABEL: sextload_v16i8_v16i16:
-; LMULMAX4: # %bb.0:
-; LMULMAX4-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; LMULMAX4-NEXT: vle8.v v10, (a0)
-; LMULMAX4-NEXT: vsext.vf2 v8, v10
-; LMULMAX4-NEXT: ret
+; CHECK-LABEL: sextload_v16i8_v16i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; CHECK-NEXT: vle8.v v10, (a0)
+; CHECK-NEXT: vsext.vf2 v8, v10
+; CHECK-NEXT: ret
%y = load <16 x i8>, ptr %x
%z = sext <16 x i8> %y to <16 x i16>
ret <16 x i16> %z
}
define <16 x i16> @zextload_v16i8_v16i16(ptr %x) {
-; LMULMAX1-LABEL: zextload_v16i8_v16i16:
-; LMULMAX1: # %bb.0:
-; LMULMAX1-NEXT: vsetivli zero, 16, e8, m1, ta, ma
-; LMULMAX1-NEXT: vle8.v v10, (a0)
-; LMULMAX1-NEXT: vsetivli zero, 8, e8, m1, ta, ma
-; LMULMAX1-NEXT: vslidedown.vi v8, v10, 8
-; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; LMULMAX1-NEXT: vzext.vf2 v9, v8
-; LMULMAX1-NEXT: vzext.vf2 v8, v10
-; LMULMAX1-NEXT: ret
-;
-; LMULMAX4-LABEL: zextload_v16i8_v16i16:
-; LMULMAX4: # %bb.0:
-; LMULMAX4-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; LMULMAX4-NEXT: vle8.v v10, (a0)
-; LMULMAX4-NEXT: vzext.vf2 v8, v10
-; LMULMAX4-NEXT: ret
+; CHECK-LABEL: zextload_v16i8_v16i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; CHECK-NEXT: vle8.v v10, (a0)
+; CHECK-NEXT: vzext.vf2 v8, v10
+; CHECK-NEXT: ret
%y = load <16 x i8>, ptr %x
%z = zext <16 x i8> %y to <16 x i16>
ret <16 x i16> %z
}
define <16 x i32> @sextload_v16i8_v16i32(ptr %x) {
-; LMULMAX1-LABEL: sextload_v16i8_v16i32:
-; LMULMAX1: # %bb.0:
-; LMULMAX1-NEXT: vsetivli zero, 16, e8, m1, ta, ma
-; LMULMAX1-NEXT: vle8.v v9, (a0)
-; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; LMULMAX1-NEXT: vsext.vf4 v8, v9
-; LMULMAX1-NEXT: vsetivli zero, 8, e8, m1, ta, ma
-; LMULMAX1-NEXT: vslidedown.vi v11, v9, 8
-; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; LMULMAX1-NEXT: vsext.vf4 v10, v11
-; LMULMAX1-NEXT: vsetivli zero, 4, e8, mf2, ta, ma
-; LMULMAX1-NEXT: vslidedown.vi v12, v11, 4
-; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; LMULMAX1-NEXT: vsext.vf4 v11, v12
-; LMULMAX1-NEXT: vsetivli zero, 4, e8, mf2, ta, ma
-; LMULMAX1-NEXT: vslidedown.vi v12, v9, 4
-; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; LMULMAX1-NEXT: vsext.vf4 v9, v12
-; LMULMAX1-NEXT: ret
-;
-; LMULMAX4-LABEL: sextload_v16i8_v16i32:
-; LMULMAX4: # %bb.0:
-; LMULMAX4-NEXT: vsetivli zero, 16, e32, m4, ta, ma
-; LMULMAX4-NEXT: vle8.v v12, (a0)
-; LMULMAX4-NEXT: vsext.vf4 v8, v12
-; LMULMAX4-NEXT: ret
+; CHECK-LABEL: sextload_v16i8_v16i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma
+; CHECK-NEXT: vle8.v v12, (a0)
+; CHECK-NEXT: vsext.vf4 v8, v12
+; CHECK-NEXT: ret
%y = load <16 x i8>, ptr %x
%z = sext <16 x i8> %y to <16 x i32>
ret <16 x i32> %z
}
define <16 x i32> @zextload_v16i8_v16i32(ptr %x) {
-; LMULMAX1-LABEL: zextload_v16i8_v16i32:
-; LMULMAX1: # %bb.0:
-; LMULMAX1-NEXT: vsetivli zero, 16, e8, m1, ta, ma
-; LMULMAX1-NEXT: vle8.v v9, (a0)
-; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; LMULMAX1-NEXT: vzext.vf4 v8, v9
-; LMULMAX1-NEXT: vsetivli zero, 8, e8, m1, ta, ma
-; LMULMAX1-NEXT: vslidedown.vi v11, v9, 8
-; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; LMULMAX1-NEXT: vzext.vf4 v10, v11
-; LMULMAX1-NEXT: vsetivli zero, 4, e8, mf2, ta, ma
-; LMULMAX1-NEXT: vslidedown.vi v12, v11, 4
-; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; LMULMAX1-NEXT: vzext.vf4 v11, v12
-; LMULMAX1-NEXT: vsetivli zero, 4, e8, mf2, ta, ma
-; LMULMAX1-NEXT: vslidedown.vi v12, v9, 4
-; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; LMULMAX1-NEXT: vzext.vf4 v9, v12
-; LMULMAX1-NEXT: ret
-;
-; LMULMAX4-LABEL: zextload_v16i8_v16i32:
-; LMULMAX4: # %bb.0:
-; LMULMAX4-NEXT: vsetivli zero, 16, e32, m4, ta, ma
-; LMULMAX4-NEXT: vle8.v v12, (a0)
-; LMULMAX4-NEXT: vzext.vf4 v8, v12
-; LMULMAX4-NEXT: ret
+; CHECK-LABEL: zextload_v16i8_v16i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma
+; CHECK-NEXT: vle8.v v12, (a0)
+; CHECK-NEXT: vzext.vf4 v8, v12
+; CHECK-NEXT: ret
%y = load <16 x i8>, ptr %x
%z = zext <16 x i8> %y to <16 x i32>
ret <16 x i32> %z
}
define <16 x i64> @sextload_v16i8_v16i64(ptr %x) {
-; LMULMAX1-LABEL: sextload_v16i8_v16i64:
-; LMULMAX1: # %bb.0:
-; LMULMAX1-NEXT: vsetivli zero, 16, e8, m1, ta, ma
-; LMULMAX1-NEXT: vle8.v v10, (a0)
-; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; LMULMAX1-NEXT: vsext.vf8 v8, v10
-; LMULMAX1-NEXT: vsetivli zero, 8, e8, m1, ta, ma
-; LMULMAX1-NEXT: vslidedown.vi v11, v10, 8
-; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; LMULMAX1-NEXT: vsext.vf8 v12, v11
-; LMULMAX1-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
-; LMULMAX1-NEXT: vslidedown.vi v13, v10, 2
-; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; LMULMAX1-NEXT: vsext.vf8 v9, v13
-; LMULMAX1-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
-; LMULMAX1-NEXT: vslidedown.vi v14, v11, 2
-; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; LMULMAX1-NEXT: vsext.vf8 v13, v14
-; LMULMAX1-NEXT: vsetivli zero, 4, e8, mf2, ta, ma
-; LMULMAX1-NEXT: vslidedown.vi v11, v11, 4
-; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; LMULMAX1-NEXT: vsext.vf8 v14, v11
-; LMULMAX1-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
-; LMULMAX1-NEXT: vslidedown.vi v11, v11, 2
-; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; LMULMAX1-NEXT: vsext.vf8 v15, v11
-; LMULMAX1-NEXT: vsetivli zero, 4, e8, mf2, ta, ma
-; LMULMAX1-NEXT: vslidedown.vi v11, v10, 4
-; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; LMULMAX1-NEXT: vsext.vf8 v10, v11
-; LMULMAX1-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
-; LMULMAX1-NEXT: vslidedown.vi v16, v11, 2
-; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; LMULMAX1-NEXT: vsext.vf8 v11, v16
-; LMULMAX1-NEXT: ret
-;
-; LMULMAX4-LABEL: sextload_v16i8_v16i64:
-; LMULMAX4: # %bb.0:
-; LMULMAX4-NEXT: vsetivli zero, 16, e8, m1, ta, ma
-; LMULMAX4-NEXT: vle8.v v16, (a0)
-; LMULMAX4-NEXT: vsetivli zero, 8, e8, m1, ta, ma
-; LMULMAX4-NEXT: vslidedown.vi v8, v16, 8
-; LMULMAX4-NEXT: vsetivli zero, 8, e64, m4, ta, ma
-; LMULMAX4-NEXT: vsext.vf8 v12, v8
-; LMULMAX4-NEXT: vsext.vf8 v8, v16
-; LMULMAX4-NEXT: ret
+; CHECK-LABEL: sextload_v16i8_v16i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma
+; CHECK-NEXT: vle8.v v16, (a0)
+; CHECK-NEXT: vsext.vf8 v8, v16
+; CHECK-NEXT: ret
%y = load <16 x i8>, ptr %x
%z = sext <16 x i8> %y to <16 x i64>
ret <16 x i64> %z
}
define <16 x i64> @zextload_v16i8_v16i64(ptr %x) {
-; LMULMAX1-LABEL: zextload_v16i8_v16i64:
-; LMULMAX1: # %bb.0:
-; LMULMAX1-NEXT: vsetivli zero, 16, e8, m1, ta, ma
-; LMULMAX1-NEXT: vle8.v v10, (a0)
-; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; LMULMAX1-NEXT: vzext.vf8 v8, v10
-; LMULMAX1-NEXT: vsetivli zero, 8, e8, m1, ta, ma
-; LMULMAX1-NEXT: vslidedown.vi v11, v10, 8
-; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; LMULMAX1-NEXT: vzext.vf8 v12, v11
-; LMULMAX1-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
-; LMULMAX1-NEXT: vslidedown.vi v13, v10, 2
-; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; LMULMAX1-NEXT: vzext.vf8 v9, v13
-; LMULMAX1-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
-; LMULMAX1-NEXT: vslidedown.vi v14, v11, 2
-; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; LMULMAX1-NEXT: vzext.vf8 v13, v14
-; LMULMAX1-NEXT: vsetivli zero, 4, e8, mf2, ta, ma
-; LMULMAX1-NEXT: vslidedown.vi v11, v11, 4
-; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; LMULMAX1-NEXT: vzext.vf8 v14, v11
-; LMULMAX1-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
-; LMULMAX1-NEXT: vslidedown.vi v11, v11, 2
-; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; LMULMAX1-NEXT: vzext.vf8 v15, v11
-; LMULMAX1-NEXT: vsetivli zero, 4, e8, mf2, ta, ma
-; LMULMAX1-NEXT: vslidedown.vi v11, v10, 4
-; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; LMULMAX1-NEXT: vzext.vf8 v10, v11
-; LMULMAX1-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
-; LMULMAX1-NEXT: vslidedown.vi v16, v11, 2
-; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; LMULMAX1-NEXT: vzext.vf8 v11, v16
-; LMULMAX1-NEXT: ret
-;
-; LMULMAX4-LABEL: zextload_v16i8_v16i64:
-; LMULMAX4: # %bb.0:
-; LMULMAX4-NEXT: vsetivli zero, 16, e8, m1, ta, ma
-; LMULMAX4-NEXT: vle8.v v16, (a0)
-; LMULMAX4-NEXT: vsetivli zero, 8, e8, m1, ta, ma
-; LMULMAX4-NEXT: vslidedown.vi v8, v16, 8
-; LMULMAX4-NEXT: vsetivli zero, 8, e64, m4, ta, ma
-; LMULMAX4-NEXT: vzext.vf8 v12, v8
-; LMULMAX4-NEXT: vzext.vf8 v8, v16
-; LMULMAX4-NEXT: ret
+; CHECK-LABEL: zextload_v16i8_v16i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma
+; CHECK-NEXT: vle8.v v16, (a0)
+; CHECK-NEXT: vzext.vf8 v8, v16
+; CHECK-NEXT: ret
%y = load <16 x i8>, ptr %x
%z = zext <16 x i8> %y to <16 x i64>
ret <16 x i64> %z
@@ -649,46 +421,24 @@ define <4 x i32> @zextload_v4i16_v4i32(ptr %x) {
}
define <4 x i64> @sextload_v4i16_v4i64(ptr %x) {
-; LMULMAX1-LABEL: sextload_v4i16_v4i64:
-; LMULMAX1: # %bb.0:
-; LMULMAX1-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
-; LMULMAX1-NEXT: vle16.v v10, (a0)
-; LMULMAX1-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
-; LMULMAX1-NEXT: vslidedown.vi v8, v10, 2
-; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; LMULMAX1-NEXT: vsext.vf4 v9, v8
-; LMULMAX1-NEXT: vsext.vf4 v8, v10
-; LMULMAX1-NEXT: ret
-;
-; LMULMAX4-LABEL: sextload_v4i16_v4i64:
-; LMULMAX4: # %bb.0:
-; LMULMAX4-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; LMULMAX4-NEXT: vle16.v v10, (a0)
-; LMULMAX4-NEXT: vsext.vf4 v8, v10
-; LMULMAX4-NEXT: ret
+; CHECK-LABEL: sextload_v4i16_v4i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; CHECK-NEXT: vle16.v v10, (a0)
+; CHECK-NEXT: vsext.vf4 v8, v10
+; CHECK-NEXT: ret
%y = load <4 x i16>, ptr %x
%z = sext <4 x i16> %y to <4 x i64>
ret <4 x i64> %z
}
define <4 x i64> @zextload_v4i16_v4i64(ptr %x) {
-; LMULMAX1-LABEL: zextload_v4i16_v4i64:
-; LMULMAX1: # %bb.0:
-; LMULMAX1-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
-; LMULMAX1-NEXT: vle16.v v10, (a0)
-; LMULMAX1-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
-; LMULMAX1-NEXT: vslidedown.vi v8, v10, 2
-; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; LMULMAX1-NEXT: vzext.vf4 v9, v8
-; LMULMAX1-NEXT: vzext.vf4 v8, v10
-; LMULMAX1-NEXT: ret
-;
-; LMULMAX4-LABEL: zextload_v4i16_v4i64:
-; LMULMAX4: # %bb.0:
-; LMULMAX4-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; LMULMAX4-NEXT: vle16.v v10, (a0)
-; LMULMAX4-NEXT: vzext.vf4 v8, v10
-; LMULMAX4-NEXT: ret
+; CHECK-LABEL: zextload_v4i16_v4i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; CHECK-NEXT: vle16.v v10, (a0)
+; CHECK-NEXT: vzext.vf4 v8, v10
+; CHECK-NEXT: ret
%y = load <4 x i16>, ptr %x
%z = zext <4 x i16> %y to <4 x i64>
ret <4 x i64> %z
@@ -707,294 +457,108 @@ define void @truncstore_v8i16_v8i8(<8 x i16> %x, ptr %z) {
}
define <8 x i32> @sextload_v8i16_v8i32(ptr %x) {
-; LMULMAX1-LABEL: sextload_v8i16_v8i32:
-; LMULMAX1: # %bb.0:
-; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; LMULMAX1-NEXT: vle16.v v10, (a0)
-; LMULMAX1-NEXT: vsetivli zero, 4, e16, m1, ta, ma
-; LMULMAX1-NEXT: vslidedown.vi v8, v10, 4
-; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; LMULMAX1-NEXT: vsext.vf2 v9, v8
-; LMULMAX1-NEXT: vsext.vf2 v8, v10
-; LMULMAX1-NEXT: ret
-;
-; LMULMAX4-LABEL: sextload_v8i16_v8i32:
-; LMULMAX4: # %bb.0:
-; LMULMAX4-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; LMULMAX4-NEXT: vle16.v v10, (a0)
-; LMULMAX4-NEXT: vsext.vf2 v8, v10
-; LMULMAX4-NEXT: ret
+; CHECK-LABEL: sextload_v8i16_v8i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; CHECK-NEXT: vle16.v v10, (a0)
+; CHECK-NEXT: vsext.vf2 v8, v10
+; CHECK-NEXT: ret
%y = load <8 x i16>, ptr %x
%z = sext <8 x i16> %y to <8 x i32>
ret <8 x i32> %z
}
define <8 x i32> @zextload_v8i16_v8i32(ptr %x) {
-; LMULMAX1-LABEL: zextload_v8i16_v8i32:
-; LMULMAX1: # %bb.0:
-; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; LMULMAX1-NEXT: vle16.v v10, (a0)
-; LMULMAX1-NEXT: vsetivli zero, 4, e16, m1, ta, ma
-; LMULMAX1-NEXT: vslidedown.vi v8, v10, 4
-; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; LMULMAX1-NEXT: vzext.vf2 v9, v8
-; LMULMAX1-NEXT: vzext.vf2 v8, v10
-; LMULMAX1-NEXT: ret
-;
-; LMULMAX4-LABEL: zextload_v8i16_v8i32:
-; LMULMAX4: # %bb.0:
-; LMULMAX4-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; LMULMAX4-NEXT: vle16.v v10, (a0)
-; LMULMAX4-NEXT: vzext.vf2 v8, v10
-; LMULMAX4-NEXT: ret
+; CHECK-LABEL: zextload_v8i16_v8i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; CHECK-NEXT: vle16.v v10, (a0)
+; CHECK-NEXT: vzext.vf2 v8, v10
+; CHECK-NEXT: ret
%y = load <8 x i16>, ptr %x
%z = zext <8 x i16> %y to <8 x i32>
ret <8 x i32> %z
}
define <8 x i64> @sextload_v8i16_v8i64(ptr %x) {
-; LMULMAX1-LABEL: sextload_v8i16_v8i64:
-; LMULMAX1: # %bb.0:
-; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; LMULMAX1-NEXT: vle16.v v9, (a0)
-; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; LMULMAX1-NEXT: vsext.vf4 v8, v9
-; LMULMAX1-NEXT: vsetivli zero, 4, e16, m1, ta, ma
-; LMULMAX1-NEXT: vslidedown.vi v11, v9, 4
-; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; LMULMAX1-NEXT: vsext.vf4 v10, v11
-; LMULMAX1-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
-; LMULMAX1-NEXT: vslidedown.vi v12, v11, 2
-; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; LMULMAX1-NEXT: vsext.vf4 v11, v12
-; LMULMAX1-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
-; LMULMAX1-NEXT: vslidedown.vi v12, v9, 2
-; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; LMULMAX1-NEXT: vsext.vf4 v9, v12
-; LMULMAX1-NEXT: ret
-;
-; LMULMAX4-LABEL: sextload_v8i16_v8i64:
-; LMULMAX4: # %bb.0:
-; LMULMAX4-NEXT: vsetivli zero, 8, e64, m4, ta, ma
-; LMULMAX4-NEXT: vle16.v v12, (a0)
-; LMULMAX4-NEXT: vsext.vf4 v8, v12
-; LMULMAX4-NEXT: ret
+; CHECK-LABEL: sextload_v8i16_v8i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, ma
+; CHECK-NEXT: vle16.v v12, (a0)
+; CHECK-NEXT: vsext.vf4 v8, v12
+; CHECK-NEXT: ret
%y = load <8 x i16>, ptr %x
%z = sext <8 x i16> %y to <8 x i64>
ret <8 x i64> %z
}
define <8 x i64> @zextload_v8i16_v8i64(ptr %x) {
-; LMULMAX1-LABEL: zextload_v8i16_v8i64:
-; LMULMAX1: # %bb.0:
-; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; LMULMAX1-NEXT: vle16.v v9, (a0)
-; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; LMULMAX1-NEXT: vzext.vf4 v8, v9
-; LMULMAX1-NEXT: vsetivli zero, 4, e16, m1, ta, ma
-; LMULMAX1-NEXT: vslidedown.vi v11, v9, 4
-; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; LMULMAX1-NEXT: vzext.vf4 v10, v11
-; LMULMAX1-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
-; LMULMAX1-NEXT: vslidedown.vi v12, v11, 2
-; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; LMULMAX1-NEXT: vzext.vf4 v11, v12
-; LMULMAX1-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
-; LMULMAX1-NEXT: vslidedown.vi v12, v9, 2
-; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; LMULMAX1-NEXT: vzext.vf4 v9, v12
-; LMULMAX1-NEXT: ret
-;
-; LMULMAX4-LABEL: zextload_v8i16_v8i64:
-; LMULMAX4: # %bb.0:
-; LMULMAX4-NEXT: vsetivli zero, 8, e64, m4, ta, ma
-; LMULMAX4-NEXT: vle16.v v12, (a0)
-; LMULMAX4-NEXT: vzext.vf4 v8, v12
-; LMULMAX4-NEXT: ret
+; CHECK-LABEL: zextload_v8i16_v8i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, ma
+; CHECK-NEXT: vle16.v v12, (a0)
+; CHECK-NEXT: vzext.vf4 v8, v12
+; CHECK-NEXT: ret
%y = load <8 x i16>, ptr %x
%z = zext <8 x i16> %y to <8 x i64>
ret <8 x i64> %z
}
define void @truncstore_v16i16_v16i8(<16 x i16> %x, ptr %z) {
-; LMULMAX1-LABEL: truncstore_v16i16_v16i8:
-; LMULMAX1: # %bb.0:
-; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
-; LMULMAX1-NEXT: vnsrl.wi v9, v9, 0
-; LMULMAX1-NEXT: vnsrl.wi v8, v8, 0
-; LMULMAX1-NEXT: vsetivli zero, 16, e8, m1, ta, ma
-; LMULMAX1-NEXT: vslideup.vi v8, v9, 8
-; LMULMAX1-NEXT: vse8.v v8, (a0)
-; LMULMAX1-NEXT: ret
-;
-; LMULMAX4-LABEL: truncstore_v16i16_v16i8:
-; LMULMAX4: # %bb.0:
-; LMULMAX4-NEXT: vsetivli zero, 16, e8, m1, ta, ma
-; LMULMAX4-NEXT: vnsrl.wi v10, v8, 0
-; LMULMAX4-NEXT: vse8.v v10, (a0)
-; LMULMAX4-NEXT: ret
+; CHECK-LABEL: truncstore_v16i16_v16i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma
+; CHECK-NEXT: vnsrl.wi v10, v8, 0
+; CHECK-NEXT: vse8.v v10, (a0)
+; CHECK-NEXT: ret
%y = trunc <16 x i16> %x to <16 x i8>
store <16 x i8> %y, ptr %z
ret void
}
define <16 x i32> @sextload_v16i16_v16i32(ptr %x) {
-; LMULMAX1-LABEL: sextload_v16i16_v16i32:
-; LMULMAX1: # %bb.0:
-; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; LMULMAX1-NEXT: vle16.v v10, (a0)
-; LMULMAX1-NEXT: addi a0, a0, 16
-; LMULMAX1-NEXT: vle16.v v12, (a0)
-; LMULMAX1-NEXT: vsetivli zero, 4, e16, m1, ta, ma
-; LMULMAX1-NEXT: vslidedown.vi v8, v10, 4
-; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; LMULMAX1-NEXT: vsext.vf2 v9, v8
-; LMULMAX1-NEXT: vsext.vf2 v8, v10
-; LMULMAX1-NEXT: vsetivli zero, 4, e16, m1, ta, ma
-; LMULMAX1-NEXT: vslidedown.vi v10, v12, 4
-; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; LMULMAX1-NEXT: vsext.vf2 v11, v10
-; LMULMAX1-NEXT: vsext.vf2 v10, v12
-; LMULMAX1-NEXT: ret
-;
-; LMULMAX4-LABEL: sextload_v16i16_v16i32:
-; LMULMAX4: # %bb.0:
-; LMULMAX4-NEXT: vsetivli zero, 16, e32, m4, ta, ma
-; LMULMAX4-NEXT: vle16.v v12, (a0)
-; LMULMAX4-NEXT: vsext.vf2 v8, v12
-; LMULMAX4-NEXT: ret
+; CHECK-LABEL: sextload_v16i16_v16i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma
+; CHECK-NEXT: vle16.v v12, (a0)
+; CHECK-NEXT: vsext.vf2 v8, v12
+; CHECK-NEXT: ret
%y = load <16 x i16>, ptr %x
%z = sext <16 x i16> %y to <16 x i32>
ret <16 x i32> %z
}
define <16 x i32> @zextload_v16i16_v16i32(ptr %x) {
-; LMULMAX1-LABEL: zextload_v16i16_v16i32:
-; LMULMAX1: # %bb.0:
-; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; LMULMAX1-NEXT: vle16.v v10, (a0)
-; LMULMAX1-NEXT: addi a0, a0, 16
-; LMULMAX1-NEXT: vle16.v v12, (a0)
-; LMULMAX1-NEXT: vsetivli zero, 4, e16, m1, ta, ma
-; LMULMAX1-NEXT: vslidedown.vi v8, v10, 4
-; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; LMULMAX1-NEXT: vzext.vf2 v9, v8
-; LMULMAX1-NEXT: vzext.vf2 v8, v10
-; LMULMAX1-NEXT: vsetivli zero, 4, e16, m1, ta, ma
-; LMULMAX1-NEXT: vslidedown.vi v10, v12, 4
-; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; LMULMAX1-NEXT: vzext.vf2 v11, v10
-; LMULMAX1-NEXT: vzext.vf2 v10, v12
-; LMULMAX1-NEXT: ret
-;
-; LMULMAX4-LABEL: zextload_v16i16_v16i32:
-; LMULMAX4: # %bb.0:
-; LMULMAX4-NEXT: vsetivli zero, 16, e32, m4, ta, ma
-; LMULMAX4-NEXT: vle16.v v12, (a0)
-; LMULMAX4-NEXT: vzext.vf2 v8, v12
-; LMULMAX4-NEXT: ret
+; CHECK-LABEL: zextload_v16i16_v16i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma
+; CHECK-NEXT: vle16.v v12, (a0)
+; CHECK-NEXT: vzext.vf2 v8, v12
+; CHECK-NEXT: ret
%y = load <16 x i16>, ptr %x
%z = zext <16 x i16> %y to <16 x i32>
ret <16 x i32> %z
}
define <16 x i64> @sextload_v16i16_v16i64(ptr %x) {
-; LMULMAX1-LABEL: sextload_v16i16_v16i64:
-; LMULMAX1: # %bb.0:
-; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; LMULMAX1-NEXT: vle16.v v9, (a0)
-; LMULMAX1-NEXT: addi a0, a0, 16
-; LMULMAX1-NEXT: vle16.v v13, (a0)
-; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; LMULMAX1-NEXT: vsext.vf4 v8, v9
-; LMULMAX1-NEXT: vsetivli zero, 4, e16, m1, ta, ma
-; LMULMAX1-NEXT: vslidedown.vi v11, v9, 4
-; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; LMULMAX1-NEXT: vsext.vf4 v10, v11
-; LMULMAX1-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
-; LMULMAX1-NEXT: vslidedown.vi v12, v11, 2
-; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; LMULMAX1-NEXT: vsext.vf4 v11, v12
-; LMULMAX1-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
-; LMULMAX1-NEXT: vslidedown.vi v12, v9, 2
-; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; LMULMAX1-NEXT: vsext.vf4 v9, v12
-; LMULMAX1-NEXT: vsext.vf4 v12, v13
-; LMULMAX1-NEXT: vsetivli zero, 4, e16, m1, ta, ma
-; LMULMAX1-NEXT: vslidedown.vi v15, v13, 4
-; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; LMULMAX1-NEXT: vsext.vf4 v14, v15
-; LMULMAX1-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
-; LMULMAX1-NEXT: vslidedown.vi v16, v15, 2
-; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; LMULMAX1-NEXT: vsext.vf4 v15, v16
-; LMULMAX1-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
-; LMULMAX1-NEXT: vslidedown.vi v16, v13, 2
-; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; LMULMAX1-NEXT: vsext.vf4 v13, v16
-; LMULMAX1-NEXT: ret
-;
-; LMULMAX4-LABEL: sextload_v16i16_v16i64:
-; LMULMAX4: # %bb.0:
-; LMULMAX4-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; LMULMAX4-NEXT: vle16.v v16, (a0)
-; LMULMAX4-NEXT: vsetivli zero, 8, e16, m2, ta, ma
-; LMULMAX4-NEXT: vslidedown.vi v8, v16, 8
-; LMULMAX4-NEXT: vsetivli zero, 8, e64, m4, ta, ma
-; LMULMAX4-NEXT: vsext.vf4 v12, v8
-; LMULMAX4-NEXT: vsext.vf4 v8, v16
-; LMULMAX4-NEXT: ret
+; CHECK-LABEL: sextload_v16i16_v16i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma
+; CHECK-NEXT: vle16.v v16, (a0)
+; CHECK-NEXT: vsext.vf4 v8, v16
+; CHECK-NEXT: ret
%y = load <16 x i16>, ptr %x
%z = sext <16 x i16> %y to <16 x i64>
ret <16 x i64> %z
}
define <16 x i64> @zextload_v16i16_v16i64(ptr %x) {
-; LMULMAX1-LABEL: zextload_v16i16_v16i64:
-; LMULMAX1: # %bb.0:
-; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; LMULMAX1-NEXT: vle16.v v9, (a0)
-; LMULMAX1-NEXT: addi a0, a0, 16
-; LMULMAX1-NEXT: vle16.v v13, (a0)
-; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; LMULMAX1-NEXT: vzext.vf4 v8, v9
-; LMULMAX1-NEXT: vsetivli zero, 4, e16, m1, ta, ma
-; LMULMAX1-NEXT: vslidedown.vi v11, v9, 4
-; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; LMULMAX1-NEXT: vzext.vf4 v10, v11
-; LMULMAX1-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
-; LMULMAX1-NEXT: vslidedown.vi v12, v11, 2
-; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; LMULMAX1-NEXT: vzext.vf4 v11, v12
-; LMULMAX1-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
-; LMULMAX1-NEXT: vslidedown.vi v12, v9, 2
-; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; LMULMAX1-NEXT: vzext.vf4 v9, v12
-; LMULMAX1-NEXT: vzext.vf4 v12, v13
-; LMULMAX1-NEXT: vsetivli zero, 4, e16, m1, ta, ma
-; LMULMAX1-NEXT: vslidedown.vi v15, v13, 4
-; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; LMULMAX1-NEXT: vzext.vf4 v14, v15
-; LMULMAX1-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
-; LMULMAX1-NEXT: vslidedown.vi v16, v15, 2
-; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; LMULMAX1-NEXT: vzext.vf4 v15, v16
-; LMULMAX1-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
-; LMULMAX1-NEXT: vslidedown.vi v16, v13, 2
-; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; LMULMAX1-NEXT: vzext.vf4 v13, v16
-; LMULMAX1-NEXT: ret
-;
-; LMULMAX4-LABEL: zextload_v16i16_v16i64:
-; LMULMAX4: # %bb.0:
-; LMULMAX4-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; LMULMAX4-NEXT: vle16.v v16, (a0)
-; LMULMAX4-NEXT: vsetivli zero, 8, e16, m2, ta, ma
-; LMULMAX4-NEXT: vslidedown.vi v8, v16, 8
-; LMULMAX4-NEXT: vsetivli zero, 8, e64, m4, ta, ma
-; LMULMAX4-NEXT: vzext.vf4 v12, v8
-; LMULMAX4-NEXT: vzext.vf4 v8, v16
-; LMULMAX4-NEXT: ret
+; CHECK-LABEL: zextload_v16i16_v16i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma
+; CHECK-NEXT: vle16.v v16, (a0)
+; CHECK-NEXT: vzext.vf4 v8, v16
+; CHECK-NEXT: ret
%y = load <16 x i16>, ptr %x
%z = zext <16 x i16> %y to <16 x i64>
ret <16 x i64> %z
@@ -1077,323 +641,124 @@ define void @truncstore_v4i32_v4i16(<4 x i32> %x, ptr %z) {
}
define <4 x i64> @sextload_v4i32_v4i64(ptr %x) {
-; LMULMAX1-LABEL: sextload_v4i32_v4i64:
-; LMULMAX1: # %bb.0:
-; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; LMULMAX1-NEXT: vle32.v v10, (a0)
-; LMULMAX1-NEXT: vsetivli zero, 2, e32, m1, ta, ma
-; LMULMAX1-NEXT: vslidedown.vi v8, v10, 2
-; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; LMULMAX1-NEXT: vsext.vf2 v9, v8
-; LMULMAX1-NEXT: vsext.vf2 v8, v10
-; LMULMAX1-NEXT: ret
-;
-; LMULMAX4-LABEL: sextload_v4i32_v4i64:
-; LMULMAX4: # %bb.0:
-; LMULMAX4-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; LMULMAX4-NEXT: vle32.v v10, (a0)
-; LMULMAX4-NEXT: vsext.vf2 v8, v10
-; LMULMAX4-NEXT: ret
+; CHECK-LABEL: sextload_v4i32_v4i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; CHECK-NEXT: vle32.v v10, (a0)
+; CHECK-NEXT: vsext.vf2 v8, v10
+; CHECK-NEXT: ret
%y = load <4 x i32>, ptr %x
%z = sext <4 x i32> %y to <4 x i64>
ret <4 x i64> %z
}
define <4 x i64> @zextload_v4i32_v4i64(ptr %x) {
-; LMULMAX1-LABEL: zextload_v4i32_v4i64:
-; LMULMAX1: # %bb.0:
-; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; LMULMAX1-NEXT: vle32.v v10, (a0)
-; LMULMAX1-NEXT: vsetivli zero, 2, e32, m1, ta, ma
-; LMULMAX1-NEXT: vslidedown.vi v8, v10, 2
-; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; LMULMAX1-NEXT: vzext.vf2 v9, v8
-; LMULMAX1-NEXT: vzext.vf2 v8, v10
-; LMULMAX1-NEXT: ret
-;
-; LMULMAX4-LABEL: zextload_v4i32_v4i64:
-; LMULMAX4: # %bb.0:
-; LMULMAX4-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; LMULMAX4-NEXT: vle32.v v10, (a0)
-; LMULMAX4-NEXT: vzext.vf2 v8, v10
-; LMULMAX4-NEXT: ret
+; CHECK-LABEL: zextload_v4i32_v4i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; CHECK-NEXT: vle32.v v10, (a0)
+; CHECK-NEXT: vzext.vf2 v8, v10
+; CHECK-NEXT: ret
%y = load <4 x i32>, ptr %x
%z = zext <4 x i32> %y to <4 x i64>
ret <4 x i64> %z
}
define void @truncstore_v8i32_v8i8(<8 x i32> %x, ptr %z) {
-; LMULMAX1-LABEL: truncstore_v8i32_v8i8:
-; LMULMAX1: # %bb.0:
-; LMULMAX1-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
-; LMULMAX1-NEXT: vnsrl.wi v9, v9, 0
-; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf4, ta, ma
-; LMULMAX1-NEXT: vnsrl.wi v9, v9, 0
-; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
-; LMULMAX1-NEXT: vnsrl.wi v8, v8, 0
-; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf4, ta, ma
-; LMULMAX1-NEXT: vnsrl.wi v8, v8, 0
-; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
-; LMULMAX1-NEXT: vslideup.vi v8, v9, 4
-; LMULMAX1-NEXT: vse8.v v8, (a0)
-; LMULMAX1-NEXT: ret
-;
-; LMULMAX4-LABEL: truncstore_v8i32_v8i8:
-; LMULMAX4: # %bb.0:
-; LMULMAX4-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; LMULMAX4-NEXT: vnsrl.wi v10, v8, 0
-; LMULMAX4-NEXT: vsetvli zero, zero, e8, mf2, ta, ma
-; LMULMAX4-NEXT: vnsrl.wi v8, v10, 0
-; LMULMAX4-NEXT: vse8.v v8, (a0)
-; LMULMAX4-NEXT: ret
+; CHECK-LABEL: truncstore_v8i32_v8i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT: vnsrl.wi v10, v8, 0
+; CHECK-NEXT: vsetvli zero, zero, e8, mf2, ta, ma
+; CHECK-NEXT: vnsrl.wi v8, v10, 0
+; CHECK-NEXT: vse8.v v8, (a0)
+; CHECK-NEXT: ret
%y = trunc <8 x i32> %x to <8 x i8>
store <8 x i8> %y, ptr %z
ret void
}
define void @truncstore_v8i32_v8i16(<8 x i32> %x, ptr %z) {
-; LMULMAX1-LABEL: truncstore_v8i32_v8i16:
-; LMULMAX1: # %bb.0:
-; LMULMAX1-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
-; LMULMAX1-NEXT: vnsrl.wi v9, v9, 0
-; LMULMAX1-NEXT: vnsrl.wi v8, v8, 0
-; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; LMULMAX1-NEXT: vslideup.vi v8, v9, 4
-; LMULMAX1-NEXT: vse16.v v8, (a0)
-; LMULMAX1-NEXT: ret
-;
-; LMULMAX4-LABEL: truncstore_v8i32_v8i16:
-; LMULMAX4: # %bb.0:
-; LMULMAX4-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; LMULMAX4-NEXT: vnsrl.wi v10, v8, 0
-; LMULMAX4-NEXT: vse16.v v10, (a0)
-; LMULMAX4-NEXT: ret
+; CHECK-LABEL: truncstore_v8i32_v8i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT: vnsrl.wi v10, v8, 0
+; CHECK-NEXT: vse16.v v10, (a0)
+; CHECK-NEXT: ret
%y = trunc <8 x i32> %x to <8 x i16>
store <8 x i16> %y, ptr %z
ret void
}
define <8 x i64> @sextload_v8i32_v8i64(ptr %x) {
-; LMULMAX1-LABEL: sextload_v8i32_v8i64:
-; LMULMAX1: # %bb.0:
-; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; LMULMAX1-NEXT: vle32.v v10, (a0)
-; LMULMAX1-NEXT: addi a0, a0, 16
-; LMULMAX1-NEXT: vle32.v v12, (a0)
-; LMULMAX1-NEXT: vsetivli zero, 2, e32, m1, ta, ma
-; LMULMAX1-NEXT: vslidedown.vi v8, v10, 2
-; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; LMULMAX1-NEXT: vsext.vf2 v9, v8
-; LMULMAX1-NEXT: vsext.vf2 v8, v10
-; LMULMAX1-NEXT: vsetivli zero, 2, e32, m1, ta, ma
-; LMULMAX1-NEXT: vslidedown.vi v10, v12, 2
-; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; LMULMAX1-NEXT: vsext.vf2 v11, v10
-; LMULMAX1-NEXT: vsext.vf2 v10, v12
-; LMULMAX1-NEXT: ret
-;
-; LMULMAX4-LABEL: sextload_v8i32_v8i64:
-; LMULMAX4: # %bb.0:
-; LMULMAX4-NEXT: vsetivli zero, 8, e64, m4, ta, ma
-; LMULMAX4-NEXT: vle32.v v12, (a0)
-; LMULMAX4-NEXT: vsext.vf2 v8, v12
-; LMULMAX4-NEXT: ret
+; CHECK-LABEL: sextload_v8i32_v8i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, ma
+; CHECK-NEXT: vle32.v v12, (a0)
+; CHECK-NEXT: vsext.vf2 v8, v12
+; CHECK-NEXT: ret
%y = load <8 x i32>, ptr %x
%z = sext <8 x i32> %y to <8 x i64>
ret <8 x i64> %z
}
define <8 x i64> @zextload_v8i32_v8i64(ptr %x) {
-; LMULMAX1-LABEL: zextload_v8i32_v8i64:
-; LMULMAX1: # %bb.0:
-; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; LMULMAX1-NEXT: vle32.v v10, (a0)
-; LMULMAX1-NEXT: addi a0, a0, 16
-; LMULMAX1-NEXT: vle32.v v12, (a0)
-; LMULMAX1-NEXT: vsetivli zero, 2, e32, m1, ta, ma
-; LMULMAX1-NEXT: vslidedown.vi v8, v10, 2
-; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; LMULMAX1-NEXT: vzext.vf2 v9, v8
-; LMULMAX1-NEXT: vzext.vf2 v8, v10
-; LMULMAX1-NEXT: vsetivli zero, 2, e32, m1, ta, ma
-; LMULMAX1-NEXT: vslidedown.vi v10, v12, 2
-; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; LMULMAX1-NEXT: vzext.vf2 v11, v10
-; LMULMAX1-NEXT: vzext.vf2 v10, v12
-; LMULMAX1-NEXT: ret
-;
-; LMULMAX4-LABEL: zextload_v8i32_v8i64:
-; LMULMAX4: # %bb.0:
-; LMULMAX4-NEXT: vsetivli zero, 8, e64, m4, ta, ma
-; LMULMAX4-NEXT: vle32.v v12, (a0)
-; LMULMAX4-NEXT: vzext.vf2 v8, v12
-; LMULMAX4-NEXT: ret
+; CHECK-LABEL: zextload_v8i32_v8i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, ma
+; CHECK-NEXT: vle32.v v12, (a0)
+; CHECK-NEXT: vzext.vf2 v8, v12
+; CHECK-NEXT: ret
%y = load <8 x i32>, ptr %x
%z = zext <8 x i32> %y to <8 x i64>
ret <8 x i64> %z
}
define void @truncstore_v16i32_v16i8(<16 x i32> %x, ptr %z) {
-; LMULMAX1-LABEL: truncstore_v16i32_v16i8:
-; LMULMAX1: # %bb.0:
-; LMULMAX1-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
-; LMULMAX1-NEXT: vnsrl.wi v9, v9, 0
-; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf4, ta, ma
-; LMULMAX1-NEXT: vnsrl.wi v9, v9, 0
-; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
-; LMULMAX1-NEXT: vnsrl.wi v8, v8, 0
-; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf4, ta, ma
-; LMULMAX1-NEXT: vnsrl.wi v8, v8, 0
-; LMULMAX1-NEXT: vsetivli zero, 8, e8, m1, tu, ma
-; LMULMAX1-NEXT: vslideup.vi v8, v9, 4
-; LMULMAX1-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
-; LMULMAX1-NEXT: vnsrl.wi v9, v10, 0
-; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf4, ta, ma
-; LMULMAX1-NEXT: vnsrl.wi v9, v9, 0
-; LMULMAX1-NEXT: vsetivli zero, 12, e8, m1, tu, ma
-; LMULMAX1-NEXT: vslideup.vi v8, v9, 8
-; LMULMAX1-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
-; LMULMAX1-NEXT: vnsrl.wi v9, v11, 0
-; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf4, ta, ma
-; LMULMAX1-NEXT: vnsrl.wi v9, v9, 0
-; LMULMAX1-NEXT: vsetivli zero, 16, e8, m1, ta, ma
-; LMULMAX1-NEXT: vslideup.vi v8, v9, 12
-; LMULMAX1-NEXT: vse8.v v8, (a0)
-; LMULMAX1-NEXT: ret
-;
-; LMULMAX4-LABEL: truncstore_v16i32_v16i8:
-; LMULMAX4: # %bb.0:
-; LMULMAX4-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; LMULMAX4-NEXT: vnsrl.wi v12, v8, 0
-; LMULMAX4-NEXT: vsetvli zero, zero, e8, m1, ta, ma
-; LMULMAX4-NEXT: vnsrl.wi v8, v12, 0
-; LMULMAX4-NEXT: vse8.v v8, (a0)
-; LMULMAX4-NEXT: ret
+; CHECK-LABEL: truncstore_v16i32_v16i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; CHECK-NEXT: vnsrl.wi v12, v8, 0
+; CHECK-NEXT: vsetvli zero, zero, e8, m1, ta, ma
+; CHECK-NEXT: vnsrl.wi v8, v12, 0
+; CHECK-NEXT: vse8.v v8, (a0)
+; CHECK-NEXT: ret
%y = trunc <16 x i32> %x to <16 x i8>
store <16 x i8> %y, ptr %z
ret void
}
define void @truncstore_v16i32_v16i16(<16 x i32> %x, ptr %z) {
-; LMULMAX1-LABEL: truncstore_v16i32_v16i16:
-; LMULMAX1: # %bb.0:
-; LMULMAX1-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
-; LMULMAX1-NEXT: vnsrl.wi v9, v9, 0
-; LMULMAX1-NEXT: vnsrl.wi v8, v8, 0
-; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; LMULMAX1-NEXT: vslideup.vi v8, v9, 4
-; LMULMAX1-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
-; LMULMAX1-NEXT: vnsrl.wi v9, v11, 0
-; LMULMAX1-NEXT: vnsrl.wi v10, v10, 0
-; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; LMULMAX1-NEXT: vslideup.vi v10, v9, 4
-; LMULMAX1-NEXT: addi a1, a0, 16
-; LMULMAX1-NEXT: vse16.v v10, (a1)
-; LMULMAX1-NEXT: vse16.v v8, (a0)
-; LMULMAX1-NEXT: ret
-;
-; LMULMAX4-LABEL: truncstore_v16i32_v16i16:
-; LMULMAX4: # %bb.0:
-; LMULMAX4-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; LMULMAX4-NEXT: vnsrl.wi v12, v8, 0
-; LMULMAX4-NEXT: vse16.v v12, (a0)
-; LMULMAX4-NEXT: ret
+; CHECK-LABEL: truncstore_v16i32_v16i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; CHECK-NEXT: vnsrl.wi v12, v8, 0
+; CHECK-NEXT: vse16.v v12, (a0)
+; CHECK-NEXT: ret
%y = trunc <16 x i32> %x to <16 x i16>
store <16 x i16> %y, ptr %z
ret void
}
define <16 x i64> @sextload_v16i32_v16i64(ptr %x) {
-; LMULMAX1-LABEL: sextload_v16i32_v16i64:
-; LMULMAX1: # %bb.0:
-; LMULMAX1-NEXT: addi a1, a0, 48
-; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; LMULMAX1-NEXT: vle32.v v16, (a1)
-; LMULMAX1-NEXT: addi a1, a0, 32
-; LMULMAX1-NEXT: vle32.v v14, (a1)
-; LMULMAX1-NEXT: vle32.v v10, (a0)
-; LMULMAX1-NEXT: addi a0, a0, 16
-; LMULMAX1-NEXT: vle32.v v12, (a0)
-; LMULMAX1-NEXT: vsetivli zero, 2, e32, m1, ta, ma
-; LMULMAX1-NEXT: vslidedown.vi v8, v10, 2
-; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; LMULMAX1-NEXT: vsext.vf2 v9, v8
-; LMULMAX1-NEXT: vsext.vf2 v8, v10
-; LMULMAX1-NEXT: vsetivli zero, 2, e32, m1, ta, ma
-; LMULMAX1-NEXT: vslidedown.vi v10, v12, 2
-; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; LMULMAX1-NEXT: vsext.vf2 v11, v10
-; LMULMAX1-NEXT: vsetivli zero, 2, e32, m1, ta, ma
-; LMULMAX1-NEXT: vslidedown.vi v10, v14, 2
-; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; LMULMAX1-NEXT: vsext.vf2 v13, v10
-; LMULMAX1-NEXT: vsetivli zero, 2, e32, m1, ta, ma
-; LMULMAX1-NEXT: vslidedown.vi v10, v16, 2
-; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; LMULMAX1-NEXT: vsext.vf2 v15, v10
-; LMULMAX1-NEXT: vsext.vf2 v10, v12
-; LMULMAX1-NEXT: vsext.vf2 v12, v14
-; LMULMAX1-NEXT: vsext.vf2 v14, v16
-; LMULMAX1-NEXT: ret
-;
-; LMULMAX4-LABEL: sextload_v16i32_v16i64:
-; LMULMAX4: # %bb.0:
-; LMULMAX4-NEXT: vsetivli zero, 16, e32, m4, ta, ma
-; LMULMAX4-NEXT: vle32.v v16, (a0)
-; LMULMAX4-NEXT: vsetivli zero, 8, e32, m4, ta, ma
-; LMULMAX4-NEXT: vslidedown.vi v8, v16, 8
-; LMULMAX4-NEXT: vsetivli zero, 8, e64, m4, ta, ma
-; LMULMAX4-NEXT: vsext.vf2 v12, v8
-; LMULMAX4-NEXT: vsext.vf2 v8, v16
-; LMULMAX4-NEXT: ret
+; CHECK-LABEL: sextload_v16i32_v16i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma
+; CHECK-NEXT: vle32.v v16, (a0)
+; CHECK-NEXT: vsext.vf2 v8, v16
+; CHECK-NEXT: ret
%y = load <16 x i32>, ptr %x
%z = sext <16 x i32> %y to <16 x i64>
ret <16 x i64> %z
}
define <16 x i64> @zextload_v16i32_v16i64(ptr %x) {
-; LMULMAX1-LABEL: zextload_v16i32_v16i64:
-; LMULMAX1: # %bb.0:
-; LMULMAX1-NEXT: addi a1, a0, 48
-; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; LMULMAX1-NEXT: vle32.v v16, (a1)
-; LMULMAX1-NEXT: addi a1, a0, 32
-; LMULMAX1-NEXT: vle32.v v14, (a1)
-; LMULMAX1-NEXT: vle32.v v10, (a0)
-; LMULMAX1-NEXT: addi a0, a0, 16
-; LMULMAX1-NEXT: vle32.v v12, (a0)
-; LMULMAX1-NEXT: vsetivli zero, 2, e32, m1, ta, ma
-; LMULMAX1-NEXT: vslidedown.vi v8, v10, 2
-; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; LMULMAX1-NEXT: vzext.vf2 v9, v8
-; LMULMAX1-NEXT: vzext.vf2 v8, v10
-; LMULMAX1-NEXT: vsetivli zero, 2, e32, m1, ta, ma
-; LMULMAX1-NEXT: vslidedown.vi v10, v12, 2
-; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; LMULMAX1-NEXT: vzext.vf2 v11, v10
-; LMULMAX1-NEXT: vsetivli zero, 2, e32, m1, ta, ma
-; LMULMAX1-NEXT: vslidedown.vi v10, v14, 2
-; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; LMULMAX1-NEXT: vzext.vf2 v13, v10
-; LMULMAX1-NEXT: vsetivli zero, 2, e32, m1, ta, ma
-; LMULMAX1-NEXT: vslidedown.vi v10, v16, 2
-; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; LMULMAX1-NEXT: vzext.vf2 v15, v10
-; LMULMAX1-NEXT: vzext.vf2 v10, v12
-; LMULMAX1-NEXT: vzext.vf2 v12, v14
-; LMULMAX1-NEXT: vzext.vf2 v14, v16
-; LMULMAX1-NEXT: ret
-;
-; LMULMAX4-LABEL: zextload_v16i32_v16i64:
-; LMULMAX4: # %bb.0:
-; LMULMAX4-NEXT: vsetivli zero, 16, e32, m4, ta, ma
-; LMULMAX4-NEXT: vle32.v v16, (a0)
-; LMULMAX4-NEXT: vsetivli zero, 8, e32, m4, ta, ma
-; LMULMAX4-NEXT: vslidedown.vi v8, v16, 8
-; LMULMAX4-NEXT: vsetivli zero, 8, e64, m4, ta, ma
-; LMULMAX4-NEXT: vzext.vf2 v12, v8
-; LMULMAX4-NEXT: vzext.vf2 v8, v16
-; LMULMAX4-NEXT: ret
+; CHECK-LABEL: zextload_v16i32_v16i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma
+; CHECK-NEXT: vle32.v v16, (a0)
+; CHECK-NEXT: vzext.vf2 v8, v16
+; CHECK-NEXT: ret
%y = load <16 x i32>, ptr %x
%z = zext <16 x i32> %y to <16 x i64>
ret <16 x i64> %z
@@ -1442,415 +807,126 @@ define void @truncstore_v2i64_v2i32(<2 x i64> %x, ptr %z) {
}
define void @truncstore_v4i64_v4i8(<4 x i64> %x, ptr %z) {
-; LMULMAX1-LABEL: truncstore_v4i64_v4i8:
-; LMULMAX1: # %bb.0:
-; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
-; LMULMAX1-NEXT: vnsrl.wi v9, v9, 0
-; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
-; LMULMAX1-NEXT: vnsrl.wi v9, v9, 0
-; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, ma
-; LMULMAX1-NEXT: vnsrl.wi v9, v9, 0
-; LMULMAX1-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
-; LMULMAX1-NEXT: vnsrl.wi v8, v8, 0
-; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
-; LMULMAX1-NEXT: vnsrl.wi v8, v8, 0
-; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, ma
-; LMULMAX1-NEXT: vnsrl.wi v8, v8, 0
-; LMULMAX1-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
-; LMULMAX1-NEXT: vslideup.vi v8, v9, 2
-; LMULMAX1-NEXT: vse8.v v8, (a0)
-; LMULMAX1-NEXT: ret
-;
-; LMULMAX4-LABEL: truncstore_v4i64_v4i8:
-; LMULMAX4: # %bb.0:
-; LMULMAX4-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; LMULMAX4-NEXT: vnsrl.wi v10, v8, 0
-; LMULMAX4-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
-; LMULMAX4-NEXT: vnsrl.wi v8, v10, 0
-; LMULMAX4-NEXT: vsetvli zero, zero, e8, mf4, ta, ma
-; LMULMAX4-NEXT: vnsrl.wi v8, v8, 0
-; LMULMAX4-NEXT: vse8.v v8, (a0)
-; LMULMAX4-NEXT: ret
+; CHECK-LABEL: truncstore_v4i64_v4i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT: vnsrl.wi v10, v8, 0
+; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
+; CHECK-NEXT: vnsrl.wi v8, v10, 0
+; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, ma
+; CHECK-NEXT: vnsrl.wi v8, v8, 0
+; CHECK-NEXT: vse8.v v8, (a0)
+; CHECK-NEXT: ret
%y = trunc <4 x i64> %x to <4 x i8>
store <4 x i8> %y, ptr %z
ret void
}
define void @truncstore_v4i64_v4i16(<4 x i64> %x, ptr %z) {
-; LMULMAX1-LABEL: truncstore_v4i64_v4i16:
-; LMULMAX1: # %bb.0:
-; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
-; LMULMAX1-NEXT: vnsrl.wi v9, v9, 0
-; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
-; LMULMAX1-NEXT: vnsrl.wi v9, v9, 0
-; LMULMAX1-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
-; LMULMAX1-NEXT: vnsrl.wi v8, v8, 0
-; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
-; LMULMAX1-NEXT: vnsrl.wi v8, v8, 0
-; LMULMAX1-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
-; LMULMAX1-NEXT: vslideup.vi v8, v9, 2
-; LMULMAX1-NEXT: vse16.v v8, (a0)
-; LMULMAX1-NEXT: ret
-;
-; LMULMAX4-LABEL: truncstore_v4i64_v4i16:
-; LMULMAX4: # %bb.0:
-; LMULMAX4-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; LMULMAX4-NEXT: vnsrl.wi v10, v8, 0
-; LMULMAX4-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
-; LMULMAX4-NEXT: vnsrl.wi v8, v10, 0
-; LMULMAX4-NEXT: vse16.v v8, (a0)
-; LMULMAX4-NEXT: ret
+; CHECK-LABEL: truncstore_v4i64_v4i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT: vnsrl.wi v10, v8, 0
+; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
+; CHECK-NEXT: vnsrl.wi v8, v10, 0
+; CHECK-NEXT: vse16.v v8, (a0)
+; CHECK-NEXT: ret
%y = trunc <4 x i64> %x to <4 x i16>
store <4 x i16> %y, ptr %z
ret void
}
define void @truncstore_v4i64_v4i32(<4 x i64> %x, ptr %z) {
-; LMULMAX1-LABEL: truncstore_v4i64_v4i32:
-; LMULMAX1: # %bb.0:
-; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
-; LMULMAX1-NEXT: vnsrl.wi v9, v9, 0
-; LMULMAX1-NEXT: vnsrl.wi v8, v8, 0
-; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; LMULMAX1-NEXT: vslideup.vi v8, v9, 2
-; LMULMAX1-NEXT: vse32.v v8, (a0)
-; LMULMAX1-NEXT: ret
-;
-; LMULMAX4-LABEL: truncstore_v4i64_v4i32:
-; LMULMAX4: # %bb.0:
-; LMULMAX4-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; LMULMAX4-NEXT: vnsrl.wi v10, v8, 0
-; LMULMAX4-NEXT: vse32.v v10, (a0)
-; LMULMAX4-NEXT: ret
+; CHECK-LABEL: truncstore_v4i64_v4i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT: vnsrl.wi v10, v8, 0
+; CHECK-NEXT: vse32.v v10, (a0)
+; CHECK-NEXT: ret
%y = trunc <4 x i64> %x to <4 x i32>
store <4 x i32> %y, ptr %z
ret void
}
define void @truncstore_v8i64_v8i8(<8 x i64> %x, ptr %z) {
-; LMULMAX1-LABEL: truncstore_v8i64_v8i8:
-; LMULMAX1: # %bb.0:
-; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
-; LMULMAX1-NEXT: vnsrl.wi v9, v9, 0
-; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
-; LMULMAX1-NEXT: vnsrl.wi v9, v9, 0
-; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, ma
-; LMULMAX1-NEXT: vnsrl.wi v9, v9, 0
-; LMULMAX1-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
-; LMULMAX1-NEXT: vnsrl.wi v8, v8, 0
-; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
-; LMULMAX1-NEXT: vnsrl.wi v8, v8, 0
-; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, ma
-; LMULMAX1-NEXT: vnsrl.wi v8, v8, 0
-; LMULMAX1-NEXT: vsetivli zero, 4, e8, mf2, tu, ma
-; LMULMAX1-NEXT: vslideup.vi v8, v9, 2
-; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
-; LMULMAX1-NEXT: vnsrl.wi v9, v10, 0
-; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
-; LMULMAX1-NEXT: vnsrl.wi v9, v9, 0
-; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, ma
-; LMULMAX1-NEXT: vnsrl.wi v9, v9, 0
-; LMULMAX1-NEXT: vsetivli zero, 6, e8, mf2, tu, ma
-; LMULMAX1-NEXT: vslideup.vi v8, v9, 4
-; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
-; LMULMAX1-NEXT: vnsrl.wi v9, v11, 0
-; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
-; LMULMAX1-NEXT: vnsrl.wi v9, v9, 0
-; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, ma
-; LMULMAX1-NEXT: vnsrl.wi v9, v9, 0
-; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
-; LMULMAX1-NEXT: vslideup.vi v8, v9, 6
-; LMULMAX1-NEXT: vse8.v v8, (a0)
-; LMULMAX1-NEXT: ret
-;
-; LMULMAX4-LABEL: truncstore_v8i64_v8i8:
-; LMULMAX4: # %bb.0:
-; LMULMAX4-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; LMULMAX4-NEXT: vnsrl.wi v12, v8, 0
-; LMULMAX4-NEXT: vsetvli zero, zero, e16, m1, ta, ma
-; LMULMAX4-NEXT: vnsrl.wi v8, v12, 0
-; LMULMAX4-NEXT: vsetvli zero, zero, e8, mf2, ta, ma
-; LMULMAX4-NEXT: vnsrl.wi v8, v8, 0
-; LMULMAX4-NEXT: vse8.v v8, (a0)
-; LMULMAX4-NEXT: ret
+; CHECK-LABEL: truncstore_v8i64_v8i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; CHECK-NEXT: vnsrl.wi v12, v8, 0
+; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, ma
+; CHECK-NEXT: vnsrl.wi v8, v12, 0
+; CHECK-NEXT: vsetvli zero, zero, e8, mf2, ta, ma
+; CHECK-NEXT: vnsrl.wi v8, v8, 0
+; CHECK-NEXT: vse8.v v8, (a0)
+; CHECK-NEXT: ret
%y = trunc <8 x i64> %x to <8 x i8>
store <8 x i8> %y, ptr %z
ret void
}
define void @truncstore_v8i64_v8i16(<8 x i64> %x, ptr %z) {
-; LMULMAX1-LABEL: truncstore_v8i64_v8i16:
-; LMULMAX1: # %bb.0:
-; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
-; LMULMAX1-NEXT: vnsrl.wi v9, v9, 0
-; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
-; LMULMAX1-NEXT: vnsrl.wi v9, v9, 0
-; LMULMAX1-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
-; LMULMAX1-NEXT: vnsrl.wi v8, v8, 0
-; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
-; LMULMAX1-NEXT: vnsrl.wi v8, v8, 0
-; LMULMAX1-NEXT: vsetivli zero, 4, e16, m1, tu, ma
-; LMULMAX1-NEXT: vslideup.vi v8, v9, 2
-; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
-; LMULMAX1-NEXT: vnsrl.wi v9, v10, 0
-; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
-; LMULMAX1-NEXT: vnsrl.wi v9, v9, 0
-; LMULMAX1-NEXT: vsetivli zero, 6, e16, m1, tu, ma
-; LMULMAX1-NEXT: vslideup.vi v8, v9, 4
-; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
-; LMULMAX1-NEXT: vnsrl.wi v9, v11, 0
-; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
-; LMULMAX1-NEXT: vnsrl.wi v9, v9, 0
-; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; LMULMAX1-NEXT: vslideup.vi v8, v9, 6
-; LMULMAX1-NEXT: vse16.v v8, (a0)
-; LMULMAX1-NEXT: ret
-;
-; LMULMAX4-LABEL: truncstore_v8i64_v8i16:
-; LMULMAX4: # %bb.0:
-; LMULMAX4-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; LMULMAX4-NEXT: vnsrl.wi v12, v8, 0
-; LMULMAX4-NEXT: vsetvli zero, zero, e16, m1, ta, ma
-; LMULMAX4-NEXT: vnsrl.wi v8, v12, 0
-; LMULMAX4-NEXT: vse16.v v8, (a0)
-; LMULMAX4-NEXT: ret
+; CHECK-LABEL: truncstore_v8i64_v8i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; CHECK-NEXT: vnsrl.wi v12, v8, 0
+; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, ma
+; CHECK-NEXT: vnsrl.wi v8, v12, 0
+; CHECK-NEXT: vse16.v v8, (a0)
+; CHECK-NEXT: ret
%y = trunc <8 x i64> %x to <8 x i16>
store <8 x i16> %y, ptr %z
ret void
}
define void @truncstore_v8i64_v8i32(<8 x i64> %x, ptr %z) {
-; LMULMAX1-LABEL: truncstore_v8i64_v8i32:
-; LMULMAX1: # %bb.0:
-; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
-; LMULMAX1-NEXT: vnsrl.wi v9, v9, 0
-; LMULMAX1-NEXT: vnsrl.wi v8, v8, 0
-; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; LMULMAX1-NEXT: vslideup.vi v8, v9, 2
-; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
-; LMULMAX1-NEXT: vnsrl.wi v9, v11, 0
-; LMULMAX1-NEXT: vnsrl.wi v10, v10, 0
-; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; LMULMAX1-NEXT: vslideup.vi v10, v9, 2
-; LMULMAX1-NEXT: addi a1, a0, 16
-; LMULMAX1-NEXT: vse32.v v10, (a1)
-; LMULMAX1-NEXT: vse32.v v8, (a0)
-; LMULMAX1-NEXT: ret
-;
-; LMULMAX4-LABEL: truncstore_v8i64_v8i32:
-; LMULMAX4: # %bb.0:
-; LMULMAX4-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; LMULMAX4-NEXT: vnsrl.wi v12, v8, 0
-; LMULMAX4-NEXT: vse32.v v12, (a0)
-; LMULMAX4-NEXT: ret
+; CHECK-LABEL: truncstore_v8i64_v8i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; CHECK-NEXT: vnsrl.wi v12, v8, 0
+; CHECK-NEXT: vse32.v v12, (a0)
+; CHECK-NEXT: ret
%y = trunc <8 x i64> %x to <8 x i32>
store <8 x i32> %y, ptr %z
ret void
}
define void @truncstore_v16i64_v16i8(<16 x i64> %x, ptr %z) {
-; LMULMAX1-LABEL: truncstore_v16i64_v16i8:
-; LMULMAX1: # %bb.0:
-; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
-; LMULMAX1-NEXT: vnsrl.wi v9, v9, 0
-; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
-; LMULMAX1-NEXT: vnsrl.wi v9, v9, 0
-; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, ma
-; LMULMAX1-NEXT: vnsrl.wi v9, v9, 0
-; LMULMAX1-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
-; LMULMAX1-NEXT: vnsrl.wi v8, v8, 0
-; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
-; LMULMAX1-NEXT: vnsrl.wi v8, v8, 0
-; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, ma
-; LMULMAX1-NEXT: vnsrl.wi v8, v8, 0
-; LMULMAX1-NEXT: vsetivli zero, 4, e8, m1, tu, ma
-; LMULMAX1-NEXT: vslideup.vi v8, v9, 2
-; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
-; LMULMAX1-NEXT: vnsrl.wi v9, v10, 0
-; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
-; LMULMAX1-NEXT: vnsrl.wi v9, v9, 0
-; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, ma
-; LMULMAX1-NEXT: vnsrl.wi v9, v9, 0
-; LMULMAX1-NEXT: vsetivli zero, 6, e8, m1, tu, ma
-; LMULMAX1-NEXT: vslideup.vi v8, v9, 4
-; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
-; LMULMAX1-NEXT: vnsrl.wi v9, v11, 0
-; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
-; LMULMAX1-NEXT: vnsrl.wi v9, v9, 0
-; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, ma
-; LMULMAX1-NEXT: vnsrl.wi v9, v9, 0
-; LMULMAX1-NEXT: vsetivli zero, 8, e8, m1, tu, ma
-; LMULMAX1-NEXT: vslideup.vi v8, v9, 6
-; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
-; LMULMAX1-NEXT: vnsrl.wi v9, v12, 0
-; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
-; LMULMAX1-NEXT: vnsrl.wi v9, v9, 0
-; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, ma
-; LMULMAX1-NEXT: vnsrl.wi v9, v9, 0
-; LMULMAX1-NEXT: vsetivli zero, 10, e8, m1, tu, ma
-; LMULMAX1-NEXT: vslideup.vi v8, v9, 8
-; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
-; LMULMAX1-NEXT: vnsrl.wi v9, v13, 0
-; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
-; LMULMAX1-NEXT: vnsrl.wi v9, v9, 0
-; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, ma
-; LMULMAX1-NEXT: vnsrl.wi v9, v9, 0
-; LMULMAX1-NEXT: vsetivli zero, 12, e8, m1, tu, ma
-; LMULMAX1-NEXT: vslideup.vi v8, v9, 10
-; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
-; LMULMAX1-NEXT: vnsrl.wi v9, v14, 0
-; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
-; LMULMAX1-NEXT: vnsrl.wi v9, v9, 0
-; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, ma
-; LMULMAX1-NEXT: vnsrl.wi v9, v9, 0
-; LMULMAX1-NEXT: vsetivli zero, 14, e8, m1, tu, ma
-; LMULMAX1-NEXT: vslideup.vi v8, v9, 12
-; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
-; LMULMAX1-NEXT: vnsrl.wi v9, v15, 0
-; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
-; LMULMAX1-NEXT: vnsrl.wi v9, v9, 0
-; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, ma
-; LMULMAX1-NEXT: vnsrl.wi v9, v9, 0
-; LMULMAX1-NEXT: vsetivli zero, 16, e8, m1, ta, ma
-; LMULMAX1-NEXT: vslideup.vi v8, v9, 14
-; LMULMAX1-NEXT: vse8.v v8, (a0)
-; LMULMAX1-NEXT: ret
-;
-; LMULMAX4-LABEL: truncstore_v16i64_v16i8:
-; LMULMAX4: # %bb.0:
-; LMULMAX4-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; LMULMAX4-NEXT: vnsrl.wi v16, v12, 0
-; LMULMAX4-NEXT: vsetvli zero, zero, e16, m1, ta, ma
-; LMULMAX4-NEXT: vnsrl.wi v12, v16, 0
-; LMULMAX4-NEXT: vsetvli zero, zero, e8, mf2, ta, ma
-; LMULMAX4-NEXT: vnsrl.wi v12, v12, 0
-; LMULMAX4-NEXT: vsetvli zero, zero, e32, m2, ta, ma
-; LMULMAX4-NEXT: vnsrl.wi v14, v8, 0
-; LMULMAX4-NEXT: vsetvli zero, zero, e16, m1, ta, ma
-; LMULMAX4-NEXT: vnsrl.wi v8, v14, 0
-; LMULMAX4-NEXT: vsetvli zero, zero, e8, mf2, ta, ma
-; LMULMAX4-NEXT: vnsrl.wi v8, v8, 0
-; LMULMAX4-NEXT: vsetivli zero, 16, e8, m1, ta, ma
-; LMULMAX4-NEXT: vslideup.vi v8, v12, 8
-; LMULMAX4-NEXT: vse8.v v8, (a0)
-; LMULMAX4-NEXT: ret
+; CHECK-LABEL: truncstore_v16i64_v16i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma
+; CHECK-NEXT: vnsrl.wi v16, v8, 0
+; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma
+; CHECK-NEXT: vnsrl.wi v8, v16, 0
+; CHECK-NEXT: vsetvli zero, zero, e8, m1, ta, ma
+; CHECK-NEXT: vnsrl.wi v10, v8, 0
+; CHECK-NEXT: vse8.v v10, (a0)
+; CHECK-NEXT: ret
%y = trunc <16 x i64> %x to <16 x i8>
store <16 x i8> %y, ptr %z
ret void
}
define void @truncstore_v16i64_v16i16(<16 x i64> %x, ptr %z) {
-; LMULMAX1-LABEL: truncstore_v16i64_v16i16:
-; LMULMAX1: # %bb.0:
-; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
-; LMULMAX1-NEXT: vnsrl.wi v9, v9, 0
-; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
-; LMULMAX1-NEXT: vnsrl.wi v9, v9, 0
-; LMULMAX1-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
-; LMULMAX1-NEXT: vnsrl.wi v8, v8, 0
-; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
-; LMULMAX1-NEXT: vnsrl.wi v8, v8, 0
-; LMULMAX1-NEXT: vsetivli zero, 4, e16, m1, tu, ma
-; LMULMAX1-NEXT: vslideup.vi v8, v9, 2
-; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
-; LMULMAX1-NEXT: vnsrl.wi v9, v10, 0
-; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
-; LMULMAX1-NEXT: vnsrl.wi v9, v9, 0
-; LMULMAX1-NEXT: vsetivli zero, 6, e16, m1, tu, ma
-; LMULMAX1-NEXT: vslideup.vi v8, v9, 4
-; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
-; LMULMAX1-NEXT: vnsrl.wi v9, v11, 0
-; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
-; LMULMAX1-NEXT: vnsrl.wi v9, v9, 0
-; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; LMULMAX1-NEXT: vslideup.vi v8, v9, 6
-; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
-; LMULMAX1-NEXT: vnsrl.wi v9, v13, 0
-; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
-; LMULMAX1-NEXT: vnsrl.wi v9, v9, 0
-; LMULMAX1-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
-; LMULMAX1-NEXT: vnsrl.wi v10, v12, 0
-; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
-; LMULMAX1-NEXT: vnsrl.wi v10, v10, 0
-; LMULMAX1-NEXT: vsetivli zero, 4, e16, m1, tu, ma
-; LMULMAX1-NEXT: vslideup.vi v10, v9, 2
-; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
-; LMULMAX1-NEXT: vnsrl.wi v9, v14, 0
-; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
-; LMULMAX1-NEXT: vnsrl.wi v9, v9, 0
-; LMULMAX1-NEXT: vsetivli zero, 6, e16, m1, tu, ma
-; LMULMAX1-NEXT: vslideup.vi v10, v9, 4
-; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
-; LMULMAX1-NEXT: vnsrl.wi v9, v15, 0
-; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
-; LMULMAX1-NEXT: vnsrl.wi v9, v9, 0
-; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; LMULMAX1-NEXT: vslideup.vi v10, v9, 6
-; LMULMAX1-NEXT: addi a1, a0, 16
-; LMULMAX1-NEXT: vse16.v v10, (a1)
-; LMULMAX1-NEXT: vse16.v v8, (a0)
-; LMULMAX1-NEXT: ret
-;
-; LMULMAX4-LABEL: truncstore_v16i64_v16i16:
-; LMULMAX4: # %bb.0:
-; LMULMAX4-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; LMULMAX4-NEXT: vnsrl.wi v16, v12, 0
-; LMULMAX4-NEXT: vsetvli zero, zero, e16, m1, ta, ma
-; LMULMAX4-NEXT: vnsrl.wi v12, v16, 0
-; LMULMAX4-NEXT: vsetvli zero, zero, e32, m2, ta, ma
-; LMULMAX4-NEXT: vnsrl.wi v14, v8, 0
-; LMULMAX4-NEXT: vsetvli zero, zero, e16, m1, ta, ma
-; LMULMAX4-NEXT: vnsrl.wi v8, v14, 0
-; LMULMAX4-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; LMULMAX4-NEXT: vslideup.vi v8, v12, 8
-; LMULMAX4-NEXT: vse16.v v8, (a0)
-; LMULMAX4-NEXT: ret
+; CHECK-LABEL: truncstore_v16i64_v16i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma
+; CHECK-NEXT: vnsrl.wi v16, v8, 0
+; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma
+; CHECK-NEXT: vnsrl.wi v8, v16, 0
+; CHECK-NEXT: vse16.v v8, (a0)
+; CHECK-NEXT: ret
%y = trunc <16 x i64> %x to <16 x i16>
store <16 x i16> %y, ptr %z
ret void
}
define void @truncstore_v16i64_v16i32(<16 x i64> %x, ptr %z) {
-; LMULMAX1-LABEL: truncstore_v16i64_v16i32:
-; LMULMAX1: # %bb.0:
-; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
-; LMULMAX1-NEXT: vnsrl.wi v9, v9, 0
-; LMULMAX1-NEXT: vnsrl.wi v8, v8, 0
-; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; LMULMAX1-NEXT: vslideup.vi v8, v9, 2
-; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
-; LMULMAX1-NEXT: vnsrl.wi v9, v11, 0
-; LMULMAX1-NEXT: vnsrl.wi v10, v10, 0
-; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; LMULMAX1-NEXT: vslideup.vi v10, v9, 2
-; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
-; LMULMAX1-NEXT: vnsrl.wi v9, v13, 0
-; LMULMAX1-NEXT: vnsrl.wi v11, v12, 0
-; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; LMULMAX1-NEXT: vslideup.vi v11, v9, 2
-; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
-; LMULMAX1-NEXT: vnsrl.wi v9, v15, 0
-; LMULMAX1-NEXT: vnsrl.wi v12, v14, 0
-; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; LMULMAX1-NEXT: vslideup.vi v12, v9, 2
-; LMULMAX1-NEXT: addi a1, a0, 48
-; LMULMAX1-NEXT: vse32.v v12, (a1)
-; LMULMAX1-NEXT: addi a1, a0, 32
-; LMULMAX1-NEXT: vse32.v v11, (a1)
-; LMULMAX1-NEXT: addi a1, a0, 16
-; LMULMAX1-NEXT: vse32.v v10, (a1)
-; LMULMAX1-NEXT: vse32.v v8, (a0)
-; LMULMAX1-NEXT: ret
-;
-; LMULMAX4-LABEL: truncstore_v16i64_v16i32:
-; LMULMAX4: # %bb.0:
-; LMULMAX4-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; LMULMAX4-NEXT: vnsrl.wi v16, v12, 0
-; LMULMAX4-NEXT: vnsrl.wi v12, v8, 0
-; LMULMAX4-NEXT: vsetivli zero, 16, e32, m4, ta, ma
-; LMULMAX4-NEXT: vslideup.vi v12, v16, 8
-; LMULMAX4-NEXT: vse32.v v12, (a0)
-; LMULMAX4-NEXT: ret
+; CHECK-LABEL: truncstore_v16i64_v16i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma
+; CHECK-NEXT: vnsrl.wi v16, v8, 0
+; CHECK-NEXT: vse32.v v16, (a0)
+; CHECK-NEXT: ret
%y = trunc <16 x i64> %x to <16 x i32>
store <16 x i32> %y, ptr %z
ret void
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract-subvector.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract-subvector.ll
index 8b51a38..c49b1a7 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract-subvector.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract-subvector.ll
@@ -76,10 +76,8 @@ define void @extract_v1i32_v8i32_4(ptr %x, ptr %y) {
; CHECK-KNOWNVLEN128-LABEL: extract_v1i32_v8i32_4:
; CHECK-KNOWNVLEN128: # %bb.0:
; CHECK-KNOWNVLEN128-NEXT: vl2re32.v v8, (a0)
-; CHECK-KNOWNVLEN128-NEXT: vsetivli zero, 1, e32, m2, ta, ma
-; CHECK-KNOWNVLEN128-NEXT: vslidedown.vi v8, v8, 4
; CHECK-KNOWNVLEN128-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
-; CHECK-KNOWNVLEN128-NEXT: vse32.v v8, (a1)
+; CHECK-KNOWNVLEN128-NEXT: vse32.v v9, (a1)
; CHECK-KNOWNVLEN128-NEXT: ret
%a = load <8 x i32>, ptr %x
%c = call <1 x i32> @llvm.vector.extract.v1i32.v8i32(<8 x i32> %a, i64 4)
@@ -101,8 +99,8 @@ define void @extract_v1i32_v8i32_5(ptr %x, ptr %y) {
; CHECK-KNOWNVLEN128-LABEL: extract_v1i32_v8i32_5:
; CHECK-KNOWNVLEN128: # %bb.0:
; CHECK-KNOWNVLEN128-NEXT: vl2re32.v v8, (a0)
-; CHECK-KNOWNVLEN128-NEXT: vsetivli zero, 1, e32, m2, ta, ma
-; CHECK-KNOWNVLEN128-NEXT: vslidedown.vi v8, v8, 5
+; CHECK-KNOWNVLEN128-NEXT: vsetivli zero, 1, e32, m1, ta, ma
+; CHECK-KNOWNVLEN128-NEXT: vslidedown.vi v8, v9, 1
; CHECK-KNOWNVLEN128-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
; CHECK-KNOWNVLEN128-NEXT: vse32.v v8, (a1)
; CHECK-KNOWNVLEN128-NEXT: ret
@@ -172,10 +170,8 @@ define void @extract_v2i32_v8i32_4(ptr %x, ptr %y) {
; CHECK-KNOWNVLEN128-LABEL: extract_v2i32_v8i32_4:
; CHECK-KNOWNVLEN128: # %bb.0:
; CHECK-KNOWNVLEN128-NEXT: vl2re32.v v8, (a0)
-; CHECK-KNOWNVLEN128-NEXT: vsetivli zero, 2, e32, m2, ta, ma
-; CHECK-KNOWNVLEN128-NEXT: vslidedown.vi v8, v8, 4
; CHECK-KNOWNVLEN128-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
-; CHECK-KNOWNVLEN128-NEXT: vse32.v v8, (a1)
+; CHECK-KNOWNVLEN128-NEXT: vse32.v v9, (a1)
; CHECK-KNOWNVLEN128-NEXT: ret
%a = load <8 x i32>, ptr %x
%c = call <2 x i32> @llvm.vector.extract.v2i32.v8i32(<8 x i32> %a, i64 4)
@@ -197,8 +193,8 @@ define void @extract_v2i32_v8i32_6(ptr %x, ptr %y) {
; CHECK-KNOWNVLEN128-LABEL: extract_v2i32_v8i32_6:
; CHECK-KNOWNVLEN128: # %bb.0:
; CHECK-KNOWNVLEN128-NEXT: vl2re32.v v8, (a0)
-; CHECK-KNOWNVLEN128-NEXT: vsetivli zero, 2, e32, m2, ta, ma
-; CHECK-KNOWNVLEN128-NEXT: vslidedown.vi v8, v8, 6
+; CHECK-KNOWNVLEN128-NEXT: vsetivli zero, 2, e32, m1, ta, ma
+; CHECK-KNOWNVLEN128-NEXT: vslidedown.vi v8, v9, 2
; CHECK-KNOWNVLEN128-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
; CHECK-KNOWNVLEN128-NEXT: vse32.v v8, (a1)
; CHECK-KNOWNVLEN128-NEXT: ret
@@ -234,39 +230,59 @@ define void @extract_v2i32_nxv16i32_2(<vscale x 16 x i32> %x, ptr %y) {
}
define void @extract_v2i32_nxv16i32_4(<vscale x 16 x i32> %x, ptr %y) {
-; CHECK-LABEL: extract_v2i32_nxv16i32_4:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 2, e32, m2, ta, ma
-; CHECK-NEXT: vslidedown.vi v8, v8, 4
-; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
-; CHECK-NEXT: vse32.v v8, (a0)
-; CHECK-NEXT: ret
+; CHECK-V-LABEL: extract_v2i32_nxv16i32_4:
+; CHECK-V: # %bb.0:
+; CHECK-V-NEXT: vsetivli zero, 2, e32, m2, ta, ma
+; CHECK-V-NEXT: vslidedown.vi v8, v8, 4
+; CHECK-V-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
+; CHECK-V-NEXT: vse32.v v8, (a0)
+; CHECK-V-NEXT: ret
+;
+; CHECK-KNOWNVLEN128-LABEL: extract_v2i32_nxv16i32_4:
+; CHECK-KNOWNVLEN128: # %bb.0:
+; CHECK-KNOWNVLEN128-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
+; CHECK-KNOWNVLEN128-NEXT: vse32.v v9, (a0)
+; CHECK-KNOWNVLEN128-NEXT: ret
%c = call <2 x i32> @llvm.vector.extract.v2i32.nxv16i32(<vscale x 16 x i32> %x, i64 4)
store <2 x i32> %c, ptr %y
ret void
}
define void @extract_v2i32_nxv16i32_6(<vscale x 16 x i32> %x, ptr %y) {
-; CHECK-LABEL: extract_v2i32_nxv16i32_6:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 2, e32, m2, ta, ma
-; CHECK-NEXT: vslidedown.vi v8, v8, 6
-; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
-; CHECK-NEXT: vse32.v v8, (a0)
-; CHECK-NEXT: ret
+; CHECK-V-LABEL: extract_v2i32_nxv16i32_6:
+; CHECK-V: # %bb.0:
+; CHECK-V-NEXT: vsetivli zero, 2, e32, m2, ta, ma
+; CHECK-V-NEXT: vslidedown.vi v8, v8, 6
+; CHECK-V-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
+; CHECK-V-NEXT: vse32.v v8, (a0)
+; CHECK-V-NEXT: ret
+;
+; CHECK-KNOWNVLEN128-LABEL: extract_v2i32_nxv16i32_6:
+; CHECK-KNOWNVLEN128: # %bb.0:
+; CHECK-KNOWNVLEN128-NEXT: vsetivli zero, 2, e32, m1, ta, ma
+; CHECK-KNOWNVLEN128-NEXT: vslidedown.vi v8, v9, 2
+; CHECK-KNOWNVLEN128-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
+; CHECK-KNOWNVLEN128-NEXT: vse32.v v8, (a0)
+; CHECK-KNOWNVLEN128-NEXT: ret
%c = call <2 x i32> @llvm.vector.extract.v2i32.nxv16i32(<vscale x 16 x i32> %x, i64 6)
store <2 x i32> %c, ptr %y
ret void
}
define void @extract_v2i32_nxv16i32_8(<vscale x 16 x i32> %x, ptr %y) {
-; CHECK-LABEL: extract_v2i32_nxv16i32_8:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 2, e32, m4, ta, ma
-; CHECK-NEXT: vslidedown.vi v8, v8, 8
-; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
-; CHECK-NEXT: vse32.v v8, (a0)
-; CHECK-NEXT: ret
+; CHECK-V-LABEL: extract_v2i32_nxv16i32_8:
+; CHECK-V: # %bb.0:
+; CHECK-V-NEXT: vsetivli zero, 2, e32, m4, ta, ma
+; CHECK-V-NEXT: vslidedown.vi v8, v8, 8
+; CHECK-V-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
+; CHECK-V-NEXT: vse32.v v8, (a0)
+; CHECK-V-NEXT: ret
+;
+; CHECK-KNOWNVLEN128-LABEL: extract_v2i32_nxv16i32_8:
+; CHECK-KNOWNVLEN128: # %bb.0:
+; CHECK-KNOWNVLEN128-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
+; CHECK-KNOWNVLEN128-NEXT: vse32.v v10, (a0)
+; CHECK-KNOWNVLEN128-NEXT: ret
%c = call <2 x i32> @llvm.vector.extract.v2i32.nxv16i32(<vscale x 16 x i32> %x, i64 8)
store <2 x i32> %c, ptr %y
ret void
@@ -333,9 +349,7 @@ define void @extract_v8i32_nxv16i32_8(<vscale x 16 x i32> %x, ptr %y) {
;
; CHECK-KNOWNVLEN128-LABEL: extract_v8i32_nxv16i32_8:
; CHECK-KNOWNVLEN128: # %bb.0:
-; CHECK-KNOWNVLEN128-NEXT: vsetivli zero, 8, e32, m4, ta, ma
-; CHECK-KNOWNVLEN128-NEXT: vslidedown.vi v8, v8, 8
-; CHECK-KNOWNVLEN128-NEXT: vs2r.v v8, (a0)
+; CHECK-KNOWNVLEN128-NEXT: vs2r.v v10, (a0)
; CHECK-KNOWNVLEN128-NEXT: ret
%c = call <8 x i32> @llvm.vector.extract.v8i32.nxv16i32(<vscale x 16 x i32> %x, i64 8)
store <8 x i32> %c, ptr %y
@@ -611,9 +625,8 @@ define void @extract_v2i1_v64i1_42(ptr %x, ptr %y) {
; CHECK-KNOWNVLEN128-NEXT: vlm.v v0, (a0)
; CHECK-KNOWNVLEN128-NEXT: vmv.v.i v8, 0
; CHECK-KNOWNVLEN128-NEXT: vmerge.vim v8, v8, 1, v0
-; CHECK-KNOWNVLEN128-NEXT: li a0, 42
-; CHECK-KNOWNVLEN128-NEXT: vsetivli zero, 2, e8, m4, ta, ma
-; CHECK-KNOWNVLEN128-NEXT: vslidedown.vx v8, v8, a0
+; CHECK-KNOWNVLEN128-NEXT: vsetivli zero, 2, e8, m1, ta, ma
+; CHECK-KNOWNVLEN128-NEXT: vslidedown.vi v8, v10, 10
; CHECK-KNOWNVLEN128-NEXT: vsetivli zero, 2, e8, mf8, ta, ma
; CHECK-KNOWNVLEN128-NEXT: vmsne.vi v0, v8, 0
; CHECK-KNOWNVLEN128-NEXT: vmv.v.i v8, 0
@@ -741,51 +754,91 @@ define void @extract_v2i1_nxv64i1_2(<vscale x 64 x i1> %x, ptr %y) {
}
define void @extract_v2i1_nxv64i1_42(<vscale x 64 x i1> %x, ptr %y) {
-; CHECK-LABEL: extract_v2i1_nxv64i1_42:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, ma
-; CHECK-NEXT: vmv.v.i v8, 0
-; CHECK-NEXT: vmerge.vim v8, v8, 1, v0
-; CHECK-NEXT: li a1, 42
-; CHECK-NEXT: vsetivli zero, 2, e8, m4, ta, ma
-; CHECK-NEXT: vslidedown.vx v8, v8, a1
-; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, ma
-; CHECK-NEXT: vmsne.vi v0, v8, 0
-; CHECK-NEXT: vmv.v.i v8, 0
-; CHECK-NEXT: vmerge.vim v8, v8, 1, v0
-; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
-; CHECK-NEXT: vmv.v.i v9, 0
-; CHECK-NEXT: vsetivli zero, 2, e8, mf2, tu, ma
-; CHECK-NEXT: vmv.v.v v9, v8
-; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
-; CHECK-NEXT: vmsne.vi v8, v9, 0
-; CHECK-NEXT: vsm.v v8, (a0)
-; CHECK-NEXT: ret
+; CHECK-V-LABEL: extract_v2i1_nxv64i1_42:
+; CHECK-V: # %bb.0:
+; CHECK-V-NEXT: vsetvli a1, zero, e8, m8, ta, ma
+; CHECK-V-NEXT: vmv.v.i v8, 0
+; CHECK-V-NEXT: vmerge.vim v8, v8, 1, v0
+; CHECK-V-NEXT: li a1, 42
+; CHECK-V-NEXT: vsetivli zero, 2, e8, m4, ta, ma
+; CHECK-V-NEXT: vslidedown.vx v8, v8, a1
+; CHECK-V-NEXT: vsetivli zero, 2, e8, mf8, ta, ma
+; CHECK-V-NEXT: vmsne.vi v0, v8, 0
+; CHECK-V-NEXT: vmv.v.i v8, 0
+; CHECK-V-NEXT: vmerge.vim v8, v8, 1, v0
+; CHECK-V-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
+; CHECK-V-NEXT: vmv.v.i v9, 0
+; CHECK-V-NEXT: vsetivli zero, 2, e8, mf2, tu, ma
+; CHECK-V-NEXT: vmv.v.v v9, v8
+; CHECK-V-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
+; CHECK-V-NEXT: vmsne.vi v8, v9, 0
+; CHECK-V-NEXT: vsm.v v8, (a0)
+; CHECK-V-NEXT: ret
+;
+; CHECK-KNOWNVLEN128-LABEL: extract_v2i1_nxv64i1_42:
+; CHECK-KNOWNVLEN128: # %bb.0:
+; CHECK-KNOWNVLEN128-NEXT: vsetvli a1, zero, e8, m8, ta, ma
+; CHECK-KNOWNVLEN128-NEXT: vmv.v.i v8, 0
+; CHECK-KNOWNVLEN128-NEXT: vmerge.vim v8, v8, 1, v0
+; CHECK-KNOWNVLEN128-NEXT: vsetivli zero, 2, e8, m1, ta, ma
+; CHECK-KNOWNVLEN128-NEXT: vslidedown.vi v8, v10, 10
+; CHECK-KNOWNVLEN128-NEXT: vsetivli zero, 2, e8, mf8, ta, ma
+; CHECK-KNOWNVLEN128-NEXT: vmsne.vi v0, v8, 0
+; CHECK-KNOWNVLEN128-NEXT: vmv.v.i v8, 0
+; CHECK-KNOWNVLEN128-NEXT: vmerge.vim v8, v8, 1, v0
+; CHECK-KNOWNVLEN128-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
+; CHECK-KNOWNVLEN128-NEXT: vmv.v.i v9, 0
+; CHECK-KNOWNVLEN128-NEXT: vsetivli zero, 2, e8, mf2, tu, ma
+; CHECK-KNOWNVLEN128-NEXT: vmv.v.v v9, v8
+; CHECK-KNOWNVLEN128-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
+; CHECK-KNOWNVLEN128-NEXT: vmsne.vi v8, v9, 0
+; CHECK-KNOWNVLEN128-NEXT: vsm.v v8, (a0)
+; CHECK-KNOWNVLEN128-NEXT: ret
%c = call <2 x i1> @llvm.vector.extract.v2i1.nxv64i1(<vscale x 64 x i1> %x, i64 42)
store <2 x i1> %c, ptr %y
ret void
}
define void @extract_v2i1_nxv32i1_26(<vscale x 32 x i1> %x, ptr %y) {
-; CHECK-LABEL: extract_v2i1_nxv32i1_26:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, ma
-; CHECK-NEXT: vmv.v.i v8, 0
-; CHECK-NEXT: vmerge.vim v8, v8, 1, v0
-; CHECK-NEXT: vsetivli zero, 2, e8, m2, ta, ma
-; CHECK-NEXT: vslidedown.vi v8, v8, 26
-; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, ma
-; CHECK-NEXT: vmsne.vi v0, v8, 0
-; CHECK-NEXT: vmv.v.i v8, 0
-; CHECK-NEXT: vmerge.vim v8, v8, 1, v0
-; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
-; CHECK-NEXT: vmv.v.i v9, 0
-; CHECK-NEXT: vsetivli zero, 2, e8, mf2, tu, ma
-; CHECK-NEXT: vmv.v.v v9, v8
-; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
-; CHECK-NEXT: vmsne.vi v8, v9, 0
-; CHECK-NEXT: vsm.v v8, (a0)
-; CHECK-NEXT: ret
+; CHECK-V-LABEL: extract_v2i1_nxv32i1_26:
+; CHECK-V: # %bb.0:
+; CHECK-V-NEXT: vsetvli a1, zero, e8, m4, ta, ma
+; CHECK-V-NEXT: vmv.v.i v8, 0
+; CHECK-V-NEXT: vmerge.vim v8, v8, 1, v0
+; CHECK-V-NEXT: vsetivli zero, 2, e8, m2, ta, ma
+; CHECK-V-NEXT: vslidedown.vi v8, v8, 26
+; CHECK-V-NEXT: vsetivli zero, 2, e8, mf8, ta, ma
+; CHECK-V-NEXT: vmsne.vi v0, v8, 0
+; CHECK-V-NEXT: vmv.v.i v8, 0
+; CHECK-V-NEXT: vmerge.vim v8, v8, 1, v0
+; CHECK-V-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
+; CHECK-V-NEXT: vmv.v.i v9, 0
+; CHECK-V-NEXT: vsetivli zero, 2, e8, mf2, tu, ma
+; CHECK-V-NEXT: vmv.v.v v9, v8
+; CHECK-V-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
+; CHECK-V-NEXT: vmsne.vi v8, v9, 0
+; CHECK-V-NEXT: vsm.v v8, (a0)
+; CHECK-V-NEXT: ret
+;
+; CHECK-KNOWNVLEN128-LABEL: extract_v2i1_nxv32i1_26:
+; CHECK-KNOWNVLEN128: # %bb.0:
+; CHECK-KNOWNVLEN128-NEXT: vsetvli a1, zero, e8, m4, ta, ma
+; CHECK-KNOWNVLEN128-NEXT: vmv.v.i v8, 0
+; CHECK-KNOWNVLEN128-NEXT: vmerge.vim v8, v8, 1, v0
+; CHECK-KNOWNVLEN128-NEXT: vsetivli zero, 2, e8, m1, ta, ma
+; CHECK-KNOWNVLEN128-NEXT: vslidedown.vi v8, v9, 10
+; CHECK-KNOWNVLEN128-NEXT: vsetivli zero, 2, e8, mf8, ta, ma
+; CHECK-KNOWNVLEN128-NEXT: vmsne.vi v0, v8, 0
+; CHECK-KNOWNVLEN128-NEXT: vmv.v.i v8, 0
+; CHECK-KNOWNVLEN128-NEXT: vmerge.vim v8, v8, 1, v0
+; CHECK-KNOWNVLEN128-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
+; CHECK-KNOWNVLEN128-NEXT: vmv.v.i v9, 0
+; CHECK-KNOWNVLEN128-NEXT: vsetivli zero, 2, e8, mf2, tu, ma
+; CHECK-KNOWNVLEN128-NEXT: vmv.v.v v9, v8
+; CHECK-KNOWNVLEN128-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
+; CHECK-KNOWNVLEN128-NEXT: vmsne.vi v8, v9, 0
+; CHECK-KNOWNVLEN128-NEXT: vsm.v v8, (a0)
+; CHECK-KNOWNVLEN128-NEXT: ret
%c = call <2 x i1> @llvm.vector.extract.v2i1.nxv32i1(<vscale x 32 x i1> %x, i64 26)
store <2 x i1> %c, ptr %y
ret void
@@ -804,6 +857,16 @@ define void @extract_v8i1_nxv32i1_16(<vscale x 32 x i1> %x, ptr %y) {
ret void
}
+define <1 x i64> @extract_v1i64_v2i64_1(<2 x i64> %x) {
+; CHECK-LABEL: extract_v1i64_v2i64_1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma
+; CHECK-NEXT: vslidedown.vi v8, v8, 1
+; CHECK-NEXT: ret
+ %v = call <1 x i64> @llvm.vector.extract.v1i64.v2i64(<2 x i64> %x, i64 1)
+ ret <1 x i64> %v
+}
+
declare <2 x i1> @llvm.vector.extract.v2i1.v64i1(<64 x i1> %vec, i64 %idx)
declare <8 x i1> @llvm.vector.extract.v8i1.v64i1(<64 x i1> %vec, i64 %idx)
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-buildvec.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-buildvec.ll
index a2bd862..8e214e4 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-buildvec.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-buildvec.ll
@@ -1399,15 +1399,17 @@ define <2 x double> @vid_step2_v2f64() {
define <8 x float> @buildvec_v8f32_zvl256(float %e0, float %e1, float %e2, float %e3, float %e4, float %e5, float %e6, float %e7) vscale_range(4, 128) {
; CHECK-LABEL: buildvec_v8f32_zvl256:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 8, e32, m1, ta, ma
+; CHECK-NEXT: vsetivli zero, 8, e32, m1, ta, mu
; CHECK-NEXT: vfmv.v.f v8, fa0
; CHECK-NEXT: vfslide1down.vf v8, v8, fa1
; CHECK-NEXT: vfslide1down.vf v8, v8, fa2
-; CHECK-NEXT: vfslide1down.vf v8, v8, fa3
-; CHECK-NEXT: vfslide1down.vf v8, v8, fa4
+; CHECK-NEXT: vfslide1down.vf v9, v8, fa3
+; CHECK-NEXT: vfmv.v.f v8, fa4
; CHECK-NEXT: vfslide1down.vf v8, v8, fa5
; CHECK-NEXT: vfslide1down.vf v8, v8, fa6
+; CHECK-NEXT: vmv.v.i v0, 15
; CHECK-NEXT: vfslide1down.vf v8, v8, fa7
+; CHECK-NEXT: vslidedown.vi v8, v9, 4, v0.t
; CHECK-NEXT: ret
%v0 = insertelement <8 x float> poison, float %e0, i64 0
%v1 = insertelement <8 x float> %v0, float %e1, i64 1
@@ -1448,15 +1450,17 @@ define <8 x double> @buildvec_v8f64_zvl256(double %e0, double %e1, double %e2, d
define <8 x double> @buildvec_v8f64_zvl512(double %e0, double %e1, double %e2, double %e3, double %e4, double %e5, double %e6, double %e7) vscale_range(8, 128) {
; CHECK-LABEL: buildvec_v8f64_zvl512:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 8, e64, m1, ta, ma
+; CHECK-NEXT: vsetivli zero, 8, e64, m1, ta, mu
; CHECK-NEXT: vfmv.v.f v8, fa0
; CHECK-NEXT: vfslide1down.vf v8, v8, fa1
; CHECK-NEXT: vfslide1down.vf v8, v8, fa2
-; CHECK-NEXT: vfslide1down.vf v8, v8, fa3
-; CHECK-NEXT: vfslide1down.vf v8, v8, fa4
+; CHECK-NEXT: vfslide1down.vf v9, v8, fa3
+; CHECK-NEXT: vfmv.v.f v8, fa4
; CHECK-NEXT: vfslide1down.vf v8, v8, fa5
; CHECK-NEXT: vfslide1down.vf v8, v8, fa6
+; CHECK-NEXT: vmv.v.i v0, 15
; CHECK-NEXT: vfslide1down.vf v8, v8, fa7
+; CHECK-NEXT: vslidedown.vi v8, v9, 4, v0.t
; CHECK-NEXT: ret
%v0 = insertelement <8 x double> poison, double %e0, i64 0
%v1 = insertelement <8 x double> %v0, double %e1, i64 1
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-conv.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-conv.ll
index 88a86bb..d0dc70f 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-conv.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-conv.ll
@@ -1,8 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -target-abi=ilp32d -mattr=+v,+zfh,+zvfh,+f,+d -riscv-v-fixed-length-vector-lmul-max=8 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,LMULMAX8
-; RUN: llc -mtriple=riscv64 -target-abi=lp64d -mattr=+v,+zfh,+zvfh,+f,+d -riscv-v-fixed-length-vector-lmul-max=8 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,LMULMAX8
-; RUN: llc -mtriple=riscv32 -target-abi=ilp32d -mattr=+v,+zfh,+zvfh,+f,+d -riscv-v-fixed-length-vector-lmul-max=1 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,LMULMAX1
-; RUN: llc -mtriple=riscv64 -target-abi=lp64d -mattr=+v,+zfh,+zvfh,+f,+d -riscv-v-fixed-length-vector-lmul-max=1 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,LMULMAX1
+; RUN: llc -mtriple=riscv32 -target-abi=ilp32d -mattr=+v,+zfh,+zvfh,+f,+d -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -mtriple=riscv64 -target-abi=lp64d -mattr=+v,+zfh,+zvfh,+f,+d -verify-machineinstrs < %s | FileCheck %s
define void @fpext_v2f16_v2f32(ptr %x, ptr %y) {
; CHECK-LABEL: fpext_v2f16_v2f32:
@@ -35,28 +33,13 @@ define void @fpext_v2f16_v2f64(ptr %x, ptr %y) {
}
define void @fpext_v8f16_v8f32(ptr %x, ptr %y) {
-; LMULMAX8-LABEL: fpext_v8f16_v8f32:
-; LMULMAX8: # %bb.0:
-; LMULMAX8-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; LMULMAX8-NEXT: vle16.v v8, (a0)
-; LMULMAX8-NEXT: vfwcvt.f.f.v v10, v8
-; LMULMAX8-NEXT: vse32.v v10, (a1)
-; LMULMAX8-NEXT: ret
-;
-; LMULMAX1-LABEL: fpext_v8f16_v8f32:
-; LMULMAX1: # %bb.0:
-; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; LMULMAX1-NEXT: vle16.v v8, (a0)
-; LMULMAX1-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
-; LMULMAX1-NEXT: vfwcvt.f.f.v v9, v8
-; LMULMAX1-NEXT: vsetivli zero, 4, e16, m1, ta, ma
-; LMULMAX1-NEXT: vslidedown.vi v8, v8, 4
-; LMULMAX1-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
-; LMULMAX1-NEXT: vfwcvt.f.f.v v10, v8
-; LMULMAX1-NEXT: addi a0, a1, 16
-; LMULMAX1-NEXT: vse32.v v10, (a0)
-; LMULMAX1-NEXT: vse32.v v9, (a1)
-; LMULMAX1-NEXT: ret
+; CHECK-LABEL: fpext_v8f16_v8f32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT: vle16.v v8, (a0)
+; CHECK-NEXT: vfwcvt.f.f.v v10, v8
+; CHECK-NEXT: vse32.v v10, (a1)
+; CHECK-NEXT: ret
%a = load <8 x half>, ptr %x
%d = fpext <8 x half> %a to <8 x float>
store <8 x float> %d, ptr %y
@@ -64,50 +47,15 @@ define void @fpext_v8f16_v8f32(ptr %x, ptr %y) {
}
define void @fpext_v8f16_v8f64(ptr %x, ptr %y) {
-; LMULMAX8-LABEL: fpext_v8f16_v8f64:
-; LMULMAX8: # %bb.0:
-; LMULMAX8-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; LMULMAX8-NEXT: vle16.v v8, (a0)
-; LMULMAX8-NEXT: vfwcvt.f.f.v v10, v8
-; LMULMAX8-NEXT: vsetvli zero, zero, e32, m2, ta, ma
-; LMULMAX8-NEXT: vfwcvt.f.f.v v12, v10
-; LMULMAX8-NEXT: vse64.v v12, (a1)
-; LMULMAX8-NEXT: ret
-;
-; LMULMAX1-LABEL: fpext_v8f16_v8f64:
-; LMULMAX1: # %bb.0:
-; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; LMULMAX1-NEXT: vle16.v v8, (a0)
-; LMULMAX1-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
-; LMULMAX1-NEXT: vslidedown.vi v9, v8, 2
-; LMULMAX1-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
-; LMULMAX1-NEXT: vfwcvt.f.f.v v10, v9
-; LMULMAX1-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
-; LMULMAX1-NEXT: vfwcvt.f.f.v v9, v10
-; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
-; LMULMAX1-NEXT: vfwcvt.f.f.v v10, v8
-; LMULMAX1-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
-; LMULMAX1-NEXT: vfwcvt.f.f.v v11, v10
-; LMULMAX1-NEXT: vsetivli zero, 4, e16, m1, ta, ma
-; LMULMAX1-NEXT: vslidedown.vi v8, v8, 4
-; LMULMAX1-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
-; LMULMAX1-NEXT: vfwcvt.f.f.v v10, v8
-; LMULMAX1-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
-; LMULMAX1-NEXT: vfwcvt.f.f.v v12, v10
-; LMULMAX1-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
-; LMULMAX1-NEXT: vslidedown.vi v8, v8, 2
-; LMULMAX1-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
-; LMULMAX1-NEXT: vfwcvt.f.f.v v10, v8
-; LMULMAX1-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
-; LMULMAX1-NEXT: vfwcvt.f.f.v v8, v10
-; LMULMAX1-NEXT: addi a0, a1, 48
-; LMULMAX1-NEXT: vse64.v v8, (a0)
-; LMULMAX1-NEXT: addi a0, a1, 32
-; LMULMAX1-NEXT: vse64.v v12, (a0)
-; LMULMAX1-NEXT: vse64.v v11, (a1)
-; LMULMAX1-NEXT: addi a1, a1, 16
-; LMULMAX1-NEXT: vse64.v v9, (a1)
-; LMULMAX1-NEXT: ret
+; CHECK-LABEL: fpext_v8f16_v8f64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT: vle16.v v8, (a0)
+; CHECK-NEXT: vfwcvt.f.f.v v10, v8
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; CHECK-NEXT: vfwcvt.f.f.v v12, v10
+; CHECK-NEXT: vse64.v v12, (a1)
+; CHECK-NEXT: ret
%a = load <8 x half>, ptr %x
%d = fpext <8 x half> %a to <8 x double>
store <8 x double> %d, ptr %y
@@ -145,26 +93,13 @@ define void @fpround_v2f64_v2f16(ptr %x, ptr %y) {
}
define void @fpround_v8f32_v8f16(ptr %x, ptr %y) {
-; LMULMAX8-LABEL: fpround_v8f32_v8f16:
-; LMULMAX8: # %bb.0:
-; LMULMAX8-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; LMULMAX8-NEXT: vle32.v v8, (a0)
-; LMULMAX8-NEXT: vfncvt.f.f.w v10, v8
-; LMULMAX8-NEXT: vse16.v v10, (a1)
-; LMULMAX8-NEXT: ret
-;
-; LMULMAX1-LABEL: fpround_v8f32_v8f16:
-; LMULMAX1: # %bb.0:
-; LMULMAX1-NEXT: addi a2, a0, 16
-; LMULMAX1-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
-; LMULMAX1-NEXT: vle32.v v8, (a0)
-; LMULMAX1-NEXT: vle32.v v9, (a2)
-; LMULMAX1-NEXT: vfncvt.f.f.w v10, v8
-; LMULMAX1-NEXT: vfncvt.f.f.w v8, v9
-; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; LMULMAX1-NEXT: vslideup.vi v10, v8, 4
-; LMULMAX1-NEXT: vse16.v v10, (a1)
-; LMULMAX1-NEXT: ret
+; CHECK-LABEL: fpround_v8f32_v8f16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT: vle32.v v8, (a0)
+; CHECK-NEXT: vfncvt.f.f.w v10, v8
+; CHECK-NEXT: vse16.v v10, (a1)
+; CHECK-NEXT: ret
%a = load <8 x float>, ptr %x
%d = fptrunc <8 x float> %a to <8 x half>
store <8 x half> %d, ptr %y
@@ -172,49 +107,15 @@ define void @fpround_v8f32_v8f16(ptr %x, ptr %y) {
}
define void @fpround_v8f64_v8f16(ptr %x, ptr %y) {
-; LMULMAX8-LABEL: fpround_v8f64_v8f16:
-; LMULMAX8: # %bb.0:
-; LMULMAX8-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; LMULMAX8-NEXT: vle64.v v8, (a0)
-; LMULMAX8-NEXT: vfncvt.rod.f.f.w v12, v8
-; LMULMAX8-NEXT: vsetvli zero, zero, e16, m1, ta, ma
-; LMULMAX8-NEXT: vfncvt.f.f.w v8, v12
-; LMULMAX8-NEXT: vse16.v v8, (a1)
-; LMULMAX8-NEXT: ret
-;
-; LMULMAX1-LABEL: fpround_v8f64_v8f16:
-; LMULMAX1: # %bb.0:
-; LMULMAX1-NEXT: addi a2, a0, 48
-; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
-; LMULMAX1-NEXT: vle64.v v8, (a2)
-; LMULMAX1-NEXT: addi a2, a0, 32
-; LMULMAX1-NEXT: vle64.v v9, (a0)
-; LMULMAX1-NEXT: vle64.v v10, (a2)
-; LMULMAX1-NEXT: addi a0, a0, 16
-; LMULMAX1-NEXT: vle64.v v11, (a0)
-; LMULMAX1-NEXT: vfncvt.rod.f.f.w v12, v9
-; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
-; LMULMAX1-NEXT: vfncvt.f.f.w v9, v12
-; LMULMAX1-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
-; LMULMAX1-NEXT: vfncvt.rod.f.f.w v12, v11
-; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
-; LMULMAX1-NEXT: vfncvt.f.f.w v11, v12
-; LMULMAX1-NEXT: vsetivli zero, 4, e16, m1, tu, ma
-; LMULMAX1-NEXT: vslideup.vi v9, v11, 2
-; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
-; LMULMAX1-NEXT: vfncvt.rod.f.f.w v11, v10
-; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
-; LMULMAX1-NEXT: vfncvt.f.f.w v10, v11
-; LMULMAX1-NEXT: vsetivli zero, 6, e16, m1, tu, ma
-; LMULMAX1-NEXT: vslideup.vi v9, v10, 4
-; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
-; LMULMAX1-NEXT: vfncvt.rod.f.f.w v10, v8
-; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
-; LMULMAX1-NEXT: vfncvt.f.f.w v8, v10
-; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; LMULMAX1-NEXT: vslideup.vi v9, v8, 6
-; LMULMAX1-NEXT: vse16.v v9, (a1)
-; LMULMAX1-NEXT: ret
+; CHECK-LABEL: fpround_v8f64_v8f16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; CHECK-NEXT: vle64.v v8, (a0)
+; CHECK-NEXT: vfncvt.rod.f.f.w v12, v8
+; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, ma
+; CHECK-NEXT: vfncvt.f.f.w v8, v12
+; CHECK-NEXT: vse16.v v8, (a1)
+; CHECK-NEXT: ret
%a = load <8 x double>, ptr %x
%d = fptrunc <8 x double> %a to <8 x half>
store <8 x half> %d, ptr %y
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-splat.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-splat.ll
index 1ccb089..dc907ee 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-splat.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-splat.ll
@@ -1,8 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -target-abi=ilp32d -mattr=+v,+zfh,+zvfh,+f,+d -riscv-v-fixed-length-vector-lmul-max=2 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,CHECK-RV32,LMULMAX2,RV32-LMULMAX2
-; RUN: llc -mtriple=riscv64 -target-abi=lp64d -mattr=+v,+zfh,+zvfh,+f,+d -riscv-v-fixed-length-vector-lmul-max=2 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,CHECK-RV64,LMULMAX2,RV64-LMULMAX2
-; RUN: llc -mtriple=riscv32 -target-abi=ilp32d -mattr=+v,+zfh,+zvfh,+f,+d -riscv-v-fixed-length-vector-lmul-max=1 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,CHECK-RV32,LMULMAX1,RV32-LMULMAX1
-; RUN: llc -mtriple=riscv64 -target-abi=lp64d -mattr=+v,+zfh,+zvfh,+f,+d -riscv-v-fixed-length-vector-lmul-max=1 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,CHECK-RV64,LMULMAX1,RV64-LMULMAX1
+; RUN: llc -mtriple=riscv32 -target-abi=ilp32d -mattr=+v,+zfh,+zvfh,+f,+d -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,CHECK-RV32
+; RUN: llc -mtriple=riscv64 -target-abi=lp64d -mattr=+v,+zfh,+zvfh,+f,+d -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,CHECK-RV64
define void @splat_v8f16(ptr %x, half %y) {
; CHECK-LABEL: splat_v8f16:
@@ -44,21 +42,12 @@ define void @splat_v2f64(ptr %x, double %y) {
}
define void @splat_16f16(ptr %x, half %y) {
-; LMULMAX2-LABEL: splat_16f16:
-; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; LMULMAX2-NEXT: vfmv.v.f v8, fa0
-; LMULMAX2-NEXT: vse16.v v8, (a0)
-; LMULMAX2-NEXT: ret
-;
-; LMULMAX1-LABEL: splat_16f16:
-; LMULMAX1: # %bb.0:
-; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; LMULMAX1-NEXT: vfmv.v.f v8, fa0
-; LMULMAX1-NEXT: addi a1, a0, 16
-; LMULMAX1-NEXT: vse16.v v8, (a1)
-; LMULMAX1-NEXT: vse16.v v8, (a0)
-; LMULMAX1-NEXT: ret
+; CHECK-LABEL: splat_16f16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; CHECK-NEXT: vfmv.v.f v8, fa0
+; CHECK-NEXT: vse16.v v8, (a0)
+; CHECK-NEXT: ret
%a = insertelement <16 x half> poison, half %y, i32 0
%b = shufflevector <16 x half> %a, <16 x half> poison, <16 x i32> zeroinitializer
store <16 x half> %b, ptr %x
@@ -66,21 +55,12 @@ define void @splat_16f16(ptr %x, half %y) {
}
define void @splat_v8f32(ptr %x, float %y) {
-; LMULMAX2-LABEL: splat_v8f32:
-; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; LMULMAX2-NEXT: vfmv.v.f v8, fa0
-; LMULMAX2-NEXT: vse32.v v8, (a0)
-; LMULMAX2-NEXT: ret
-;
-; LMULMAX1-LABEL: splat_v8f32:
-; LMULMAX1: # %bb.0:
-; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; LMULMAX1-NEXT: vfmv.v.f v8, fa0
-; LMULMAX1-NEXT: addi a1, a0, 16
-; LMULMAX1-NEXT: vse32.v v8, (a1)
-; LMULMAX1-NEXT: vse32.v v8, (a0)
-; LMULMAX1-NEXT: ret
+; CHECK-LABEL: splat_v8f32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; CHECK-NEXT: vfmv.v.f v8, fa0
+; CHECK-NEXT: vse32.v v8, (a0)
+; CHECK-NEXT: ret
%a = insertelement <8 x float> poison, float %y, i32 0
%b = shufflevector <8 x float> %a, <8 x float> poison, <8 x i32> zeroinitializer
store <8 x float> %b, ptr %x
@@ -88,21 +68,12 @@ define void @splat_v8f32(ptr %x, float %y) {
}
define void @splat_v4f64(ptr %x, double %y) {
-; LMULMAX2-LABEL: splat_v4f64:
-; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; LMULMAX2-NEXT: vfmv.v.f v8, fa0
-; LMULMAX2-NEXT: vse64.v v8, (a0)
-; LMULMAX2-NEXT: ret
-;
-; LMULMAX1-LABEL: splat_v4f64:
-; LMULMAX1: # %bb.0:
-; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; LMULMAX1-NEXT: vfmv.v.f v8, fa0
-; LMULMAX1-NEXT: addi a1, a0, 16
-; LMULMAX1-NEXT: vse64.v v8, (a1)
-; LMULMAX1-NEXT: vse64.v v8, (a0)
-; LMULMAX1-NEXT: ret
+; CHECK-LABEL: splat_v4f64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; CHECK-NEXT: vfmv.v.f v8, fa0
+; CHECK-NEXT: vse64.v v8, (a0)
+; CHECK-NEXT: ret
%a = insertelement <4 x double> poison, double %y, i32 0
%b = shufflevector <4 x double> %a, <4 x double> poison, <4 x i32> zeroinitializer
store <4 x double> %b, ptr %x
@@ -149,21 +120,12 @@ define void @splat_zero_v2f64(ptr %x) {
}
define void @splat_zero_16f16(ptr %x) {
-; LMULMAX2-LABEL: splat_zero_16f16:
-; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; LMULMAX2-NEXT: vmv.v.i v8, 0
-; LMULMAX2-NEXT: vse16.v v8, (a0)
-; LMULMAX2-NEXT: ret
-;
-; LMULMAX1-LABEL: splat_zero_16f16:
-; LMULMAX1: # %bb.0:
-; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; LMULMAX1-NEXT: vmv.v.i v8, 0
-; LMULMAX1-NEXT: vse16.v v8, (a0)
-; LMULMAX1-NEXT: addi a0, a0, 16
-; LMULMAX1-NEXT: vse16.v v8, (a0)
-; LMULMAX1-NEXT: ret
+; CHECK-LABEL: splat_zero_16f16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; CHECK-NEXT: vmv.v.i v8, 0
+; CHECK-NEXT: vse16.v v8, (a0)
+; CHECK-NEXT: ret
%a = insertelement <16 x half> poison, half 0.0, i32 0
%b = shufflevector <16 x half> %a, <16 x half> poison, <16 x i32> zeroinitializer
store <16 x half> %b, ptr %x
@@ -171,21 +133,12 @@ define void @splat_zero_16f16(ptr %x) {
}
define void @splat_zero_v8f32(ptr %x) {
-; LMULMAX2-LABEL: splat_zero_v8f32:
-; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; LMULMAX2-NEXT: vmv.v.i v8, 0
-; LMULMAX2-NEXT: vse32.v v8, (a0)
-; LMULMAX2-NEXT: ret
-;
-; LMULMAX1-LABEL: splat_zero_v8f32:
-; LMULMAX1: # %bb.0:
-; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; LMULMAX1-NEXT: vmv.v.i v8, 0
-; LMULMAX1-NEXT: vse32.v v8, (a0)
-; LMULMAX1-NEXT: addi a0, a0, 16
-; LMULMAX1-NEXT: vse32.v v8, (a0)
-; LMULMAX1-NEXT: ret
+; CHECK-LABEL: splat_zero_v8f32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; CHECK-NEXT: vmv.v.i v8, 0
+; CHECK-NEXT: vse32.v v8, (a0)
+; CHECK-NEXT: ret
%a = insertelement <8 x float> poison, float 0.0, i32 0
%b = shufflevector <8 x float> %a, <8 x float> poison, <8 x i32> zeroinitializer
store <8 x float> %b, ptr %x
@@ -193,21 +146,12 @@ define void @splat_zero_v8f32(ptr %x) {
}
define void @splat_zero_v4f64(ptr %x) {
-; LMULMAX2-LABEL: splat_zero_v4f64:
-; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; LMULMAX2-NEXT: vmv.v.i v8, 0
-; LMULMAX2-NEXT: vse64.v v8, (a0)
-; LMULMAX2-NEXT: ret
-;
-; LMULMAX1-LABEL: splat_zero_v4f64:
-; LMULMAX1: # %bb.0:
-; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; LMULMAX1-NEXT: vmv.v.i v8, 0
-; LMULMAX1-NEXT: vse64.v v8, (a0)
-; LMULMAX1-NEXT: addi a0, a0, 16
-; LMULMAX1-NEXT: vse64.v v8, (a0)
-; LMULMAX1-NEXT: ret
+; CHECK-LABEL: splat_zero_v4f64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; CHECK-NEXT: vmv.v.i v8, 0
+; CHECK-NEXT: vse64.v v8, (a0)
+; CHECK-NEXT: ret
%a = insertelement <4 x double> poison, double 0.0, i32 0
%b = shufflevector <4 x double> %a, <4 x double> poison, <4 x i32> zeroinitializer
store <4 x double> %b, ptr %x
@@ -267,23 +211,13 @@ define void @splat_negzero_v2f64(ptr %x) {
}
define void @splat_negzero_16f16(ptr %x) {
-; LMULMAX2-LABEL: splat_negzero_16f16:
-; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: lui a1, 1048568
-; LMULMAX2-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; LMULMAX2-NEXT: vmv.v.x v8, a1
-; LMULMAX2-NEXT: vse16.v v8, (a0)
-; LMULMAX2-NEXT: ret
-;
-; LMULMAX1-LABEL: splat_negzero_16f16:
-; LMULMAX1: # %bb.0:
-; LMULMAX1-NEXT: lui a1, 1048568
-; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; LMULMAX1-NEXT: vmv.v.x v8, a1
-; LMULMAX1-NEXT: addi a1, a0, 16
-; LMULMAX1-NEXT: vse16.v v8, (a1)
-; LMULMAX1-NEXT: vse16.v v8, (a0)
-; LMULMAX1-NEXT: ret
+; CHECK-LABEL: splat_negzero_16f16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: lui a1, 1048568
+; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; CHECK-NEXT: vmv.v.x v8, a1
+; CHECK-NEXT: vse16.v v8, (a0)
+; CHECK-NEXT: ret
%a = insertelement <16 x half> poison, half -0.0, i32 0
%b = shufflevector <16 x half> %a, <16 x half> poison, <16 x i32> zeroinitializer
store <16 x half> %b, ptr %x
@@ -291,23 +225,13 @@ define void @splat_negzero_16f16(ptr %x) {
}
define void @splat_negzero_v8f32(ptr %x) {
-; LMULMAX2-LABEL: splat_negzero_v8f32:
-; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: lui a1, 524288
-; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; LMULMAX2-NEXT: vmv.v.x v8, a1
-; LMULMAX2-NEXT: vse32.v v8, (a0)
-; LMULMAX2-NEXT: ret
-;
-; LMULMAX1-LABEL: splat_negzero_v8f32:
-; LMULMAX1: # %bb.0:
-; LMULMAX1-NEXT: lui a1, 524288
-; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; LMULMAX1-NEXT: vmv.v.x v8, a1
-; LMULMAX1-NEXT: addi a1, a0, 16
-; LMULMAX1-NEXT: vse32.v v8, (a1)
-; LMULMAX1-NEXT: vse32.v v8, (a0)
-; LMULMAX1-NEXT: ret
+; CHECK-LABEL: splat_negzero_v8f32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: lui a1, 524288
+; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; CHECK-NEXT: vmv.v.x v8, a1
+; CHECK-NEXT: vse32.v v8, (a0)
+; CHECK-NEXT: ret
%a = insertelement <8 x float> poison, float -0.0, i32 0
%b = shufflevector <8 x float> %a, <8 x float> poison, <8 x i32> zeroinitializer
store <8 x float> %b, ptr %x
@@ -315,45 +239,23 @@ define void @splat_negzero_v8f32(ptr %x) {
}
define void @splat_negzero_v4f64(ptr %x) {
-; RV32-LMULMAX2-LABEL: splat_negzero_v4f64:
-; RV32-LMULMAX2: # %bb.0:
-; RV32-LMULMAX2-NEXT: fcvt.d.w fa5, zero
-; RV32-LMULMAX2-NEXT: fneg.d fa5, fa5
-; RV32-LMULMAX2-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; RV32-LMULMAX2-NEXT: vfmv.v.f v8, fa5
-; RV32-LMULMAX2-NEXT: vse64.v v8, (a0)
-; RV32-LMULMAX2-NEXT: ret
-;
-; RV64-LMULMAX2-LABEL: splat_negzero_v4f64:
-; RV64-LMULMAX2: # %bb.0:
-; RV64-LMULMAX2-NEXT: li a1, -1
-; RV64-LMULMAX2-NEXT: slli a1, a1, 63
-; RV64-LMULMAX2-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; RV64-LMULMAX2-NEXT: vmv.v.x v8, a1
-; RV64-LMULMAX2-NEXT: vse64.v v8, (a0)
-; RV64-LMULMAX2-NEXT: ret
-;
-; RV32-LMULMAX1-LABEL: splat_negzero_v4f64:
-; RV32-LMULMAX1: # %bb.0:
-; RV32-LMULMAX1-NEXT: fcvt.d.w fa5, zero
-; RV32-LMULMAX1-NEXT: fneg.d fa5, fa5
-; RV32-LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; RV32-LMULMAX1-NEXT: vfmv.v.f v8, fa5
-; RV32-LMULMAX1-NEXT: addi a1, a0, 16
-; RV32-LMULMAX1-NEXT: vse64.v v8, (a1)
-; RV32-LMULMAX1-NEXT: vse64.v v8, (a0)
-; RV32-LMULMAX1-NEXT: ret
+; CHECK-RV32-LABEL: splat_negzero_v4f64:
+; CHECK-RV32: # %bb.0:
+; CHECK-RV32-NEXT: fcvt.d.w fa5, zero
+; CHECK-RV32-NEXT: fneg.d fa5, fa5
+; CHECK-RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; CHECK-RV32-NEXT: vfmv.v.f v8, fa5
+; CHECK-RV32-NEXT: vse64.v v8, (a0)
+; CHECK-RV32-NEXT: ret
;
-; RV64-LMULMAX1-LABEL: splat_negzero_v4f64:
-; RV64-LMULMAX1: # %bb.0:
-; RV64-LMULMAX1-NEXT: li a1, -1
-; RV64-LMULMAX1-NEXT: slli a1, a1, 63
-; RV64-LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; RV64-LMULMAX1-NEXT: vmv.v.x v8, a1
-; RV64-LMULMAX1-NEXT: addi a1, a0, 16
-; RV64-LMULMAX1-NEXT: vse64.v v8, (a1)
-; RV64-LMULMAX1-NEXT: vse64.v v8, (a0)
-; RV64-LMULMAX1-NEXT: ret
+; CHECK-RV64-LABEL: splat_negzero_v4f64:
+; CHECK-RV64: # %bb.0:
+; CHECK-RV64-NEXT: li a1, -1
+; CHECK-RV64-NEXT: slli a1, a1, 63
+; CHECK-RV64-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; CHECK-RV64-NEXT: vmv.v.x v8, a1
+; CHECK-RV64-NEXT: vse64.v v8, (a0)
+; CHECK-RV64-NEXT: ret
%a = insertelement <4 x double> poison, double -0.0, i32 0
%b = shufflevector <4 x double> %a, <4 x double> poison, <4 x i32> zeroinitializer
store <4 x double> %b, ptr %x
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-vrgather.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-vrgather.ll
index 36294af..de7dfab 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-vrgather.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-vrgather.ll
@@ -1,8 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -target-abi=ilp32d -mattr=+v,+zfh,+zvfh,+f,+d -riscv-v-fixed-length-vector-lmul-max=8 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,LMULMAX8
-; RUN: llc -mtriple=riscv64 -target-abi=lp64d -mattr=+v,+zfh,+zvfh,+f,+d -riscv-v-fixed-length-vector-lmul-max=8 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,LMULMAX8
-; RUN: llc -mtriple=riscv32 -target-abi=ilp32d -mattr=+v,+zfh,+zvfh,+f,+d -riscv-v-fixed-length-vector-lmul-max=1 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,LMULMAX1
-; RUN: llc -mtriple=riscv64 -target-abi=lp64d -mattr=+v,+zfh,+zvfh,+f,+d -riscv-v-fixed-length-vector-lmul-max=1 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,LMULMAX1
+; RUN: llc -mtriple=riscv32 -target-abi=ilp32d -mattr=+v,+zfh,+zvfh,+f,+d -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -mtriple=riscv64 -target-abi=lp64d -mattr=+v,+zfh,+zvfh,+f,+d -verify-machineinstrs < %s | FileCheck %s
define void @gather_const_v8f16(ptr %x) {
; CHECK-LABEL: gather_const_v8f16:
@@ -52,36 +50,14 @@ define void @gather_const_v2f64(ptr %x) {
}
define void @gather_const_v64f16(ptr %x) {
-; LMULMAX8-LABEL: gather_const_v64f16:
-; LMULMAX8: # %bb.0:
-; LMULMAX8-NEXT: li a1, 64
-; LMULMAX8-NEXT: addi a2, a0, 94
-; LMULMAX8-NEXT: vsetvli zero, a1, e16, m8, ta, ma
-; LMULMAX8-NEXT: vlse16.v v8, (a2), zero
-; LMULMAX8-NEXT: vse16.v v8, (a0)
-; LMULMAX8-NEXT: ret
-;
-; LMULMAX1-LABEL: gather_const_v64f16:
-; LMULMAX1: # %bb.0:
-; LMULMAX1-NEXT: addi a1, a0, 80
-; LMULMAX1-NEXT: addi a2, a0, 16
-; LMULMAX1-NEXT: addi a3, a0, 48
-; LMULMAX1-NEXT: addi a4, a0, 32
-; LMULMAX1-NEXT: addi a5, a0, 94
-; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; LMULMAX1-NEXT: vlse16.v v8, (a5), zero
-; LMULMAX1-NEXT: addi a5, a0, 64
-; LMULMAX1-NEXT: addi a6, a0, 112
-; LMULMAX1-NEXT: addi a7, a0, 96
-; LMULMAX1-NEXT: vse16.v v8, (a7)
-; LMULMAX1-NEXT: vse16.v v8, (a6)
-; LMULMAX1-NEXT: vse16.v v8, (a5)
-; LMULMAX1-NEXT: vse16.v v8, (a1)
-; LMULMAX1-NEXT: vse16.v v8, (a4)
-; LMULMAX1-NEXT: vse16.v v8, (a3)
-; LMULMAX1-NEXT: vse16.v v8, (a0)
-; LMULMAX1-NEXT: vse16.v v8, (a2)
-; LMULMAX1-NEXT: ret
+; CHECK-LABEL: gather_const_v64f16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a1, 64
+; CHECK-NEXT: addi a2, a0, 94
+; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma
+; CHECK-NEXT: vlse16.v v8, (a2), zero
+; CHECK-NEXT: vse16.v v8, (a0)
+; CHECK-NEXT: ret
%a = load <64 x half>, ptr %x
%b = extractelement <64 x half> %a, i32 47
%c = insertelement <64 x half> poison, half %b, i32 0
@@ -91,36 +67,14 @@ define void @gather_const_v64f16(ptr %x) {
}
define void @gather_const_v32f32(ptr %x) {
-; LMULMAX8-LABEL: gather_const_v32f32:
-; LMULMAX8: # %bb.0:
-; LMULMAX8-NEXT: li a1, 32
-; LMULMAX8-NEXT: addi a2, a0, 68
-; LMULMAX8-NEXT: vsetvli zero, a1, e32, m8, ta, ma
-; LMULMAX8-NEXT: vlse32.v v8, (a2), zero
-; LMULMAX8-NEXT: vse32.v v8, (a0)
-; LMULMAX8-NEXT: ret
-;
-; LMULMAX1-LABEL: gather_const_v32f32:
-; LMULMAX1: # %bb.0:
-; LMULMAX1-NEXT: addi a1, a0, 64
-; LMULMAX1-NEXT: addi a2, a0, 16
-; LMULMAX1-NEXT: addi a3, a0, 48
-; LMULMAX1-NEXT: addi a4, a0, 32
-; LMULMAX1-NEXT: addi a5, a0, 68
-; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; LMULMAX1-NEXT: vlse32.v v8, (a5), zero
-; LMULMAX1-NEXT: addi a5, a0, 80
-; LMULMAX1-NEXT: addi a6, a0, 112
-; LMULMAX1-NEXT: addi a7, a0, 96
-; LMULMAX1-NEXT: vse32.v v8, (a7)
-; LMULMAX1-NEXT: vse32.v v8, (a6)
-; LMULMAX1-NEXT: vse32.v v8, (a1)
-; LMULMAX1-NEXT: vse32.v v8, (a5)
-; LMULMAX1-NEXT: vse32.v v8, (a4)
-; LMULMAX1-NEXT: vse32.v v8, (a3)
-; LMULMAX1-NEXT: vse32.v v8, (a0)
-; LMULMAX1-NEXT: vse32.v v8, (a2)
-; LMULMAX1-NEXT: ret
+; CHECK-LABEL: gather_const_v32f32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a1, 32
+; CHECK-NEXT: addi a2, a0, 68
+; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
+; CHECK-NEXT: vlse32.v v8, (a2), zero
+; CHECK-NEXT: vse32.v v8, (a0)
+; CHECK-NEXT: ret
%a = load <32 x float>, ptr %x
%b = extractelement <32 x float> %a, i32 17
%c = insertelement <32 x float> poison, float %b, i32 0
@@ -130,34 +84,13 @@ define void @gather_const_v32f32(ptr %x) {
}
define void @gather_const_v16f64(ptr %x) {
-; LMULMAX8-LABEL: gather_const_v16f64:
-; LMULMAX8: # %bb.0:
-; LMULMAX8-NEXT: addi a1, a0, 80
-; LMULMAX8-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; LMULMAX8-NEXT: vlse64.v v8, (a1), zero
-; LMULMAX8-NEXT: vse64.v v8, (a0)
-; LMULMAX8-NEXT: ret
-;
-; LMULMAX1-LABEL: gather_const_v16f64:
-; LMULMAX1: # %bb.0:
-; LMULMAX1-NEXT: addi a1, a0, 80
-; LMULMAX1-NEXT: addi a2, a0, 16
-; LMULMAX1-NEXT: addi a3, a0, 48
-; LMULMAX1-NEXT: addi a4, a0, 32
-; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; LMULMAX1-NEXT: vlse64.v v8, (a1), zero
-; LMULMAX1-NEXT: addi a5, a0, 64
-; LMULMAX1-NEXT: addi a6, a0, 112
-; LMULMAX1-NEXT: addi a7, a0, 96
-; LMULMAX1-NEXT: vse64.v v8, (a7)
-; LMULMAX1-NEXT: vse64.v v8, (a6)
-; LMULMAX1-NEXT: vse64.v v8, (a5)
-; LMULMAX1-NEXT: vse64.v v8, (a1)
-; LMULMAX1-NEXT: vse64.v v8, (a4)
-; LMULMAX1-NEXT: vse64.v v8, (a3)
-; LMULMAX1-NEXT: vse64.v v8, (a0)
-; LMULMAX1-NEXT: vse64.v v8, (a2)
-; LMULMAX1-NEXT: ret
+; CHECK-LABEL: gather_const_v16f64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a1, a0, 80
+; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma
+; CHECK-NEXT: vlse64.v v8, (a1), zero
+; CHECK-NEXT: vse64.v v8, (a0)
+; CHECK-NEXT: ret
%a = load <16 x double>, ptr %x
%b = extractelement <16 x double> %a, i32 10
%c = insertelement <16 x double> poison, double %b, i32 0
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp.ll
index dd79311..0f003d7 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp.ll
@@ -1,21 +1,15 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -target-abi=ilp32d -mattr=+v,+zfh,+zvfh,+f,+d -riscv-v-fixed-length-vector-lmul-max=2 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFH,LMULMAX2
-; RUN: llc -mtriple=riscv64 -target-abi=lp64d -mattr=+v,+zfh,+zvfh,+f,+d -riscv-v-fixed-length-vector-lmul-max=2 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFH,LMULMAX2
-; RUN: llc -mtriple=riscv32 -target-abi=ilp32d -mattr=+v,+zfh,+zvfh,+f,+d -riscv-v-fixed-length-vector-lmul-max=1 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFH,LMULMAX1,LMULMAX1-RV32
-; RUN: llc -mtriple=riscv64 -target-abi=lp64d -mattr=+v,+zfh,+zvfh,+f,+d -riscv-v-fixed-length-vector-lmul-max=1 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFH,LMULMAX1,LMULMAX1-RV64
-; RUN: llc -mtriple=riscv32 -target-abi=ilp32d -mattr=+v,+zfhmin,+zvfh,+f,+d -riscv-v-fixed-length-vector-lmul-max=2 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFH,LMULMAX2
-; RUN: llc -mtriple=riscv64 -target-abi=lp64d -mattr=+v,+zfhmin,+zvfh,+f,+d -riscv-v-fixed-length-vector-lmul-max=2 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFH,LMULMAX2
-; RUN: llc -mtriple=riscv32 -target-abi=ilp32d -mattr=+v,+zfhmin,+zvfh,+f,+d -riscv-v-fixed-length-vector-lmul-max=1 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFH,LMULMAX1,LMULMAX1-RV32
-; RUN: llc -mtriple=riscv64 -target-abi=lp64d -mattr=+v,+zfhmin,+zvfh,+f,+d -riscv-v-fixed-length-vector-lmul-max=1 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFH,LMULMAX1,LMULMAX1-RV64
-
-; RUN: llc -mtriple=riscv32 -target-abi=ilp32d -mattr=+v,+zfh,+zvfhmin,+f,+d,+zvl256b -riscv-v-fixed-length-vector-lmul-max=2 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN,ZVFHMINLMULMAX2,ZVFHMINLMULMAX2-RV32
-; RUN: llc -mtriple=riscv64 -target-abi=lp64d -mattr=+v,+zfh,+zvfhmin,+f,+d,+zvl256b -riscv-v-fixed-length-vector-lmul-max=2 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN,ZVFHMINLMULMAX2,ZVFHMINLMULMAX2-RV64
-; RUN: llc -mtriple=riscv32 -target-abi=ilp32d -mattr=+v,+zfh,+zvfhmin,+f,+zvl256b -riscv-v-fixed-length-vector-lmul-max=1 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN,ZVFHMINLMULMAX1-RV32
-; RUN: llc -mtriple=riscv64 -target-abi=lp64d -mattr=+v,+zfh,+zvfhmin,+f,+zvl256b -riscv-v-fixed-length-vector-lmul-max=1 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN,ZVFHMINLMULMAX1-RV64
-; RUN: llc -mtriple=riscv32 -target-abi=ilp32d -mattr=+v,+zfhmin,+zvfhmin,+f,+zvl256b -riscv-v-fixed-length-vector-lmul-max=2 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN,ZVFHMINLMULMAX2,ZVFHMINLMULMAX2-RV32
-; RUN: llc -mtriple=riscv64 -target-abi=lp64d -mattr=+v,+zfhmin,+zvfhmin,+f,+zvl256b -riscv-v-fixed-length-vector-lmul-max=2 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN,ZVFHMINLMULMAX2,ZVFHMINLMULMAX2-RV64
-; RUN: llc -mtriple=riscv32 -target-abi=ilp32d -mattr=+v,+zfhmin,+zvfhmin,+f,+zvl256b -riscv-v-fixed-length-vector-lmul-max=1 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN,ZVFHMINLMULMAX1-RV32
-; RUN: llc -mtriple=riscv64 -target-abi=lp64d -mattr=+v,+zfhmin,+zvfhmin,+f,+zvl256b -riscv-v-fixed-length-vector-lmul-max=1 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN,ZVFHMINLMULMAX1-RV64
+; RUN: llc -mtriple=riscv32 -target-abi=ilp32d -mattr=+v,+zfh,+zvfh,+f,+d -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFH
+; RUN: llc -mtriple=riscv64 -target-abi=lp64d -mattr=+v,+zfh,+zvfh,+f,+d -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFH
+; RUN: llc -mtriple=riscv32 -target-abi=ilp32d -mattr=+v,+zfhmin,+zvfh,+f,+d -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFH
+; RUN: llc -mtriple=riscv64 -target-abi=lp64d -mattr=+v,+zfhmin,+zvfh,+f,+d -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFH
+
+; RUN: llc -mtriple=riscv32 -target-abi=ilp32d -mattr=+v,+zfh,+zvfhmin,+f,+d,+zvl256b -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN,ZVFHMIN-RV32
+; RUN: llc -mtriple=riscv64 -target-abi=lp64d -mattr=+v,+zfh,+zvfhmin,+f,+d,+zvl256b -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN,ZVFHMIN-RV64
+; RUN: llc -mtriple=riscv32 -target-abi=ilp32d -mattr=+v,+zfh,+zvfhmin,+f,+zvl256b -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN,ZVFHMIN-RV32
+; RUN: llc -mtriple=riscv64 -target-abi=lp64d -mattr=+v,+zfh,+zvfhmin,+f,+zvl256b -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN,ZVFHMIN-RV64
+; RUN: llc -mtriple=riscv32 -target-abi=ilp32d -mattr=+v,+zfhmin,+zvfhmin,+f,+zvl256b -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN,ZVFHMIN-RV32
+; RUN: llc -mtriple=riscv64 -target-abi=lp64d -mattr=+v,+zfhmin,+zvfhmin,+f,+zvl256b -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN,ZVFHMIN-RV64
define void @fadd_v8f16(ptr %x, ptr %y) {
; ZVFH-LABEL: fadd_v8f16:
@@ -59,79 +53,42 @@ define void @fadd_v6f16(ptr %x, ptr %y) {
; ZVFH-NEXT: vse16.v v8, (a0)
; ZVFH-NEXT: ret
;
-; ZVFHMINLMULMAX2-RV32-LABEL: fadd_v6f16:
-; ZVFHMINLMULMAX2-RV32: # %bb.0:
-; ZVFHMINLMULMAX2-RV32-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
-; ZVFHMINLMULMAX2-RV32-NEXT: vle16.v v8, (a1)
-; ZVFHMINLMULMAX2-RV32-NEXT: vle16.v v9, (a0)
-; ZVFHMINLMULMAX2-RV32-NEXT: vfwcvt.f.f.v v10, v8
-; ZVFHMINLMULMAX2-RV32-NEXT: vfwcvt.f.f.v v8, v9
-; ZVFHMINLMULMAX2-RV32-NEXT: vsetvli zero, zero, e32, m1, ta, ma
-; ZVFHMINLMULMAX2-RV32-NEXT: vfadd.vv v8, v8, v10
-; ZVFHMINLMULMAX2-RV32-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
-; ZVFHMINLMULMAX2-RV32-NEXT: vfncvt.f.f.w v9, v8
-; ZVFHMINLMULMAX2-RV32-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
-; ZVFHMINLMULMAX2-RV32-NEXT: vslidedown.vi v8, v9, 2
-; ZVFHMINLMULMAX2-RV32-NEXT: addi a1, a0, 8
-; ZVFHMINLMULMAX2-RV32-NEXT: vse32.v v8, (a1)
-; ZVFHMINLMULMAX2-RV32-NEXT: vsetivli zero, 4, e16, mf4, ta, ma
-; ZVFHMINLMULMAX2-RV32-NEXT: vse16.v v9, (a0)
-; ZVFHMINLMULMAX2-RV32-NEXT: ret
-;
-; ZVFHMINLMULMAX2-RV64-LABEL: fadd_v6f16:
-; ZVFHMINLMULMAX2-RV64: # %bb.0:
-; ZVFHMINLMULMAX2-RV64-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
-; ZVFHMINLMULMAX2-RV64-NEXT: vle16.v v8, (a1)
-; ZVFHMINLMULMAX2-RV64-NEXT: vle16.v v9, (a0)
-; ZVFHMINLMULMAX2-RV64-NEXT: vfwcvt.f.f.v v10, v8
-; ZVFHMINLMULMAX2-RV64-NEXT: vfwcvt.f.f.v v8, v9
-; ZVFHMINLMULMAX2-RV64-NEXT: vsetvli zero, zero, e32, m1, ta, ma
-; ZVFHMINLMULMAX2-RV64-NEXT: vfadd.vv v8, v8, v10
-; ZVFHMINLMULMAX2-RV64-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
-; ZVFHMINLMULMAX2-RV64-NEXT: vfncvt.f.f.w v9, v8
-; ZVFHMINLMULMAX2-RV64-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
-; ZVFHMINLMULMAX2-RV64-NEXT: vse64.v v9, (a0)
-; ZVFHMINLMULMAX2-RV64-NEXT: vslidedown.vi v8, v9, 2
-; ZVFHMINLMULMAX2-RV64-NEXT: addi a0, a0, 8
-; ZVFHMINLMULMAX2-RV64-NEXT: vse32.v v8, (a0)
-; ZVFHMINLMULMAX2-RV64-NEXT: ret
-;
-; ZVFHMINLMULMAX1-RV32-LABEL: fadd_v6f16:
-; ZVFHMINLMULMAX1-RV32: # %bb.0:
-; ZVFHMINLMULMAX1-RV32-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
-; ZVFHMINLMULMAX1-RV32-NEXT: vle16.v v8, (a1)
-; ZVFHMINLMULMAX1-RV32-NEXT: vle16.v v9, (a0)
-; ZVFHMINLMULMAX1-RV32-NEXT: vfwcvt.f.f.v v10, v8
-; ZVFHMINLMULMAX1-RV32-NEXT: vfwcvt.f.f.v v8, v9
-; ZVFHMINLMULMAX1-RV32-NEXT: vsetvli zero, zero, e32, m1, ta, ma
-; ZVFHMINLMULMAX1-RV32-NEXT: vfadd.vv v8, v8, v10
-; ZVFHMINLMULMAX1-RV32-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
-; ZVFHMINLMULMAX1-RV32-NEXT: vfncvt.f.f.w v9, v8
-; ZVFHMINLMULMAX1-RV32-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
-; ZVFHMINLMULMAX1-RV32-NEXT: vslidedown.vi v8, v9, 2
-; ZVFHMINLMULMAX1-RV32-NEXT: addi a1, a0, 8
-; ZVFHMINLMULMAX1-RV32-NEXT: vse32.v v8, (a1)
-; ZVFHMINLMULMAX1-RV32-NEXT: vsetivli zero, 4, e16, mf4, ta, ma
-; ZVFHMINLMULMAX1-RV32-NEXT: vse16.v v9, (a0)
-; ZVFHMINLMULMAX1-RV32-NEXT: ret
-;
-; ZVFHMINLMULMAX1-RV64-LABEL: fadd_v6f16:
-; ZVFHMINLMULMAX1-RV64: # %bb.0:
-; ZVFHMINLMULMAX1-RV64-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
-; ZVFHMINLMULMAX1-RV64-NEXT: vle16.v v8, (a1)
-; ZVFHMINLMULMAX1-RV64-NEXT: vle16.v v9, (a0)
-; ZVFHMINLMULMAX1-RV64-NEXT: vfwcvt.f.f.v v10, v8
-; ZVFHMINLMULMAX1-RV64-NEXT: vfwcvt.f.f.v v8, v9
-; ZVFHMINLMULMAX1-RV64-NEXT: vsetvli zero, zero, e32, m1, ta, ma
-; ZVFHMINLMULMAX1-RV64-NEXT: vfadd.vv v8, v8, v10
-; ZVFHMINLMULMAX1-RV64-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
-; ZVFHMINLMULMAX1-RV64-NEXT: vfncvt.f.f.w v9, v8
-; ZVFHMINLMULMAX1-RV64-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
-; ZVFHMINLMULMAX1-RV64-NEXT: vse64.v v9, (a0)
-; ZVFHMINLMULMAX1-RV64-NEXT: vslidedown.vi v8, v9, 2
-; ZVFHMINLMULMAX1-RV64-NEXT: addi a0, a0, 8
-; ZVFHMINLMULMAX1-RV64-NEXT: vse32.v v8, (a0)
-; ZVFHMINLMULMAX1-RV64-NEXT: ret
+; ZVFHMIN-RV32-LABEL: fadd_v6f16:
+; ZVFHMIN-RV32: # %bb.0:
+; ZVFHMIN-RV32-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
+; ZVFHMIN-RV32-NEXT: vle16.v v8, (a1)
+; ZVFHMIN-RV32-NEXT: vle16.v v9, (a0)
+; ZVFHMIN-RV32-NEXT: vfwcvt.f.f.v v10, v8
+; ZVFHMIN-RV32-NEXT: vfwcvt.f.f.v v8, v9
+; ZVFHMIN-RV32-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; ZVFHMIN-RV32-NEXT: vfadd.vv v8, v8, v10
+; ZVFHMIN-RV32-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
+; ZVFHMIN-RV32-NEXT: vfncvt.f.f.w v9, v8
+; ZVFHMIN-RV32-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
+; ZVFHMIN-RV32-NEXT: vslidedown.vi v8, v9, 2
+; ZVFHMIN-RV32-NEXT: addi a1, a0, 8
+; ZVFHMIN-RV32-NEXT: vse32.v v8, (a1)
+; ZVFHMIN-RV32-NEXT: vsetivli zero, 4, e16, mf4, ta, ma
+; ZVFHMIN-RV32-NEXT: vse16.v v9, (a0)
+; ZVFHMIN-RV32-NEXT: ret
+;
+; ZVFHMIN-RV64-LABEL: fadd_v6f16:
+; ZVFHMIN-RV64: # %bb.0:
+; ZVFHMIN-RV64-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
+; ZVFHMIN-RV64-NEXT: vle16.v v8, (a1)
+; ZVFHMIN-RV64-NEXT: vle16.v v9, (a0)
+; ZVFHMIN-RV64-NEXT: vfwcvt.f.f.v v10, v8
+; ZVFHMIN-RV64-NEXT: vfwcvt.f.f.v v8, v9
+; ZVFHMIN-RV64-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; ZVFHMIN-RV64-NEXT: vfadd.vv v8, v8, v10
+; ZVFHMIN-RV64-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
+; ZVFHMIN-RV64-NEXT: vfncvt.f.f.w v9, v8
+; ZVFHMIN-RV64-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
+; ZVFHMIN-RV64-NEXT: vse64.v v9, (a0)
+; ZVFHMIN-RV64-NEXT: vslidedown.vi v8, v9, 2
+; ZVFHMIN-RV64-NEXT: addi a0, a0, 8
+; ZVFHMIN-RV64-NEXT: vse32.v v8, (a0)
+; ZVFHMIN-RV64-NEXT: ret
%a = load <6 x half>, ptr %x
%b = load <6 x half>, ptr %y
%c = fadd <6 x half> %a, %b
@@ -222,79 +179,42 @@ define void @fsub_v6f16(ptr %x, ptr %y) {
; ZVFH-NEXT: vse16.v v8, (a0)
; ZVFH-NEXT: ret
;
-; ZVFHMINLMULMAX2-RV32-LABEL: fsub_v6f16:
-; ZVFHMINLMULMAX2-RV32: # %bb.0:
-; ZVFHMINLMULMAX2-RV32-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
-; ZVFHMINLMULMAX2-RV32-NEXT: vle16.v v8, (a1)
-; ZVFHMINLMULMAX2-RV32-NEXT: vle16.v v9, (a0)
-; ZVFHMINLMULMAX2-RV32-NEXT: vfwcvt.f.f.v v10, v8
-; ZVFHMINLMULMAX2-RV32-NEXT: vfwcvt.f.f.v v8, v9
-; ZVFHMINLMULMAX2-RV32-NEXT: vsetvli zero, zero, e32, m1, ta, ma
-; ZVFHMINLMULMAX2-RV32-NEXT: vfsub.vv v8, v8, v10
-; ZVFHMINLMULMAX2-RV32-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
-; ZVFHMINLMULMAX2-RV32-NEXT: vfncvt.f.f.w v9, v8
-; ZVFHMINLMULMAX2-RV32-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
-; ZVFHMINLMULMAX2-RV32-NEXT: vslidedown.vi v8, v9, 2
-; ZVFHMINLMULMAX2-RV32-NEXT: addi a1, a0, 8
-; ZVFHMINLMULMAX2-RV32-NEXT: vse32.v v8, (a1)
-; ZVFHMINLMULMAX2-RV32-NEXT: vsetivli zero, 4, e16, mf4, ta, ma
-; ZVFHMINLMULMAX2-RV32-NEXT: vse16.v v9, (a0)
-; ZVFHMINLMULMAX2-RV32-NEXT: ret
-;
-; ZVFHMINLMULMAX2-RV64-LABEL: fsub_v6f16:
-; ZVFHMINLMULMAX2-RV64: # %bb.0:
-; ZVFHMINLMULMAX2-RV64-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
-; ZVFHMINLMULMAX2-RV64-NEXT: vle16.v v8, (a1)
-; ZVFHMINLMULMAX2-RV64-NEXT: vle16.v v9, (a0)
-; ZVFHMINLMULMAX2-RV64-NEXT: vfwcvt.f.f.v v10, v8
-; ZVFHMINLMULMAX2-RV64-NEXT: vfwcvt.f.f.v v8, v9
-; ZVFHMINLMULMAX2-RV64-NEXT: vsetvli zero, zero, e32, m1, ta, ma
-; ZVFHMINLMULMAX2-RV64-NEXT: vfsub.vv v8, v8, v10
-; ZVFHMINLMULMAX2-RV64-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
-; ZVFHMINLMULMAX2-RV64-NEXT: vfncvt.f.f.w v9, v8
-; ZVFHMINLMULMAX2-RV64-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
-; ZVFHMINLMULMAX2-RV64-NEXT: vse64.v v9, (a0)
-; ZVFHMINLMULMAX2-RV64-NEXT: vslidedown.vi v8, v9, 2
-; ZVFHMINLMULMAX2-RV64-NEXT: addi a0, a0, 8
-; ZVFHMINLMULMAX2-RV64-NEXT: vse32.v v8, (a0)
-; ZVFHMINLMULMAX2-RV64-NEXT: ret
-;
-; ZVFHMINLMULMAX1-RV32-LABEL: fsub_v6f16:
-; ZVFHMINLMULMAX1-RV32: # %bb.0:
-; ZVFHMINLMULMAX1-RV32-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
-; ZVFHMINLMULMAX1-RV32-NEXT: vle16.v v8, (a1)
-; ZVFHMINLMULMAX1-RV32-NEXT: vle16.v v9, (a0)
-; ZVFHMINLMULMAX1-RV32-NEXT: vfwcvt.f.f.v v10, v8
-; ZVFHMINLMULMAX1-RV32-NEXT: vfwcvt.f.f.v v8, v9
-; ZVFHMINLMULMAX1-RV32-NEXT: vsetvli zero, zero, e32, m1, ta, ma
-; ZVFHMINLMULMAX1-RV32-NEXT: vfsub.vv v8, v8, v10
-; ZVFHMINLMULMAX1-RV32-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
-; ZVFHMINLMULMAX1-RV32-NEXT: vfncvt.f.f.w v9, v8
-; ZVFHMINLMULMAX1-RV32-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
-; ZVFHMINLMULMAX1-RV32-NEXT: vslidedown.vi v8, v9, 2
-; ZVFHMINLMULMAX1-RV32-NEXT: addi a1, a0, 8
-; ZVFHMINLMULMAX1-RV32-NEXT: vse32.v v8, (a1)
-; ZVFHMINLMULMAX1-RV32-NEXT: vsetivli zero, 4, e16, mf4, ta, ma
-; ZVFHMINLMULMAX1-RV32-NEXT: vse16.v v9, (a0)
-; ZVFHMINLMULMAX1-RV32-NEXT: ret
-;
-; ZVFHMINLMULMAX1-RV64-LABEL: fsub_v6f16:
-; ZVFHMINLMULMAX1-RV64: # %bb.0:
-; ZVFHMINLMULMAX1-RV64-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
-; ZVFHMINLMULMAX1-RV64-NEXT: vle16.v v8, (a1)
-; ZVFHMINLMULMAX1-RV64-NEXT: vle16.v v9, (a0)
-; ZVFHMINLMULMAX1-RV64-NEXT: vfwcvt.f.f.v v10, v8
-; ZVFHMINLMULMAX1-RV64-NEXT: vfwcvt.f.f.v v8, v9
-; ZVFHMINLMULMAX1-RV64-NEXT: vsetvli zero, zero, e32, m1, ta, ma
-; ZVFHMINLMULMAX1-RV64-NEXT: vfsub.vv v8, v8, v10
-; ZVFHMINLMULMAX1-RV64-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
-; ZVFHMINLMULMAX1-RV64-NEXT: vfncvt.f.f.w v9, v8
-; ZVFHMINLMULMAX1-RV64-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
-; ZVFHMINLMULMAX1-RV64-NEXT: vse64.v v9, (a0)
-; ZVFHMINLMULMAX1-RV64-NEXT: vslidedown.vi v8, v9, 2
-; ZVFHMINLMULMAX1-RV64-NEXT: addi a0, a0, 8
-; ZVFHMINLMULMAX1-RV64-NEXT: vse32.v v8, (a0)
-; ZVFHMINLMULMAX1-RV64-NEXT: ret
+; ZVFHMIN-RV32-LABEL: fsub_v6f16:
+; ZVFHMIN-RV32: # %bb.0:
+; ZVFHMIN-RV32-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
+; ZVFHMIN-RV32-NEXT: vle16.v v8, (a1)
+; ZVFHMIN-RV32-NEXT: vle16.v v9, (a0)
+; ZVFHMIN-RV32-NEXT: vfwcvt.f.f.v v10, v8
+; ZVFHMIN-RV32-NEXT: vfwcvt.f.f.v v8, v9
+; ZVFHMIN-RV32-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; ZVFHMIN-RV32-NEXT: vfsub.vv v8, v8, v10
+; ZVFHMIN-RV32-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
+; ZVFHMIN-RV32-NEXT: vfncvt.f.f.w v9, v8
+; ZVFHMIN-RV32-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
+; ZVFHMIN-RV32-NEXT: vslidedown.vi v8, v9, 2
+; ZVFHMIN-RV32-NEXT: addi a1, a0, 8
+; ZVFHMIN-RV32-NEXT: vse32.v v8, (a1)
+; ZVFHMIN-RV32-NEXT: vsetivli zero, 4, e16, mf4, ta, ma
+; ZVFHMIN-RV32-NEXT: vse16.v v9, (a0)
+; ZVFHMIN-RV32-NEXT: ret
+;
+; ZVFHMIN-RV64-LABEL: fsub_v6f16:
+; ZVFHMIN-RV64: # %bb.0:
+; ZVFHMIN-RV64-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
+; ZVFHMIN-RV64-NEXT: vle16.v v8, (a1)
+; ZVFHMIN-RV64-NEXT: vle16.v v9, (a0)
+; ZVFHMIN-RV64-NEXT: vfwcvt.f.f.v v10, v8
+; ZVFHMIN-RV64-NEXT: vfwcvt.f.f.v v8, v9
+; ZVFHMIN-RV64-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; ZVFHMIN-RV64-NEXT: vfsub.vv v8, v8, v10
+; ZVFHMIN-RV64-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
+; ZVFHMIN-RV64-NEXT: vfncvt.f.f.w v9, v8
+; ZVFHMIN-RV64-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
+; ZVFHMIN-RV64-NEXT: vse64.v v9, (a0)
+; ZVFHMIN-RV64-NEXT: vslidedown.vi v8, v9, 2
+; ZVFHMIN-RV64-NEXT: addi a0, a0, 8
+; ZVFHMIN-RV64-NEXT: vse32.v v8, (a0)
+; ZVFHMIN-RV64-NEXT: ret
%a = load <6 x half>, ptr %x
%b = load <6 x half>, ptr %y
%c = fsub <6 x half> %a, %b
@@ -385,79 +305,42 @@ define void @fmul_v6f16(ptr %x, ptr %y) {
; ZVFH-NEXT: vse16.v v8, (a0)
; ZVFH-NEXT: ret
;
-; ZVFHMINLMULMAX2-RV32-LABEL: fmul_v6f16:
-; ZVFHMINLMULMAX2-RV32: # %bb.0:
-; ZVFHMINLMULMAX2-RV32-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
-; ZVFHMINLMULMAX2-RV32-NEXT: vle16.v v8, (a1)
-; ZVFHMINLMULMAX2-RV32-NEXT: vle16.v v9, (a0)
-; ZVFHMINLMULMAX2-RV32-NEXT: vfwcvt.f.f.v v10, v8
-; ZVFHMINLMULMAX2-RV32-NEXT: vfwcvt.f.f.v v8, v9
-; ZVFHMINLMULMAX2-RV32-NEXT: vsetvli zero, zero, e32, m1, ta, ma
-; ZVFHMINLMULMAX2-RV32-NEXT: vfmul.vv v8, v8, v10
-; ZVFHMINLMULMAX2-RV32-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
-; ZVFHMINLMULMAX2-RV32-NEXT: vfncvt.f.f.w v9, v8
-; ZVFHMINLMULMAX2-RV32-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
-; ZVFHMINLMULMAX2-RV32-NEXT: vslidedown.vi v8, v9, 2
-; ZVFHMINLMULMAX2-RV32-NEXT: addi a1, a0, 8
-; ZVFHMINLMULMAX2-RV32-NEXT: vse32.v v8, (a1)
-; ZVFHMINLMULMAX2-RV32-NEXT: vsetivli zero, 4, e16, mf4, ta, ma
-; ZVFHMINLMULMAX2-RV32-NEXT: vse16.v v9, (a0)
-; ZVFHMINLMULMAX2-RV32-NEXT: ret
-;
-; ZVFHMINLMULMAX2-RV64-LABEL: fmul_v6f16:
-; ZVFHMINLMULMAX2-RV64: # %bb.0:
-; ZVFHMINLMULMAX2-RV64-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
-; ZVFHMINLMULMAX2-RV64-NEXT: vle16.v v8, (a1)
-; ZVFHMINLMULMAX2-RV64-NEXT: vle16.v v9, (a0)
-; ZVFHMINLMULMAX2-RV64-NEXT: vfwcvt.f.f.v v10, v8
-; ZVFHMINLMULMAX2-RV64-NEXT: vfwcvt.f.f.v v8, v9
-; ZVFHMINLMULMAX2-RV64-NEXT: vsetvli zero, zero, e32, m1, ta, ma
-; ZVFHMINLMULMAX2-RV64-NEXT: vfmul.vv v8, v8, v10
-; ZVFHMINLMULMAX2-RV64-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
-; ZVFHMINLMULMAX2-RV64-NEXT: vfncvt.f.f.w v9, v8
-; ZVFHMINLMULMAX2-RV64-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
-; ZVFHMINLMULMAX2-RV64-NEXT: vse64.v v9, (a0)
-; ZVFHMINLMULMAX2-RV64-NEXT: vslidedown.vi v8, v9, 2
-; ZVFHMINLMULMAX2-RV64-NEXT: addi a0, a0, 8
-; ZVFHMINLMULMAX2-RV64-NEXT: vse32.v v8, (a0)
-; ZVFHMINLMULMAX2-RV64-NEXT: ret
-;
-; ZVFHMINLMULMAX1-RV32-LABEL: fmul_v6f16:
-; ZVFHMINLMULMAX1-RV32: # %bb.0:
-; ZVFHMINLMULMAX1-RV32-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
-; ZVFHMINLMULMAX1-RV32-NEXT: vle16.v v8, (a1)
-; ZVFHMINLMULMAX1-RV32-NEXT: vle16.v v9, (a0)
-; ZVFHMINLMULMAX1-RV32-NEXT: vfwcvt.f.f.v v10, v8
-; ZVFHMINLMULMAX1-RV32-NEXT: vfwcvt.f.f.v v8, v9
-; ZVFHMINLMULMAX1-RV32-NEXT: vsetvli zero, zero, e32, m1, ta, ma
-; ZVFHMINLMULMAX1-RV32-NEXT: vfmul.vv v8, v8, v10
-; ZVFHMINLMULMAX1-RV32-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
-; ZVFHMINLMULMAX1-RV32-NEXT: vfncvt.f.f.w v9, v8
-; ZVFHMINLMULMAX1-RV32-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
-; ZVFHMINLMULMAX1-RV32-NEXT: vslidedown.vi v8, v9, 2
-; ZVFHMINLMULMAX1-RV32-NEXT: addi a1, a0, 8
-; ZVFHMINLMULMAX1-RV32-NEXT: vse32.v v8, (a1)
-; ZVFHMINLMULMAX1-RV32-NEXT: vsetivli zero, 4, e16, mf4, ta, ma
-; ZVFHMINLMULMAX1-RV32-NEXT: vse16.v v9, (a0)
-; ZVFHMINLMULMAX1-RV32-NEXT: ret
-;
-; ZVFHMINLMULMAX1-RV64-LABEL: fmul_v6f16:
-; ZVFHMINLMULMAX1-RV64: # %bb.0:
-; ZVFHMINLMULMAX1-RV64-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
-; ZVFHMINLMULMAX1-RV64-NEXT: vle16.v v8, (a1)
-; ZVFHMINLMULMAX1-RV64-NEXT: vle16.v v9, (a0)
-; ZVFHMINLMULMAX1-RV64-NEXT: vfwcvt.f.f.v v10, v8
-; ZVFHMINLMULMAX1-RV64-NEXT: vfwcvt.f.f.v v8, v9
-; ZVFHMINLMULMAX1-RV64-NEXT: vsetvli zero, zero, e32, m1, ta, ma
-; ZVFHMINLMULMAX1-RV64-NEXT: vfmul.vv v8, v8, v10
-; ZVFHMINLMULMAX1-RV64-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
-; ZVFHMINLMULMAX1-RV64-NEXT: vfncvt.f.f.w v9, v8
-; ZVFHMINLMULMAX1-RV64-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
-; ZVFHMINLMULMAX1-RV64-NEXT: vse64.v v9, (a0)
-; ZVFHMINLMULMAX1-RV64-NEXT: vslidedown.vi v8, v9, 2
-; ZVFHMINLMULMAX1-RV64-NEXT: addi a0, a0, 8
-; ZVFHMINLMULMAX1-RV64-NEXT: vse32.v v8, (a0)
-; ZVFHMINLMULMAX1-RV64-NEXT: ret
+; ZVFHMIN-RV32-LABEL: fmul_v6f16:
+; ZVFHMIN-RV32: # %bb.0:
+; ZVFHMIN-RV32-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
+; ZVFHMIN-RV32-NEXT: vle16.v v8, (a1)
+; ZVFHMIN-RV32-NEXT: vle16.v v9, (a0)
+; ZVFHMIN-RV32-NEXT: vfwcvt.f.f.v v10, v8
+; ZVFHMIN-RV32-NEXT: vfwcvt.f.f.v v8, v9
+; ZVFHMIN-RV32-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; ZVFHMIN-RV32-NEXT: vfmul.vv v8, v8, v10
+; ZVFHMIN-RV32-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
+; ZVFHMIN-RV32-NEXT: vfncvt.f.f.w v9, v8
+; ZVFHMIN-RV32-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
+; ZVFHMIN-RV32-NEXT: vslidedown.vi v8, v9, 2
+; ZVFHMIN-RV32-NEXT: addi a1, a0, 8
+; ZVFHMIN-RV32-NEXT: vse32.v v8, (a1)
+; ZVFHMIN-RV32-NEXT: vsetivli zero, 4, e16, mf4, ta, ma
+; ZVFHMIN-RV32-NEXT: vse16.v v9, (a0)
+; ZVFHMIN-RV32-NEXT: ret
+;
+; ZVFHMIN-RV64-LABEL: fmul_v6f16:
+; ZVFHMIN-RV64: # %bb.0:
+; ZVFHMIN-RV64-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
+; ZVFHMIN-RV64-NEXT: vle16.v v8, (a1)
+; ZVFHMIN-RV64-NEXT: vle16.v v9, (a0)
+; ZVFHMIN-RV64-NEXT: vfwcvt.f.f.v v10, v8
+; ZVFHMIN-RV64-NEXT: vfwcvt.f.f.v v8, v9
+; ZVFHMIN-RV64-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; ZVFHMIN-RV64-NEXT: vfmul.vv v8, v8, v10
+; ZVFHMIN-RV64-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
+; ZVFHMIN-RV64-NEXT: vfncvt.f.f.w v9, v8
+; ZVFHMIN-RV64-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
+; ZVFHMIN-RV64-NEXT: vse64.v v9, (a0)
+; ZVFHMIN-RV64-NEXT: vslidedown.vi v8, v9, 2
+; ZVFHMIN-RV64-NEXT: addi a0, a0, 8
+; ZVFHMIN-RV64-NEXT: vse32.v v8, (a0)
+; ZVFHMIN-RV64-NEXT: ret
%a = load <6 x half>, ptr %x
%b = load <6 x half>, ptr %y
%c = fmul <6 x half> %a, %b
@@ -548,79 +431,42 @@ define void @fdiv_v6f16(ptr %x, ptr %y) {
; ZVFH-NEXT: vse16.v v8, (a0)
; ZVFH-NEXT: ret
;
-; ZVFHMINLMULMAX2-RV32-LABEL: fdiv_v6f16:
-; ZVFHMINLMULMAX2-RV32: # %bb.0:
-; ZVFHMINLMULMAX2-RV32-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
-; ZVFHMINLMULMAX2-RV32-NEXT: vle16.v v8, (a1)
-; ZVFHMINLMULMAX2-RV32-NEXT: vle16.v v9, (a0)
-; ZVFHMINLMULMAX2-RV32-NEXT: vfwcvt.f.f.v v10, v8
-; ZVFHMINLMULMAX2-RV32-NEXT: vfwcvt.f.f.v v8, v9
-; ZVFHMINLMULMAX2-RV32-NEXT: vsetvli zero, zero, e32, m1, ta, ma
-; ZVFHMINLMULMAX2-RV32-NEXT: vfdiv.vv v8, v8, v10
-; ZVFHMINLMULMAX2-RV32-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
-; ZVFHMINLMULMAX2-RV32-NEXT: vfncvt.f.f.w v9, v8
-; ZVFHMINLMULMAX2-RV32-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
-; ZVFHMINLMULMAX2-RV32-NEXT: vslidedown.vi v8, v9, 2
-; ZVFHMINLMULMAX2-RV32-NEXT: addi a1, a0, 8
-; ZVFHMINLMULMAX2-RV32-NEXT: vse32.v v8, (a1)
-; ZVFHMINLMULMAX2-RV32-NEXT: vsetivli zero, 4, e16, mf4, ta, ma
-; ZVFHMINLMULMAX2-RV32-NEXT: vse16.v v9, (a0)
-; ZVFHMINLMULMAX2-RV32-NEXT: ret
-;
-; ZVFHMINLMULMAX2-RV64-LABEL: fdiv_v6f16:
-; ZVFHMINLMULMAX2-RV64: # %bb.0:
-; ZVFHMINLMULMAX2-RV64-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
-; ZVFHMINLMULMAX2-RV64-NEXT: vle16.v v8, (a1)
-; ZVFHMINLMULMAX2-RV64-NEXT: vle16.v v9, (a0)
-; ZVFHMINLMULMAX2-RV64-NEXT: vfwcvt.f.f.v v10, v8
-; ZVFHMINLMULMAX2-RV64-NEXT: vfwcvt.f.f.v v8, v9
-; ZVFHMINLMULMAX2-RV64-NEXT: vsetvli zero, zero, e32, m1, ta, ma
-; ZVFHMINLMULMAX2-RV64-NEXT: vfdiv.vv v8, v8, v10
-; ZVFHMINLMULMAX2-RV64-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
-; ZVFHMINLMULMAX2-RV64-NEXT: vfncvt.f.f.w v9, v8
-; ZVFHMINLMULMAX2-RV64-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
-; ZVFHMINLMULMAX2-RV64-NEXT: vse64.v v9, (a0)
-; ZVFHMINLMULMAX2-RV64-NEXT: vslidedown.vi v8, v9, 2
-; ZVFHMINLMULMAX2-RV64-NEXT: addi a0, a0, 8
-; ZVFHMINLMULMAX2-RV64-NEXT: vse32.v v8, (a0)
-; ZVFHMINLMULMAX2-RV64-NEXT: ret
-;
-; ZVFHMINLMULMAX1-RV32-LABEL: fdiv_v6f16:
-; ZVFHMINLMULMAX1-RV32: # %bb.0:
-; ZVFHMINLMULMAX1-RV32-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
-; ZVFHMINLMULMAX1-RV32-NEXT: vle16.v v8, (a1)
-; ZVFHMINLMULMAX1-RV32-NEXT: vle16.v v9, (a0)
-; ZVFHMINLMULMAX1-RV32-NEXT: vfwcvt.f.f.v v10, v8
-; ZVFHMINLMULMAX1-RV32-NEXT: vfwcvt.f.f.v v8, v9
-; ZVFHMINLMULMAX1-RV32-NEXT: vsetvli zero, zero, e32, m1, ta, ma
-; ZVFHMINLMULMAX1-RV32-NEXT: vfdiv.vv v8, v8, v10
-; ZVFHMINLMULMAX1-RV32-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
-; ZVFHMINLMULMAX1-RV32-NEXT: vfncvt.f.f.w v9, v8
-; ZVFHMINLMULMAX1-RV32-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
-; ZVFHMINLMULMAX1-RV32-NEXT: vslidedown.vi v8, v9, 2
-; ZVFHMINLMULMAX1-RV32-NEXT: addi a1, a0, 8
-; ZVFHMINLMULMAX1-RV32-NEXT: vse32.v v8, (a1)
-; ZVFHMINLMULMAX1-RV32-NEXT: vsetivli zero, 4, e16, mf4, ta, ma
-; ZVFHMINLMULMAX1-RV32-NEXT: vse16.v v9, (a0)
-; ZVFHMINLMULMAX1-RV32-NEXT: ret
-;
-; ZVFHMINLMULMAX1-RV64-LABEL: fdiv_v6f16:
-; ZVFHMINLMULMAX1-RV64: # %bb.0:
-; ZVFHMINLMULMAX1-RV64-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
-; ZVFHMINLMULMAX1-RV64-NEXT: vle16.v v8, (a1)
-; ZVFHMINLMULMAX1-RV64-NEXT: vle16.v v9, (a0)
-; ZVFHMINLMULMAX1-RV64-NEXT: vfwcvt.f.f.v v10, v8
-; ZVFHMINLMULMAX1-RV64-NEXT: vfwcvt.f.f.v v8, v9
-; ZVFHMINLMULMAX1-RV64-NEXT: vsetvli zero, zero, e32, m1, ta, ma
-; ZVFHMINLMULMAX1-RV64-NEXT: vfdiv.vv v8, v8, v10
-; ZVFHMINLMULMAX1-RV64-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
-; ZVFHMINLMULMAX1-RV64-NEXT: vfncvt.f.f.w v9, v8
-; ZVFHMINLMULMAX1-RV64-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
-; ZVFHMINLMULMAX1-RV64-NEXT: vse64.v v9, (a0)
-; ZVFHMINLMULMAX1-RV64-NEXT: vslidedown.vi v8, v9, 2
-; ZVFHMINLMULMAX1-RV64-NEXT: addi a0, a0, 8
-; ZVFHMINLMULMAX1-RV64-NEXT: vse32.v v8, (a0)
-; ZVFHMINLMULMAX1-RV64-NEXT: ret
+; ZVFHMIN-RV32-LABEL: fdiv_v6f16:
+; ZVFHMIN-RV32: # %bb.0:
+; ZVFHMIN-RV32-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
+; ZVFHMIN-RV32-NEXT: vle16.v v8, (a1)
+; ZVFHMIN-RV32-NEXT: vle16.v v9, (a0)
+; ZVFHMIN-RV32-NEXT: vfwcvt.f.f.v v10, v8
+; ZVFHMIN-RV32-NEXT: vfwcvt.f.f.v v8, v9
+; ZVFHMIN-RV32-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; ZVFHMIN-RV32-NEXT: vfdiv.vv v8, v8, v10
+; ZVFHMIN-RV32-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
+; ZVFHMIN-RV32-NEXT: vfncvt.f.f.w v9, v8
+; ZVFHMIN-RV32-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
+; ZVFHMIN-RV32-NEXT: vslidedown.vi v8, v9, 2
+; ZVFHMIN-RV32-NEXT: addi a1, a0, 8
+; ZVFHMIN-RV32-NEXT: vse32.v v8, (a1)
+; ZVFHMIN-RV32-NEXT: vsetivli zero, 4, e16, mf4, ta, ma
+; ZVFHMIN-RV32-NEXT: vse16.v v9, (a0)
+; ZVFHMIN-RV32-NEXT: ret
+;
+; ZVFHMIN-RV64-LABEL: fdiv_v6f16:
+; ZVFHMIN-RV64: # %bb.0:
+; ZVFHMIN-RV64-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
+; ZVFHMIN-RV64-NEXT: vle16.v v8, (a1)
+; ZVFHMIN-RV64-NEXT: vle16.v v9, (a0)
+; ZVFHMIN-RV64-NEXT: vfwcvt.f.f.v v10, v8
+; ZVFHMIN-RV64-NEXT: vfwcvt.f.f.v v8, v9
+; ZVFHMIN-RV64-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; ZVFHMIN-RV64-NEXT: vfdiv.vv v8, v8, v10
+; ZVFHMIN-RV64-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
+; ZVFHMIN-RV64-NEXT: vfncvt.f.f.w v9, v8
+; ZVFHMIN-RV64-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
+; ZVFHMIN-RV64-NEXT: vse64.v v9, (a0)
+; ZVFHMIN-RV64-NEXT: vslidedown.vi v8, v9, 2
+; ZVFHMIN-RV64-NEXT: addi a0, a0, 8
+; ZVFHMIN-RV64-NEXT: vse32.v v8, (a0)
+; ZVFHMIN-RV64-NEXT: ret
%a = load <6 x half>, ptr %x
%b = load <6 x half>, ptr %y
%c = fdiv <6 x half> %a, %b
@@ -706,71 +552,38 @@ define void @fneg_v6f16(ptr %x) {
; ZVFH-NEXT: vse16.v v8, (a0)
; ZVFH-NEXT: ret
;
-; ZVFHMINLMULMAX2-RV32-LABEL: fneg_v6f16:
-; ZVFHMINLMULMAX2-RV32: # %bb.0:
-; ZVFHMINLMULMAX2-RV32-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
-; ZVFHMINLMULMAX2-RV32-NEXT: vle16.v v8, (a0)
-; ZVFHMINLMULMAX2-RV32-NEXT: vfwcvt.f.f.v v9, v8
-; ZVFHMINLMULMAX2-RV32-NEXT: vsetvli zero, zero, e32, m1, ta, ma
-; ZVFHMINLMULMAX2-RV32-NEXT: vfneg.v v8, v9
-; ZVFHMINLMULMAX2-RV32-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
-; ZVFHMINLMULMAX2-RV32-NEXT: vfncvt.f.f.w v9, v8
-; ZVFHMINLMULMAX2-RV32-NEXT: addi a1, a0, 8
-; ZVFHMINLMULMAX2-RV32-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
-; ZVFHMINLMULMAX2-RV32-NEXT: vslidedown.vi v8, v9, 2
-; ZVFHMINLMULMAX2-RV32-NEXT: vse32.v v8, (a1)
-; ZVFHMINLMULMAX2-RV32-NEXT: vsetivli zero, 4, e16, mf4, ta, ma
-; ZVFHMINLMULMAX2-RV32-NEXT: vse16.v v9, (a0)
-; ZVFHMINLMULMAX2-RV32-NEXT: ret
-;
-; ZVFHMINLMULMAX2-RV64-LABEL: fneg_v6f16:
-; ZVFHMINLMULMAX2-RV64: # %bb.0:
-; ZVFHMINLMULMAX2-RV64-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
-; ZVFHMINLMULMAX2-RV64-NEXT: vle16.v v8, (a0)
-; ZVFHMINLMULMAX2-RV64-NEXT: vfwcvt.f.f.v v9, v8
-; ZVFHMINLMULMAX2-RV64-NEXT: vsetvli zero, zero, e32, m1, ta, ma
-; ZVFHMINLMULMAX2-RV64-NEXT: vfneg.v v8, v9
-; ZVFHMINLMULMAX2-RV64-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
-; ZVFHMINLMULMAX2-RV64-NEXT: vfncvt.f.f.w v9, v8
-; ZVFHMINLMULMAX2-RV64-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
-; ZVFHMINLMULMAX2-RV64-NEXT: vse64.v v9, (a0)
-; ZVFHMINLMULMAX2-RV64-NEXT: addi a0, a0, 8
-; ZVFHMINLMULMAX2-RV64-NEXT: vslidedown.vi v8, v9, 2
-; ZVFHMINLMULMAX2-RV64-NEXT: vse32.v v8, (a0)
-; ZVFHMINLMULMAX2-RV64-NEXT: ret
-;
-; ZVFHMINLMULMAX1-RV32-LABEL: fneg_v6f16:
-; ZVFHMINLMULMAX1-RV32: # %bb.0:
-; ZVFHMINLMULMAX1-RV32-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
-; ZVFHMINLMULMAX1-RV32-NEXT: vle16.v v8, (a0)
-; ZVFHMINLMULMAX1-RV32-NEXT: vfwcvt.f.f.v v9, v8
-; ZVFHMINLMULMAX1-RV32-NEXT: vsetvli zero, zero, e32, m1, ta, ma
-; ZVFHMINLMULMAX1-RV32-NEXT: vfneg.v v8, v9
-; ZVFHMINLMULMAX1-RV32-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
-; ZVFHMINLMULMAX1-RV32-NEXT: vfncvt.f.f.w v9, v8
-; ZVFHMINLMULMAX1-RV32-NEXT: addi a1, a0, 8
-; ZVFHMINLMULMAX1-RV32-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
-; ZVFHMINLMULMAX1-RV32-NEXT: vslidedown.vi v8, v9, 2
-; ZVFHMINLMULMAX1-RV32-NEXT: vse32.v v8, (a1)
-; ZVFHMINLMULMAX1-RV32-NEXT: vsetivli zero, 4, e16, mf4, ta, ma
-; ZVFHMINLMULMAX1-RV32-NEXT: vse16.v v9, (a0)
-; ZVFHMINLMULMAX1-RV32-NEXT: ret
-;
-; ZVFHMINLMULMAX1-RV64-LABEL: fneg_v6f16:
-; ZVFHMINLMULMAX1-RV64: # %bb.0:
-; ZVFHMINLMULMAX1-RV64-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
-; ZVFHMINLMULMAX1-RV64-NEXT: vle16.v v8, (a0)
-; ZVFHMINLMULMAX1-RV64-NEXT: vfwcvt.f.f.v v9, v8
-; ZVFHMINLMULMAX1-RV64-NEXT: vsetvli zero, zero, e32, m1, ta, ma
-; ZVFHMINLMULMAX1-RV64-NEXT: vfneg.v v8, v9
-; ZVFHMINLMULMAX1-RV64-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
-; ZVFHMINLMULMAX1-RV64-NEXT: vfncvt.f.f.w v9, v8
-; ZVFHMINLMULMAX1-RV64-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
-; ZVFHMINLMULMAX1-RV64-NEXT: vse64.v v9, (a0)
-; ZVFHMINLMULMAX1-RV64-NEXT: addi a0, a0, 8
-; ZVFHMINLMULMAX1-RV64-NEXT: vslidedown.vi v8, v9, 2
-; ZVFHMINLMULMAX1-RV64-NEXT: vse32.v v8, (a0)
-; ZVFHMINLMULMAX1-RV64-NEXT: ret
+; ZVFHMIN-RV32-LABEL: fneg_v6f16:
+; ZVFHMIN-RV32: # %bb.0:
+; ZVFHMIN-RV32-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
+; ZVFHMIN-RV32-NEXT: vle16.v v8, (a0)
+; ZVFHMIN-RV32-NEXT: vfwcvt.f.f.v v9, v8
+; ZVFHMIN-RV32-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; ZVFHMIN-RV32-NEXT: vfneg.v v8, v9
+; ZVFHMIN-RV32-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
+; ZVFHMIN-RV32-NEXT: vfncvt.f.f.w v9, v8
+; ZVFHMIN-RV32-NEXT: addi a1, a0, 8
+; ZVFHMIN-RV32-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
+; ZVFHMIN-RV32-NEXT: vslidedown.vi v8, v9, 2
+; ZVFHMIN-RV32-NEXT: vse32.v v8, (a1)
+; ZVFHMIN-RV32-NEXT: vsetivli zero, 4, e16, mf4, ta, ma
+; ZVFHMIN-RV32-NEXT: vse16.v v9, (a0)
+; ZVFHMIN-RV32-NEXT: ret
+;
+; ZVFHMIN-RV64-LABEL: fneg_v6f16:
+; ZVFHMIN-RV64: # %bb.0:
+; ZVFHMIN-RV64-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
+; ZVFHMIN-RV64-NEXT: vle16.v v8, (a0)
+; ZVFHMIN-RV64-NEXT: vfwcvt.f.f.v v9, v8
+; ZVFHMIN-RV64-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; ZVFHMIN-RV64-NEXT: vfneg.v v8, v9
+; ZVFHMIN-RV64-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
+; ZVFHMIN-RV64-NEXT: vfncvt.f.f.w v9, v8
+; ZVFHMIN-RV64-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
+; ZVFHMIN-RV64-NEXT: vse64.v v9, (a0)
+; ZVFHMIN-RV64-NEXT: addi a0, a0, 8
+; ZVFHMIN-RV64-NEXT: vslidedown.vi v8, v9, 2
+; ZVFHMIN-RV64-NEXT: vse32.v v8, (a0)
+; ZVFHMIN-RV64-NEXT: ret
%a = load <6 x half>, ptr %x
%b = fneg <6 x half> %a
store <6 x half> %b, ptr %x
@@ -851,71 +664,38 @@ define void @fabs_v6f16(ptr %x) {
; ZVFH-NEXT: vse16.v v8, (a0)
; ZVFH-NEXT: ret
;
-; ZVFHMINLMULMAX2-RV32-LABEL: fabs_v6f16:
-; ZVFHMINLMULMAX2-RV32: # %bb.0:
-; ZVFHMINLMULMAX2-RV32-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
-; ZVFHMINLMULMAX2-RV32-NEXT: vle16.v v8, (a0)
-; ZVFHMINLMULMAX2-RV32-NEXT: vfwcvt.f.f.v v9, v8
-; ZVFHMINLMULMAX2-RV32-NEXT: vsetvli zero, zero, e32, m1, ta, ma
-; ZVFHMINLMULMAX2-RV32-NEXT: vfabs.v v8, v9
-; ZVFHMINLMULMAX2-RV32-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
-; ZVFHMINLMULMAX2-RV32-NEXT: vfncvt.f.f.w v9, v8
-; ZVFHMINLMULMAX2-RV32-NEXT: addi a1, a0, 8
-; ZVFHMINLMULMAX2-RV32-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
-; ZVFHMINLMULMAX2-RV32-NEXT: vslidedown.vi v8, v9, 2
-; ZVFHMINLMULMAX2-RV32-NEXT: vse32.v v8, (a1)
-; ZVFHMINLMULMAX2-RV32-NEXT: vsetivli zero, 4, e16, mf4, ta, ma
-; ZVFHMINLMULMAX2-RV32-NEXT: vse16.v v9, (a0)
-; ZVFHMINLMULMAX2-RV32-NEXT: ret
-;
-; ZVFHMINLMULMAX2-RV64-LABEL: fabs_v6f16:
-; ZVFHMINLMULMAX2-RV64: # %bb.0:
-; ZVFHMINLMULMAX2-RV64-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
-; ZVFHMINLMULMAX2-RV64-NEXT: vle16.v v8, (a0)
-; ZVFHMINLMULMAX2-RV64-NEXT: vfwcvt.f.f.v v9, v8
-; ZVFHMINLMULMAX2-RV64-NEXT: vsetvli zero, zero, e32, m1, ta, ma
-; ZVFHMINLMULMAX2-RV64-NEXT: vfabs.v v8, v9
-; ZVFHMINLMULMAX2-RV64-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
-; ZVFHMINLMULMAX2-RV64-NEXT: vfncvt.f.f.w v9, v8
-; ZVFHMINLMULMAX2-RV64-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
-; ZVFHMINLMULMAX2-RV64-NEXT: vse64.v v9, (a0)
-; ZVFHMINLMULMAX2-RV64-NEXT: addi a0, a0, 8
-; ZVFHMINLMULMAX2-RV64-NEXT: vslidedown.vi v8, v9, 2
-; ZVFHMINLMULMAX2-RV64-NEXT: vse32.v v8, (a0)
-; ZVFHMINLMULMAX2-RV64-NEXT: ret
-;
-; ZVFHMINLMULMAX1-RV32-LABEL: fabs_v6f16:
-; ZVFHMINLMULMAX1-RV32: # %bb.0:
-; ZVFHMINLMULMAX1-RV32-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
-; ZVFHMINLMULMAX1-RV32-NEXT: vle16.v v8, (a0)
-; ZVFHMINLMULMAX1-RV32-NEXT: vfwcvt.f.f.v v9, v8
-; ZVFHMINLMULMAX1-RV32-NEXT: vsetvli zero, zero, e32, m1, ta, ma
-; ZVFHMINLMULMAX1-RV32-NEXT: vfabs.v v8, v9
-; ZVFHMINLMULMAX1-RV32-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
-; ZVFHMINLMULMAX1-RV32-NEXT: vfncvt.f.f.w v9, v8
-; ZVFHMINLMULMAX1-RV32-NEXT: addi a1, a0, 8
-; ZVFHMINLMULMAX1-RV32-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
-; ZVFHMINLMULMAX1-RV32-NEXT: vslidedown.vi v8, v9, 2
-; ZVFHMINLMULMAX1-RV32-NEXT: vse32.v v8, (a1)
-; ZVFHMINLMULMAX1-RV32-NEXT: vsetivli zero, 4, e16, mf4, ta, ma
-; ZVFHMINLMULMAX1-RV32-NEXT: vse16.v v9, (a0)
-; ZVFHMINLMULMAX1-RV32-NEXT: ret
-;
-; ZVFHMINLMULMAX1-RV64-LABEL: fabs_v6f16:
-; ZVFHMINLMULMAX1-RV64: # %bb.0:
-; ZVFHMINLMULMAX1-RV64-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
-; ZVFHMINLMULMAX1-RV64-NEXT: vle16.v v8, (a0)
-; ZVFHMINLMULMAX1-RV64-NEXT: vfwcvt.f.f.v v9, v8
-; ZVFHMINLMULMAX1-RV64-NEXT: vsetvli zero, zero, e32, m1, ta, ma
-; ZVFHMINLMULMAX1-RV64-NEXT: vfabs.v v8, v9
-; ZVFHMINLMULMAX1-RV64-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
-; ZVFHMINLMULMAX1-RV64-NEXT: vfncvt.f.f.w v9, v8
-; ZVFHMINLMULMAX1-RV64-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
-; ZVFHMINLMULMAX1-RV64-NEXT: vse64.v v9, (a0)
-; ZVFHMINLMULMAX1-RV64-NEXT: addi a0, a0, 8
-; ZVFHMINLMULMAX1-RV64-NEXT: vslidedown.vi v8, v9, 2
-; ZVFHMINLMULMAX1-RV64-NEXT: vse32.v v8, (a0)
-; ZVFHMINLMULMAX1-RV64-NEXT: ret
+; ZVFHMIN-RV32-LABEL: fabs_v6f16:
+; ZVFHMIN-RV32: # %bb.0:
+; ZVFHMIN-RV32-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
+; ZVFHMIN-RV32-NEXT: vle16.v v8, (a0)
+; ZVFHMIN-RV32-NEXT: vfwcvt.f.f.v v9, v8
+; ZVFHMIN-RV32-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; ZVFHMIN-RV32-NEXT: vfabs.v v8, v9
+; ZVFHMIN-RV32-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
+; ZVFHMIN-RV32-NEXT: vfncvt.f.f.w v9, v8
+; ZVFHMIN-RV32-NEXT: addi a1, a0, 8
+; ZVFHMIN-RV32-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
+; ZVFHMIN-RV32-NEXT: vslidedown.vi v8, v9, 2
+; ZVFHMIN-RV32-NEXT: vse32.v v8, (a1)
+; ZVFHMIN-RV32-NEXT: vsetivli zero, 4, e16, mf4, ta, ma
+; ZVFHMIN-RV32-NEXT: vse16.v v9, (a0)
+; ZVFHMIN-RV32-NEXT: ret
+;
+; ZVFHMIN-RV64-LABEL: fabs_v6f16:
+; ZVFHMIN-RV64: # %bb.0:
+; ZVFHMIN-RV64-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
+; ZVFHMIN-RV64-NEXT: vle16.v v8, (a0)
+; ZVFHMIN-RV64-NEXT: vfwcvt.f.f.v v9, v8
+; ZVFHMIN-RV64-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; ZVFHMIN-RV64-NEXT: vfabs.v v8, v9
+; ZVFHMIN-RV64-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
+; ZVFHMIN-RV64-NEXT: vfncvt.f.f.w v9, v8
+; ZVFHMIN-RV64-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
+; ZVFHMIN-RV64-NEXT: vse64.v v9, (a0)
+; ZVFHMIN-RV64-NEXT: addi a0, a0, 8
+; ZVFHMIN-RV64-NEXT: vslidedown.vi v8, v9, 2
+; ZVFHMIN-RV64-NEXT: vse32.v v8, (a0)
+; ZVFHMIN-RV64-NEXT: ret
%a = load <6 x half>, ptr %x
%b = call <6 x half> @llvm.fabs.v6f16(<6 x half> %a)
store <6 x half> %b, ptr %x
@@ -1004,79 +784,42 @@ define void @copysign_v6f16(ptr %x, ptr %y) {
; ZVFH-NEXT: vse16.v v8, (a0)
; ZVFH-NEXT: ret
;
-; ZVFHMINLMULMAX2-RV32-LABEL: copysign_v6f16:
-; ZVFHMINLMULMAX2-RV32: # %bb.0:
-; ZVFHMINLMULMAX2-RV32-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
-; ZVFHMINLMULMAX2-RV32-NEXT: vle16.v v8, (a1)
-; ZVFHMINLMULMAX2-RV32-NEXT: vle16.v v9, (a0)
-; ZVFHMINLMULMAX2-RV32-NEXT: vfwcvt.f.f.v v10, v8
-; ZVFHMINLMULMAX2-RV32-NEXT: vfwcvt.f.f.v v8, v9
-; ZVFHMINLMULMAX2-RV32-NEXT: vsetvli zero, zero, e32, m1, ta, ma
-; ZVFHMINLMULMAX2-RV32-NEXT: vfsgnj.vv v8, v8, v10
-; ZVFHMINLMULMAX2-RV32-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
-; ZVFHMINLMULMAX2-RV32-NEXT: vfncvt.f.f.w v9, v8
-; ZVFHMINLMULMAX2-RV32-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
-; ZVFHMINLMULMAX2-RV32-NEXT: vslidedown.vi v8, v9, 2
-; ZVFHMINLMULMAX2-RV32-NEXT: addi a1, a0, 8
-; ZVFHMINLMULMAX2-RV32-NEXT: vse32.v v8, (a1)
-; ZVFHMINLMULMAX2-RV32-NEXT: vsetivli zero, 4, e16, mf4, ta, ma
-; ZVFHMINLMULMAX2-RV32-NEXT: vse16.v v9, (a0)
-; ZVFHMINLMULMAX2-RV32-NEXT: ret
-;
-; ZVFHMINLMULMAX2-RV64-LABEL: copysign_v6f16:
-; ZVFHMINLMULMAX2-RV64: # %bb.0:
-; ZVFHMINLMULMAX2-RV64-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
-; ZVFHMINLMULMAX2-RV64-NEXT: vle16.v v8, (a1)
-; ZVFHMINLMULMAX2-RV64-NEXT: vle16.v v9, (a0)
-; ZVFHMINLMULMAX2-RV64-NEXT: vfwcvt.f.f.v v10, v8
-; ZVFHMINLMULMAX2-RV64-NEXT: vfwcvt.f.f.v v8, v9
-; ZVFHMINLMULMAX2-RV64-NEXT: vsetvli zero, zero, e32, m1, ta, ma
-; ZVFHMINLMULMAX2-RV64-NEXT: vfsgnj.vv v8, v8, v10
-; ZVFHMINLMULMAX2-RV64-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
-; ZVFHMINLMULMAX2-RV64-NEXT: vfncvt.f.f.w v9, v8
-; ZVFHMINLMULMAX2-RV64-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
-; ZVFHMINLMULMAX2-RV64-NEXT: vse64.v v9, (a0)
-; ZVFHMINLMULMAX2-RV64-NEXT: vslidedown.vi v8, v9, 2
-; ZVFHMINLMULMAX2-RV64-NEXT: addi a0, a0, 8
-; ZVFHMINLMULMAX2-RV64-NEXT: vse32.v v8, (a0)
-; ZVFHMINLMULMAX2-RV64-NEXT: ret
-;
-; ZVFHMINLMULMAX1-RV32-LABEL: copysign_v6f16:
-; ZVFHMINLMULMAX1-RV32: # %bb.0:
-; ZVFHMINLMULMAX1-RV32-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
-; ZVFHMINLMULMAX1-RV32-NEXT: vle16.v v8, (a1)
-; ZVFHMINLMULMAX1-RV32-NEXT: vle16.v v9, (a0)
-; ZVFHMINLMULMAX1-RV32-NEXT: vfwcvt.f.f.v v10, v8
-; ZVFHMINLMULMAX1-RV32-NEXT: vfwcvt.f.f.v v8, v9
-; ZVFHMINLMULMAX1-RV32-NEXT: vsetvli zero, zero, e32, m1, ta, ma
-; ZVFHMINLMULMAX1-RV32-NEXT: vfsgnj.vv v8, v8, v10
-; ZVFHMINLMULMAX1-RV32-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
-; ZVFHMINLMULMAX1-RV32-NEXT: vfncvt.f.f.w v9, v8
-; ZVFHMINLMULMAX1-RV32-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
-; ZVFHMINLMULMAX1-RV32-NEXT: vslidedown.vi v8, v9, 2
-; ZVFHMINLMULMAX1-RV32-NEXT: addi a1, a0, 8
-; ZVFHMINLMULMAX1-RV32-NEXT: vse32.v v8, (a1)
-; ZVFHMINLMULMAX1-RV32-NEXT: vsetivli zero, 4, e16, mf4, ta, ma
-; ZVFHMINLMULMAX1-RV32-NEXT: vse16.v v9, (a0)
-; ZVFHMINLMULMAX1-RV32-NEXT: ret
-;
-; ZVFHMINLMULMAX1-RV64-LABEL: copysign_v6f16:
-; ZVFHMINLMULMAX1-RV64: # %bb.0:
-; ZVFHMINLMULMAX1-RV64-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
-; ZVFHMINLMULMAX1-RV64-NEXT: vle16.v v8, (a1)
-; ZVFHMINLMULMAX1-RV64-NEXT: vle16.v v9, (a0)
-; ZVFHMINLMULMAX1-RV64-NEXT: vfwcvt.f.f.v v10, v8
-; ZVFHMINLMULMAX1-RV64-NEXT: vfwcvt.f.f.v v8, v9
-; ZVFHMINLMULMAX1-RV64-NEXT: vsetvli zero, zero, e32, m1, ta, ma
-; ZVFHMINLMULMAX1-RV64-NEXT: vfsgnj.vv v8, v8, v10
-; ZVFHMINLMULMAX1-RV64-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
-; ZVFHMINLMULMAX1-RV64-NEXT: vfncvt.f.f.w v9, v8
-; ZVFHMINLMULMAX1-RV64-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
-; ZVFHMINLMULMAX1-RV64-NEXT: vse64.v v9, (a0)
-; ZVFHMINLMULMAX1-RV64-NEXT: vslidedown.vi v8, v9, 2
-; ZVFHMINLMULMAX1-RV64-NEXT: addi a0, a0, 8
-; ZVFHMINLMULMAX1-RV64-NEXT: vse32.v v8, (a0)
-; ZVFHMINLMULMAX1-RV64-NEXT: ret
+; ZVFHMIN-RV32-LABEL: copysign_v6f16:
+; ZVFHMIN-RV32: # %bb.0:
+; ZVFHMIN-RV32-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
+; ZVFHMIN-RV32-NEXT: vle16.v v8, (a1)
+; ZVFHMIN-RV32-NEXT: vle16.v v9, (a0)
+; ZVFHMIN-RV32-NEXT: vfwcvt.f.f.v v10, v8
+; ZVFHMIN-RV32-NEXT: vfwcvt.f.f.v v8, v9
+; ZVFHMIN-RV32-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; ZVFHMIN-RV32-NEXT: vfsgnj.vv v8, v8, v10
+; ZVFHMIN-RV32-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
+; ZVFHMIN-RV32-NEXT: vfncvt.f.f.w v9, v8
+; ZVFHMIN-RV32-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
+; ZVFHMIN-RV32-NEXT: vslidedown.vi v8, v9, 2
+; ZVFHMIN-RV32-NEXT: addi a1, a0, 8
+; ZVFHMIN-RV32-NEXT: vse32.v v8, (a1)
+; ZVFHMIN-RV32-NEXT: vsetivli zero, 4, e16, mf4, ta, ma
+; ZVFHMIN-RV32-NEXT: vse16.v v9, (a0)
+; ZVFHMIN-RV32-NEXT: ret
+;
+; ZVFHMIN-RV64-LABEL: copysign_v6f16:
+; ZVFHMIN-RV64: # %bb.0:
+; ZVFHMIN-RV64-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
+; ZVFHMIN-RV64-NEXT: vle16.v v8, (a1)
+; ZVFHMIN-RV64-NEXT: vle16.v v9, (a0)
+; ZVFHMIN-RV64-NEXT: vfwcvt.f.f.v v10, v8
+; ZVFHMIN-RV64-NEXT: vfwcvt.f.f.v v8, v9
+; ZVFHMIN-RV64-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; ZVFHMIN-RV64-NEXT: vfsgnj.vv v8, v8, v10
+; ZVFHMIN-RV64-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
+; ZVFHMIN-RV64-NEXT: vfncvt.f.f.w v9, v8
+; ZVFHMIN-RV64-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
+; ZVFHMIN-RV64-NEXT: vse64.v v9, (a0)
+; ZVFHMIN-RV64-NEXT: vslidedown.vi v8, v9, 2
+; ZVFHMIN-RV64-NEXT: addi a0, a0, 8
+; ZVFHMIN-RV64-NEXT: vse32.v v8, (a0)
+; ZVFHMIN-RV64-NEXT: ret
%a = load <6 x half>, ptr %x
%b = load <6 x half>, ptr %y
%c = call <6 x half> @llvm.copysign.v6f16(<6 x half> %a, <6 x half> %b)
@@ -1174,99 +917,52 @@ define void @copysign_vf_v6f16(ptr %x, half %y) {
; ZVFH-NEXT: vse16.v v8, (a0)
; ZVFH-NEXT: ret
;
-; ZVFHMINLMULMAX2-RV32-LABEL: copysign_vf_v6f16:
-; ZVFHMINLMULMAX2-RV32: # %bb.0:
-; ZVFHMINLMULMAX2-RV32-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
-; ZVFHMINLMULMAX2-RV32-NEXT: vle16.v v8, (a0)
-; ZVFHMINLMULMAX2-RV32-NEXT: fcvt.s.h fa5, fa0
-; ZVFHMINLMULMAX2-RV32-NEXT: vsetvli a1, zero, e32, m1, ta, ma
-; ZVFHMINLMULMAX2-RV32-NEXT: vfmv.v.f v9, fa5
-; ZVFHMINLMULMAX2-RV32-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
-; ZVFHMINLMULMAX2-RV32-NEXT: vfncvt.f.f.w v10, v9
-; ZVFHMINLMULMAX2-RV32-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
-; ZVFHMINLMULMAX2-RV32-NEXT: vfwcvt.f.f.v v9, v8
-; ZVFHMINLMULMAX2-RV32-NEXT: vfwcvt.f.f.v v8, v10
-; ZVFHMINLMULMAX2-RV32-NEXT: vsetvli zero, zero, e32, m1, ta, ma
-; ZVFHMINLMULMAX2-RV32-NEXT: vfsgnj.vv v8, v9, v8
-; ZVFHMINLMULMAX2-RV32-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
-; ZVFHMINLMULMAX2-RV32-NEXT: vfncvt.f.f.w v9, v8
-; ZVFHMINLMULMAX2-RV32-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
-; ZVFHMINLMULMAX2-RV32-NEXT: vslidedown.vi v8, v9, 2
-; ZVFHMINLMULMAX2-RV32-NEXT: addi a1, a0, 8
-; ZVFHMINLMULMAX2-RV32-NEXT: vse32.v v8, (a1)
-; ZVFHMINLMULMAX2-RV32-NEXT: vsetivli zero, 4, e16, mf4, ta, ma
-; ZVFHMINLMULMAX2-RV32-NEXT: vse16.v v9, (a0)
-; ZVFHMINLMULMAX2-RV32-NEXT: ret
-;
-; ZVFHMINLMULMAX2-RV64-LABEL: copysign_vf_v6f16:
-; ZVFHMINLMULMAX2-RV64: # %bb.0:
-; ZVFHMINLMULMAX2-RV64-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
-; ZVFHMINLMULMAX2-RV64-NEXT: vle16.v v8, (a0)
-; ZVFHMINLMULMAX2-RV64-NEXT: fcvt.s.h fa5, fa0
-; ZVFHMINLMULMAX2-RV64-NEXT: vsetvli a1, zero, e32, m1, ta, ma
-; ZVFHMINLMULMAX2-RV64-NEXT: vfmv.v.f v9, fa5
-; ZVFHMINLMULMAX2-RV64-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
-; ZVFHMINLMULMAX2-RV64-NEXT: vfncvt.f.f.w v10, v9
-; ZVFHMINLMULMAX2-RV64-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
-; ZVFHMINLMULMAX2-RV64-NEXT: vfwcvt.f.f.v v9, v8
-; ZVFHMINLMULMAX2-RV64-NEXT: vfwcvt.f.f.v v8, v10
-; ZVFHMINLMULMAX2-RV64-NEXT: vsetvli zero, zero, e32, m1, ta, ma
-; ZVFHMINLMULMAX2-RV64-NEXT: vfsgnj.vv v8, v9, v8
-; ZVFHMINLMULMAX2-RV64-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
-; ZVFHMINLMULMAX2-RV64-NEXT: vfncvt.f.f.w v9, v8
-; ZVFHMINLMULMAX2-RV64-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
-; ZVFHMINLMULMAX2-RV64-NEXT: vse64.v v9, (a0)
-; ZVFHMINLMULMAX2-RV64-NEXT: vslidedown.vi v8, v9, 2
-; ZVFHMINLMULMAX2-RV64-NEXT: addi a0, a0, 8
-; ZVFHMINLMULMAX2-RV64-NEXT: vse32.v v8, (a0)
-; ZVFHMINLMULMAX2-RV64-NEXT: ret
-;
-; ZVFHMINLMULMAX1-RV32-LABEL: copysign_vf_v6f16:
-; ZVFHMINLMULMAX1-RV32: # %bb.0:
-; ZVFHMINLMULMAX1-RV32-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
-; ZVFHMINLMULMAX1-RV32-NEXT: vle16.v v8, (a0)
-; ZVFHMINLMULMAX1-RV32-NEXT: fcvt.s.h fa5, fa0
-; ZVFHMINLMULMAX1-RV32-NEXT: vsetvli a1, zero, e32, m1, ta, ma
-; ZVFHMINLMULMAX1-RV32-NEXT: vfmv.v.f v9, fa5
-; ZVFHMINLMULMAX1-RV32-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
-; ZVFHMINLMULMAX1-RV32-NEXT: vfncvt.f.f.w v10, v9
-; ZVFHMINLMULMAX1-RV32-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
-; ZVFHMINLMULMAX1-RV32-NEXT: vfwcvt.f.f.v v9, v8
-; ZVFHMINLMULMAX1-RV32-NEXT: vfwcvt.f.f.v v8, v10
-; ZVFHMINLMULMAX1-RV32-NEXT: vsetvli zero, zero, e32, m1, ta, ma
-; ZVFHMINLMULMAX1-RV32-NEXT: vfsgnj.vv v8, v9, v8
-; ZVFHMINLMULMAX1-RV32-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
-; ZVFHMINLMULMAX1-RV32-NEXT: vfncvt.f.f.w v9, v8
-; ZVFHMINLMULMAX1-RV32-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
-; ZVFHMINLMULMAX1-RV32-NEXT: vslidedown.vi v8, v9, 2
-; ZVFHMINLMULMAX1-RV32-NEXT: addi a1, a0, 8
-; ZVFHMINLMULMAX1-RV32-NEXT: vse32.v v8, (a1)
-; ZVFHMINLMULMAX1-RV32-NEXT: vsetivli zero, 4, e16, mf4, ta, ma
-; ZVFHMINLMULMAX1-RV32-NEXT: vse16.v v9, (a0)
-; ZVFHMINLMULMAX1-RV32-NEXT: ret
-;
-; ZVFHMINLMULMAX1-RV64-LABEL: copysign_vf_v6f16:
-; ZVFHMINLMULMAX1-RV64: # %bb.0:
-; ZVFHMINLMULMAX1-RV64-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
-; ZVFHMINLMULMAX1-RV64-NEXT: vle16.v v8, (a0)
-; ZVFHMINLMULMAX1-RV64-NEXT: fcvt.s.h fa5, fa0
-; ZVFHMINLMULMAX1-RV64-NEXT: vsetvli a1, zero, e32, m1, ta, ma
-; ZVFHMINLMULMAX1-RV64-NEXT: vfmv.v.f v9, fa5
-; ZVFHMINLMULMAX1-RV64-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
-; ZVFHMINLMULMAX1-RV64-NEXT: vfncvt.f.f.w v10, v9
-; ZVFHMINLMULMAX1-RV64-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
-; ZVFHMINLMULMAX1-RV64-NEXT: vfwcvt.f.f.v v9, v8
-; ZVFHMINLMULMAX1-RV64-NEXT: vfwcvt.f.f.v v8, v10
-; ZVFHMINLMULMAX1-RV64-NEXT: vsetvli zero, zero, e32, m1, ta, ma
-; ZVFHMINLMULMAX1-RV64-NEXT: vfsgnj.vv v8, v9, v8
-; ZVFHMINLMULMAX1-RV64-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
-; ZVFHMINLMULMAX1-RV64-NEXT: vfncvt.f.f.w v9, v8
-; ZVFHMINLMULMAX1-RV64-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
-; ZVFHMINLMULMAX1-RV64-NEXT: vse64.v v9, (a0)
-; ZVFHMINLMULMAX1-RV64-NEXT: vslidedown.vi v8, v9, 2
-; ZVFHMINLMULMAX1-RV64-NEXT: addi a0, a0, 8
-; ZVFHMINLMULMAX1-RV64-NEXT: vse32.v v8, (a0)
-; ZVFHMINLMULMAX1-RV64-NEXT: ret
+; ZVFHMIN-RV32-LABEL: copysign_vf_v6f16:
+; ZVFHMIN-RV32: # %bb.0:
+; ZVFHMIN-RV32-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
+; ZVFHMIN-RV32-NEXT: vle16.v v8, (a0)
+; ZVFHMIN-RV32-NEXT: fcvt.s.h fa5, fa0
+; ZVFHMIN-RV32-NEXT: vsetvli a1, zero, e32, m1, ta, ma
+; ZVFHMIN-RV32-NEXT: vfmv.v.f v9, fa5
+; ZVFHMIN-RV32-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
+; ZVFHMIN-RV32-NEXT: vfncvt.f.f.w v10, v9
+; ZVFHMIN-RV32-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
+; ZVFHMIN-RV32-NEXT: vfwcvt.f.f.v v9, v8
+; ZVFHMIN-RV32-NEXT: vfwcvt.f.f.v v8, v10
+; ZVFHMIN-RV32-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; ZVFHMIN-RV32-NEXT: vfsgnj.vv v8, v9, v8
+; ZVFHMIN-RV32-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
+; ZVFHMIN-RV32-NEXT: vfncvt.f.f.w v9, v8
+; ZVFHMIN-RV32-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
+; ZVFHMIN-RV32-NEXT: vslidedown.vi v8, v9, 2
+; ZVFHMIN-RV32-NEXT: addi a1, a0, 8
+; ZVFHMIN-RV32-NEXT: vse32.v v8, (a1)
+; ZVFHMIN-RV32-NEXT: vsetivli zero, 4, e16, mf4, ta, ma
+; ZVFHMIN-RV32-NEXT: vse16.v v9, (a0)
+; ZVFHMIN-RV32-NEXT: ret
+;
+; ZVFHMIN-RV64-LABEL: copysign_vf_v6f16:
+; ZVFHMIN-RV64: # %bb.0:
+; ZVFHMIN-RV64-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
+; ZVFHMIN-RV64-NEXT: vle16.v v8, (a0)
+; ZVFHMIN-RV64-NEXT: fcvt.s.h fa5, fa0
+; ZVFHMIN-RV64-NEXT: vsetvli a1, zero, e32, m1, ta, ma
+; ZVFHMIN-RV64-NEXT: vfmv.v.f v9, fa5
+; ZVFHMIN-RV64-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
+; ZVFHMIN-RV64-NEXT: vfncvt.f.f.w v10, v9
+; ZVFHMIN-RV64-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
+; ZVFHMIN-RV64-NEXT: vfwcvt.f.f.v v9, v8
+; ZVFHMIN-RV64-NEXT: vfwcvt.f.f.v v8, v10
+; ZVFHMIN-RV64-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; ZVFHMIN-RV64-NEXT: vfsgnj.vv v8, v9, v8
+; ZVFHMIN-RV64-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
+; ZVFHMIN-RV64-NEXT: vfncvt.f.f.w v9, v8
+; ZVFHMIN-RV64-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
+; ZVFHMIN-RV64-NEXT: vse64.v v9, (a0)
+; ZVFHMIN-RV64-NEXT: vslidedown.vi v8, v9, 2
+; ZVFHMIN-RV64-NEXT: addi a0, a0, 8
+; ZVFHMIN-RV64-NEXT: vse32.v v8, (a0)
+; ZVFHMIN-RV64-NEXT: ret
%a = load <6 x half>, ptr %x
%b = insertelement <6 x half> poison, half %y, i32 0
%c = shufflevector <6 x half> %b, <6 x half> poison, <6 x i32> zeroinitializer
@@ -1363,99 +1059,52 @@ define void @copysign_neg_v6f16(ptr %x, ptr %y) {
; ZVFH-NEXT: vse16.v v8, (a0)
; ZVFH-NEXT: ret
;
-; ZVFHMINLMULMAX2-RV32-LABEL: copysign_neg_v6f16:
-; ZVFHMINLMULMAX2-RV32: # %bb.0:
-; ZVFHMINLMULMAX2-RV32-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
-; ZVFHMINLMULMAX2-RV32-NEXT: vle16.v v8, (a1)
-; ZVFHMINLMULMAX2-RV32-NEXT: vle16.v v9, (a0)
-; ZVFHMINLMULMAX2-RV32-NEXT: vfwcvt.f.f.v v10, v8
-; ZVFHMINLMULMAX2-RV32-NEXT: vsetvli zero, zero, e32, m1, ta, ma
-; ZVFHMINLMULMAX2-RV32-NEXT: vfneg.v v8, v10
-; ZVFHMINLMULMAX2-RV32-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
-; ZVFHMINLMULMAX2-RV32-NEXT: vfncvt.f.f.w v10, v8
-; ZVFHMINLMULMAX2-RV32-NEXT: vfwcvt.f.f.v v8, v9
-; ZVFHMINLMULMAX2-RV32-NEXT: vfwcvt.f.f.v v9, v10
-; ZVFHMINLMULMAX2-RV32-NEXT: vsetvli zero, zero, e32, m1, ta, ma
-; ZVFHMINLMULMAX2-RV32-NEXT: vfsgnj.vv v8, v8, v9
-; ZVFHMINLMULMAX2-RV32-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
-; ZVFHMINLMULMAX2-RV32-NEXT: vfncvt.f.f.w v9, v8
-; ZVFHMINLMULMAX2-RV32-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
-; ZVFHMINLMULMAX2-RV32-NEXT: vslidedown.vi v8, v9, 2
-; ZVFHMINLMULMAX2-RV32-NEXT: addi a1, a0, 8
-; ZVFHMINLMULMAX2-RV32-NEXT: vse32.v v8, (a1)
-; ZVFHMINLMULMAX2-RV32-NEXT: vsetivli zero, 4, e16, mf4, ta, ma
-; ZVFHMINLMULMAX2-RV32-NEXT: vse16.v v9, (a0)
-; ZVFHMINLMULMAX2-RV32-NEXT: ret
-;
-; ZVFHMINLMULMAX2-RV64-LABEL: copysign_neg_v6f16:
-; ZVFHMINLMULMAX2-RV64: # %bb.0:
-; ZVFHMINLMULMAX2-RV64-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
-; ZVFHMINLMULMAX2-RV64-NEXT: vle16.v v8, (a1)
-; ZVFHMINLMULMAX2-RV64-NEXT: vle16.v v9, (a0)
-; ZVFHMINLMULMAX2-RV64-NEXT: vfwcvt.f.f.v v10, v8
-; ZVFHMINLMULMAX2-RV64-NEXT: vsetvli zero, zero, e32, m1, ta, ma
-; ZVFHMINLMULMAX2-RV64-NEXT: vfneg.v v8, v10
-; ZVFHMINLMULMAX2-RV64-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
-; ZVFHMINLMULMAX2-RV64-NEXT: vfncvt.f.f.w v10, v8
-; ZVFHMINLMULMAX2-RV64-NEXT: vfwcvt.f.f.v v8, v9
-; ZVFHMINLMULMAX2-RV64-NEXT: vfwcvt.f.f.v v9, v10
-; ZVFHMINLMULMAX2-RV64-NEXT: vsetvli zero, zero, e32, m1, ta, ma
-; ZVFHMINLMULMAX2-RV64-NEXT: vfsgnj.vv v8, v8, v9
-; ZVFHMINLMULMAX2-RV64-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
-; ZVFHMINLMULMAX2-RV64-NEXT: vfncvt.f.f.w v9, v8
-; ZVFHMINLMULMAX2-RV64-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
-; ZVFHMINLMULMAX2-RV64-NEXT: vse64.v v9, (a0)
-; ZVFHMINLMULMAX2-RV64-NEXT: vslidedown.vi v8, v9, 2
-; ZVFHMINLMULMAX2-RV64-NEXT: addi a0, a0, 8
-; ZVFHMINLMULMAX2-RV64-NEXT: vse32.v v8, (a0)
-; ZVFHMINLMULMAX2-RV64-NEXT: ret
-;
-; ZVFHMINLMULMAX1-RV32-LABEL: copysign_neg_v6f16:
-; ZVFHMINLMULMAX1-RV32: # %bb.0:
-; ZVFHMINLMULMAX1-RV32-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
-; ZVFHMINLMULMAX1-RV32-NEXT: vle16.v v8, (a1)
-; ZVFHMINLMULMAX1-RV32-NEXT: vle16.v v9, (a0)
-; ZVFHMINLMULMAX1-RV32-NEXT: vfwcvt.f.f.v v10, v8
-; ZVFHMINLMULMAX1-RV32-NEXT: vsetvli zero, zero, e32, m1, ta, ma
-; ZVFHMINLMULMAX1-RV32-NEXT: vfneg.v v8, v10
-; ZVFHMINLMULMAX1-RV32-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
-; ZVFHMINLMULMAX1-RV32-NEXT: vfncvt.f.f.w v10, v8
-; ZVFHMINLMULMAX1-RV32-NEXT: vfwcvt.f.f.v v8, v9
-; ZVFHMINLMULMAX1-RV32-NEXT: vfwcvt.f.f.v v9, v10
-; ZVFHMINLMULMAX1-RV32-NEXT: vsetvli zero, zero, e32, m1, ta, ma
-; ZVFHMINLMULMAX1-RV32-NEXT: vfsgnj.vv v8, v8, v9
-; ZVFHMINLMULMAX1-RV32-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
-; ZVFHMINLMULMAX1-RV32-NEXT: vfncvt.f.f.w v9, v8
-; ZVFHMINLMULMAX1-RV32-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
-; ZVFHMINLMULMAX1-RV32-NEXT: vslidedown.vi v8, v9, 2
-; ZVFHMINLMULMAX1-RV32-NEXT: addi a1, a0, 8
-; ZVFHMINLMULMAX1-RV32-NEXT: vse32.v v8, (a1)
-; ZVFHMINLMULMAX1-RV32-NEXT: vsetivli zero, 4, e16, mf4, ta, ma
-; ZVFHMINLMULMAX1-RV32-NEXT: vse16.v v9, (a0)
-; ZVFHMINLMULMAX1-RV32-NEXT: ret
-;
-; ZVFHMINLMULMAX1-RV64-LABEL: copysign_neg_v6f16:
-; ZVFHMINLMULMAX1-RV64: # %bb.0:
-; ZVFHMINLMULMAX1-RV64-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
-; ZVFHMINLMULMAX1-RV64-NEXT: vle16.v v8, (a1)
-; ZVFHMINLMULMAX1-RV64-NEXT: vle16.v v9, (a0)
-; ZVFHMINLMULMAX1-RV64-NEXT: vfwcvt.f.f.v v10, v8
-; ZVFHMINLMULMAX1-RV64-NEXT: vsetvli zero, zero, e32, m1, ta, ma
-; ZVFHMINLMULMAX1-RV64-NEXT: vfneg.v v8, v10
-; ZVFHMINLMULMAX1-RV64-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
-; ZVFHMINLMULMAX1-RV64-NEXT: vfncvt.f.f.w v10, v8
-; ZVFHMINLMULMAX1-RV64-NEXT: vfwcvt.f.f.v v8, v9
-; ZVFHMINLMULMAX1-RV64-NEXT: vfwcvt.f.f.v v9, v10
-; ZVFHMINLMULMAX1-RV64-NEXT: vsetvli zero, zero, e32, m1, ta, ma
-; ZVFHMINLMULMAX1-RV64-NEXT: vfsgnj.vv v8, v8, v9
-; ZVFHMINLMULMAX1-RV64-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
-; ZVFHMINLMULMAX1-RV64-NEXT: vfncvt.f.f.w v9, v8
-; ZVFHMINLMULMAX1-RV64-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
-; ZVFHMINLMULMAX1-RV64-NEXT: vse64.v v9, (a0)
-; ZVFHMINLMULMAX1-RV64-NEXT: vslidedown.vi v8, v9, 2
-; ZVFHMINLMULMAX1-RV64-NEXT: addi a0, a0, 8
-; ZVFHMINLMULMAX1-RV64-NEXT: vse32.v v8, (a0)
-; ZVFHMINLMULMAX1-RV64-NEXT: ret
+; ZVFHMIN-RV32-LABEL: copysign_neg_v6f16:
+; ZVFHMIN-RV32: # %bb.0:
+; ZVFHMIN-RV32-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
+; ZVFHMIN-RV32-NEXT: vle16.v v8, (a1)
+; ZVFHMIN-RV32-NEXT: vle16.v v9, (a0)
+; ZVFHMIN-RV32-NEXT: vfwcvt.f.f.v v10, v8
+; ZVFHMIN-RV32-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; ZVFHMIN-RV32-NEXT: vfneg.v v8, v10
+; ZVFHMIN-RV32-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
+; ZVFHMIN-RV32-NEXT: vfncvt.f.f.w v10, v8
+; ZVFHMIN-RV32-NEXT: vfwcvt.f.f.v v8, v9
+; ZVFHMIN-RV32-NEXT: vfwcvt.f.f.v v9, v10
+; ZVFHMIN-RV32-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; ZVFHMIN-RV32-NEXT: vfsgnj.vv v8, v8, v9
+; ZVFHMIN-RV32-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
+; ZVFHMIN-RV32-NEXT: vfncvt.f.f.w v9, v8
+; ZVFHMIN-RV32-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
+; ZVFHMIN-RV32-NEXT: vslidedown.vi v8, v9, 2
+; ZVFHMIN-RV32-NEXT: addi a1, a0, 8
+; ZVFHMIN-RV32-NEXT: vse32.v v8, (a1)
+; ZVFHMIN-RV32-NEXT: vsetivli zero, 4, e16, mf4, ta, ma
+; ZVFHMIN-RV32-NEXT: vse16.v v9, (a0)
+; ZVFHMIN-RV32-NEXT: ret
+;
+; ZVFHMIN-RV64-LABEL: copysign_neg_v6f16:
+; ZVFHMIN-RV64: # %bb.0:
+; ZVFHMIN-RV64-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
+; ZVFHMIN-RV64-NEXT: vle16.v v8, (a1)
+; ZVFHMIN-RV64-NEXT: vle16.v v9, (a0)
+; ZVFHMIN-RV64-NEXT: vfwcvt.f.f.v v10, v8
+; ZVFHMIN-RV64-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; ZVFHMIN-RV64-NEXT: vfneg.v v8, v10
+; ZVFHMIN-RV64-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
+; ZVFHMIN-RV64-NEXT: vfncvt.f.f.w v10, v8
+; ZVFHMIN-RV64-NEXT: vfwcvt.f.f.v v8, v9
+; ZVFHMIN-RV64-NEXT: vfwcvt.f.f.v v9, v10
+; ZVFHMIN-RV64-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; ZVFHMIN-RV64-NEXT: vfsgnj.vv v8, v8, v9
+; ZVFHMIN-RV64-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
+; ZVFHMIN-RV64-NEXT: vfncvt.f.f.w v9, v8
+; ZVFHMIN-RV64-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
+; ZVFHMIN-RV64-NEXT: vse64.v v9, (a0)
+; ZVFHMIN-RV64-NEXT: vslidedown.vi v8, v9, 2
+; ZVFHMIN-RV64-NEXT: addi a0, a0, 8
+; ZVFHMIN-RV64-NEXT: vse32.v v8, (a0)
+; ZVFHMIN-RV64-NEXT: ret
%a = load <6 x half>, ptr %x
%b = load <6 x half>, ptr %y
%c = fneg <6 x half> %b
@@ -1560,133 +1209,69 @@ define void @copysign_neg_trunc_v3f16_v3f32(ptr %x, ptr %y) {
; ZVFH-NEXT: vse16.v v8, (a0)
; ZVFH-NEXT: ret
;
-; ZVFHMINLMULMAX2-RV32-LABEL: copysign_neg_trunc_v3f16_v3f32:
-; ZVFHMINLMULMAX2-RV32: # %bb.0:
-; ZVFHMINLMULMAX2-RV32-NEXT: addi sp, sp, -16
-; ZVFHMINLMULMAX2-RV32-NEXT: .cfi_def_cfa_offset 16
-; ZVFHMINLMULMAX2-RV32-NEXT: vsetivli zero, 4, e16, mf4, ta, ma
-; ZVFHMINLMULMAX2-RV32-NEXT: vle16.v v8, (a0)
-; ZVFHMINLMULMAX2-RV32-NEXT: vsetivli zero, 3, e32, mf2, ta, ma
-; ZVFHMINLMULMAX2-RV32-NEXT: vle32.v v9, (a1)
-; ZVFHMINLMULMAX2-RV32-NEXT: vsetivli zero, 4, e16, mf4, ta, ma
-; ZVFHMINLMULMAX2-RV32-NEXT: vfwcvt.f.f.v v10, v8
-; ZVFHMINLMULMAX2-RV32-NEXT: vfncvt.f.f.w v8, v9
-; ZVFHMINLMULMAX2-RV32-NEXT: vfwcvt.f.f.v v9, v8
-; ZVFHMINLMULMAX2-RV32-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
-; ZVFHMINLMULMAX2-RV32-NEXT: vfneg.v v8, v9
-; ZVFHMINLMULMAX2-RV32-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
-; ZVFHMINLMULMAX2-RV32-NEXT: vfncvt.f.f.w v9, v8
-; ZVFHMINLMULMAX2-RV32-NEXT: vfwcvt.f.f.v v8, v9
-; ZVFHMINLMULMAX2-RV32-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
-; ZVFHMINLMULMAX2-RV32-NEXT: vfsgnj.vv v8, v10, v8
-; ZVFHMINLMULMAX2-RV32-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
-; ZVFHMINLMULMAX2-RV32-NEXT: vfncvt.f.f.w v9, v8
-; ZVFHMINLMULMAX2-RV32-NEXT: addi a1, sp, 8
-; ZVFHMINLMULMAX2-RV32-NEXT: vse16.v v9, (a1)
-; ZVFHMINLMULMAX2-RV32-NEXT: flh fa5, 12(sp)
-; ZVFHMINLMULMAX2-RV32-NEXT: fsh fa5, 4(a0)
-; ZVFHMINLMULMAX2-RV32-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
-; ZVFHMINLMULMAX2-RV32-NEXT: vse32.v v9, (a0)
-; ZVFHMINLMULMAX2-RV32-NEXT: addi sp, sp, 16
-; ZVFHMINLMULMAX2-RV32-NEXT: ret
-;
-; ZVFHMINLMULMAX2-RV64-LABEL: copysign_neg_trunc_v3f16_v3f32:
-; ZVFHMINLMULMAX2-RV64: # %bb.0:
-; ZVFHMINLMULMAX2-RV64-NEXT: addi sp, sp, -16
-; ZVFHMINLMULMAX2-RV64-NEXT: .cfi_def_cfa_offset 16
-; ZVFHMINLMULMAX2-RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma
-; ZVFHMINLMULMAX2-RV64-NEXT: vle64.v v8, (a0)
-; ZVFHMINLMULMAX2-RV64-NEXT: mv a2, sp
-; ZVFHMINLMULMAX2-RV64-NEXT: vse64.v v8, (a2)
-; ZVFHMINLMULMAX2-RV64-NEXT: vsetivli zero, 4, e16, mf4, ta, ma
-; ZVFHMINLMULMAX2-RV64-NEXT: vle16.v v8, (a2)
-; ZVFHMINLMULMAX2-RV64-NEXT: vsetivli zero, 3, e32, mf2, ta, ma
-; ZVFHMINLMULMAX2-RV64-NEXT: vle32.v v9, (a1)
-; ZVFHMINLMULMAX2-RV64-NEXT: vsetivli zero, 4, e16, mf4, ta, ma
-; ZVFHMINLMULMAX2-RV64-NEXT: vfwcvt.f.f.v v10, v8
-; ZVFHMINLMULMAX2-RV64-NEXT: vfncvt.f.f.w v8, v9
-; ZVFHMINLMULMAX2-RV64-NEXT: vfwcvt.f.f.v v9, v8
-; ZVFHMINLMULMAX2-RV64-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
-; ZVFHMINLMULMAX2-RV64-NEXT: vfneg.v v8, v9
-; ZVFHMINLMULMAX2-RV64-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
-; ZVFHMINLMULMAX2-RV64-NEXT: vfncvt.f.f.w v9, v8
-; ZVFHMINLMULMAX2-RV64-NEXT: vfwcvt.f.f.v v8, v9
-; ZVFHMINLMULMAX2-RV64-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
-; ZVFHMINLMULMAX2-RV64-NEXT: vfsgnj.vv v8, v10, v8
-; ZVFHMINLMULMAX2-RV64-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
-; ZVFHMINLMULMAX2-RV64-NEXT: vfncvt.f.f.w v9, v8
-; ZVFHMINLMULMAX2-RV64-NEXT: addi a1, sp, 8
-; ZVFHMINLMULMAX2-RV64-NEXT: vse16.v v9, (a1)
-; ZVFHMINLMULMAX2-RV64-NEXT: flh fa5, 12(sp)
-; ZVFHMINLMULMAX2-RV64-NEXT: fsh fa5, 4(a0)
-; ZVFHMINLMULMAX2-RV64-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
-; ZVFHMINLMULMAX2-RV64-NEXT: vse32.v v9, (a0)
-; ZVFHMINLMULMAX2-RV64-NEXT: addi sp, sp, 16
-; ZVFHMINLMULMAX2-RV64-NEXT: ret
-;
-; ZVFHMINLMULMAX1-RV32-LABEL: copysign_neg_trunc_v3f16_v3f32:
-; ZVFHMINLMULMAX1-RV32: # %bb.0:
-; ZVFHMINLMULMAX1-RV32-NEXT: addi sp, sp, -16
-; ZVFHMINLMULMAX1-RV32-NEXT: .cfi_def_cfa_offset 16
-; ZVFHMINLMULMAX1-RV32-NEXT: vsetivli zero, 4, e16, mf4, ta, ma
-; ZVFHMINLMULMAX1-RV32-NEXT: vle16.v v8, (a0)
-; ZVFHMINLMULMAX1-RV32-NEXT: vsetivli zero, 3, e32, mf2, ta, ma
-; ZVFHMINLMULMAX1-RV32-NEXT: vle32.v v9, (a1)
-; ZVFHMINLMULMAX1-RV32-NEXT: vsetivli zero, 4, e16, mf4, ta, ma
-; ZVFHMINLMULMAX1-RV32-NEXT: vfwcvt.f.f.v v10, v8
-; ZVFHMINLMULMAX1-RV32-NEXT: vfncvt.f.f.w v8, v9
-; ZVFHMINLMULMAX1-RV32-NEXT: vfwcvt.f.f.v v9, v8
-; ZVFHMINLMULMAX1-RV32-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
-; ZVFHMINLMULMAX1-RV32-NEXT: vfneg.v v8, v9
-; ZVFHMINLMULMAX1-RV32-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
-; ZVFHMINLMULMAX1-RV32-NEXT: vfncvt.f.f.w v9, v8
-; ZVFHMINLMULMAX1-RV32-NEXT: vfwcvt.f.f.v v8, v9
-; ZVFHMINLMULMAX1-RV32-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
-; ZVFHMINLMULMAX1-RV32-NEXT: vfsgnj.vv v8, v10, v8
-; ZVFHMINLMULMAX1-RV32-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
-; ZVFHMINLMULMAX1-RV32-NEXT: vfncvt.f.f.w v9, v8
-; ZVFHMINLMULMAX1-RV32-NEXT: addi a1, sp, 8
-; ZVFHMINLMULMAX1-RV32-NEXT: vse16.v v9, (a1)
-; ZVFHMINLMULMAX1-RV32-NEXT: flh fa5, 12(sp)
-; ZVFHMINLMULMAX1-RV32-NEXT: fsh fa5, 4(a0)
-; ZVFHMINLMULMAX1-RV32-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
-; ZVFHMINLMULMAX1-RV32-NEXT: vse32.v v9, (a0)
-; ZVFHMINLMULMAX1-RV32-NEXT: addi sp, sp, 16
-; ZVFHMINLMULMAX1-RV32-NEXT: ret
-;
-; ZVFHMINLMULMAX1-RV64-LABEL: copysign_neg_trunc_v3f16_v3f32:
-; ZVFHMINLMULMAX1-RV64: # %bb.0:
-; ZVFHMINLMULMAX1-RV64-NEXT: addi sp, sp, -16
-; ZVFHMINLMULMAX1-RV64-NEXT: .cfi_def_cfa_offset 16
-; ZVFHMINLMULMAX1-RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma
-; ZVFHMINLMULMAX1-RV64-NEXT: vle64.v v8, (a0)
-; ZVFHMINLMULMAX1-RV64-NEXT: mv a2, sp
-; ZVFHMINLMULMAX1-RV64-NEXT: vse64.v v8, (a2)
-; ZVFHMINLMULMAX1-RV64-NEXT: vsetivli zero, 4, e16, mf4, ta, ma
-; ZVFHMINLMULMAX1-RV64-NEXT: vle16.v v8, (a2)
-; ZVFHMINLMULMAX1-RV64-NEXT: vsetivli zero, 3, e32, mf2, ta, ma
-; ZVFHMINLMULMAX1-RV64-NEXT: vle32.v v9, (a1)
-; ZVFHMINLMULMAX1-RV64-NEXT: vsetivli zero, 4, e16, mf4, ta, ma
-; ZVFHMINLMULMAX1-RV64-NEXT: vfwcvt.f.f.v v10, v8
-; ZVFHMINLMULMAX1-RV64-NEXT: vfncvt.f.f.w v8, v9
-; ZVFHMINLMULMAX1-RV64-NEXT: vfwcvt.f.f.v v9, v8
-; ZVFHMINLMULMAX1-RV64-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
-; ZVFHMINLMULMAX1-RV64-NEXT: vfneg.v v8, v9
-; ZVFHMINLMULMAX1-RV64-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
-; ZVFHMINLMULMAX1-RV64-NEXT: vfncvt.f.f.w v9, v8
-; ZVFHMINLMULMAX1-RV64-NEXT: vfwcvt.f.f.v v8, v9
-; ZVFHMINLMULMAX1-RV64-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
-; ZVFHMINLMULMAX1-RV64-NEXT: vfsgnj.vv v8, v10, v8
-; ZVFHMINLMULMAX1-RV64-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
-; ZVFHMINLMULMAX1-RV64-NEXT: vfncvt.f.f.w v9, v8
-; ZVFHMINLMULMAX1-RV64-NEXT: addi a1, sp, 8
-; ZVFHMINLMULMAX1-RV64-NEXT: vse16.v v9, (a1)
-; ZVFHMINLMULMAX1-RV64-NEXT: flh fa5, 12(sp)
-; ZVFHMINLMULMAX1-RV64-NEXT: fsh fa5, 4(a0)
-; ZVFHMINLMULMAX1-RV64-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
-; ZVFHMINLMULMAX1-RV64-NEXT: vse32.v v9, (a0)
-; ZVFHMINLMULMAX1-RV64-NEXT: addi sp, sp, 16
-; ZVFHMINLMULMAX1-RV64-NEXT: ret
+; ZVFHMIN-RV32-LABEL: copysign_neg_trunc_v3f16_v3f32:
+; ZVFHMIN-RV32: # %bb.0:
+; ZVFHMIN-RV32-NEXT: addi sp, sp, -16
+; ZVFHMIN-RV32-NEXT: .cfi_def_cfa_offset 16
+; ZVFHMIN-RV32-NEXT: vsetivli zero, 4, e16, mf4, ta, ma
+; ZVFHMIN-RV32-NEXT: vle16.v v8, (a0)
+; ZVFHMIN-RV32-NEXT: vsetivli zero, 3, e32, mf2, ta, ma
+; ZVFHMIN-RV32-NEXT: vle32.v v9, (a1)
+; ZVFHMIN-RV32-NEXT: vsetivli zero, 4, e16, mf4, ta, ma
+; ZVFHMIN-RV32-NEXT: vfwcvt.f.f.v v10, v8
+; ZVFHMIN-RV32-NEXT: vfncvt.f.f.w v8, v9
+; ZVFHMIN-RV32-NEXT: vfwcvt.f.f.v v9, v8
+; ZVFHMIN-RV32-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
+; ZVFHMIN-RV32-NEXT: vfneg.v v8, v9
+; ZVFHMIN-RV32-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
+; ZVFHMIN-RV32-NEXT: vfncvt.f.f.w v9, v8
+; ZVFHMIN-RV32-NEXT: vfwcvt.f.f.v v8, v9
+; ZVFHMIN-RV32-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
+; ZVFHMIN-RV32-NEXT: vfsgnj.vv v8, v10, v8
+; ZVFHMIN-RV32-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
+; ZVFHMIN-RV32-NEXT: vfncvt.f.f.w v9, v8
+; ZVFHMIN-RV32-NEXT: addi a1, sp, 8
+; ZVFHMIN-RV32-NEXT: vse16.v v9, (a1)
+; ZVFHMIN-RV32-NEXT: flh fa5, 12(sp)
+; ZVFHMIN-RV32-NEXT: fsh fa5, 4(a0)
+; ZVFHMIN-RV32-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
+; ZVFHMIN-RV32-NEXT: vse32.v v9, (a0)
+; ZVFHMIN-RV32-NEXT: addi sp, sp, 16
+; ZVFHMIN-RV32-NEXT: ret
+;
+; ZVFHMIN-RV64-LABEL: copysign_neg_trunc_v3f16_v3f32:
+; ZVFHMIN-RV64: # %bb.0:
+; ZVFHMIN-RV64-NEXT: addi sp, sp, -16
+; ZVFHMIN-RV64-NEXT: .cfi_def_cfa_offset 16
+; ZVFHMIN-RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma
+; ZVFHMIN-RV64-NEXT: vle64.v v8, (a0)
+; ZVFHMIN-RV64-NEXT: mv a2, sp
+; ZVFHMIN-RV64-NEXT: vse64.v v8, (a2)
+; ZVFHMIN-RV64-NEXT: vsetivli zero, 4, e16, mf4, ta, ma
+; ZVFHMIN-RV64-NEXT: vle16.v v8, (a2)
+; ZVFHMIN-RV64-NEXT: vsetivli zero, 3, e32, mf2, ta, ma
+; ZVFHMIN-RV64-NEXT: vle32.v v9, (a1)
+; ZVFHMIN-RV64-NEXT: vsetivli zero, 4, e16, mf4, ta, ma
+; ZVFHMIN-RV64-NEXT: vfwcvt.f.f.v v10, v8
+; ZVFHMIN-RV64-NEXT: vfncvt.f.f.w v8, v9
+; ZVFHMIN-RV64-NEXT: vfwcvt.f.f.v v9, v8
+; ZVFHMIN-RV64-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
+; ZVFHMIN-RV64-NEXT: vfneg.v v8, v9
+; ZVFHMIN-RV64-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
+; ZVFHMIN-RV64-NEXT: vfncvt.f.f.w v9, v8
+; ZVFHMIN-RV64-NEXT: vfwcvt.f.f.v v8, v9
+; ZVFHMIN-RV64-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
+; ZVFHMIN-RV64-NEXT: vfsgnj.vv v8, v10, v8
+; ZVFHMIN-RV64-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
+; ZVFHMIN-RV64-NEXT: vfncvt.f.f.w v9, v8
+; ZVFHMIN-RV64-NEXT: addi a1, sp, 8
+; ZVFHMIN-RV64-NEXT: vse16.v v9, (a1)
+; ZVFHMIN-RV64-NEXT: flh fa5, 12(sp)
+; ZVFHMIN-RV64-NEXT: fsh fa5, 4(a0)
+; ZVFHMIN-RV64-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
+; ZVFHMIN-RV64-NEXT: vse32.v v9, (a0)
+; ZVFHMIN-RV64-NEXT: addi sp, sp, 16
+; ZVFHMIN-RV64-NEXT: ret
%a = load <3 x half>, ptr %x
%b = load <3 x float>, ptr %y
%c = fneg <3 x float> %b
@@ -1755,71 +1340,38 @@ define void @sqrt_v6f16(ptr %x) {
; ZVFH-NEXT: vse16.v v8, (a0)
; ZVFH-NEXT: ret
;
-; ZVFHMINLMULMAX2-RV32-LABEL: sqrt_v6f16:
-; ZVFHMINLMULMAX2-RV32: # %bb.0:
-; ZVFHMINLMULMAX2-RV32-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
-; ZVFHMINLMULMAX2-RV32-NEXT: vle16.v v8, (a0)
-; ZVFHMINLMULMAX2-RV32-NEXT: vfwcvt.f.f.v v9, v8
-; ZVFHMINLMULMAX2-RV32-NEXT: vsetvli zero, zero, e32, m1, ta, ma
-; ZVFHMINLMULMAX2-RV32-NEXT: vfsqrt.v v8, v9
-; ZVFHMINLMULMAX2-RV32-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
-; ZVFHMINLMULMAX2-RV32-NEXT: vfncvt.f.f.w v9, v8
-; ZVFHMINLMULMAX2-RV32-NEXT: addi a1, a0, 8
-; ZVFHMINLMULMAX2-RV32-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
-; ZVFHMINLMULMAX2-RV32-NEXT: vslidedown.vi v8, v9, 2
-; ZVFHMINLMULMAX2-RV32-NEXT: vse32.v v8, (a1)
-; ZVFHMINLMULMAX2-RV32-NEXT: vsetivli zero, 4, e16, mf4, ta, ma
-; ZVFHMINLMULMAX2-RV32-NEXT: vse16.v v9, (a0)
-; ZVFHMINLMULMAX2-RV32-NEXT: ret
-;
-; ZVFHMINLMULMAX2-RV64-LABEL: sqrt_v6f16:
-; ZVFHMINLMULMAX2-RV64: # %bb.0:
-; ZVFHMINLMULMAX2-RV64-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
-; ZVFHMINLMULMAX2-RV64-NEXT: vle16.v v8, (a0)
-; ZVFHMINLMULMAX2-RV64-NEXT: vfwcvt.f.f.v v9, v8
-; ZVFHMINLMULMAX2-RV64-NEXT: vsetvli zero, zero, e32, m1, ta, ma
-; ZVFHMINLMULMAX2-RV64-NEXT: vfsqrt.v v8, v9
-; ZVFHMINLMULMAX2-RV64-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
-; ZVFHMINLMULMAX2-RV64-NEXT: vfncvt.f.f.w v9, v8
-; ZVFHMINLMULMAX2-RV64-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
-; ZVFHMINLMULMAX2-RV64-NEXT: vse64.v v9, (a0)
-; ZVFHMINLMULMAX2-RV64-NEXT: addi a0, a0, 8
-; ZVFHMINLMULMAX2-RV64-NEXT: vslidedown.vi v8, v9, 2
-; ZVFHMINLMULMAX2-RV64-NEXT: vse32.v v8, (a0)
-; ZVFHMINLMULMAX2-RV64-NEXT: ret
-;
-; ZVFHMINLMULMAX1-RV32-LABEL: sqrt_v6f16:
-; ZVFHMINLMULMAX1-RV32: # %bb.0:
-; ZVFHMINLMULMAX1-RV32-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
-; ZVFHMINLMULMAX1-RV32-NEXT: vle16.v v8, (a0)
-; ZVFHMINLMULMAX1-RV32-NEXT: vfwcvt.f.f.v v9, v8
-; ZVFHMINLMULMAX1-RV32-NEXT: vsetvli zero, zero, e32, m1, ta, ma
-; ZVFHMINLMULMAX1-RV32-NEXT: vfsqrt.v v8, v9
-; ZVFHMINLMULMAX1-RV32-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
-; ZVFHMINLMULMAX1-RV32-NEXT: vfncvt.f.f.w v9, v8
-; ZVFHMINLMULMAX1-RV32-NEXT: addi a1, a0, 8
-; ZVFHMINLMULMAX1-RV32-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
-; ZVFHMINLMULMAX1-RV32-NEXT: vslidedown.vi v8, v9, 2
-; ZVFHMINLMULMAX1-RV32-NEXT: vse32.v v8, (a1)
-; ZVFHMINLMULMAX1-RV32-NEXT: vsetivli zero, 4, e16, mf4, ta, ma
-; ZVFHMINLMULMAX1-RV32-NEXT: vse16.v v9, (a0)
-; ZVFHMINLMULMAX1-RV32-NEXT: ret
-;
-; ZVFHMINLMULMAX1-RV64-LABEL: sqrt_v6f16:
-; ZVFHMINLMULMAX1-RV64: # %bb.0:
-; ZVFHMINLMULMAX1-RV64-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
-; ZVFHMINLMULMAX1-RV64-NEXT: vle16.v v8, (a0)
-; ZVFHMINLMULMAX1-RV64-NEXT: vfwcvt.f.f.v v9, v8
-; ZVFHMINLMULMAX1-RV64-NEXT: vsetvli zero, zero, e32, m1, ta, ma
-; ZVFHMINLMULMAX1-RV64-NEXT: vfsqrt.v v8, v9
-; ZVFHMINLMULMAX1-RV64-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
-; ZVFHMINLMULMAX1-RV64-NEXT: vfncvt.f.f.w v9, v8
-; ZVFHMINLMULMAX1-RV64-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
-; ZVFHMINLMULMAX1-RV64-NEXT: vse64.v v9, (a0)
-; ZVFHMINLMULMAX1-RV64-NEXT: addi a0, a0, 8
-; ZVFHMINLMULMAX1-RV64-NEXT: vslidedown.vi v8, v9, 2
-; ZVFHMINLMULMAX1-RV64-NEXT: vse32.v v8, (a0)
-; ZVFHMINLMULMAX1-RV64-NEXT: ret
+; ZVFHMIN-RV32-LABEL: sqrt_v6f16:
+; ZVFHMIN-RV32: # %bb.0:
+; ZVFHMIN-RV32-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
+; ZVFHMIN-RV32-NEXT: vle16.v v8, (a0)
+; ZVFHMIN-RV32-NEXT: vfwcvt.f.f.v v9, v8
+; ZVFHMIN-RV32-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; ZVFHMIN-RV32-NEXT: vfsqrt.v v8, v9
+; ZVFHMIN-RV32-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
+; ZVFHMIN-RV32-NEXT: vfncvt.f.f.w v9, v8
+; ZVFHMIN-RV32-NEXT: addi a1, a0, 8
+; ZVFHMIN-RV32-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
+; ZVFHMIN-RV32-NEXT: vslidedown.vi v8, v9, 2
+; ZVFHMIN-RV32-NEXT: vse32.v v8, (a1)
+; ZVFHMIN-RV32-NEXT: vsetivli zero, 4, e16, mf4, ta, ma
+; ZVFHMIN-RV32-NEXT: vse16.v v9, (a0)
+; ZVFHMIN-RV32-NEXT: ret
+;
+; ZVFHMIN-RV64-LABEL: sqrt_v6f16:
+; ZVFHMIN-RV64: # %bb.0:
+; ZVFHMIN-RV64-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
+; ZVFHMIN-RV64-NEXT: vle16.v v8, (a0)
+; ZVFHMIN-RV64-NEXT: vfwcvt.f.f.v v9, v8
+; ZVFHMIN-RV64-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; ZVFHMIN-RV64-NEXT: vfsqrt.v v8, v9
+; ZVFHMIN-RV64-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
+; ZVFHMIN-RV64-NEXT: vfncvt.f.f.w v9, v8
+; ZVFHMIN-RV64-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
+; ZVFHMIN-RV64-NEXT: vse64.v v9, (a0)
+; ZVFHMIN-RV64-NEXT: addi a0, a0, 8
+; ZVFHMIN-RV64-NEXT: vslidedown.vi v8, v9, 2
+; ZVFHMIN-RV64-NEXT: vse32.v v8, (a0)
+; ZVFHMIN-RV64-NEXT: ret
%a = load <6 x half>, ptr %x
%b = call <6 x half> @llvm.sqrt.v6f16(<6 x half> %a)
store <6 x half> %b, ptr %x
@@ -1913,87 +1465,46 @@ define void @fma_v6f16(ptr %x, ptr %y, ptr %z) {
; ZVFH-NEXT: vse16.v v10, (a0)
; ZVFH-NEXT: ret
;
-; ZVFHMINLMULMAX2-RV32-LABEL: fma_v6f16:
-; ZVFHMINLMULMAX2-RV32: # %bb.0:
-; ZVFHMINLMULMAX2-RV32-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
-; ZVFHMINLMULMAX2-RV32-NEXT: vle16.v v8, (a2)
-; ZVFHMINLMULMAX2-RV32-NEXT: vle16.v v9, (a0)
-; ZVFHMINLMULMAX2-RV32-NEXT: vle16.v v10, (a1)
-; ZVFHMINLMULMAX2-RV32-NEXT: vfwcvt.f.f.v v11, v8
-; ZVFHMINLMULMAX2-RV32-NEXT: vfwcvt.f.f.v v8, v9
-; ZVFHMINLMULMAX2-RV32-NEXT: vfwcvt.f.f.v v9, v10
-; ZVFHMINLMULMAX2-RV32-NEXT: vsetvli zero, zero, e32, m1, ta, ma
-; ZVFHMINLMULMAX2-RV32-NEXT: vfmadd.vv v9, v8, v11
-; ZVFHMINLMULMAX2-RV32-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
-; ZVFHMINLMULMAX2-RV32-NEXT: vfncvt.f.f.w v8, v9
-; ZVFHMINLMULMAX2-RV32-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
-; ZVFHMINLMULMAX2-RV32-NEXT: vslidedown.vi v9, v8, 2
-; ZVFHMINLMULMAX2-RV32-NEXT: addi a1, a0, 8
-; ZVFHMINLMULMAX2-RV32-NEXT: vse32.v v9, (a1)
-; ZVFHMINLMULMAX2-RV32-NEXT: vsetivli zero, 4, e16, mf4, ta, ma
-; ZVFHMINLMULMAX2-RV32-NEXT: vse16.v v8, (a0)
-; ZVFHMINLMULMAX2-RV32-NEXT: ret
-;
-; ZVFHMINLMULMAX2-RV64-LABEL: fma_v6f16:
-; ZVFHMINLMULMAX2-RV64: # %bb.0:
-; ZVFHMINLMULMAX2-RV64-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
-; ZVFHMINLMULMAX2-RV64-NEXT: vle16.v v8, (a2)
-; ZVFHMINLMULMAX2-RV64-NEXT: vle16.v v9, (a0)
-; ZVFHMINLMULMAX2-RV64-NEXT: vle16.v v10, (a1)
-; ZVFHMINLMULMAX2-RV64-NEXT: vfwcvt.f.f.v v11, v8
-; ZVFHMINLMULMAX2-RV64-NEXT: vfwcvt.f.f.v v8, v9
-; ZVFHMINLMULMAX2-RV64-NEXT: vfwcvt.f.f.v v9, v10
-; ZVFHMINLMULMAX2-RV64-NEXT: vsetvli zero, zero, e32, m1, ta, ma
-; ZVFHMINLMULMAX2-RV64-NEXT: vfmadd.vv v9, v8, v11
-; ZVFHMINLMULMAX2-RV64-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
-; ZVFHMINLMULMAX2-RV64-NEXT: vfncvt.f.f.w v8, v9
-; ZVFHMINLMULMAX2-RV64-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
-; ZVFHMINLMULMAX2-RV64-NEXT: vse64.v v8, (a0)
-; ZVFHMINLMULMAX2-RV64-NEXT: vslidedown.vi v8, v8, 2
-; ZVFHMINLMULMAX2-RV64-NEXT: addi a0, a0, 8
-; ZVFHMINLMULMAX2-RV64-NEXT: vse32.v v8, (a0)
-; ZVFHMINLMULMAX2-RV64-NEXT: ret
-;
-; ZVFHMINLMULMAX1-RV32-LABEL: fma_v6f16:
-; ZVFHMINLMULMAX1-RV32: # %bb.0:
-; ZVFHMINLMULMAX1-RV32-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
-; ZVFHMINLMULMAX1-RV32-NEXT: vle16.v v8, (a2)
-; ZVFHMINLMULMAX1-RV32-NEXT: vle16.v v9, (a0)
-; ZVFHMINLMULMAX1-RV32-NEXT: vle16.v v10, (a1)
-; ZVFHMINLMULMAX1-RV32-NEXT: vfwcvt.f.f.v v11, v8
-; ZVFHMINLMULMAX1-RV32-NEXT: vfwcvt.f.f.v v8, v9
-; ZVFHMINLMULMAX1-RV32-NEXT: vfwcvt.f.f.v v9, v10
-; ZVFHMINLMULMAX1-RV32-NEXT: vsetvli zero, zero, e32, m1, ta, ma
-; ZVFHMINLMULMAX1-RV32-NEXT: vfmadd.vv v9, v8, v11
-; ZVFHMINLMULMAX1-RV32-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
-; ZVFHMINLMULMAX1-RV32-NEXT: vfncvt.f.f.w v8, v9
-; ZVFHMINLMULMAX1-RV32-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
-; ZVFHMINLMULMAX1-RV32-NEXT: vslidedown.vi v9, v8, 2
-; ZVFHMINLMULMAX1-RV32-NEXT: addi a1, a0, 8
-; ZVFHMINLMULMAX1-RV32-NEXT: vse32.v v9, (a1)
-; ZVFHMINLMULMAX1-RV32-NEXT: vsetivli zero, 4, e16, mf4, ta, ma
-; ZVFHMINLMULMAX1-RV32-NEXT: vse16.v v8, (a0)
-; ZVFHMINLMULMAX1-RV32-NEXT: ret
-;
-; ZVFHMINLMULMAX1-RV64-LABEL: fma_v6f16:
-; ZVFHMINLMULMAX1-RV64: # %bb.0:
-; ZVFHMINLMULMAX1-RV64-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
-; ZVFHMINLMULMAX1-RV64-NEXT: vle16.v v8, (a2)
-; ZVFHMINLMULMAX1-RV64-NEXT: vle16.v v9, (a0)
-; ZVFHMINLMULMAX1-RV64-NEXT: vle16.v v10, (a1)
-; ZVFHMINLMULMAX1-RV64-NEXT: vfwcvt.f.f.v v11, v8
-; ZVFHMINLMULMAX1-RV64-NEXT: vfwcvt.f.f.v v8, v9
-; ZVFHMINLMULMAX1-RV64-NEXT: vfwcvt.f.f.v v9, v10
-; ZVFHMINLMULMAX1-RV64-NEXT: vsetvli zero, zero, e32, m1, ta, ma
-; ZVFHMINLMULMAX1-RV64-NEXT: vfmadd.vv v9, v8, v11
-; ZVFHMINLMULMAX1-RV64-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
-; ZVFHMINLMULMAX1-RV64-NEXT: vfncvt.f.f.w v8, v9
-; ZVFHMINLMULMAX1-RV64-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
-; ZVFHMINLMULMAX1-RV64-NEXT: vse64.v v8, (a0)
-; ZVFHMINLMULMAX1-RV64-NEXT: vslidedown.vi v8, v8, 2
-; ZVFHMINLMULMAX1-RV64-NEXT: addi a0, a0, 8
-; ZVFHMINLMULMAX1-RV64-NEXT: vse32.v v8, (a0)
-; ZVFHMINLMULMAX1-RV64-NEXT: ret
+; ZVFHMIN-RV32-LABEL: fma_v6f16:
+; ZVFHMIN-RV32: # %bb.0:
+; ZVFHMIN-RV32-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
+; ZVFHMIN-RV32-NEXT: vle16.v v8, (a2)
+; ZVFHMIN-RV32-NEXT: vle16.v v9, (a0)
+; ZVFHMIN-RV32-NEXT: vle16.v v10, (a1)
+; ZVFHMIN-RV32-NEXT: vfwcvt.f.f.v v11, v8
+; ZVFHMIN-RV32-NEXT: vfwcvt.f.f.v v8, v9
+; ZVFHMIN-RV32-NEXT: vfwcvt.f.f.v v9, v10
+; ZVFHMIN-RV32-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; ZVFHMIN-RV32-NEXT: vfmadd.vv v9, v8, v11
+; ZVFHMIN-RV32-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
+; ZVFHMIN-RV32-NEXT: vfncvt.f.f.w v8, v9
+; ZVFHMIN-RV32-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
+; ZVFHMIN-RV32-NEXT: vslidedown.vi v9, v8, 2
+; ZVFHMIN-RV32-NEXT: addi a1, a0, 8
+; ZVFHMIN-RV32-NEXT: vse32.v v9, (a1)
+; ZVFHMIN-RV32-NEXT: vsetivli zero, 4, e16, mf4, ta, ma
+; ZVFHMIN-RV32-NEXT: vse16.v v8, (a0)
+; ZVFHMIN-RV32-NEXT: ret
+;
+; ZVFHMIN-RV64-LABEL: fma_v6f16:
+; ZVFHMIN-RV64: # %bb.0:
+; ZVFHMIN-RV64-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
+; ZVFHMIN-RV64-NEXT: vle16.v v8, (a2)
+; ZVFHMIN-RV64-NEXT: vle16.v v9, (a0)
+; ZVFHMIN-RV64-NEXT: vle16.v v10, (a1)
+; ZVFHMIN-RV64-NEXT: vfwcvt.f.f.v v11, v8
+; ZVFHMIN-RV64-NEXT: vfwcvt.f.f.v v8, v9
+; ZVFHMIN-RV64-NEXT: vfwcvt.f.f.v v9, v10
+; ZVFHMIN-RV64-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; ZVFHMIN-RV64-NEXT: vfmadd.vv v9, v8, v11
+; ZVFHMIN-RV64-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
+; ZVFHMIN-RV64-NEXT: vfncvt.f.f.w v8, v9
+; ZVFHMIN-RV64-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
+; ZVFHMIN-RV64-NEXT: vse64.v v8, (a0)
+; ZVFHMIN-RV64-NEXT: vslidedown.vi v8, v8, 2
+; ZVFHMIN-RV64-NEXT: addi a0, a0, 8
+; ZVFHMIN-RV64-NEXT: vse32.v v8, (a0)
+; ZVFHMIN-RV64-NEXT: ret
%a = load <6 x half>, ptr %x
%b = load <6 x half>, ptr %y
%c = load <6 x half>, ptr %z
@@ -2104,107 +1615,56 @@ define void @fmsub_v6f16(ptr %x, ptr %y, ptr %z) {
; ZVFH-NEXT: vse16.v v10, (a0)
; ZVFH-NEXT: ret
;
-; ZVFHMINLMULMAX2-RV32-LABEL: fmsub_v6f16:
-; ZVFHMINLMULMAX2-RV32: # %bb.0:
-; ZVFHMINLMULMAX2-RV32-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
-; ZVFHMINLMULMAX2-RV32-NEXT: vle16.v v8, (a2)
-; ZVFHMINLMULMAX2-RV32-NEXT: vle16.v v9, (a0)
-; ZVFHMINLMULMAX2-RV32-NEXT: vle16.v v10, (a1)
-; ZVFHMINLMULMAX2-RV32-NEXT: vfwcvt.f.f.v v11, v8
-; ZVFHMINLMULMAX2-RV32-NEXT: vsetvli zero, zero, e32, m1, ta, ma
-; ZVFHMINLMULMAX2-RV32-NEXT: vfneg.v v8, v11
-; ZVFHMINLMULMAX2-RV32-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
-; ZVFHMINLMULMAX2-RV32-NEXT: vfncvt.f.f.w v11, v8
-; ZVFHMINLMULMAX2-RV32-NEXT: vfwcvt.f.f.v v8, v9
-; ZVFHMINLMULMAX2-RV32-NEXT: vfwcvt.f.f.v v9, v10
-; ZVFHMINLMULMAX2-RV32-NEXT: vfwcvt.f.f.v v10, v11
-; ZVFHMINLMULMAX2-RV32-NEXT: vsetvli zero, zero, e32, m1, ta, ma
-; ZVFHMINLMULMAX2-RV32-NEXT: vfmacc.vv v10, v8, v9
-; ZVFHMINLMULMAX2-RV32-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
-; ZVFHMINLMULMAX2-RV32-NEXT: vfncvt.f.f.w v8, v10
-; ZVFHMINLMULMAX2-RV32-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
-; ZVFHMINLMULMAX2-RV32-NEXT: vslidedown.vi v9, v8, 2
-; ZVFHMINLMULMAX2-RV32-NEXT: addi a1, a0, 8
-; ZVFHMINLMULMAX2-RV32-NEXT: vse32.v v9, (a1)
-; ZVFHMINLMULMAX2-RV32-NEXT: vsetivli zero, 4, e16, mf4, ta, ma
-; ZVFHMINLMULMAX2-RV32-NEXT: vse16.v v8, (a0)
-; ZVFHMINLMULMAX2-RV32-NEXT: ret
-;
-; ZVFHMINLMULMAX2-RV64-LABEL: fmsub_v6f16:
-; ZVFHMINLMULMAX2-RV64: # %bb.0:
-; ZVFHMINLMULMAX2-RV64-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
-; ZVFHMINLMULMAX2-RV64-NEXT: vle16.v v8, (a2)
-; ZVFHMINLMULMAX2-RV64-NEXT: vle16.v v9, (a0)
-; ZVFHMINLMULMAX2-RV64-NEXT: vle16.v v10, (a1)
-; ZVFHMINLMULMAX2-RV64-NEXT: vfwcvt.f.f.v v11, v8
-; ZVFHMINLMULMAX2-RV64-NEXT: vsetvli zero, zero, e32, m1, ta, ma
-; ZVFHMINLMULMAX2-RV64-NEXT: vfneg.v v8, v11
-; ZVFHMINLMULMAX2-RV64-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
-; ZVFHMINLMULMAX2-RV64-NEXT: vfncvt.f.f.w v11, v8
-; ZVFHMINLMULMAX2-RV64-NEXT: vfwcvt.f.f.v v8, v9
-; ZVFHMINLMULMAX2-RV64-NEXT: vfwcvt.f.f.v v9, v10
-; ZVFHMINLMULMAX2-RV64-NEXT: vfwcvt.f.f.v v10, v11
-; ZVFHMINLMULMAX2-RV64-NEXT: vsetvli zero, zero, e32, m1, ta, ma
-; ZVFHMINLMULMAX2-RV64-NEXT: vfmacc.vv v10, v8, v9
-; ZVFHMINLMULMAX2-RV64-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
-; ZVFHMINLMULMAX2-RV64-NEXT: vfncvt.f.f.w v8, v10
-; ZVFHMINLMULMAX2-RV64-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
-; ZVFHMINLMULMAX2-RV64-NEXT: vse64.v v8, (a0)
-; ZVFHMINLMULMAX2-RV64-NEXT: vslidedown.vi v8, v8, 2
-; ZVFHMINLMULMAX2-RV64-NEXT: addi a0, a0, 8
-; ZVFHMINLMULMAX2-RV64-NEXT: vse32.v v8, (a0)
-; ZVFHMINLMULMAX2-RV64-NEXT: ret
-;
-; ZVFHMINLMULMAX1-RV32-LABEL: fmsub_v6f16:
-; ZVFHMINLMULMAX1-RV32: # %bb.0:
-; ZVFHMINLMULMAX1-RV32-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
-; ZVFHMINLMULMAX1-RV32-NEXT: vle16.v v8, (a2)
-; ZVFHMINLMULMAX1-RV32-NEXT: vle16.v v9, (a0)
-; ZVFHMINLMULMAX1-RV32-NEXT: vle16.v v10, (a1)
-; ZVFHMINLMULMAX1-RV32-NEXT: vfwcvt.f.f.v v11, v8
-; ZVFHMINLMULMAX1-RV32-NEXT: vsetvli zero, zero, e32, m1, ta, ma
-; ZVFHMINLMULMAX1-RV32-NEXT: vfneg.v v8, v11
-; ZVFHMINLMULMAX1-RV32-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
-; ZVFHMINLMULMAX1-RV32-NEXT: vfncvt.f.f.w v11, v8
-; ZVFHMINLMULMAX1-RV32-NEXT: vfwcvt.f.f.v v8, v9
-; ZVFHMINLMULMAX1-RV32-NEXT: vfwcvt.f.f.v v9, v10
-; ZVFHMINLMULMAX1-RV32-NEXT: vfwcvt.f.f.v v10, v11
-; ZVFHMINLMULMAX1-RV32-NEXT: vsetvli zero, zero, e32, m1, ta, ma
-; ZVFHMINLMULMAX1-RV32-NEXT: vfmacc.vv v10, v8, v9
-; ZVFHMINLMULMAX1-RV32-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
-; ZVFHMINLMULMAX1-RV32-NEXT: vfncvt.f.f.w v8, v10
-; ZVFHMINLMULMAX1-RV32-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
-; ZVFHMINLMULMAX1-RV32-NEXT: vslidedown.vi v9, v8, 2
-; ZVFHMINLMULMAX1-RV32-NEXT: addi a1, a0, 8
-; ZVFHMINLMULMAX1-RV32-NEXT: vse32.v v9, (a1)
-; ZVFHMINLMULMAX1-RV32-NEXT: vsetivli zero, 4, e16, mf4, ta, ma
-; ZVFHMINLMULMAX1-RV32-NEXT: vse16.v v8, (a0)
-; ZVFHMINLMULMAX1-RV32-NEXT: ret
-;
-; ZVFHMINLMULMAX1-RV64-LABEL: fmsub_v6f16:
-; ZVFHMINLMULMAX1-RV64: # %bb.0:
-; ZVFHMINLMULMAX1-RV64-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
-; ZVFHMINLMULMAX1-RV64-NEXT: vle16.v v8, (a2)
-; ZVFHMINLMULMAX1-RV64-NEXT: vle16.v v9, (a0)
-; ZVFHMINLMULMAX1-RV64-NEXT: vle16.v v10, (a1)
-; ZVFHMINLMULMAX1-RV64-NEXT: vfwcvt.f.f.v v11, v8
-; ZVFHMINLMULMAX1-RV64-NEXT: vsetvli zero, zero, e32, m1, ta, ma
-; ZVFHMINLMULMAX1-RV64-NEXT: vfneg.v v8, v11
-; ZVFHMINLMULMAX1-RV64-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
-; ZVFHMINLMULMAX1-RV64-NEXT: vfncvt.f.f.w v11, v8
-; ZVFHMINLMULMAX1-RV64-NEXT: vfwcvt.f.f.v v8, v9
-; ZVFHMINLMULMAX1-RV64-NEXT: vfwcvt.f.f.v v9, v10
-; ZVFHMINLMULMAX1-RV64-NEXT: vfwcvt.f.f.v v10, v11
-; ZVFHMINLMULMAX1-RV64-NEXT: vsetvli zero, zero, e32, m1, ta, ma
-; ZVFHMINLMULMAX1-RV64-NEXT: vfmacc.vv v10, v8, v9
-; ZVFHMINLMULMAX1-RV64-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
-; ZVFHMINLMULMAX1-RV64-NEXT: vfncvt.f.f.w v8, v10
-; ZVFHMINLMULMAX1-RV64-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
-; ZVFHMINLMULMAX1-RV64-NEXT: vse64.v v8, (a0)
-; ZVFHMINLMULMAX1-RV64-NEXT: vslidedown.vi v8, v8, 2
-; ZVFHMINLMULMAX1-RV64-NEXT: addi a0, a0, 8
-; ZVFHMINLMULMAX1-RV64-NEXT: vse32.v v8, (a0)
-; ZVFHMINLMULMAX1-RV64-NEXT: ret
+; ZVFHMIN-RV32-LABEL: fmsub_v6f16:
+; ZVFHMIN-RV32: # %bb.0:
+; ZVFHMIN-RV32-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
+; ZVFHMIN-RV32-NEXT: vle16.v v8, (a2)
+; ZVFHMIN-RV32-NEXT: vle16.v v9, (a0)
+; ZVFHMIN-RV32-NEXT: vle16.v v10, (a1)
+; ZVFHMIN-RV32-NEXT: vfwcvt.f.f.v v11, v8
+; ZVFHMIN-RV32-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; ZVFHMIN-RV32-NEXT: vfneg.v v8, v11
+; ZVFHMIN-RV32-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
+; ZVFHMIN-RV32-NEXT: vfncvt.f.f.w v11, v8
+; ZVFHMIN-RV32-NEXT: vfwcvt.f.f.v v8, v9
+; ZVFHMIN-RV32-NEXT: vfwcvt.f.f.v v9, v10
+; ZVFHMIN-RV32-NEXT: vfwcvt.f.f.v v10, v11
+; ZVFHMIN-RV32-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; ZVFHMIN-RV32-NEXT: vfmacc.vv v10, v8, v9
+; ZVFHMIN-RV32-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
+; ZVFHMIN-RV32-NEXT: vfncvt.f.f.w v8, v10
+; ZVFHMIN-RV32-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
+; ZVFHMIN-RV32-NEXT: vslidedown.vi v9, v8, 2
+; ZVFHMIN-RV32-NEXT: addi a1, a0, 8
+; ZVFHMIN-RV32-NEXT: vse32.v v9, (a1)
+; ZVFHMIN-RV32-NEXT: vsetivli zero, 4, e16, mf4, ta, ma
+; ZVFHMIN-RV32-NEXT: vse16.v v8, (a0)
+; ZVFHMIN-RV32-NEXT: ret
+;
+; ZVFHMIN-RV64-LABEL: fmsub_v6f16:
+; ZVFHMIN-RV64: # %bb.0:
+; ZVFHMIN-RV64-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
+; ZVFHMIN-RV64-NEXT: vle16.v v8, (a2)
+; ZVFHMIN-RV64-NEXT: vle16.v v9, (a0)
+; ZVFHMIN-RV64-NEXT: vle16.v v10, (a1)
+; ZVFHMIN-RV64-NEXT: vfwcvt.f.f.v v11, v8
+; ZVFHMIN-RV64-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; ZVFHMIN-RV64-NEXT: vfneg.v v8, v11
+; ZVFHMIN-RV64-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
+; ZVFHMIN-RV64-NEXT: vfncvt.f.f.w v11, v8
+; ZVFHMIN-RV64-NEXT: vfwcvt.f.f.v v8, v9
+; ZVFHMIN-RV64-NEXT: vfwcvt.f.f.v v9, v10
+; ZVFHMIN-RV64-NEXT: vfwcvt.f.f.v v10, v11
+; ZVFHMIN-RV64-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; ZVFHMIN-RV64-NEXT: vfmacc.vv v10, v8, v9
+; ZVFHMIN-RV64-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
+; ZVFHMIN-RV64-NEXT: vfncvt.f.f.w v8, v10
+; ZVFHMIN-RV64-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
+; ZVFHMIN-RV64-NEXT: vse64.v v8, (a0)
+; ZVFHMIN-RV64-NEXT: vslidedown.vi v8, v8, 2
+; ZVFHMIN-RV64-NEXT: addi a0, a0, 8
+; ZVFHMIN-RV64-NEXT: vse32.v v8, (a0)
+; ZVFHMIN-RV64-NEXT: ret
%a = load <6 x half>, ptr %x
%b = load <6 x half>, ptr %y
%c = load <6 x half>, ptr %z
@@ -2264,58 +1724,28 @@ define void @fnmadd_v2f64(ptr %x, ptr %y, ptr %z) {
}
define void @fadd_v16f16(ptr %x, ptr %y) {
-; LMULMAX2-LABEL: fadd_v16f16:
-; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; LMULMAX2-NEXT: vle16.v v8, (a0)
-; LMULMAX2-NEXT: vle16.v v10, (a1)
-; LMULMAX2-NEXT: vfadd.vv v8, v8, v10
-; LMULMAX2-NEXT: vse16.v v8, (a0)
-; LMULMAX2-NEXT: ret
-;
-; LMULMAX1-RV32-LABEL: fadd_v16f16:
-; LMULMAX1-RV32: # %bb.0:
-; LMULMAX1-RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; LMULMAX1-RV32-NEXT: vle16.v v8, (a0)
-; LMULMAX1-RV32-NEXT: addi a2, a0, 16
-; LMULMAX1-RV32-NEXT: vle16.v v9, (a2)
-; LMULMAX1-RV32-NEXT: addi a3, a1, 16
-; LMULMAX1-RV32-NEXT: vle16.v v10, (a3)
-; LMULMAX1-RV32-NEXT: vle16.v v11, (a1)
-; LMULMAX1-RV32-NEXT: vfadd.vv v9, v9, v10
-; LMULMAX1-RV32-NEXT: vfadd.vv v8, v8, v11
-; LMULMAX1-RV32-NEXT: vse16.v v8, (a0)
-; LMULMAX1-RV32-NEXT: vse16.v v9, (a2)
-; LMULMAX1-RV32-NEXT: ret
-;
-; LMULMAX1-RV64-LABEL: fadd_v16f16:
-; LMULMAX1-RV64: # %bb.0:
-; LMULMAX1-RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; LMULMAX1-RV64-NEXT: vle16.v v8, (a0)
-; LMULMAX1-RV64-NEXT: addi a2, a1, 16
-; LMULMAX1-RV64-NEXT: vle16.v v9, (a2)
-; LMULMAX1-RV64-NEXT: addi a2, a0, 16
-; LMULMAX1-RV64-NEXT: vle16.v v10, (a2)
-; LMULMAX1-RV64-NEXT: vle16.v v11, (a1)
-; LMULMAX1-RV64-NEXT: vfadd.vv v9, v10, v9
-; LMULMAX1-RV64-NEXT: vfadd.vv v8, v8, v11
-; LMULMAX1-RV64-NEXT: vse16.v v8, (a0)
-; LMULMAX1-RV64-NEXT: vse16.v v9, (a2)
-; LMULMAX1-RV64-NEXT: ret
-;
-; ZVFHMINLMULMAX2-LABEL: fadd_v16f16:
-; ZVFHMINLMULMAX2: # %bb.0:
-; ZVFHMINLMULMAX2-NEXT: vsetivli zero, 16, e16, m1, ta, ma
-; ZVFHMINLMULMAX2-NEXT: vle16.v v8, (a1)
-; ZVFHMINLMULMAX2-NEXT: vle16.v v9, (a0)
-; ZVFHMINLMULMAX2-NEXT: vfwcvt.f.f.v v10, v8
-; ZVFHMINLMULMAX2-NEXT: vfwcvt.f.f.v v12, v9
-; ZVFHMINLMULMAX2-NEXT: vsetvli zero, zero, e32, m2, ta, ma
-; ZVFHMINLMULMAX2-NEXT: vfadd.vv v8, v12, v10
-; ZVFHMINLMULMAX2-NEXT: vsetvli zero, zero, e16, m1, ta, ma
-; ZVFHMINLMULMAX2-NEXT: vfncvt.f.f.w v10, v8
-; ZVFHMINLMULMAX2-NEXT: vse16.v v10, (a0)
-; ZVFHMINLMULMAX2-NEXT: ret
+; ZVFH-LABEL: fadd_v16f16:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; ZVFH-NEXT: vle16.v v8, (a0)
+; ZVFH-NEXT: vle16.v v10, (a1)
+; ZVFH-NEXT: vfadd.vv v8, v8, v10
+; ZVFH-NEXT: vse16.v v8, (a0)
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: fadd_v16f16:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: vsetivli zero, 16, e16, m1, ta, ma
+; ZVFHMIN-NEXT: vle16.v v8, (a1)
+; ZVFHMIN-NEXT: vle16.v v9, (a0)
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v8
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v9
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; ZVFHMIN-NEXT: vfadd.vv v8, v12, v10
+; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m1, ta, ma
+; ZVFHMIN-NEXT: vfncvt.f.f.w v10, v8
+; ZVFHMIN-NEXT: vse16.v v10, (a0)
+; ZVFHMIN-NEXT: ret
%a = load <16 x half>, ptr %x
%b = load <16 x half>, ptr %y
%c = fadd <16 x half> %a, %b
@@ -2324,44 +1754,14 @@ define void @fadd_v16f16(ptr %x, ptr %y) {
}
define void @fadd_v8f32(ptr %x, ptr %y) {
-; LMULMAX2-LABEL: fadd_v8f32:
-; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; LMULMAX2-NEXT: vle32.v v8, (a0)
-; LMULMAX2-NEXT: vle32.v v10, (a1)
-; LMULMAX2-NEXT: vfadd.vv v8, v8, v10
-; LMULMAX2-NEXT: vse32.v v8, (a0)
-; LMULMAX2-NEXT: ret
-;
-; LMULMAX1-RV32-LABEL: fadd_v8f32:
-; LMULMAX1-RV32: # %bb.0:
-; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; LMULMAX1-RV32-NEXT: vle32.v v8, (a0)
-; LMULMAX1-RV32-NEXT: addi a2, a0, 16
-; LMULMAX1-RV32-NEXT: vle32.v v9, (a2)
-; LMULMAX1-RV32-NEXT: addi a3, a1, 16
-; LMULMAX1-RV32-NEXT: vle32.v v10, (a3)
-; LMULMAX1-RV32-NEXT: vle32.v v11, (a1)
-; LMULMAX1-RV32-NEXT: vfadd.vv v9, v9, v10
-; LMULMAX1-RV32-NEXT: vfadd.vv v8, v8, v11
-; LMULMAX1-RV32-NEXT: vse32.v v8, (a0)
-; LMULMAX1-RV32-NEXT: vse32.v v9, (a2)
-; LMULMAX1-RV32-NEXT: ret
-;
-; LMULMAX1-RV64-LABEL: fadd_v8f32:
-; LMULMAX1-RV64: # %bb.0:
-; LMULMAX1-RV64-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; LMULMAX1-RV64-NEXT: vle32.v v8, (a0)
-; LMULMAX1-RV64-NEXT: addi a2, a1, 16
-; LMULMAX1-RV64-NEXT: vle32.v v9, (a2)
-; LMULMAX1-RV64-NEXT: addi a2, a0, 16
-; LMULMAX1-RV64-NEXT: vle32.v v10, (a2)
-; LMULMAX1-RV64-NEXT: vle32.v v11, (a1)
-; LMULMAX1-RV64-NEXT: vfadd.vv v9, v10, v9
-; LMULMAX1-RV64-NEXT: vfadd.vv v8, v8, v11
-; LMULMAX1-RV64-NEXT: vse32.v v8, (a0)
-; LMULMAX1-RV64-NEXT: vse32.v v9, (a2)
-; LMULMAX1-RV64-NEXT: ret
+; ZVFH-LABEL: fadd_v8f32:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; ZVFH-NEXT: vle32.v v8, (a0)
+; ZVFH-NEXT: vle32.v v10, (a1)
+; ZVFH-NEXT: vfadd.vv v8, v8, v10
+; ZVFH-NEXT: vse32.v v8, (a0)
+; ZVFH-NEXT: ret
;
; ZVFHMIN-LABEL: fadd_v8f32:
; ZVFHMIN: # %bb.0:
@@ -2379,44 +1779,14 @@ define void @fadd_v8f32(ptr %x, ptr %y) {
}
define void @fadd_v4f64(ptr %x, ptr %y) {
-; LMULMAX2-LABEL: fadd_v4f64:
-; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; LMULMAX2-NEXT: vle64.v v8, (a0)
-; LMULMAX2-NEXT: vle64.v v10, (a1)
-; LMULMAX2-NEXT: vfadd.vv v8, v8, v10
-; LMULMAX2-NEXT: vse64.v v8, (a0)
-; LMULMAX2-NEXT: ret
-;
-; LMULMAX1-RV32-LABEL: fadd_v4f64:
-; LMULMAX1-RV32: # %bb.0:
-; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; LMULMAX1-RV32-NEXT: vle64.v v8, (a0)
-; LMULMAX1-RV32-NEXT: addi a2, a0, 16
-; LMULMAX1-RV32-NEXT: vle64.v v9, (a2)
-; LMULMAX1-RV32-NEXT: addi a3, a1, 16
-; LMULMAX1-RV32-NEXT: vle64.v v10, (a3)
-; LMULMAX1-RV32-NEXT: vle64.v v11, (a1)
-; LMULMAX1-RV32-NEXT: vfadd.vv v9, v9, v10
-; LMULMAX1-RV32-NEXT: vfadd.vv v8, v8, v11
-; LMULMAX1-RV32-NEXT: vse64.v v8, (a0)
-; LMULMAX1-RV32-NEXT: vse64.v v9, (a2)
-; LMULMAX1-RV32-NEXT: ret
-;
-; LMULMAX1-RV64-LABEL: fadd_v4f64:
-; LMULMAX1-RV64: # %bb.0:
-; LMULMAX1-RV64-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; LMULMAX1-RV64-NEXT: vle64.v v8, (a0)
-; LMULMAX1-RV64-NEXT: addi a2, a1, 16
-; LMULMAX1-RV64-NEXT: vle64.v v9, (a2)
-; LMULMAX1-RV64-NEXT: addi a2, a0, 16
-; LMULMAX1-RV64-NEXT: vle64.v v10, (a2)
-; LMULMAX1-RV64-NEXT: vle64.v v11, (a1)
-; LMULMAX1-RV64-NEXT: vfadd.vv v9, v10, v9
-; LMULMAX1-RV64-NEXT: vfadd.vv v8, v8, v11
-; LMULMAX1-RV64-NEXT: vse64.v v8, (a0)
-; LMULMAX1-RV64-NEXT: vse64.v v9, (a2)
-; LMULMAX1-RV64-NEXT: ret
+; ZVFH-LABEL: fadd_v4f64:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; ZVFH-NEXT: vle64.v v8, (a0)
+; ZVFH-NEXT: vle64.v v10, (a1)
+; ZVFH-NEXT: vfadd.vv v8, v8, v10
+; ZVFH-NEXT: vse64.v v8, (a0)
+; ZVFH-NEXT: ret
;
; ZVFHMIN-LABEL: fadd_v4f64:
; ZVFHMIN: # %bb.0:
@@ -2434,58 +1804,28 @@ define void @fadd_v4f64(ptr %x, ptr %y) {
}
define void @fsub_v16f16(ptr %x, ptr %y) {
-; LMULMAX2-LABEL: fsub_v16f16:
-; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; LMULMAX2-NEXT: vle16.v v8, (a0)
-; LMULMAX2-NEXT: vle16.v v10, (a1)
-; LMULMAX2-NEXT: vfsub.vv v8, v8, v10
-; LMULMAX2-NEXT: vse16.v v8, (a0)
-; LMULMAX2-NEXT: ret
-;
-; LMULMAX1-RV32-LABEL: fsub_v16f16:
-; LMULMAX1-RV32: # %bb.0:
-; LMULMAX1-RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; LMULMAX1-RV32-NEXT: vle16.v v8, (a0)
-; LMULMAX1-RV32-NEXT: addi a2, a0, 16
-; LMULMAX1-RV32-NEXT: vle16.v v9, (a2)
-; LMULMAX1-RV32-NEXT: addi a3, a1, 16
-; LMULMAX1-RV32-NEXT: vle16.v v10, (a3)
-; LMULMAX1-RV32-NEXT: vle16.v v11, (a1)
-; LMULMAX1-RV32-NEXT: vfsub.vv v9, v9, v10
-; LMULMAX1-RV32-NEXT: vfsub.vv v8, v8, v11
-; LMULMAX1-RV32-NEXT: vse16.v v8, (a0)
-; LMULMAX1-RV32-NEXT: vse16.v v9, (a2)
-; LMULMAX1-RV32-NEXT: ret
-;
-; LMULMAX1-RV64-LABEL: fsub_v16f16:
-; LMULMAX1-RV64: # %bb.0:
-; LMULMAX1-RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; LMULMAX1-RV64-NEXT: vle16.v v8, (a0)
-; LMULMAX1-RV64-NEXT: addi a2, a1, 16
-; LMULMAX1-RV64-NEXT: vle16.v v9, (a2)
-; LMULMAX1-RV64-NEXT: addi a2, a0, 16
-; LMULMAX1-RV64-NEXT: vle16.v v10, (a2)
-; LMULMAX1-RV64-NEXT: vle16.v v11, (a1)
-; LMULMAX1-RV64-NEXT: vfsub.vv v9, v10, v9
-; LMULMAX1-RV64-NEXT: vfsub.vv v8, v8, v11
-; LMULMAX1-RV64-NEXT: vse16.v v8, (a0)
-; LMULMAX1-RV64-NEXT: vse16.v v9, (a2)
-; LMULMAX1-RV64-NEXT: ret
-;
-; ZVFHMINLMULMAX2-LABEL: fsub_v16f16:
-; ZVFHMINLMULMAX2: # %bb.0:
-; ZVFHMINLMULMAX2-NEXT: vsetivli zero, 16, e16, m1, ta, ma
-; ZVFHMINLMULMAX2-NEXT: vle16.v v8, (a1)
-; ZVFHMINLMULMAX2-NEXT: vle16.v v9, (a0)
-; ZVFHMINLMULMAX2-NEXT: vfwcvt.f.f.v v10, v8
-; ZVFHMINLMULMAX2-NEXT: vfwcvt.f.f.v v12, v9
-; ZVFHMINLMULMAX2-NEXT: vsetvli zero, zero, e32, m2, ta, ma
-; ZVFHMINLMULMAX2-NEXT: vfsub.vv v8, v12, v10
-; ZVFHMINLMULMAX2-NEXT: vsetvli zero, zero, e16, m1, ta, ma
-; ZVFHMINLMULMAX2-NEXT: vfncvt.f.f.w v10, v8
-; ZVFHMINLMULMAX2-NEXT: vse16.v v10, (a0)
-; ZVFHMINLMULMAX2-NEXT: ret
+; ZVFH-LABEL: fsub_v16f16:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; ZVFH-NEXT: vle16.v v8, (a0)
+; ZVFH-NEXT: vle16.v v10, (a1)
+; ZVFH-NEXT: vfsub.vv v8, v8, v10
+; ZVFH-NEXT: vse16.v v8, (a0)
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: fsub_v16f16:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: vsetivli zero, 16, e16, m1, ta, ma
+; ZVFHMIN-NEXT: vle16.v v8, (a1)
+; ZVFHMIN-NEXT: vle16.v v9, (a0)
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v8
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v9
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; ZVFHMIN-NEXT: vfsub.vv v8, v12, v10
+; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m1, ta, ma
+; ZVFHMIN-NEXT: vfncvt.f.f.w v10, v8
+; ZVFHMIN-NEXT: vse16.v v10, (a0)
+; ZVFHMIN-NEXT: ret
%a = load <16 x half>, ptr %x
%b = load <16 x half>, ptr %y
%c = fsub <16 x half> %a, %b
@@ -2494,44 +1834,14 @@ define void @fsub_v16f16(ptr %x, ptr %y) {
}
define void @fsub_v8f32(ptr %x, ptr %y) {
-; LMULMAX2-LABEL: fsub_v8f32:
-; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; LMULMAX2-NEXT: vle32.v v8, (a0)
-; LMULMAX2-NEXT: vle32.v v10, (a1)
-; LMULMAX2-NEXT: vfsub.vv v8, v8, v10
-; LMULMAX2-NEXT: vse32.v v8, (a0)
-; LMULMAX2-NEXT: ret
-;
-; LMULMAX1-RV32-LABEL: fsub_v8f32:
-; LMULMAX1-RV32: # %bb.0:
-; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; LMULMAX1-RV32-NEXT: vle32.v v8, (a0)
-; LMULMAX1-RV32-NEXT: addi a2, a0, 16
-; LMULMAX1-RV32-NEXT: vle32.v v9, (a2)
-; LMULMAX1-RV32-NEXT: addi a3, a1, 16
-; LMULMAX1-RV32-NEXT: vle32.v v10, (a3)
-; LMULMAX1-RV32-NEXT: vle32.v v11, (a1)
-; LMULMAX1-RV32-NEXT: vfsub.vv v9, v9, v10
-; LMULMAX1-RV32-NEXT: vfsub.vv v8, v8, v11
-; LMULMAX1-RV32-NEXT: vse32.v v8, (a0)
-; LMULMAX1-RV32-NEXT: vse32.v v9, (a2)
-; LMULMAX1-RV32-NEXT: ret
-;
-; LMULMAX1-RV64-LABEL: fsub_v8f32:
-; LMULMAX1-RV64: # %bb.0:
-; LMULMAX1-RV64-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; LMULMAX1-RV64-NEXT: vle32.v v8, (a0)
-; LMULMAX1-RV64-NEXT: addi a2, a1, 16
-; LMULMAX1-RV64-NEXT: vle32.v v9, (a2)
-; LMULMAX1-RV64-NEXT: addi a2, a0, 16
-; LMULMAX1-RV64-NEXT: vle32.v v10, (a2)
-; LMULMAX1-RV64-NEXT: vle32.v v11, (a1)
-; LMULMAX1-RV64-NEXT: vfsub.vv v9, v10, v9
-; LMULMAX1-RV64-NEXT: vfsub.vv v8, v8, v11
-; LMULMAX1-RV64-NEXT: vse32.v v8, (a0)
-; LMULMAX1-RV64-NEXT: vse32.v v9, (a2)
-; LMULMAX1-RV64-NEXT: ret
+; ZVFH-LABEL: fsub_v8f32:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; ZVFH-NEXT: vle32.v v8, (a0)
+; ZVFH-NEXT: vle32.v v10, (a1)
+; ZVFH-NEXT: vfsub.vv v8, v8, v10
+; ZVFH-NEXT: vse32.v v8, (a0)
+; ZVFH-NEXT: ret
;
; ZVFHMIN-LABEL: fsub_v8f32:
; ZVFHMIN: # %bb.0:
@@ -2549,44 +1859,14 @@ define void @fsub_v8f32(ptr %x, ptr %y) {
}
define void @fsub_v4f64(ptr %x, ptr %y) {
-; LMULMAX2-LABEL: fsub_v4f64:
-; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; LMULMAX2-NEXT: vle64.v v8, (a0)
-; LMULMAX2-NEXT: vle64.v v10, (a1)
-; LMULMAX2-NEXT: vfsub.vv v8, v8, v10
-; LMULMAX2-NEXT: vse64.v v8, (a0)
-; LMULMAX2-NEXT: ret
-;
-; LMULMAX1-RV32-LABEL: fsub_v4f64:
-; LMULMAX1-RV32: # %bb.0:
-; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; LMULMAX1-RV32-NEXT: vle64.v v8, (a0)
-; LMULMAX1-RV32-NEXT: addi a2, a0, 16
-; LMULMAX1-RV32-NEXT: vle64.v v9, (a2)
-; LMULMAX1-RV32-NEXT: addi a3, a1, 16
-; LMULMAX1-RV32-NEXT: vle64.v v10, (a3)
-; LMULMAX1-RV32-NEXT: vle64.v v11, (a1)
-; LMULMAX1-RV32-NEXT: vfsub.vv v9, v9, v10
-; LMULMAX1-RV32-NEXT: vfsub.vv v8, v8, v11
-; LMULMAX1-RV32-NEXT: vse64.v v8, (a0)
-; LMULMAX1-RV32-NEXT: vse64.v v9, (a2)
-; LMULMAX1-RV32-NEXT: ret
-;
-; LMULMAX1-RV64-LABEL: fsub_v4f64:
-; LMULMAX1-RV64: # %bb.0:
-; LMULMAX1-RV64-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; LMULMAX1-RV64-NEXT: vle64.v v8, (a0)
-; LMULMAX1-RV64-NEXT: addi a2, a1, 16
-; LMULMAX1-RV64-NEXT: vle64.v v9, (a2)
-; LMULMAX1-RV64-NEXT: addi a2, a0, 16
-; LMULMAX1-RV64-NEXT: vle64.v v10, (a2)
-; LMULMAX1-RV64-NEXT: vle64.v v11, (a1)
-; LMULMAX1-RV64-NEXT: vfsub.vv v9, v10, v9
-; LMULMAX1-RV64-NEXT: vfsub.vv v8, v8, v11
-; LMULMAX1-RV64-NEXT: vse64.v v8, (a0)
-; LMULMAX1-RV64-NEXT: vse64.v v9, (a2)
-; LMULMAX1-RV64-NEXT: ret
+; ZVFH-LABEL: fsub_v4f64:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; ZVFH-NEXT: vle64.v v8, (a0)
+; ZVFH-NEXT: vle64.v v10, (a1)
+; ZVFH-NEXT: vfsub.vv v8, v8, v10
+; ZVFH-NEXT: vse64.v v8, (a0)
+; ZVFH-NEXT: ret
;
; ZVFHMIN-LABEL: fsub_v4f64:
; ZVFHMIN: # %bb.0:
@@ -2604,58 +1884,28 @@ define void @fsub_v4f64(ptr %x, ptr %y) {
}
define void @fmul_v16f16(ptr %x, ptr %y) {
-; LMULMAX2-LABEL: fmul_v16f16:
-; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; LMULMAX2-NEXT: vle16.v v8, (a0)
-; LMULMAX2-NEXT: vle16.v v10, (a1)
-; LMULMAX2-NEXT: vfmul.vv v8, v8, v10
-; LMULMAX2-NEXT: vse16.v v8, (a0)
-; LMULMAX2-NEXT: ret
-;
-; LMULMAX1-RV32-LABEL: fmul_v16f16:
-; LMULMAX1-RV32: # %bb.0:
-; LMULMAX1-RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; LMULMAX1-RV32-NEXT: vle16.v v8, (a0)
-; LMULMAX1-RV32-NEXT: addi a2, a0, 16
-; LMULMAX1-RV32-NEXT: vle16.v v9, (a2)
-; LMULMAX1-RV32-NEXT: addi a3, a1, 16
-; LMULMAX1-RV32-NEXT: vle16.v v10, (a3)
-; LMULMAX1-RV32-NEXT: vle16.v v11, (a1)
-; LMULMAX1-RV32-NEXT: vfmul.vv v9, v9, v10
-; LMULMAX1-RV32-NEXT: vfmul.vv v8, v8, v11
-; LMULMAX1-RV32-NEXT: vse16.v v8, (a0)
-; LMULMAX1-RV32-NEXT: vse16.v v9, (a2)
-; LMULMAX1-RV32-NEXT: ret
-;
-; LMULMAX1-RV64-LABEL: fmul_v16f16:
-; LMULMAX1-RV64: # %bb.0:
-; LMULMAX1-RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; LMULMAX1-RV64-NEXT: vle16.v v8, (a0)
-; LMULMAX1-RV64-NEXT: addi a2, a1, 16
-; LMULMAX1-RV64-NEXT: vle16.v v9, (a2)
-; LMULMAX1-RV64-NEXT: addi a2, a0, 16
-; LMULMAX1-RV64-NEXT: vle16.v v10, (a2)
-; LMULMAX1-RV64-NEXT: vle16.v v11, (a1)
-; LMULMAX1-RV64-NEXT: vfmul.vv v9, v10, v9
-; LMULMAX1-RV64-NEXT: vfmul.vv v8, v8, v11
-; LMULMAX1-RV64-NEXT: vse16.v v8, (a0)
-; LMULMAX1-RV64-NEXT: vse16.v v9, (a2)
-; LMULMAX1-RV64-NEXT: ret
-;
-; ZVFHMINLMULMAX2-LABEL: fmul_v16f16:
-; ZVFHMINLMULMAX2: # %bb.0:
-; ZVFHMINLMULMAX2-NEXT: vsetivli zero, 16, e16, m1, ta, ma
-; ZVFHMINLMULMAX2-NEXT: vle16.v v8, (a1)
-; ZVFHMINLMULMAX2-NEXT: vle16.v v9, (a0)
-; ZVFHMINLMULMAX2-NEXT: vfwcvt.f.f.v v10, v8
-; ZVFHMINLMULMAX2-NEXT: vfwcvt.f.f.v v12, v9
-; ZVFHMINLMULMAX2-NEXT: vsetvli zero, zero, e32, m2, ta, ma
-; ZVFHMINLMULMAX2-NEXT: vfmul.vv v8, v12, v10
-; ZVFHMINLMULMAX2-NEXT: vsetvli zero, zero, e16, m1, ta, ma
-; ZVFHMINLMULMAX2-NEXT: vfncvt.f.f.w v10, v8
-; ZVFHMINLMULMAX2-NEXT: vse16.v v10, (a0)
-; ZVFHMINLMULMAX2-NEXT: ret
+; ZVFH-LABEL: fmul_v16f16:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; ZVFH-NEXT: vle16.v v8, (a0)
+; ZVFH-NEXT: vle16.v v10, (a1)
+; ZVFH-NEXT: vfmul.vv v8, v8, v10
+; ZVFH-NEXT: vse16.v v8, (a0)
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: fmul_v16f16:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: vsetivli zero, 16, e16, m1, ta, ma
+; ZVFHMIN-NEXT: vle16.v v8, (a1)
+; ZVFHMIN-NEXT: vle16.v v9, (a0)
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v8
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v9
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; ZVFHMIN-NEXT: vfmul.vv v8, v12, v10
+; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m1, ta, ma
+; ZVFHMIN-NEXT: vfncvt.f.f.w v10, v8
+; ZVFHMIN-NEXT: vse16.v v10, (a0)
+; ZVFHMIN-NEXT: ret
%a = load <16 x half>, ptr %x
%b = load <16 x half>, ptr %y
%c = fmul <16 x half> %a, %b
@@ -2664,44 +1914,14 @@ define void @fmul_v16f16(ptr %x, ptr %y) {
}
define void @fmul_v8f32(ptr %x, ptr %y) {
-; LMULMAX2-LABEL: fmul_v8f32:
-; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; LMULMAX2-NEXT: vle32.v v8, (a0)
-; LMULMAX2-NEXT: vle32.v v10, (a1)
-; LMULMAX2-NEXT: vfmul.vv v8, v8, v10
-; LMULMAX2-NEXT: vse32.v v8, (a0)
-; LMULMAX2-NEXT: ret
-;
-; LMULMAX1-RV32-LABEL: fmul_v8f32:
-; LMULMAX1-RV32: # %bb.0:
-; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; LMULMAX1-RV32-NEXT: vle32.v v8, (a0)
-; LMULMAX1-RV32-NEXT: addi a2, a0, 16
-; LMULMAX1-RV32-NEXT: vle32.v v9, (a2)
-; LMULMAX1-RV32-NEXT: addi a3, a1, 16
-; LMULMAX1-RV32-NEXT: vle32.v v10, (a3)
-; LMULMAX1-RV32-NEXT: vle32.v v11, (a1)
-; LMULMAX1-RV32-NEXT: vfmul.vv v9, v9, v10
-; LMULMAX1-RV32-NEXT: vfmul.vv v8, v8, v11
-; LMULMAX1-RV32-NEXT: vse32.v v8, (a0)
-; LMULMAX1-RV32-NEXT: vse32.v v9, (a2)
-; LMULMAX1-RV32-NEXT: ret
-;
-; LMULMAX1-RV64-LABEL: fmul_v8f32:
-; LMULMAX1-RV64: # %bb.0:
-; LMULMAX1-RV64-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; LMULMAX1-RV64-NEXT: vle32.v v8, (a0)
-; LMULMAX1-RV64-NEXT: addi a2, a1, 16
-; LMULMAX1-RV64-NEXT: vle32.v v9, (a2)
-; LMULMAX1-RV64-NEXT: addi a2, a0, 16
-; LMULMAX1-RV64-NEXT: vle32.v v10, (a2)
-; LMULMAX1-RV64-NEXT: vle32.v v11, (a1)
-; LMULMAX1-RV64-NEXT: vfmul.vv v9, v10, v9
-; LMULMAX1-RV64-NEXT: vfmul.vv v8, v8, v11
-; LMULMAX1-RV64-NEXT: vse32.v v8, (a0)
-; LMULMAX1-RV64-NEXT: vse32.v v9, (a2)
-; LMULMAX1-RV64-NEXT: ret
+; ZVFH-LABEL: fmul_v8f32:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; ZVFH-NEXT: vle32.v v8, (a0)
+; ZVFH-NEXT: vle32.v v10, (a1)
+; ZVFH-NEXT: vfmul.vv v8, v8, v10
+; ZVFH-NEXT: vse32.v v8, (a0)
+; ZVFH-NEXT: ret
;
; ZVFHMIN-LABEL: fmul_v8f32:
; ZVFHMIN: # %bb.0:
@@ -2719,44 +1939,14 @@ define void @fmul_v8f32(ptr %x, ptr %y) {
}
define void @fmul_v4f64(ptr %x, ptr %y) {
-; LMULMAX2-LABEL: fmul_v4f64:
-; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; LMULMAX2-NEXT: vle64.v v8, (a0)
-; LMULMAX2-NEXT: vle64.v v10, (a1)
-; LMULMAX2-NEXT: vfmul.vv v8, v8, v10
-; LMULMAX2-NEXT: vse64.v v8, (a0)
-; LMULMAX2-NEXT: ret
-;
-; LMULMAX1-RV32-LABEL: fmul_v4f64:
-; LMULMAX1-RV32: # %bb.0:
-; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; LMULMAX1-RV32-NEXT: vle64.v v8, (a0)
-; LMULMAX1-RV32-NEXT: addi a2, a0, 16
-; LMULMAX1-RV32-NEXT: vle64.v v9, (a2)
-; LMULMAX1-RV32-NEXT: addi a3, a1, 16
-; LMULMAX1-RV32-NEXT: vle64.v v10, (a3)
-; LMULMAX1-RV32-NEXT: vle64.v v11, (a1)
-; LMULMAX1-RV32-NEXT: vfmul.vv v9, v9, v10
-; LMULMAX1-RV32-NEXT: vfmul.vv v8, v8, v11
-; LMULMAX1-RV32-NEXT: vse64.v v8, (a0)
-; LMULMAX1-RV32-NEXT: vse64.v v9, (a2)
-; LMULMAX1-RV32-NEXT: ret
-;
-; LMULMAX1-RV64-LABEL: fmul_v4f64:
-; LMULMAX1-RV64: # %bb.0:
-; LMULMAX1-RV64-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; LMULMAX1-RV64-NEXT: vle64.v v8, (a0)
-; LMULMAX1-RV64-NEXT: addi a2, a1, 16
-; LMULMAX1-RV64-NEXT: vle64.v v9, (a2)
-; LMULMAX1-RV64-NEXT: addi a2, a0, 16
-; LMULMAX1-RV64-NEXT: vle64.v v10, (a2)
-; LMULMAX1-RV64-NEXT: vle64.v v11, (a1)
-; LMULMAX1-RV64-NEXT: vfmul.vv v9, v10, v9
-; LMULMAX1-RV64-NEXT: vfmul.vv v8, v8, v11
-; LMULMAX1-RV64-NEXT: vse64.v v8, (a0)
-; LMULMAX1-RV64-NEXT: vse64.v v9, (a2)
-; LMULMAX1-RV64-NEXT: ret
+; ZVFH-LABEL: fmul_v4f64:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; ZVFH-NEXT: vle64.v v8, (a0)
+; ZVFH-NEXT: vle64.v v10, (a1)
+; ZVFH-NEXT: vfmul.vv v8, v8, v10
+; ZVFH-NEXT: vse64.v v8, (a0)
+; ZVFH-NEXT: ret
;
; ZVFHMIN-LABEL: fmul_v4f64:
; ZVFHMIN: # %bb.0:
@@ -2774,58 +1964,28 @@ define void @fmul_v4f64(ptr %x, ptr %y) {
}
define void @fdiv_v16f16(ptr %x, ptr %y) {
-; LMULMAX2-LABEL: fdiv_v16f16:
-; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; LMULMAX2-NEXT: vle16.v v8, (a0)
-; LMULMAX2-NEXT: vle16.v v10, (a1)
-; LMULMAX2-NEXT: vfdiv.vv v8, v8, v10
-; LMULMAX2-NEXT: vse16.v v8, (a0)
-; LMULMAX2-NEXT: ret
-;
-; LMULMAX1-RV32-LABEL: fdiv_v16f16:
-; LMULMAX1-RV32: # %bb.0:
-; LMULMAX1-RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; LMULMAX1-RV32-NEXT: vle16.v v8, (a0)
-; LMULMAX1-RV32-NEXT: addi a2, a0, 16
-; LMULMAX1-RV32-NEXT: vle16.v v9, (a2)
-; LMULMAX1-RV32-NEXT: addi a3, a1, 16
-; LMULMAX1-RV32-NEXT: vle16.v v10, (a3)
-; LMULMAX1-RV32-NEXT: vle16.v v11, (a1)
-; LMULMAX1-RV32-NEXT: vfdiv.vv v9, v9, v10
-; LMULMAX1-RV32-NEXT: vfdiv.vv v8, v8, v11
-; LMULMAX1-RV32-NEXT: vse16.v v8, (a0)
-; LMULMAX1-RV32-NEXT: vse16.v v9, (a2)
-; LMULMAX1-RV32-NEXT: ret
-;
-; LMULMAX1-RV64-LABEL: fdiv_v16f16:
-; LMULMAX1-RV64: # %bb.0:
-; LMULMAX1-RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; LMULMAX1-RV64-NEXT: vle16.v v8, (a0)
-; LMULMAX1-RV64-NEXT: addi a2, a1, 16
-; LMULMAX1-RV64-NEXT: vle16.v v9, (a2)
-; LMULMAX1-RV64-NEXT: addi a2, a0, 16
-; LMULMAX1-RV64-NEXT: vle16.v v10, (a2)
-; LMULMAX1-RV64-NEXT: vle16.v v11, (a1)
-; LMULMAX1-RV64-NEXT: vfdiv.vv v9, v10, v9
-; LMULMAX1-RV64-NEXT: vfdiv.vv v8, v8, v11
-; LMULMAX1-RV64-NEXT: vse16.v v8, (a0)
-; LMULMAX1-RV64-NEXT: vse16.v v9, (a2)
-; LMULMAX1-RV64-NEXT: ret
-;
-; ZVFHMINLMULMAX2-LABEL: fdiv_v16f16:
-; ZVFHMINLMULMAX2: # %bb.0:
-; ZVFHMINLMULMAX2-NEXT: vsetivli zero, 16, e16, m1, ta, ma
-; ZVFHMINLMULMAX2-NEXT: vle16.v v8, (a1)
-; ZVFHMINLMULMAX2-NEXT: vle16.v v9, (a0)
-; ZVFHMINLMULMAX2-NEXT: vfwcvt.f.f.v v10, v8
-; ZVFHMINLMULMAX2-NEXT: vfwcvt.f.f.v v12, v9
-; ZVFHMINLMULMAX2-NEXT: vsetvli zero, zero, e32, m2, ta, ma
-; ZVFHMINLMULMAX2-NEXT: vfdiv.vv v8, v12, v10
-; ZVFHMINLMULMAX2-NEXT: vsetvli zero, zero, e16, m1, ta, ma
-; ZVFHMINLMULMAX2-NEXT: vfncvt.f.f.w v10, v8
-; ZVFHMINLMULMAX2-NEXT: vse16.v v10, (a0)
-; ZVFHMINLMULMAX2-NEXT: ret
+; ZVFH-LABEL: fdiv_v16f16:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; ZVFH-NEXT: vle16.v v8, (a0)
+; ZVFH-NEXT: vle16.v v10, (a1)
+; ZVFH-NEXT: vfdiv.vv v8, v8, v10
+; ZVFH-NEXT: vse16.v v8, (a0)
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: fdiv_v16f16:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: vsetivli zero, 16, e16, m1, ta, ma
+; ZVFHMIN-NEXT: vle16.v v8, (a1)
+; ZVFHMIN-NEXT: vle16.v v9, (a0)
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v8
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v9
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; ZVFHMIN-NEXT: vfdiv.vv v8, v12, v10
+; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m1, ta, ma
+; ZVFHMIN-NEXT: vfncvt.f.f.w v10, v8
+; ZVFHMIN-NEXT: vse16.v v10, (a0)
+; ZVFHMIN-NEXT: ret
%a = load <16 x half>, ptr %x
%b = load <16 x half>, ptr %y
%c = fdiv <16 x half> %a, %b
@@ -2834,44 +1994,14 @@ define void @fdiv_v16f16(ptr %x, ptr %y) {
}
define void @fdiv_v8f32(ptr %x, ptr %y) {
-; LMULMAX2-LABEL: fdiv_v8f32:
-; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; LMULMAX2-NEXT: vle32.v v8, (a0)
-; LMULMAX2-NEXT: vle32.v v10, (a1)
-; LMULMAX2-NEXT: vfdiv.vv v8, v8, v10
-; LMULMAX2-NEXT: vse32.v v8, (a0)
-; LMULMAX2-NEXT: ret
-;
-; LMULMAX1-RV32-LABEL: fdiv_v8f32:
-; LMULMAX1-RV32: # %bb.0:
-; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; LMULMAX1-RV32-NEXT: vle32.v v8, (a0)
-; LMULMAX1-RV32-NEXT: addi a2, a0, 16
-; LMULMAX1-RV32-NEXT: vle32.v v9, (a2)
-; LMULMAX1-RV32-NEXT: addi a3, a1, 16
-; LMULMAX1-RV32-NEXT: vle32.v v10, (a3)
-; LMULMAX1-RV32-NEXT: vle32.v v11, (a1)
-; LMULMAX1-RV32-NEXT: vfdiv.vv v9, v9, v10
-; LMULMAX1-RV32-NEXT: vfdiv.vv v8, v8, v11
-; LMULMAX1-RV32-NEXT: vse32.v v8, (a0)
-; LMULMAX1-RV32-NEXT: vse32.v v9, (a2)
-; LMULMAX1-RV32-NEXT: ret
-;
-; LMULMAX1-RV64-LABEL: fdiv_v8f32:
-; LMULMAX1-RV64: # %bb.0:
-; LMULMAX1-RV64-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; LMULMAX1-RV64-NEXT: vle32.v v8, (a0)
-; LMULMAX1-RV64-NEXT: addi a2, a1, 16
-; LMULMAX1-RV64-NEXT: vle32.v v9, (a2)
-; LMULMAX1-RV64-NEXT: addi a2, a0, 16
-; LMULMAX1-RV64-NEXT: vle32.v v10, (a2)
-; LMULMAX1-RV64-NEXT: vle32.v v11, (a1)
-; LMULMAX1-RV64-NEXT: vfdiv.vv v9, v10, v9
-; LMULMAX1-RV64-NEXT: vfdiv.vv v8, v8, v11
-; LMULMAX1-RV64-NEXT: vse32.v v8, (a0)
-; LMULMAX1-RV64-NEXT: vse32.v v9, (a2)
-; LMULMAX1-RV64-NEXT: ret
+; ZVFH-LABEL: fdiv_v8f32:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; ZVFH-NEXT: vle32.v v8, (a0)
+; ZVFH-NEXT: vle32.v v10, (a1)
+; ZVFH-NEXT: vfdiv.vv v8, v8, v10
+; ZVFH-NEXT: vse32.v v8, (a0)
+; ZVFH-NEXT: ret
;
; ZVFHMIN-LABEL: fdiv_v8f32:
; ZVFHMIN: # %bb.0:
@@ -2889,44 +2019,14 @@ define void @fdiv_v8f32(ptr %x, ptr %y) {
}
define void @fdiv_v4f64(ptr %x, ptr %y) {
-; LMULMAX2-LABEL: fdiv_v4f64:
-; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; LMULMAX2-NEXT: vle64.v v8, (a0)
-; LMULMAX2-NEXT: vle64.v v10, (a1)
-; LMULMAX2-NEXT: vfdiv.vv v8, v8, v10
-; LMULMAX2-NEXT: vse64.v v8, (a0)
-; LMULMAX2-NEXT: ret
-;
-; LMULMAX1-RV32-LABEL: fdiv_v4f64:
-; LMULMAX1-RV32: # %bb.0:
-; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; LMULMAX1-RV32-NEXT: vle64.v v8, (a0)
-; LMULMAX1-RV32-NEXT: addi a2, a0, 16
-; LMULMAX1-RV32-NEXT: vle64.v v9, (a2)
-; LMULMAX1-RV32-NEXT: addi a3, a1, 16
-; LMULMAX1-RV32-NEXT: vle64.v v10, (a3)
-; LMULMAX1-RV32-NEXT: vle64.v v11, (a1)
-; LMULMAX1-RV32-NEXT: vfdiv.vv v9, v9, v10
-; LMULMAX1-RV32-NEXT: vfdiv.vv v8, v8, v11
-; LMULMAX1-RV32-NEXT: vse64.v v8, (a0)
-; LMULMAX1-RV32-NEXT: vse64.v v9, (a2)
-; LMULMAX1-RV32-NEXT: ret
-;
-; LMULMAX1-RV64-LABEL: fdiv_v4f64:
-; LMULMAX1-RV64: # %bb.0:
-; LMULMAX1-RV64-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; LMULMAX1-RV64-NEXT: vle64.v v8, (a0)
-; LMULMAX1-RV64-NEXT: addi a2, a1, 16
-; LMULMAX1-RV64-NEXT: vle64.v v9, (a2)
-; LMULMAX1-RV64-NEXT: addi a2, a0, 16
-; LMULMAX1-RV64-NEXT: vle64.v v10, (a2)
-; LMULMAX1-RV64-NEXT: vle64.v v11, (a1)
-; LMULMAX1-RV64-NEXT: vfdiv.vv v9, v10, v9
-; LMULMAX1-RV64-NEXT: vfdiv.vv v8, v8, v11
-; LMULMAX1-RV64-NEXT: vse64.v v8, (a0)
-; LMULMAX1-RV64-NEXT: vse64.v v9, (a2)
-; LMULMAX1-RV64-NEXT: ret
+; ZVFH-LABEL: fdiv_v4f64:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; ZVFH-NEXT: vle64.v v8, (a0)
+; ZVFH-NEXT: vle64.v v10, (a1)
+; ZVFH-NEXT: vfdiv.vv v8, v8, v10
+; ZVFH-NEXT: vse64.v v8, (a0)
+; ZVFH-NEXT: ret
;
; ZVFHMIN-LABEL: fdiv_v4f64:
; ZVFHMIN: # %bb.0:
@@ -2944,37 +2044,25 @@ define void @fdiv_v4f64(ptr %x, ptr %y) {
}
define void @fneg_v16f16(ptr %x) {
-; LMULMAX2-LABEL: fneg_v16f16:
-; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; LMULMAX2-NEXT: vle16.v v8, (a0)
-; LMULMAX2-NEXT: vfneg.v v8, v8
-; LMULMAX2-NEXT: vse16.v v8, (a0)
-; LMULMAX2-NEXT: ret
-;
-; LMULMAX1-LABEL: fneg_v16f16:
-; LMULMAX1: # %bb.0:
-; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; LMULMAX1-NEXT: addi a1, a0, 16
-; LMULMAX1-NEXT: vle16.v v8, (a1)
-; LMULMAX1-NEXT: vle16.v v9, (a0)
-; LMULMAX1-NEXT: vfneg.v v8, v8
-; LMULMAX1-NEXT: vfneg.v v9, v9
-; LMULMAX1-NEXT: vse16.v v9, (a0)
-; LMULMAX1-NEXT: vse16.v v8, (a1)
-; LMULMAX1-NEXT: ret
-;
-; ZVFHMINLMULMAX2-LABEL: fneg_v16f16:
-; ZVFHMINLMULMAX2: # %bb.0:
-; ZVFHMINLMULMAX2-NEXT: vsetivli zero, 16, e16, m1, ta, ma
-; ZVFHMINLMULMAX2-NEXT: vle16.v v8, (a0)
-; ZVFHMINLMULMAX2-NEXT: vfwcvt.f.f.v v10, v8
-; ZVFHMINLMULMAX2-NEXT: vsetvli zero, zero, e32, m2, ta, ma
-; ZVFHMINLMULMAX2-NEXT: vfneg.v v8, v10
-; ZVFHMINLMULMAX2-NEXT: vsetvli zero, zero, e16, m1, ta, ma
-; ZVFHMINLMULMAX2-NEXT: vfncvt.f.f.w v10, v8
-; ZVFHMINLMULMAX2-NEXT: vse16.v v10, (a0)
-; ZVFHMINLMULMAX2-NEXT: ret
+; ZVFH-LABEL: fneg_v16f16:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; ZVFH-NEXT: vle16.v v8, (a0)
+; ZVFH-NEXT: vfneg.v v8, v8
+; ZVFH-NEXT: vse16.v v8, (a0)
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: fneg_v16f16:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: vsetivli zero, 16, e16, m1, ta, ma
+; ZVFHMIN-NEXT: vle16.v v8, (a0)
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v8
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; ZVFHMIN-NEXT: vfneg.v v8, v10
+; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m1, ta, ma
+; ZVFHMIN-NEXT: vfncvt.f.f.w v10, v8
+; ZVFHMIN-NEXT: vse16.v v10, (a0)
+; ZVFHMIN-NEXT: ret
%a = load <16 x half>, ptr %x
%b = fneg <16 x half> %a
store <16 x half> %b, ptr %x
@@ -2982,25 +2070,13 @@ define void @fneg_v16f16(ptr %x) {
}
define void @fneg_v8f32(ptr %x) {
-; LMULMAX2-LABEL: fneg_v8f32:
-; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; LMULMAX2-NEXT: vle32.v v8, (a0)
-; LMULMAX2-NEXT: vfneg.v v8, v8
-; LMULMAX2-NEXT: vse32.v v8, (a0)
-; LMULMAX2-NEXT: ret
-;
-; LMULMAX1-LABEL: fneg_v8f32:
-; LMULMAX1: # %bb.0:
-; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; LMULMAX1-NEXT: addi a1, a0, 16
-; LMULMAX1-NEXT: vle32.v v8, (a1)
-; LMULMAX1-NEXT: vle32.v v9, (a0)
-; LMULMAX1-NEXT: vfneg.v v8, v8
-; LMULMAX1-NEXT: vfneg.v v9, v9
-; LMULMAX1-NEXT: vse32.v v9, (a0)
-; LMULMAX1-NEXT: vse32.v v8, (a1)
-; LMULMAX1-NEXT: ret
+; ZVFH-LABEL: fneg_v8f32:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; ZVFH-NEXT: vle32.v v8, (a0)
+; ZVFH-NEXT: vfneg.v v8, v8
+; ZVFH-NEXT: vse32.v v8, (a0)
+; ZVFH-NEXT: ret
;
; ZVFHMIN-LABEL: fneg_v8f32:
; ZVFHMIN: # %bb.0:
@@ -3016,25 +2092,13 @@ define void @fneg_v8f32(ptr %x) {
}
define void @fneg_v4f64(ptr %x) {
-; LMULMAX2-LABEL: fneg_v4f64:
-; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; LMULMAX2-NEXT: vle64.v v8, (a0)
-; LMULMAX2-NEXT: vfneg.v v8, v8
-; LMULMAX2-NEXT: vse64.v v8, (a0)
-; LMULMAX2-NEXT: ret
-;
-; LMULMAX1-LABEL: fneg_v4f64:
-; LMULMAX1: # %bb.0:
-; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; LMULMAX1-NEXT: addi a1, a0, 16
-; LMULMAX1-NEXT: vle64.v v8, (a1)
-; LMULMAX1-NEXT: vle64.v v9, (a0)
-; LMULMAX1-NEXT: vfneg.v v8, v8
-; LMULMAX1-NEXT: vfneg.v v9, v9
-; LMULMAX1-NEXT: vse64.v v9, (a0)
-; LMULMAX1-NEXT: vse64.v v8, (a1)
-; LMULMAX1-NEXT: ret
+; ZVFH-LABEL: fneg_v4f64:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; ZVFH-NEXT: vle64.v v8, (a0)
+; ZVFH-NEXT: vfneg.v v8, v8
+; ZVFH-NEXT: vse64.v v8, (a0)
+; ZVFH-NEXT: ret
;
; ZVFHMIN-LABEL: fneg_v4f64:
; ZVFHMIN: # %bb.0:
@@ -3050,49 +2114,31 @@ define void @fneg_v4f64(ptr %x) {
}
define void @fma_v16f16(ptr %x, ptr %y, ptr %z) {
-; LMULMAX2-LABEL: fma_v16f16:
-; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; LMULMAX2-NEXT: vle16.v v8, (a0)
-; LMULMAX2-NEXT: vle16.v v10, (a1)
-; LMULMAX2-NEXT: vle16.v v12, (a2)
-; LMULMAX2-NEXT: vfmacc.vv v12, v8, v10
-; LMULMAX2-NEXT: vse16.v v12, (a0)
-; LMULMAX2-NEXT: ret
-;
-; LMULMAX1-LABEL: fma_v16f16:
-; LMULMAX1: # %bb.0:
-; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; LMULMAX1-NEXT: vle16.v v8, (a0)
-; LMULMAX1-NEXT: addi a3, a0, 16
-; LMULMAX1-NEXT: vle16.v v9, (a3)
-; LMULMAX1-NEXT: vle16.v v10, (a1)
-; LMULMAX1-NEXT: addi a1, a1, 16
-; LMULMAX1-NEXT: vle16.v v11, (a1)
-; LMULMAX1-NEXT: addi a1, a2, 16
-; LMULMAX1-NEXT: vle16.v v12, (a1)
-; LMULMAX1-NEXT: vle16.v v13, (a2)
-; LMULMAX1-NEXT: vfmacc.vv v12, v9, v11
-; LMULMAX1-NEXT: vfmacc.vv v13, v8, v10
-; LMULMAX1-NEXT: vse16.v v13, (a0)
-; LMULMAX1-NEXT: vse16.v v12, (a3)
-; LMULMAX1-NEXT: ret
-;
-; ZVFHMINLMULMAX2-LABEL: fma_v16f16:
-; ZVFHMINLMULMAX2: # %bb.0:
-; ZVFHMINLMULMAX2-NEXT: vsetivli zero, 16, e16, m1, ta, ma
-; ZVFHMINLMULMAX2-NEXT: vle16.v v8, (a2)
-; ZVFHMINLMULMAX2-NEXT: vle16.v v9, (a0)
-; ZVFHMINLMULMAX2-NEXT: vle16.v v10, (a1)
-; ZVFHMINLMULMAX2-NEXT: vfwcvt.f.f.v v12, v8
-; ZVFHMINLMULMAX2-NEXT: vfwcvt.f.f.v v14, v9
-; ZVFHMINLMULMAX2-NEXT: vfwcvt.f.f.v v8, v10
-; ZVFHMINLMULMAX2-NEXT: vsetvli zero, zero, e32, m2, ta, ma
-; ZVFHMINLMULMAX2-NEXT: vfmadd.vv v8, v14, v12
-; ZVFHMINLMULMAX2-NEXT: vsetvli zero, zero, e16, m1, ta, ma
-; ZVFHMINLMULMAX2-NEXT: vfncvt.f.f.w v10, v8
-; ZVFHMINLMULMAX2-NEXT: vse16.v v10, (a0)
-; ZVFHMINLMULMAX2-NEXT: ret
+; ZVFH-LABEL: fma_v16f16:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; ZVFH-NEXT: vle16.v v8, (a0)
+; ZVFH-NEXT: vle16.v v10, (a1)
+; ZVFH-NEXT: vle16.v v12, (a2)
+; ZVFH-NEXT: vfmacc.vv v12, v8, v10
+; ZVFH-NEXT: vse16.v v12, (a0)
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: fma_v16f16:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: vsetivli zero, 16, e16, m1, ta, ma
+; ZVFHMIN-NEXT: vle16.v v8, (a2)
+; ZVFHMIN-NEXT: vle16.v v9, (a0)
+; ZVFHMIN-NEXT: vle16.v v10, (a1)
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v8
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v14, v9
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v10
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; ZVFHMIN-NEXT: vfmadd.vv v8, v14, v12
+; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m1, ta, ma
+; ZVFHMIN-NEXT: vfncvt.f.f.w v10, v8
+; ZVFHMIN-NEXT: vse16.v v10, (a0)
+; ZVFHMIN-NEXT: ret
%a = load <16 x half>, ptr %x
%b = load <16 x half>, ptr %y
%c = load <16 x half>, ptr %z
@@ -3103,33 +2149,15 @@ define void @fma_v16f16(ptr %x, ptr %y, ptr %z) {
declare <16 x half> @llvm.fma.v16f16(<16 x half>, <16 x half>, <16 x half>)
define void @fma_v8f32(ptr %x, ptr %y, ptr %z) {
-; LMULMAX2-LABEL: fma_v8f32:
-; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; LMULMAX2-NEXT: vle32.v v8, (a0)
-; LMULMAX2-NEXT: vle32.v v10, (a1)
-; LMULMAX2-NEXT: vle32.v v12, (a2)
-; LMULMAX2-NEXT: vfmacc.vv v12, v8, v10
-; LMULMAX2-NEXT: vse32.v v12, (a0)
-; LMULMAX2-NEXT: ret
-;
-; LMULMAX1-LABEL: fma_v8f32:
-; LMULMAX1: # %bb.0:
-; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; LMULMAX1-NEXT: vle32.v v8, (a0)
-; LMULMAX1-NEXT: addi a3, a0, 16
-; LMULMAX1-NEXT: vle32.v v9, (a3)
-; LMULMAX1-NEXT: vle32.v v10, (a1)
-; LMULMAX1-NEXT: addi a1, a1, 16
-; LMULMAX1-NEXT: vle32.v v11, (a1)
-; LMULMAX1-NEXT: addi a1, a2, 16
-; LMULMAX1-NEXT: vle32.v v12, (a1)
-; LMULMAX1-NEXT: vle32.v v13, (a2)
-; LMULMAX1-NEXT: vfmacc.vv v12, v9, v11
-; LMULMAX1-NEXT: vfmacc.vv v13, v8, v10
-; LMULMAX1-NEXT: vse32.v v13, (a0)
-; LMULMAX1-NEXT: vse32.v v12, (a3)
-; LMULMAX1-NEXT: ret
+; ZVFH-LABEL: fma_v8f32:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; ZVFH-NEXT: vle32.v v8, (a0)
+; ZVFH-NEXT: vle32.v v10, (a1)
+; ZVFH-NEXT: vle32.v v12, (a2)
+; ZVFH-NEXT: vfmacc.vv v12, v8, v10
+; ZVFH-NEXT: vse32.v v12, (a0)
+; ZVFH-NEXT: ret
;
; ZVFHMIN-LABEL: fma_v8f32:
; ZVFHMIN: # %bb.0:
@@ -3150,33 +2178,15 @@ define void @fma_v8f32(ptr %x, ptr %y, ptr %z) {
declare <8 x float> @llvm.fma.v8f32(<8 x float>, <8 x float>, <8 x float>)
define void @fma_v4f64(ptr %x, ptr %y, ptr %z) {
-; LMULMAX2-LABEL: fma_v4f64:
-; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; LMULMAX2-NEXT: vle64.v v8, (a0)
-; LMULMAX2-NEXT: vle64.v v10, (a1)
-; LMULMAX2-NEXT: vle64.v v12, (a2)
-; LMULMAX2-NEXT: vfmacc.vv v12, v8, v10
-; LMULMAX2-NEXT: vse64.v v12, (a0)
-; LMULMAX2-NEXT: ret
-;
-; LMULMAX1-LABEL: fma_v4f64:
-; LMULMAX1: # %bb.0:
-; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; LMULMAX1-NEXT: vle64.v v8, (a0)
-; LMULMAX1-NEXT: addi a3, a0, 16
-; LMULMAX1-NEXT: vle64.v v9, (a3)
-; LMULMAX1-NEXT: vle64.v v10, (a1)
-; LMULMAX1-NEXT: addi a1, a1, 16
-; LMULMAX1-NEXT: vle64.v v11, (a1)
-; LMULMAX1-NEXT: addi a1, a2, 16
-; LMULMAX1-NEXT: vle64.v v12, (a1)
-; LMULMAX1-NEXT: vle64.v v13, (a2)
-; LMULMAX1-NEXT: vfmacc.vv v12, v9, v11
-; LMULMAX1-NEXT: vfmacc.vv v13, v8, v10
-; LMULMAX1-NEXT: vse64.v v13, (a0)
-; LMULMAX1-NEXT: vse64.v v12, (a3)
-; LMULMAX1-NEXT: ret
+; ZVFH-LABEL: fma_v4f64:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; ZVFH-NEXT: vle64.v v8, (a0)
+; ZVFH-NEXT: vle64.v v10, (a1)
+; ZVFH-NEXT: vle64.v v12, (a2)
+; ZVFH-NEXT: vfmacc.vv v12, v8, v10
+; ZVFH-NEXT: vse64.v v12, (a0)
+; ZVFH-NEXT: ret
;
; ZVFHMIN-LABEL: fma_v4f64:
; ZVFHMIN: # %bb.0:
@@ -3242,99 +2252,52 @@ define void @fadd_vf_v6f16(ptr %x, half %y) {
; ZVFH-NEXT: vse16.v v8, (a0)
; ZVFH-NEXT: ret
;
-; ZVFHMINLMULMAX2-RV32-LABEL: fadd_vf_v6f16:
-; ZVFHMINLMULMAX2-RV32: # %bb.0:
-; ZVFHMINLMULMAX2-RV32-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
-; ZVFHMINLMULMAX2-RV32-NEXT: vle16.v v8, (a0)
-; ZVFHMINLMULMAX2-RV32-NEXT: fcvt.s.h fa5, fa0
-; ZVFHMINLMULMAX2-RV32-NEXT: vsetvli a1, zero, e32, m1, ta, ma
-; ZVFHMINLMULMAX2-RV32-NEXT: vfmv.v.f v9, fa5
-; ZVFHMINLMULMAX2-RV32-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
-; ZVFHMINLMULMAX2-RV32-NEXT: vfncvt.f.f.w v10, v9
-; ZVFHMINLMULMAX2-RV32-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
-; ZVFHMINLMULMAX2-RV32-NEXT: vfwcvt.f.f.v v9, v8
-; ZVFHMINLMULMAX2-RV32-NEXT: vfwcvt.f.f.v v8, v10
-; ZVFHMINLMULMAX2-RV32-NEXT: vsetvli zero, zero, e32, m1, ta, ma
-; ZVFHMINLMULMAX2-RV32-NEXT: vfadd.vv v8, v9, v8
-; ZVFHMINLMULMAX2-RV32-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
-; ZVFHMINLMULMAX2-RV32-NEXT: vfncvt.f.f.w v9, v8
-; ZVFHMINLMULMAX2-RV32-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
-; ZVFHMINLMULMAX2-RV32-NEXT: vslidedown.vi v8, v9, 2
-; ZVFHMINLMULMAX2-RV32-NEXT: addi a1, a0, 8
-; ZVFHMINLMULMAX2-RV32-NEXT: vse32.v v8, (a1)
-; ZVFHMINLMULMAX2-RV32-NEXT: vsetivli zero, 4, e16, mf4, ta, ma
-; ZVFHMINLMULMAX2-RV32-NEXT: vse16.v v9, (a0)
-; ZVFHMINLMULMAX2-RV32-NEXT: ret
-;
-; ZVFHMINLMULMAX2-RV64-LABEL: fadd_vf_v6f16:
-; ZVFHMINLMULMAX2-RV64: # %bb.0:
-; ZVFHMINLMULMAX2-RV64-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
-; ZVFHMINLMULMAX2-RV64-NEXT: vle16.v v8, (a0)
-; ZVFHMINLMULMAX2-RV64-NEXT: fcvt.s.h fa5, fa0
-; ZVFHMINLMULMAX2-RV64-NEXT: vsetvli a1, zero, e32, m1, ta, ma
-; ZVFHMINLMULMAX2-RV64-NEXT: vfmv.v.f v9, fa5
-; ZVFHMINLMULMAX2-RV64-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
-; ZVFHMINLMULMAX2-RV64-NEXT: vfncvt.f.f.w v10, v9
-; ZVFHMINLMULMAX2-RV64-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
-; ZVFHMINLMULMAX2-RV64-NEXT: vfwcvt.f.f.v v9, v8
-; ZVFHMINLMULMAX2-RV64-NEXT: vfwcvt.f.f.v v8, v10
-; ZVFHMINLMULMAX2-RV64-NEXT: vsetvli zero, zero, e32, m1, ta, ma
-; ZVFHMINLMULMAX2-RV64-NEXT: vfadd.vv v8, v9, v8
-; ZVFHMINLMULMAX2-RV64-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
-; ZVFHMINLMULMAX2-RV64-NEXT: vfncvt.f.f.w v9, v8
-; ZVFHMINLMULMAX2-RV64-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
-; ZVFHMINLMULMAX2-RV64-NEXT: vse64.v v9, (a0)
-; ZVFHMINLMULMAX2-RV64-NEXT: vslidedown.vi v8, v9, 2
-; ZVFHMINLMULMAX2-RV64-NEXT: addi a0, a0, 8
-; ZVFHMINLMULMAX2-RV64-NEXT: vse32.v v8, (a0)
-; ZVFHMINLMULMAX2-RV64-NEXT: ret
-;
-; ZVFHMINLMULMAX1-RV32-LABEL: fadd_vf_v6f16:
-; ZVFHMINLMULMAX1-RV32: # %bb.0:
-; ZVFHMINLMULMAX1-RV32-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
-; ZVFHMINLMULMAX1-RV32-NEXT: vle16.v v8, (a0)
-; ZVFHMINLMULMAX1-RV32-NEXT: fcvt.s.h fa5, fa0
-; ZVFHMINLMULMAX1-RV32-NEXT: vsetvli a1, zero, e32, m1, ta, ma
-; ZVFHMINLMULMAX1-RV32-NEXT: vfmv.v.f v9, fa5
-; ZVFHMINLMULMAX1-RV32-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
-; ZVFHMINLMULMAX1-RV32-NEXT: vfncvt.f.f.w v10, v9
-; ZVFHMINLMULMAX1-RV32-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
-; ZVFHMINLMULMAX1-RV32-NEXT: vfwcvt.f.f.v v9, v8
-; ZVFHMINLMULMAX1-RV32-NEXT: vfwcvt.f.f.v v8, v10
-; ZVFHMINLMULMAX1-RV32-NEXT: vsetvli zero, zero, e32, m1, ta, ma
-; ZVFHMINLMULMAX1-RV32-NEXT: vfadd.vv v8, v9, v8
-; ZVFHMINLMULMAX1-RV32-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
-; ZVFHMINLMULMAX1-RV32-NEXT: vfncvt.f.f.w v9, v8
-; ZVFHMINLMULMAX1-RV32-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
-; ZVFHMINLMULMAX1-RV32-NEXT: vslidedown.vi v8, v9, 2
-; ZVFHMINLMULMAX1-RV32-NEXT: addi a1, a0, 8
-; ZVFHMINLMULMAX1-RV32-NEXT: vse32.v v8, (a1)
-; ZVFHMINLMULMAX1-RV32-NEXT: vsetivli zero, 4, e16, mf4, ta, ma
-; ZVFHMINLMULMAX1-RV32-NEXT: vse16.v v9, (a0)
-; ZVFHMINLMULMAX1-RV32-NEXT: ret
-;
-; ZVFHMINLMULMAX1-RV64-LABEL: fadd_vf_v6f16:
-; ZVFHMINLMULMAX1-RV64: # %bb.0:
-; ZVFHMINLMULMAX1-RV64-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
-; ZVFHMINLMULMAX1-RV64-NEXT: vle16.v v8, (a0)
-; ZVFHMINLMULMAX1-RV64-NEXT: fcvt.s.h fa5, fa0
-; ZVFHMINLMULMAX1-RV64-NEXT: vsetvli a1, zero, e32, m1, ta, ma
-; ZVFHMINLMULMAX1-RV64-NEXT: vfmv.v.f v9, fa5
-; ZVFHMINLMULMAX1-RV64-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
-; ZVFHMINLMULMAX1-RV64-NEXT: vfncvt.f.f.w v10, v9
-; ZVFHMINLMULMAX1-RV64-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
-; ZVFHMINLMULMAX1-RV64-NEXT: vfwcvt.f.f.v v9, v8
-; ZVFHMINLMULMAX1-RV64-NEXT: vfwcvt.f.f.v v8, v10
-; ZVFHMINLMULMAX1-RV64-NEXT: vsetvli zero, zero, e32, m1, ta, ma
-; ZVFHMINLMULMAX1-RV64-NEXT: vfadd.vv v8, v9, v8
-; ZVFHMINLMULMAX1-RV64-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
-; ZVFHMINLMULMAX1-RV64-NEXT: vfncvt.f.f.w v9, v8
-; ZVFHMINLMULMAX1-RV64-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
-; ZVFHMINLMULMAX1-RV64-NEXT: vse64.v v9, (a0)
-; ZVFHMINLMULMAX1-RV64-NEXT: vslidedown.vi v8, v9, 2
-; ZVFHMINLMULMAX1-RV64-NEXT: addi a0, a0, 8
-; ZVFHMINLMULMAX1-RV64-NEXT: vse32.v v8, (a0)
-; ZVFHMINLMULMAX1-RV64-NEXT: ret
+; ZVFHMIN-RV32-LABEL: fadd_vf_v6f16:
+; ZVFHMIN-RV32: # %bb.0:
+; ZVFHMIN-RV32-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
+; ZVFHMIN-RV32-NEXT: vle16.v v8, (a0)
+; ZVFHMIN-RV32-NEXT: fcvt.s.h fa5, fa0
+; ZVFHMIN-RV32-NEXT: vsetvli a1, zero, e32, m1, ta, ma
+; ZVFHMIN-RV32-NEXT: vfmv.v.f v9, fa5
+; ZVFHMIN-RV32-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
+; ZVFHMIN-RV32-NEXT: vfncvt.f.f.w v10, v9
+; ZVFHMIN-RV32-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
+; ZVFHMIN-RV32-NEXT: vfwcvt.f.f.v v9, v8
+; ZVFHMIN-RV32-NEXT: vfwcvt.f.f.v v8, v10
+; ZVFHMIN-RV32-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; ZVFHMIN-RV32-NEXT: vfadd.vv v8, v9, v8
+; ZVFHMIN-RV32-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
+; ZVFHMIN-RV32-NEXT: vfncvt.f.f.w v9, v8
+; ZVFHMIN-RV32-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
+; ZVFHMIN-RV32-NEXT: vslidedown.vi v8, v9, 2
+; ZVFHMIN-RV32-NEXT: addi a1, a0, 8
+; ZVFHMIN-RV32-NEXT: vse32.v v8, (a1)
+; ZVFHMIN-RV32-NEXT: vsetivli zero, 4, e16, mf4, ta, ma
+; ZVFHMIN-RV32-NEXT: vse16.v v9, (a0)
+; ZVFHMIN-RV32-NEXT: ret
+;
+; ZVFHMIN-RV64-LABEL: fadd_vf_v6f16:
+; ZVFHMIN-RV64: # %bb.0:
+; ZVFHMIN-RV64-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
+; ZVFHMIN-RV64-NEXT: vle16.v v8, (a0)
+; ZVFHMIN-RV64-NEXT: fcvt.s.h fa5, fa0
+; ZVFHMIN-RV64-NEXT: vsetvli a1, zero, e32, m1, ta, ma
+; ZVFHMIN-RV64-NEXT: vfmv.v.f v9, fa5
+; ZVFHMIN-RV64-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
+; ZVFHMIN-RV64-NEXT: vfncvt.f.f.w v10, v9
+; ZVFHMIN-RV64-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
+; ZVFHMIN-RV64-NEXT: vfwcvt.f.f.v v9, v8
+; ZVFHMIN-RV64-NEXT: vfwcvt.f.f.v v8, v10
+; ZVFHMIN-RV64-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; ZVFHMIN-RV64-NEXT: vfadd.vv v8, v9, v8
+; ZVFHMIN-RV64-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
+; ZVFHMIN-RV64-NEXT: vfncvt.f.f.w v9, v8
+; ZVFHMIN-RV64-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
+; ZVFHMIN-RV64-NEXT: vse64.v v9, (a0)
+; ZVFHMIN-RV64-NEXT: vslidedown.vi v8, v9, 2
+; ZVFHMIN-RV64-NEXT: addi a0, a0, 8
+; ZVFHMIN-RV64-NEXT: vse32.v v8, (a0)
+; ZVFHMIN-RV64-NEXT: ret
%a = load <6 x half>, ptr %x
%b = insertelement <6 x half> poison, half %y, i32 0
%c = shufflevector <6 x half> %b, <6 x half> poison, <6 x i32> zeroinitializer
@@ -3429,99 +2392,52 @@ define void @fadd_fv_v6f16(ptr %x, half %y) {
; ZVFH-NEXT: vse16.v v8, (a0)
; ZVFH-NEXT: ret
;
-; ZVFHMINLMULMAX2-RV32-LABEL: fadd_fv_v6f16:
-; ZVFHMINLMULMAX2-RV32: # %bb.0:
-; ZVFHMINLMULMAX2-RV32-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
-; ZVFHMINLMULMAX2-RV32-NEXT: vle16.v v8, (a0)
-; ZVFHMINLMULMAX2-RV32-NEXT: fcvt.s.h fa5, fa0
-; ZVFHMINLMULMAX2-RV32-NEXT: vsetvli a1, zero, e32, m1, ta, ma
-; ZVFHMINLMULMAX2-RV32-NEXT: vfmv.v.f v9, fa5
-; ZVFHMINLMULMAX2-RV32-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
-; ZVFHMINLMULMAX2-RV32-NEXT: vfncvt.f.f.w v10, v9
-; ZVFHMINLMULMAX2-RV32-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
-; ZVFHMINLMULMAX2-RV32-NEXT: vfwcvt.f.f.v v9, v8
-; ZVFHMINLMULMAX2-RV32-NEXT: vfwcvt.f.f.v v8, v10
-; ZVFHMINLMULMAX2-RV32-NEXT: vsetvli zero, zero, e32, m1, ta, ma
-; ZVFHMINLMULMAX2-RV32-NEXT: vfadd.vv v8, v8, v9
-; ZVFHMINLMULMAX2-RV32-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
-; ZVFHMINLMULMAX2-RV32-NEXT: vfncvt.f.f.w v9, v8
-; ZVFHMINLMULMAX2-RV32-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
-; ZVFHMINLMULMAX2-RV32-NEXT: vslidedown.vi v8, v9, 2
-; ZVFHMINLMULMAX2-RV32-NEXT: addi a1, a0, 8
-; ZVFHMINLMULMAX2-RV32-NEXT: vse32.v v8, (a1)
-; ZVFHMINLMULMAX2-RV32-NEXT: vsetivli zero, 4, e16, mf4, ta, ma
-; ZVFHMINLMULMAX2-RV32-NEXT: vse16.v v9, (a0)
-; ZVFHMINLMULMAX2-RV32-NEXT: ret
-;
-; ZVFHMINLMULMAX2-RV64-LABEL: fadd_fv_v6f16:
-; ZVFHMINLMULMAX2-RV64: # %bb.0:
-; ZVFHMINLMULMAX2-RV64-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
-; ZVFHMINLMULMAX2-RV64-NEXT: vle16.v v8, (a0)
-; ZVFHMINLMULMAX2-RV64-NEXT: fcvt.s.h fa5, fa0
-; ZVFHMINLMULMAX2-RV64-NEXT: vsetvli a1, zero, e32, m1, ta, ma
-; ZVFHMINLMULMAX2-RV64-NEXT: vfmv.v.f v9, fa5
-; ZVFHMINLMULMAX2-RV64-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
-; ZVFHMINLMULMAX2-RV64-NEXT: vfncvt.f.f.w v10, v9
-; ZVFHMINLMULMAX2-RV64-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
-; ZVFHMINLMULMAX2-RV64-NEXT: vfwcvt.f.f.v v9, v8
-; ZVFHMINLMULMAX2-RV64-NEXT: vfwcvt.f.f.v v8, v10
-; ZVFHMINLMULMAX2-RV64-NEXT: vsetvli zero, zero, e32, m1, ta, ma
-; ZVFHMINLMULMAX2-RV64-NEXT: vfadd.vv v8, v8, v9
-; ZVFHMINLMULMAX2-RV64-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
-; ZVFHMINLMULMAX2-RV64-NEXT: vfncvt.f.f.w v9, v8
-; ZVFHMINLMULMAX2-RV64-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
-; ZVFHMINLMULMAX2-RV64-NEXT: vse64.v v9, (a0)
-; ZVFHMINLMULMAX2-RV64-NEXT: vslidedown.vi v8, v9, 2
-; ZVFHMINLMULMAX2-RV64-NEXT: addi a0, a0, 8
-; ZVFHMINLMULMAX2-RV64-NEXT: vse32.v v8, (a0)
-; ZVFHMINLMULMAX2-RV64-NEXT: ret
-;
-; ZVFHMINLMULMAX1-RV32-LABEL: fadd_fv_v6f16:
-; ZVFHMINLMULMAX1-RV32: # %bb.0:
-; ZVFHMINLMULMAX1-RV32-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
-; ZVFHMINLMULMAX1-RV32-NEXT: vle16.v v8, (a0)
-; ZVFHMINLMULMAX1-RV32-NEXT: fcvt.s.h fa5, fa0
-; ZVFHMINLMULMAX1-RV32-NEXT: vsetvli a1, zero, e32, m1, ta, ma
-; ZVFHMINLMULMAX1-RV32-NEXT: vfmv.v.f v9, fa5
-; ZVFHMINLMULMAX1-RV32-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
-; ZVFHMINLMULMAX1-RV32-NEXT: vfncvt.f.f.w v10, v9
-; ZVFHMINLMULMAX1-RV32-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
-; ZVFHMINLMULMAX1-RV32-NEXT: vfwcvt.f.f.v v9, v8
-; ZVFHMINLMULMAX1-RV32-NEXT: vfwcvt.f.f.v v8, v10
-; ZVFHMINLMULMAX1-RV32-NEXT: vsetvli zero, zero, e32, m1, ta, ma
-; ZVFHMINLMULMAX1-RV32-NEXT: vfadd.vv v8, v8, v9
-; ZVFHMINLMULMAX1-RV32-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
-; ZVFHMINLMULMAX1-RV32-NEXT: vfncvt.f.f.w v9, v8
-; ZVFHMINLMULMAX1-RV32-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
-; ZVFHMINLMULMAX1-RV32-NEXT: vslidedown.vi v8, v9, 2
-; ZVFHMINLMULMAX1-RV32-NEXT: addi a1, a0, 8
-; ZVFHMINLMULMAX1-RV32-NEXT: vse32.v v8, (a1)
-; ZVFHMINLMULMAX1-RV32-NEXT: vsetivli zero, 4, e16, mf4, ta, ma
-; ZVFHMINLMULMAX1-RV32-NEXT: vse16.v v9, (a0)
-; ZVFHMINLMULMAX1-RV32-NEXT: ret
-;
-; ZVFHMINLMULMAX1-RV64-LABEL: fadd_fv_v6f16:
-; ZVFHMINLMULMAX1-RV64: # %bb.0:
-; ZVFHMINLMULMAX1-RV64-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
-; ZVFHMINLMULMAX1-RV64-NEXT: vle16.v v8, (a0)
-; ZVFHMINLMULMAX1-RV64-NEXT: fcvt.s.h fa5, fa0
-; ZVFHMINLMULMAX1-RV64-NEXT: vsetvli a1, zero, e32, m1, ta, ma
-; ZVFHMINLMULMAX1-RV64-NEXT: vfmv.v.f v9, fa5
-; ZVFHMINLMULMAX1-RV64-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
-; ZVFHMINLMULMAX1-RV64-NEXT: vfncvt.f.f.w v10, v9
-; ZVFHMINLMULMAX1-RV64-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
-; ZVFHMINLMULMAX1-RV64-NEXT: vfwcvt.f.f.v v9, v8
-; ZVFHMINLMULMAX1-RV64-NEXT: vfwcvt.f.f.v v8, v10
-; ZVFHMINLMULMAX1-RV64-NEXT: vsetvli zero, zero, e32, m1, ta, ma
-; ZVFHMINLMULMAX1-RV64-NEXT: vfadd.vv v8, v8, v9
-; ZVFHMINLMULMAX1-RV64-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
-; ZVFHMINLMULMAX1-RV64-NEXT: vfncvt.f.f.w v9, v8
-; ZVFHMINLMULMAX1-RV64-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
-; ZVFHMINLMULMAX1-RV64-NEXT: vse64.v v9, (a0)
-; ZVFHMINLMULMAX1-RV64-NEXT: vslidedown.vi v8, v9, 2
-; ZVFHMINLMULMAX1-RV64-NEXT: addi a0, a0, 8
-; ZVFHMINLMULMAX1-RV64-NEXT: vse32.v v8, (a0)
-; ZVFHMINLMULMAX1-RV64-NEXT: ret
+; ZVFHMIN-RV32-LABEL: fadd_fv_v6f16:
+; ZVFHMIN-RV32: # %bb.0:
+; ZVFHMIN-RV32-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
+; ZVFHMIN-RV32-NEXT: vle16.v v8, (a0)
+; ZVFHMIN-RV32-NEXT: fcvt.s.h fa5, fa0
+; ZVFHMIN-RV32-NEXT: vsetvli a1, zero, e32, m1, ta, ma
+; ZVFHMIN-RV32-NEXT: vfmv.v.f v9, fa5
+; ZVFHMIN-RV32-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
+; ZVFHMIN-RV32-NEXT: vfncvt.f.f.w v10, v9
+; ZVFHMIN-RV32-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
+; ZVFHMIN-RV32-NEXT: vfwcvt.f.f.v v9, v8
+; ZVFHMIN-RV32-NEXT: vfwcvt.f.f.v v8, v10
+; ZVFHMIN-RV32-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; ZVFHMIN-RV32-NEXT: vfadd.vv v8, v8, v9
+; ZVFHMIN-RV32-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
+; ZVFHMIN-RV32-NEXT: vfncvt.f.f.w v9, v8
+; ZVFHMIN-RV32-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
+; ZVFHMIN-RV32-NEXT: vslidedown.vi v8, v9, 2
+; ZVFHMIN-RV32-NEXT: addi a1, a0, 8
+; ZVFHMIN-RV32-NEXT: vse32.v v8, (a1)
+; ZVFHMIN-RV32-NEXT: vsetivli zero, 4, e16, mf4, ta, ma
+; ZVFHMIN-RV32-NEXT: vse16.v v9, (a0)
+; ZVFHMIN-RV32-NEXT: ret
+;
+; ZVFHMIN-RV64-LABEL: fadd_fv_v6f16:
+; ZVFHMIN-RV64: # %bb.0:
+; ZVFHMIN-RV64-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
+; ZVFHMIN-RV64-NEXT: vle16.v v8, (a0)
+; ZVFHMIN-RV64-NEXT: fcvt.s.h fa5, fa0
+; ZVFHMIN-RV64-NEXT: vsetvli a1, zero, e32, m1, ta, ma
+; ZVFHMIN-RV64-NEXT: vfmv.v.f v9, fa5
+; ZVFHMIN-RV64-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
+; ZVFHMIN-RV64-NEXT: vfncvt.f.f.w v10, v9
+; ZVFHMIN-RV64-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
+; ZVFHMIN-RV64-NEXT: vfwcvt.f.f.v v9, v8
+; ZVFHMIN-RV64-NEXT: vfwcvt.f.f.v v8, v10
+; ZVFHMIN-RV64-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; ZVFHMIN-RV64-NEXT: vfadd.vv v8, v8, v9
+; ZVFHMIN-RV64-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
+; ZVFHMIN-RV64-NEXT: vfncvt.f.f.w v9, v8
+; ZVFHMIN-RV64-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
+; ZVFHMIN-RV64-NEXT: vse64.v v9, (a0)
+; ZVFHMIN-RV64-NEXT: vslidedown.vi v8, v9, 2
+; ZVFHMIN-RV64-NEXT: addi a0, a0, 8
+; ZVFHMIN-RV64-NEXT: vse32.v v8, (a0)
+; ZVFHMIN-RV64-NEXT: ret
%a = load <6 x half>, ptr %x
%b = insertelement <6 x half> poison, half %y, i32 0
%c = shufflevector <6 x half> %b, <6 x half> poison, <6 x i32> zeroinitializer
@@ -3616,99 +2532,52 @@ define void @fsub_vf_v6f16(ptr %x, half %y) {
; ZVFH-NEXT: vse16.v v8, (a0)
; ZVFH-NEXT: ret
;
-; ZVFHMINLMULMAX2-RV32-LABEL: fsub_vf_v6f16:
-; ZVFHMINLMULMAX2-RV32: # %bb.0:
-; ZVFHMINLMULMAX2-RV32-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
-; ZVFHMINLMULMAX2-RV32-NEXT: vle16.v v8, (a0)
-; ZVFHMINLMULMAX2-RV32-NEXT: fcvt.s.h fa5, fa0
-; ZVFHMINLMULMAX2-RV32-NEXT: vsetvli a1, zero, e32, m1, ta, ma
-; ZVFHMINLMULMAX2-RV32-NEXT: vfmv.v.f v9, fa5
-; ZVFHMINLMULMAX2-RV32-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
-; ZVFHMINLMULMAX2-RV32-NEXT: vfncvt.f.f.w v10, v9
-; ZVFHMINLMULMAX2-RV32-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
-; ZVFHMINLMULMAX2-RV32-NEXT: vfwcvt.f.f.v v9, v8
-; ZVFHMINLMULMAX2-RV32-NEXT: vfwcvt.f.f.v v8, v10
-; ZVFHMINLMULMAX2-RV32-NEXT: vsetvli zero, zero, e32, m1, ta, ma
-; ZVFHMINLMULMAX2-RV32-NEXT: vfsub.vv v8, v9, v8
-; ZVFHMINLMULMAX2-RV32-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
-; ZVFHMINLMULMAX2-RV32-NEXT: vfncvt.f.f.w v9, v8
-; ZVFHMINLMULMAX2-RV32-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
-; ZVFHMINLMULMAX2-RV32-NEXT: vslidedown.vi v8, v9, 2
-; ZVFHMINLMULMAX2-RV32-NEXT: addi a1, a0, 8
-; ZVFHMINLMULMAX2-RV32-NEXT: vse32.v v8, (a1)
-; ZVFHMINLMULMAX2-RV32-NEXT: vsetivli zero, 4, e16, mf4, ta, ma
-; ZVFHMINLMULMAX2-RV32-NEXT: vse16.v v9, (a0)
-; ZVFHMINLMULMAX2-RV32-NEXT: ret
-;
-; ZVFHMINLMULMAX2-RV64-LABEL: fsub_vf_v6f16:
-; ZVFHMINLMULMAX2-RV64: # %bb.0:
-; ZVFHMINLMULMAX2-RV64-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
-; ZVFHMINLMULMAX2-RV64-NEXT: vle16.v v8, (a0)
-; ZVFHMINLMULMAX2-RV64-NEXT: fcvt.s.h fa5, fa0
-; ZVFHMINLMULMAX2-RV64-NEXT: vsetvli a1, zero, e32, m1, ta, ma
-; ZVFHMINLMULMAX2-RV64-NEXT: vfmv.v.f v9, fa5
-; ZVFHMINLMULMAX2-RV64-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
-; ZVFHMINLMULMAX2-RV64-NEXT: vfncvt.f.f.w v10, v9
-; ZVFHMINLMULMAX2-RV64-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
-; ZVFHMINLMULMAX2-RV64-NEXT: vfwcvt.f.f.v v9, v8
-; ZVFHMINLMULMAX2-RV64-NEXT: vfwcvt.f.f.v v8, v10
-; ZVFHMINLMULMAX2-RV64-NEXT: vsetvli zero, zero, e32, m1, ta, ma
-; ZVFHMINLMULMAX2-RV64-NEXT: vfsub.vv v8, v9, v8
-; ZVFHMINLMULMAX2-RV64-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
-; ZVFHMINLMULMAX2-RV64-NEXT: vfncvt.f.f.w v9, v8
-; ZVFHMINLMULMAX2-RV64-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
-; ZVFHMINLMULMAX2-RV64-NEXT: vse64.v v9, (a0)
-; ZVFHMINLMULMAX2-RV64-NEXT: vslidedown.vi v8, v9, 2
-; ZVFHMINLMULMAX2-RV64-NEXT: addi a0, a0, 8
-; ZVFHMINLMULMAX2-RV64-NEXT: vse32.v v8, (a0)
-; ZVFHMINLMULMAX2-RV64-NEXT: ret
-;
-; ZVFHMINLMULMAX1-RV32-LABEL: fsub_vf_v6f16:
-; ZVFHMINLMULMAX1-RV32: # %bb.0:
-; ZVFHMINLMULMAX1-RV32-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
-; ZVFHMINLMULMAX1-RV32-NEXT: vle16.v v8, (a0)
-; ZVFHMINLMULMAX1-RV32-NEXT: fcvt.s.h fa5, fa0
-; ZVFHMINLMULMAX1-RV32-NEXT: vsetvli a1, zero, e32, m1, ta, ma
-; ZVFHMINLMULMAX1-RV32-NEXT: vfmv.v.f v9, fa5
-; ZVFHMINLMULMAX1-RV32-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
-; ZVFHMINLMULMAX1-RV32-NEXT: vfncvt.f.f.w v10, v9
-; ZVFHMINLMULMAX1-RV32-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
-; ZVFHMINLMULMAX1-RV32-NEXT: vfwcvt.f.f.v v9, v8
-; ZVFHMINLMULMAX1-RV32-NEXT: vfwcvt.f.f.v v8, v10
-; ZVFHMINLMULMAX1-RV32-NEXT: vsetvli zero, zero, e32, m1, ta, ma
-; ZVFHMINLMULMAX1-RV32-NEXT: vfsub.vv v8, v9, v8
-; ZVFHMINLMULMAX1-RV32-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
-; ZVFHMINLMULMAX1-RV32-NEXT: vfncvt.f.f.w v9, v8
-; ZVFHMINLMULMAX1-RV32-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
-; ZVFHMINLMULMAX1-RV32-NEXT: vslidedown.vi v8, v9, 2
-; ZVFHMINLMULMAX1-RV32-NEXT: addi a1, a0, 8
-; ZVFHMINLMULMAX1-RV32-NEXT: vse32.v v8, (a1)
-; ZVFHMINLMULMAX1-RV32-NEXT: vsetivli zero, 4, e16, mf4, ta, ma
-; ZVFHMINLMULMAX1-RV32-NEXT: vse16.v v9, (a0)
-; ZVFHMINLMULMAX1-RV32-NEXT: ret
-;
-; ZVFHMINLMULMAX1-RV64-LABEL: fsub_vf_v6f16:
-; ZVFHMINLMULMAX1-RV64: # %bb.0:
-; ZVFHMINLMULMAX1-RV64-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
-; ZVFHMINLMULMAX1-RV64-NEXT: vle16.v v8, (a0)
-; ZVFHMINLMULMAX1-RV64-NEXT: fcvt.s.h fa5, fa0
-; ZVFHMINLMULMAX1-RV64-NEXT: vsetvli a1, zero, e32, m1, ta, ma
-; ZVFHMINLMULMAX1-RV64-NEXT: vfmv.v.f v9, fa5
-; ZVFHMINLMULMAX1-RV64-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
-; ZVFHMINLMULMAX1-RV64-NEXT: vfncvt.f.f.w v10, v9
-; ZVFHMINLMULMAX1-RV64-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
-; ZVFHMINLMULMAX1-RV64-NEXT: vfwcvt.f.f.v v9, v8
-; ZVFHMINLMULMAX1-RV64-NEXT: vfwcvt.f.f.v v8, v10
-; ZVFHMINLMULMAX1-RV64-NEXT: vsetvli zero, zero, e32, m1, ta, ma
-; ZVFHMINLMULMAX1-RV64-NEXT: vfsub.vv v8, v9, v8
-; ZVFHMINLMULMAX1-RV64-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
-; ZVFHMINLMULMAX1-RV64-NEXT: vfncvt.f.f.w v9, v8
-; ZVFHMINLMULMAX1-RV64-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
-; ZVFHMINLMULMAX1-RV64-NEXT: vse64.v v9, (a0)
-; ZVFHMINLMULMAX1-RV64-NEXT: vslidedown.vi v8, v9, 2
-; ZVFHMINLMULMAX1-RV64-NEXT: addi a0, a0, 8
-; ZVFHMINLMULMAX1-RV64-NEXT: vse32.v v8, (a0)
-; ZVFHMINLMULMAX1-RV64-NEXT: ret
+; ZVFHMIN-RV32-LABEL: fsub_vf_v6f16:
+; ZVFHMIN-RV32: # %bb.0:
+; ZVFHMIN-RV32-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
+; ZVFHMIN-RV32-NEXT: vle16.v v8, (a0)
+; ZVFHMIN-RV32-NEXT: fcvt.s.h fa5, fa0
+; ZVFHMIN-RV32-NEXT: vsetvli a1, zero, e32, m1, ta, ma
+; ZVFHMIN-RV32-NEXT: vfmv.v.f v9, fa5
+; ZVFHMIN-RV32-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
+; ZVFHMIN-RV32-NEXT: vfncvt.f.f.w v10, v9
+; ZVFHMIN-RV32-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
+; ZVFHMIN-RV32-NEXT: vfwcvt.f.f.v v9, v8
+; ZVFHMIN-RV32-NEXT: vfwcvt.f.f.v v8, v10
+; ZVFHMIN-RV32-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; ZVFHMIN-RV32-NEXT: vfsub.vv v8, v9, v8
+; ZVFHMIN-RV32-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
+; ZVFHMIN-RV32-NEXT: vfncvt.f.f.w v9, v8
+; ZVFHMIN-RV32-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
+; ZVFHMIN-RV32-NEXT: vslidedown.vi v8, v9, 2
+; ZVFHMIN-RV32-NEXT: addi a1, a0, 8
+; ZVFHMIN-RV32-NEXT: vse32.v v8, (a1)
+; ZVFHMIN-RV32-NEXT: vsetivli zero, 4, e16, mf4, ta, ma
+; ZVFHMIN-RV32-NEXT: vse16.v v9, (a0)
+; ZVFHMIN-RV32-NEXT: ret
+;
+; ZVFHMIN-RV64-LABEL: fsub_vf_v6f16:
+; ZVFHMIN-RV64: # %bb.0:
+; ZVFHMIN-RV64-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
+; ZVFHMIN-RV64-NEXT: vle16.v v8, (a0)
+; ZVFHMIN-RV64-NEXT: fcvt.s.h fa5, fa0
+; ZVFHMIN-RV64-NEXT: vsetvli a1, zero, e32, m1, ta, ma
+; ZVFHMIN-RV64-NEXT: vfmv.v.f v9, fa5
+; ZVFHMIN-RV64-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
+; ZVFHMIN-RV64-NEXT: vfncvt.f.f.w v10, v9
+; ZVFHMIN-RV64-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
+; ZVFHMIN-RV64-NEXT: vfwcvt.f.f.v v9, v8
+; ZVFHMIN-RV64-NEXT: vfwcvt.f.f.v v8, v10
+; ZVFHMIN-RV64-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; ZVFHMIN-RV64-NEXT: vfsub.vv v8, v9, v8
+; ZVFHMIN-RV64-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
+; ZVFHMIN-RV64-NEXT: vfncvt.f.f.w v9, v8
+; ZVFHMIN-RV64-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
+; ZVFHMIN-RV64-NEXT: vse64.v v9, (a0)
+; ZVFHMIN-RV64-NEXT: vslidedown.vi v8, v9, 2
+; ZVFHMIN-RV64-NEXT: addi a0, a0, 8
+; ZVFHMIN-RV64-NEXT: vse32.v v8, (a0)
+; ZVFHMIN-RV64-NEXT: ret
%a = load <6 x half>, ptr %x
%b = insertelement <6 x half> poison, half %y, i32 0
%c = shufflevector <6 x half> %b, <6 x half> poison, <6 x i32> zeroinitializer
@@ -3803,99 +2672,52 @@ define void @fsub_fv_v6f16(ptr %x, half %y) {
; ZVFH-NEXT: vse16.v v8, (a0)
; ZVFH-NEXT: ret
;
-; ZVFHMINLMULMAX2-RV32-LABEL: fsub_fv_v6f16:
-; ZVFHMINLMULMAX2-RV32: # %bb.0:
-; ZVFHMINLMULMAX2-RV32-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
-; ZVFHMINLMULMAX2-RV32-NEXT: vle16.v v8, (a0)
-; ZVFHMINLMULMAX2-RV32-NEXT: fcvt.s.h fa5, fa0
-; ZVFHMINLMULMAX2-RV32-NEXT: vsetvli a1, zero, e32, m1, ta, ma
-; ZVFHMINLMULMAX2-RV32-NEXT: vfmv.v.f v9, fa5
-; ZVFHMINLMULMAX2-RV32-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
-; ZVFHMINLMULMAX2-RV32-NEXT: vfncvt.f.f.w v10, v9
-; ZVFHMINLMULMAX2-RV32-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
-; ZVFHMINLMULMAX2-RV32-NEXT: vfwcvt.f.f.v v9, v8
-; ZVFHMINLMULMAX2-RV32-NEXT: vfwcvt.f.f.v v8, v10
-; ZVFHMINLMULMAX2-RV32-NEXT: vsetvli zero, zero, e32, m1, ta, ma
-; ZVFHMINLMULMAX2-RV32-NEXT: vfsub.vv v8, v8, v9
-; ZVFHMINLMULMAX2-RV32-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
-; ZVFHMINLMULMAX2-RV32-NEXT: vfncvt.f.f.w v9, v8
-; ZVFHMINLMULMAX2-RV32-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
-; ZVFHMINLMULMAX2-RV32-NEXT: vslidedown.vi v8, v9, 2
-; ZVFHMINLMULMAX2-RV32-NEXT: addi a1, a0, 8
-; ZVFHMINLMULMAX2-RV32-NEXT: vse32.v v8, (a1)
-; ZVFHMINLMULMAX2-RV32-NEXT: vsetivli zero, 4, e16, mf4, ta, ma
-; ZVFHMINLMULMAX2-RV32-NEXT: vse16.v v9, (a0)
-; ZVFHMINLMULMAX2-RV32-NEXT: ret
-;
-; ZVFHMINLMULMAX2-RV64-LABEL: fsub_fv_v6f16:
-; ZVFHMINLMULMAX2-RV64: # %bb.0:
-; ZVFHMINLMULMAX2-RV64-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
-; ZVFHMINLMULMAX2-RV64-NEXT: vle16.v v8, (a0)
-; ZVFHMINLMULMAX2-RV64-NEXT: fcvt.s.h fa5, fa0
-; ZVFHMINLMULMAX2-RV64-NEXT: vsetvli a1, zero, e32, m1, ta, ma
-; ZVFHMINLMULMAX2-RV64-NEXT: vfmv.v.f v9, fa5
-; ZVFHMINLMULMAX2-RV64-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
-; ZVFHMINLMULMAX2-RV64-NEXT: vfncvt.f.f.w v10, v9
-; ZVFHMINLMULMAX2-RV64-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
-; ZVFHMINLMULMAX2-RV64-NEXT: vfwcvt.f.f.v v9, v8
-; ZVFHMINLMULMAX2-RV64-NEXT: vfwcvt.f.f.v v8, v10
-; ZVFHMINLMULMAX2-RV64-NEXT: vsetvli zero, zero, e32, m1, ta, ma
-; ZVFHMINLMULMAX2-RV64-NEXT: vfsub.vv v8, v8, v9
-; ZVFHMINLMULMAX2-RV64-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
-; ZVFHMINLMULMAX2-RV64-NEXT: vfncvt.f.f.w v9, v8
-; ZVFHMINLMULMAX2-RV64-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
-; ZVFHMINLMULMAX2-RV64-NEXT: vse64.v v9, (a0)
-; ZVFHMINLMULMAX2-RV64-NEXT: vslidedown.vi v8, v9, 2
-; ZVFHMINLMULMAX2-RV64-NEXT: addi a0, a0, 8
-; ZVFHMINLMULMAX2-RV64-NEXT: vse32.v v8, (a0)
-; ZVFHMINLMULMAX2-RV64-NEXT: ret
-;
-; ZVFHMINLMULMAX1-RV32-LABEL: fsub_fv_v6f16:
-; ZVFHMINLMULMAX1-RV32: # %bb.0:
-; ZVFHMINLMULMAX1-RV32-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
-; ZVFHMINLMULMAX1-RV32-NEXT: vle16.v v8, (a0)
-; ZVFHMINLMULMAX1-RV32-NEXT: fcvt.s.h fa5, fa0
-; ZVFHMINLMULMAX1-RV32-NEXT: vsetvli a1, zero, e32, m1, ta, ma
-; ZVFHMINLMULMAX1-RV32-NEXT: vfmv.v.f v9, fa5
-; ZVFHMINLMULMAX1-RV32-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
-; ZVFHMINLMULMAX1-RV32-NEXT: vfncvt.f.f.w v10, v9
-; ZVFHMINLMULMAX1-RV32-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
-; ZVFHMINLMULMAX1-RV32-NEXT: vfwcvt.f.f.v v9, v8
-; ZVFHMINLMULMAX1-RV32-NEXT: vfwcvt.f.f.v v8, v10
-; ZVFHMINLMULMAX1-RV32-NEXT: vsetvli zero, zero, e32, m1, ta, ma
-; ZVFHMINLMULMAX1-RV32-NEXT: vfsub.vv v8, v8, v9
-; ZVFHMINLMULMAX1-RV32-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
-; ZVFHMINLMULMAX1-RV32-NEXT: vfncvt.f.f.w v9, v8
-; ZVFHMINLMULMAX1-RV32-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
-; ZVFHMINLMULMAX1-RV32-NEXT: vslidedown.vi v8, v9, 2
-; ZVFHMINLMULMAX1-RV32-NEXT: addi a1, a0, 8
-; ZVFHMINLMULMAX1-RV32-NEXT: vse32.v v8, (a1)
-; ZVFHMINLMULMAX1-RV32-NEXT: vsetivli zero, 4, e16, mf4, ta, ma
-; ZVFHMINLMULMAX1-RV32-NEXT: vse16.v v9, (a0)
-; ZVFHMINLMULMAX1-RV32-NEXT: ret
-;
-; ZVFHMINLMULMAX1-RV64-LABEL: fsub_fv_v6f16:
-; ZVFHMINLMULMAX1-RV64: # %bb.0:
-; ZVFHMINLMULMAX1-RV64-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
-; ZVFHMINLMULMAX1-RV64-NEXT: vle16.v v8, (a0)
-; ZVFHMINLMULMAX1-RV64-NEXT: fcvt.s.h fa5, fa0
-; ZVFHMINLMULMAX1-RV64-NEXT: vsetvli a1, zero, e32, m1, ta, ma
-; ZVFHMINLMULMAX1-RV64-NEXT: vfmv.v.f v9, fa5
-; ZVFHMINLMULMAX1-RV64-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
-; ZVFHMINLMULMAX1-RV64-NEXT: vfncvt.f.f.w v10, v9
-; ZVFHMINLMULMAX1-RV64-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
-; ZVFHMINLMULMAX1-RV64-NEXT: vfwcvt.f.f.v v9, v8
-; ZVFHMINLMULMAX1-RV64-NEXT: vfwcvt.f.f.v v8, v10
-; ZVFHMINLMULMAX1-RV64-NEXT: vsetvli zero, zero, e32, m1, ta, ma
-; ZVFHMINLMULMAX1-RV64-NEXT: vfsub.vv v8, v8, v9
-; ZVFHMINLMULMAX1-RV64-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
-; ZVFHMINLMULMAX1-RV64-NEXT: vfncvt.f.f.w v9, v8
-; ZVFHMINLMULMAX1-RV64-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
-; ZVFHMINLMULMAX1-RV64-NEXT: vse64.v v9, (a0)
-; ZVFHMINLMULMAX1-RV64-NEXT: vslidedown.vi v8, v9, 2
-; ZVFHMINLMULMAX1-RV64-NEXT: addi a0, a0, 8
-; ZVFHMINLMULMAX1-RV64-NEXT: vse32.v v8, (a0)
-; ZVFHMINLMULMAX1-RV64-NEXT: ret
+; ZVFHMIN-RV32-LABEL: fsub_fv_v6f16:
+; ZVFHMIN-RV32: # %bb.0:
+; ZVFHMIN-RV32-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
+; ZVFHMIN-RV32-NEXT: vle16.v v8, (a0)
+; ZVFHMIN-RV32-NEXT: fcvt.s.h fa5, fa0
+; ZVFHMIN-RV32-NEXT: vsetvli a1, zero, e32, m1, ta, ma
+; ZVFHMIN-RV32-NEXT: vfmv.v.f v9, fa5
+; ZVFHMIN-RV32-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
+; ZVFHMIN-RV32-NEXT: vfncvt.f.f.w v10, v9
+; ZVFHMIN-RV32-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
+; ZVFHMIN-RV32-NEXT: vfwcvt.f.f.v v9, v8
+; ZVFHMIN-RV32-NEXT: vfwcvt.f.f.v v8, v10
+; ZVFHMIN-RV32-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; ZVFHMIN-RV32-NEXT: vfsub.vv v8, v8, v9
+; ZVFHMIN-RV32-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
+; ZVFHMIN-RV32-NEXT: vfncvt.f.f.w v9, v8
+; ZVFHMIN-RV32-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
+; ZVFHMIN-RV32-NEXT: vslidedown.vi v8, v9, 2
+; ZVFHMIN-RV32-NEXT: addi a1, a0, 8
+; ZVFHMIN-RV32-NEXT: vse32.v v8, (a1)
+; ZVFHMIN-RV32-NEXT: vsetivli zero, 4, e16, mf4, ta, ma
+; ZVFHMIN-RV32-NEXT: vse16.v v9, (a0)
+; ZVFHMIN-RV32-NEXT: ret
+;
+; ZVFHMIN-RV64-LABEL: fsub_fv_v6f16:
+; ZVFHMIN-RV64: # %bb.0:
+; ZVFHMIN-RV64-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
+; ZVFHMIN-RV64-NEXT: vle16.v v8, (a0)
+; ZVFHMIN-RV64-NEXT: fcvt.s.h fa5, fa0
+; ZVFHMIN-RV64-NEXT: vsetvli a1, zero, e32, m1, ta, ma
+; ZVFHMIN-RV64-NEXT: vfmv.v.f v9, fa5
+; ZVFHMIN-RV64-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
+; ZVFHMIN-RV64-NEXT: vfncvt.f.f.w v10, v9
+; ZVFHMIN-RV64-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
+; ZVFHMIN-RV64-NEXT: vfwcvt.f.f.v v9, v8
+; ZVFHMIN-RV64-NEXT: vfwcvt.f.f.v v8, v10
+; ZVFHMIN-RV64-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; ZVFHMIN-RV64-NEXT: vfsub.vv v8, v8, v9
+; ZVFHMIN-RV64-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
+; ZVFHMIN-RV64-NEXT: vfncvt.f.f.w v9, v8
+; ZVFHMIN-RV64-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
+; ZVFHMIN-RV64-NEXT: vse64.v v9, (a0)
+; ZVFHMIN-RV64-NEXT: vslidedown.vi v8, v9, 2
+; ZVFHMIN-RV64-NEXT: addi a0, a0, 8
+; ZVFHMIN-RV64-NEXT: vse32.v v8, (a0)
+; ZVFHMIN-RV64-NEXT: ret
%a = load <6 x half>, ptr %x
%b = insertelement <6 x half> poison, half %y, i32 0
%c = shufflevector <6 x half> %b, <6 x half> poison, <6 x i32> zeroinitializer
@@ -3990,99 +2812,52 @@ define void @fmul_vf_v6f16(ptr %x, half %y) {
; ZVFH-NEXT: vse16.v v8, (a0)
; ZVFH-NEXT: ret
;
-; ZVFHMINLMULMAX2-RV32-LABEL: fmul_vf_v6f16:
-; ZVFHMINLMULMAX2-RV32: # %bb.0:
-; ZVFHMINLMULMAX2-RV32-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
-; ZVFHMINLMULMAX2-RV32-NEXT: vle16.v v8, (a0)
-; ZVFHMINLMULMAX2-RV32-NEXT: fcvt.s.h fa5, fa0
-; ZVFHMINLMULMAX2-RV32-NEXT: vsetvli a1, zero, e32, m1, ta, ma
-; ZVFHMINLMULMAX2-RV32-NEXT: vfmv.v.f v9, fa5
-; ZVFHMINLMULMAX2-RV32-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
-; ZVFHMINLMULMAX2-RV32-NEXT: vfncvt.f.f.w v10, v9
-; ZVFHMINLMULMAX2-RV32-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
-; ZVFHMINLMULMAX2-RV32-NEXT: vfwcvt.f.f.v v9, v8
-; ZVFHMINLMULMAX2-RV32-NEXT: vfwcvt.f.f.v v8, v10
-; ZVFHMINLMULMAX2-RV32-NEXT: vsetvli zero, zero, e32, m1, ta, ma
-; ZVFHMINLMULMAX2-RV32-NEXT: vfmul.vv v8, v9, v8
-; ZVFHMINLMULMAX2-RV32-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
-; ZVFHMINLMULMAX2-RV32-NEXT: vfncvt.f.f.w v9, v8
-; ZVFHMINLMULMAX2-RV32-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
-; ZVFHMINLMULMAX2-RV32-NEXT: vslidedown.vi v8, v9, 2
-; ZVFHMINLMULMAX2-RV32-NEXT: addi a1, a0, 8
-; ZVFHMINLMULMAX2-RV32-NEXT: vse32.v v8, (a1)
-; ZVFHMINLMULMAX2-RV32-NEXT: vsetivli zero, 4, e16, mf4, ta, ma
-; ZVFHMINLMULMAX2-RV32-NEXT: vse16.v v9, (a0)
-; ZVFHMINLMULMAX2-RV32-NEXT: ret
-;
-; ZVFHMINLMULMAX2-RV64-LABEL: fmul_vf_v6f16:
-; ZVFHMINLMULMAX2-RV64: # %bb.0:
-; ZVFHMINLMULMAX2-RV64-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
-; ZVFHMINLMULMAX2-RV64-NEXT: vle16.v v8, (a0)
-; ZVFHMINLMULMAX2-RV64-NEXT: fcvt.s.h fa5, fa0
-; ZVFHMINLMULMAX2-RV64-NEXT: vsetvli a1, zero, e32, m1, ta, ma
-; ZVFHMINLMULMAX2-RV64-NEXT: vfmv.v.f v9, fa5
-; ZVFHMINLMULMAX2-RV64-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
-; ZVFHMINLMULMAX2-RV64-NEXT: vfncvt.f.f.w v10, v9
-; ZVFHMINLMULMAX2-RV64-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
-; ZVFHMINLMULMAX2-RV64-NEXT: vfwcvt.f.f.v v9, v8
-; ZVFHMINLMULMAX2-RV64-NEXT: vfwcvt.f.f.v v8, v10
-; ZVFHMINLMULMAX2-RV64-NEXT: vsetvli zero, zero, e32, m1, ta, ma
-; ZVFHMINLMULMAX2-RV64-NEXT: vfmul.vv v8, v9, v8
-; ZVFHMINLMULMAX2-RV64-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
-; ZVFHMINLMULMAX2-RV64-NEXT: vfncvt.f.f.w v9, v8
-; ZVFHMINLMULMAX2-RV64-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
-; ZVFHMINLMULMAX2-RV64-NEXT: vse64.v v9, (a0)
-; ZVFHMINLMULMAX2-RV64-NEXT: vslidedown.vi v8, v9, 2
-; ZVFHMINLMULMAX2-RV64-NEXT: addi a0, a0, 8
-; ZVFHMINLMULMAX2-RV64-NEXT: vse32.v v8, (a0)
-; ZVFHMINLMULMAX2-RV64-NEXT: ret
-;
-; ZVFHMINLMULMAX1-RV32-LABEL: fmul_vf_v6f16:
-; ZVFHMINLMULMAX1-RV32: # %bb.0:
-; ZVFHMINLMULMAX1-RV32-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
-; ZVFHMINLMULMAX1-RV32-NEXT: vle16.v v8, (a0)
-; ZVFHMINLMULMAX1-RV32-NEXT: fcvt.s.h fa5, fa0
-; ZVFHMINLMULMAX1-RV32-NEXT: vsetvli a1, zero, e32, m1, ta, ma
-; ZVFHMINLMULMAX1-RV32-NEXT: vfmv.v.f v9, fa5
-; ZVFHMINLMULMAX1-RV32-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
-; ZVFHMINLMULMAX1-RV32-NEXT: vfncvt.f.f.w v10, v9
-; ZVFHMINLMULMAX1-RV32-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
-; ZVFHMINLMULMAX1-RV32-NEXT: vfwcvt.f.f.v v9, v8
-; ZVFHMINLMULMAX1-RV32-NEXT: vfwcvt.f.f.v v8, v10
-; ZVFHMINLMULMAX1-RV32-NEXT: vsetvli zero, zero, e32, m1, ta, ma
-; ZVFHMINLMULMAX1-RV32-NEXT: vfmul.vv v8, v9, v8
-; ZVFHMINLMULMAX1-RV32-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
-; ZVFHMINLMULMAX1-RV32-NEXT: vfncvt.f.f.w v9, v8
-; ZVFHMINLMULMAX1-RV32-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
-; ZVFHMINLMULMAX1-RV32-NEXT: vslidedown.vi v8, v9, 2
-; ZVFHMINLMULMAX1-RV32-NEXT: addi a1, a0, 8
-; ZVFHMINLMULMAX1-RV32-NEXT: vse32.v v8, (a1)
-; ZVFHMINLMULMAX1-RV32-NEXT: vsetivli zero, 4, e16, mf4, ta, ma
-; ZVFHMINLMULMAX1-RV32-NEXT: vse16.v v9, (a0)
-; ZVFHMINLMULMAX1-RV32-NEXT: ret
-;
-; ZVFHMINLMULMAX1-RV64-LABEL: fmul_vf_v6f16:
-; ZVFHMINLMULMAX1-RV64: # %bb.0:
-; ZVFHMINLMULMAX1-RV64-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
-; ZVFHMINLMULMAX1-RV64-NEXT: vle16.v v8, (a0)
-; ZVFHMINLMULMAX1-RV64-NEXT: fcvt.s.h fa5, fa0
-; ZVFHMINLMULMAX1-RV64-NEXT: vsetvli a1, zero, e32, m1, ta, ma
-; ZVFHMINLMULMAX1-RV64-NEXT: vfmv.v.f v9, fa5
-; ZVFHMINLMULMAX1-RV64-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
-; ZVFHMINLMULMAX1-RV64-NEXT: vfncvt.f.f.w v10, v9
-; ZVFHMINLMULMAX1-RV64-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
-; ZVFHMINLMULMAX1-RV64-NEXT: vfwcvt.f.f.v v9, v8
-; ZVFHMINLMULMAX1-RV64-NEXT: vfwcvt.f.f.v v8, v10
-; ZVFHMINLMULMAX1-RV64-NEXT: vsetvli zero, zero, e32, m1, ta, ma
-; ZVFHMINLMULMAX1-RV64-NEXT: vfmul.vv v8, v9, v8
-; ZVFHMINLMULMAX1-RV64-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
-; ZVFHMINLMULMAX1-RV64-NEXT: vfncvt.f.f.w v9, v8
-; ZVFHMINLMULMAX1-RV64-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
-; ZVFHMINLMULMAX1-RV64-NEXT: vse64.v v9, (a0)
-; ZVFHMINLMULMAX1-RV64-NEXT: vslidedown.vi v8, v9, 2
-; ZVFHMINLMULMAX1-RV64-NEXT: addi a0, a0, 8
-; ZVFHMINLMULMAX1-RV64-NEXT: vse32.v v8, (a0)
-; ZVFHMINLMULMAX1-RV64-NEXT: ret
+; ZVFHMIN-RV32-LABEL: fmul_vf_v6f16:
+; ZVFHMIN-RV32: # %bb.0:
+; ZVFHMIN-RV32-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
+; ZVFHMIN-RV32-NEXT: vle16.v v8, (a0)
+; ZVFHMIN-RV32-NEXT: fcvt.s.h fa5, fa0
+; ZVFHMIN-RV32-NEXT: vsetvli a1, zero, e32, m1, ta, ma
+; ZVFHMIN-RV32-NEXT: vfmv.v.f v9, fa5
+; ZVFHMIN-RV32-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
+; ZVFHMIN-RV32-NEXT: vfncvt.f.f.w v10, v9
+; ZVFHMIN-RV32-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
+; ZVFHMIN-RV32-NEXT: vfwcvt.f.f.v v9, v8
+; ZVFHMIN-RV32-NEXT: vfwcvt.f.f.v v8, v10
+; ZVFHMIN-RV32-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; ZVFHMIN-RV32-NEXT: vfmul.vv v8, v9, v8
+; ZVFHMIN-RV32-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
+; ZVFHMIN-RV32-NEXT: vfncvt.f.f.w v9, v8
+; ZVFHMIN-RV32-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
+; ZVFHMIN-RV32-NEXT: vslidedown.vi v8, v9, 2
+; ZVFHMIN-RV32-NEXT: addi a1, a0, 8
+; ZVFHMIN-RV32-NEXT: vse32.v v8, (a1)
+; ZVFHMIN-RV32-NEXT: vsetivli zero, 4, e16, mf4, ta, ma
+; ZVFHMIN-RV32-NEXT: vse16.v v9, (a0)
+; ZVFHMIN-RV32-NEXT: ret
+;
+; ZVFHMIN-RV64-LABEL: fmul_vf_v6f16:
+; ZVFHMIN-RV64: # %bb.0:
+; ZVFHMIN-RV64-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
+; ZVFHMIN-RV64-NEXT: vle16.v v8, (a0)
+; ZVFHMIN-RV64-NEXT: fcvt.s.h fa5, fa0
+; ZVFHMIN-RV64-NEXT: vsetvli a1, zero, e32, m1, ta, ma
+; ZVFHMIN-RV64-NEXT: vfmv.v.f v9, fa5
+; ZVFHMIN-RV64-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
+; ZVFHMIN-RV64-NEXT: vfncvt.f.f.w v10, v9
+; ZVFHMIN-RV64-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
+; ZVFHMIN-RV64-NEXT: vfwcvt.f.f.v v9, v8
+; ZVFHMIN-RV64-NEXT: vfwcvt.f.f.v v8, v10
+; ZVFHMIN-RV64-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; ZVFHMIN-RV64-NEXT: vfmul.vv v8, v9, v8
+; ZVFHMIN-RV64-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
+; ZVFHMIN-RV64-NEXT: vfncvt.f.f.w v9, v8
+; ZVFHMIN-RV64-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
+; ZVFHMIN-RV64-NEXT: vse64.v v9, (a0)
+; ZVFHMIN-RV64-NEXT: vslidedown.vi v8, v9, 2
+; ZVFHMIN-RV64-NEXT: addi a0, a0, 8
+; ZVFHMIN-RV64-NEXT: vse32.v v8, (a0)
+; ZVFHMIN-RV64-NEXT: ret
%a = load <6 x half>, ptr %x
%b = insertelement <6 x half> poison, half %y, i32 0
%c = shufflevector <6 x half> %b, <6 x half> poison, <6 x i32> zeroinitializer
@@ -4177,99 +2952,52 @@ define void @fmul_fv_v6f16(ptr %x, half %y) {
; ZVFH-NEXT: vse16.v v8, (a0)
; ZVFH-NEXT: ret
;
-; ZVFHMINLMULMAX2-RV32-LABEL: fmul_fv_v6f16:
-; ZVFHMINLMULMAX2-RV32: # %bb.0:
-; ZVFHMINLMULMAX2-RV32-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
-; ZVFHMINLMULMAX2-RV32-NEXT: vle16.v v8, (a0)
-; ZVFHMINLMULMAX2-RV32-NEXT: fcvt.s.h fa5, fa0
-; ZVFHMINLMULMAX2-RV32-NEXT: vsetvli a1, zero, e32, m1, ta, ma
-; ZVFHMINLMULMAX2-RV32-NEXT: vfmv.v.f v9, fa5
-; ZVFHMINLMULMAX2-RV32-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
-; ZVFHMINLMULMAX2-RV32-NEXT: vfncvt.f.f.w v10, v9
-; ZVFHMINLMULMAX2-RV32-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
-; ZVFHMINLMULMAX2-RV32-NEXT: vfwcvt.f.f.v v9, v8
-; ZVFHMINLMULMAX2-RV32-NEXT: vfwcvt.f.f.v v8, v10
-; ZVFHMINLMULMAX2-RV32-NEXT: vsetvli zero, zero, e32, m1, ta, ma
-; ZVFHMINLMULMAX2-RV32-NEXT: vfmul.vv v8, v8, v9
-; ZVFHMINLMULMAX2-RV32-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
-; ZVFHMINLMULMAX2-RV32-NEXT: vfncvt.f.f.w v9, v8
-; ZVFHMINLMULMAX2-RV32-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
-; ZVFHMINLMULMAX2-RV32-NEXT: vslidedown.vi v8, v9, 2
-; ZVFHMINLMULMAX2-RV32-NEXT: addi a1, a0, 8
-; ZVFHMINLMULMAX2-RV32-NEXT: vse32.v v8, (a1)
-; ZVFHMINLMULMAX2-RV32-NEXT: vsetivli zero, 4, e16, mf4, ta, ma
-; ZVFHMINLMULMAX2-RV32-NEXT: vse16.v v9, (a0)
-; ZVFHMINLMULMAX2-RV32-NEXT: ret
-;
-; ZVFHMINLMULMAX2-RV64-LABEL: fmul_fv_v6f16:
-; ZVFHMINLMULMAX2-RV64: # %bb.0:
-; ZVFHMINLMULMAX2-RV64-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
-; ZVFHMINLMULMAX2-RV64-NEXT: vle16.v v8, (a0)
-; ZVFHMINLMULMAX2-RV64-NEXT: fcvt.s.h fa5, fa0
-; ZVFHMINLMULMAX2-RV64-NEXT: vsetvli a1, zero, e32, m1, ta, ma
-; ZVFHMINLMULMAX2-RV64-NEXT: vfmv.v.f v9, fa5
-; ZVFHMINLMULMAX2-RV64-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
-; ZVFHMINLMULMAX2-RV64-NEXT: vfncvt.f.f.w v10, v9
-; ZVFHMINLMULMAX2-RV64-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
-; ZVFHMINLMULMAX2-RV64-NEXT: vfwcvt.f.f.v v9, v8
-; ZVFHMINLMULMAX2-RV64-NEXT: vfwcvt.f.f.v v8, v10
-; ZVFHMINLMULMAX2-RV64-NEXT: vsetvli zero, zero, e32, m1, ta, ma
-; ZVFHMINLMULMAX2-RV64-NEXT: vfmul.vv v8, v8, v9
-; ZVFHMINLMULMAX2-RV64-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
-; ZVFHMINLMULMAX2-RV64-NEXT: vfncvt.f.f.w v9, v8
-; ZVFHMINLMULMAX2-RV64-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
-; ZVFHMINLMULMAX2-RV64-NEXT: vse64.v v9, (a0)
-; ZVFHMINLMULMAX2-RV64-NEXT: vslidedown.vi v8, v9, 2
-; ZVFHMINLMULMAX2-RV64-NEXT: addi a0, a0, 8
-; ZVFHMINLMULMAX2-RV64-NEXT: vse32.v v8, (a0)
-; ZVFHMINLMULMAX2-RV64-NEXT: ret
-;
-; ZVFHMINLMULMAX1-RV32-LABEL: fmul_fv_v6f16:
-; ZVFHMINLMULMAX1-RV32: # %bb.0:
-; ZVFHMINLMULMAX1-RV32-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
-; ZVFHMINLMULMAX1-RV32-NEXT: vle16.v v8, (a0)
-; ZVFHMINLMULMAX1-RV32-NEXT: fcvt.s.h fa5, fa0
-; ZVFHMINLMULMAX1-RV32-NEXT: vsetvli a1, zero, e32, m1, ta, ma
-; ZVFHMINLMULMAX1-RV32-NEXT: vfmv.v.f v9, fa5
-; ZVFHMINLMULMAX1-RV32-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
-; ZVFHMINLMULMAX1-RV32-NEXT: vfncvt.f.f.w v10, v9
-; ZVFHMINLMULMAX1-RV32-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
-; ZVFHMINLMULMAX1-RV32-NEXT: vfwcvt.f.f.v v9, v8
-; ZVFHMINLMULMAX1-RV32-NEXT: vfwcvt.f.f.v v8, v10
-; ZVFHMINLMULMAX1-RV32-NEXT: vsetvli zero, zero, e32, m1, ta, ma
-; ZVFHMINLMULMAX1-RV32-NEXT: vfmul.vv v8, v8, v9
-; ZVFHMINLMULMAX1-RV32-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
-; ZVFHMINLMULMAX1-RV32-NEXT: vfncvt.f.f.w v9, v8
-; ZVFHMINLMULMAX1-RV32-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
-; ZVFHMINLMULMAX1-RV32-NEXT: vslidedown.vi v8, v9, 2
-; ZVFHMINLMULMAX1-RV32-NEXT: addi a1, a0, 8
-; ZVFHMINLMULMAX1-RV32-NEXT: vse32.v v8, (a1)
-; ZVFHMINLMULMAX1-RV32-NEXT: vsetivli zero, 4, e16, mf4, ta, ma
-; ZVFHMINLMULMAX1-RV32-NEXT: vse16.v v9, (a0)
-; ZVFHMINLMULMAX1-RV32-NEXT: ret
-;
-; ZVFHMINLMULMAX1-RV64-LABEL: fmul_fv_v6f16:
-; ZVFHMINLMULMAX1-RV64: # %bb.0:
-; ZVFHMINLMULMAX1-RV64-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
-; ZVFHMINLMULMAX1-RV64-NEXT: vle16.v v8, (a0)
-; ZVFHMINLMULMAX1-RV64-NEXT: fcvt.s.h fa5, fa0
-; ZVFHMINLMULMAX1-RV64-NEXT: vsetvli a1, zero, e32, m1, ta, ma
-; ZVFHMINLMULMAX1-RV64-NEXT: vfmv.v.f v9, fa5
-; ZVFHMINLMULMAX1-RV64-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
-; ZVFHMINLMULMAX1-RV64-NEXT: vfncvt.f.f.w v10, v9
-; ZVFHMINLMULMAX1-RV64-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
-; ZVFHMINLMULMAX1-RV64-NEXT: vfwcvt.f.f.v v9, v8
-; ZVFHMINLMULMAX1-RV64-NEXT: vfwcvt.f.f.v v8, v10
-; ZVFHMINLMULMAX1-RV64-NEXT: vsetvli zero, zero, e32, m1, ta, ma
-; ZVFHMINLMULMAX1-RV64-NEXT: vfmul.vv v8, v8, v9
-; ZVFHMINLMULMAX1-RV64-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
-; ZVFHMINLMULMAX1-RV64-NEXT: vfncvt.f.f.w v9, v8
-; ZVFHMINLMULMAX1-RV64-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
-; ZVFHMINLMULMAX1-RV64-NEXT: vse64.v v9, (a0)
-; ZVFHMINLMULMAX1-RV64-NEXT: vslidedown.vi v8, v9, 2
-; ZVFHMINLMULMAX1-RV64-NEXT: addi a0, a0, 8
-; ZVFHMINLMULMAX1-RV64-NEXT: vse32.v v8, (a0)
-; ZVFHMINLMULMAX1-RV64-NEXT: ret
+; ZVFHMIN-RV32-LABEL: fmul_fv_v6f16:
+; ZVFHMIN-RV32: # %bb.0:
+; ZVFHMIN-RV32-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
+; ZVFHMIN-RV32-NEXT: vle16.v v8, (a0)
+; ZVFHMIN-RV32-NEXT: fcvt.s.h fa5, fa0
+; ZVFHMIN-RV32-NEXT: vsetvli a1, zero, e32, m1, ta, ma
+; ZVFHMIN-RV32-NEXT: vfmv.v.f v9, fa5
+; ZVFHMIN-RV32-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
+; ZVFHMIN-RV32-NEXT: vfncvt.f.f.w v10, v9
+; ZVFHMIN-RV32-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
+; ZVFHMIN-RV32-NEXT: vfwcvt.f.f.v v9, v8
+; ZVFHMIN-RV32-NEXT: vfwcvt.f.f.v v8, v10
+; ZVFHMIN-RV32-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; ZVFHMIN-RV32-NEXT: vfmul.vv v8, v8, v9
+; ZVFHMIN-RV32-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
+; ZVFHMIN-RV32-NEXT: vfncvt.f.f.w v9, v8
+; ZVFHMIN-RV32-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
+; ZVFHMIN-RV32-NEXT: vslidedown.vi v8, v9, 2
+; ZVFHMIN-RV32-NEXT: addi a1, a0, 8
+; ZVFHMIN-RV32-NEXT: vse32.v v8, (a1)
+; ZVFHMIN-RV32-NEXT: vsetivli zero, 4, e16, mf4, ta, ma
+; ZVFHMIN-RV32-NEXT: vse16.v v9, (a0)
+; ZVFHMIN-RV32-NEXT: ret
+;
+; ZVFHMIN-RV64-LABEL: fmul_fv_v6f16:
+; ZVFHMIN-RV64: # %bb.0:
+; ZVFHMIN-RV64-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
+; ZVFHMIN-RV64-NEXT: vle16.v v8, (a0)
+; ZVFHMIN-RV64-NEXT: fcvt.s.h fa5, fa0
+; ZVFHMIN-RV64-NEXT: vsetvli a1, zero, e32, m1, ta, ma
+; ZVFHMIN-RV64-NEXT: vfmv.v.f v9, fa5
+; ZVFHMIN-RV64-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
+; ZVFHMIN-RV64-NEXT: vfncvt.f.f.w v10, v9
+; ZVFHMIN-RV64-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
+; ZVFHMIN-RV64-NEXT: vfwcvt.f.f.v v9, v8
+; ZVFHMIN-RV64-NEXT: vfwcvt.f.f.v v8, v10
+; ZVFHMIN-RV64-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; ZVFHMIN-RV64-NEXT: vfmul.vv v8, v8, v9
+; ZVFHMIN-RV64-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
+; ZVFHMIN-RV64-NEXT: vfncvt.f.f.w v9, v8
+; ZVFHMIN-RV64-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
+; ZVFHMIN-RV64-NEXT: vse64.v v9, (a0)
+; ZVFHMIN-RV64-NEXT: vslidedown.vi v8, v9, 2
+; ZVFHMIN-RV64-NEXT: addi a0, a0, 8
+; ZVFHMIN-RV64-NEXT: vse32.v v8, (a0)
+; ZVFHMIN-RV64-NEXT: ret
%a = load <6 x half>, ptr %x
%b = insertelement <6 x half> poison, half %y, i32 0
%c = shufflevector <6 x half> %b, <6 x half> poison, <6 x i32> zeroinitializer
@@ -4364,99 +3092,52 @@ define void @fdiv_vf_v6f16(ptr %x, half %y) {
; ZVFH-NEXT: vse16.v v8, (a0)
; ZVFH-NEXT: ret
;
-; ZVFHMINLMULMAX2-RV32-LABEL: fdiv_vf_v6f16:
-; ZVFHMINLMULMAX2-RV32: # %bb.0:
-; ZVFHMINLMULMAX2-RV32-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
-; ZVFHMINLMULMAX2-RV32-NEXT: vle16.v v8, (a0)
-; ZVFHMINLMULMAX2-RV32-NEXT: fcvt.s.h fa5, fa0
-; ZVFHMINLMULMAX2-RV32-NEXT: vsetvli a1, zero, e32, m1, ta, ma
-; ZVFHMINLMULMAX2-RV32-NEXT: vfmv.v.f v9, fa5
-; ZVFHMINLMULMAX2-RV32-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
-; ZVFHMINLMULMAX2-RV32-NEXT: vfncvt.f.f.w v10, v9
-; ZVFHMINLMULMAX2-RV32-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
-; ZVFHMINLMULMAX2-RV32-NEXT: vfwcvt.f.f.v v9, v8
-; ZVFHMINLMULMAX2-RV32-NEXT: vfwcvt.f.f.v v8, v10
-; ZVFHMINLMULMAX2-RV32-NEXT: vsetvli zero, zero, e32, m1, ta, ma
-; ZVFHMINLMULMAX2-RV32-NEXT: vfdiv.vv v8, v9, v8
-; ZVFHMINLMULMAX2-RV32-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
-; ZVFHMINLMULMAX2-RV32-NEXT: vfncvt.f.f.w v9, v8
-; ZVFHMINLMULMAX2-RV32-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
-; ZVFHMINLMULMAX2-RV32-NEXT: vslidedown.vi v8, v9, 2
-; ZVFHMINLMULMAX2-RV32-NEXT: addi a1, a0, 8
-; ZVFHMINLMULMAX2-RV32-NEXT: vse32.v v8, (a1)
-; ZVFHMINLMULMAX2-RV32-NEXT: vsetivli zero, 4, e16, mf4, ta, ma
-; ZVFHMINLMULMAX2-RV32-NEXT: vse16.v v9, (a0)
-; ZVFHMINLMULMAX2-RV32-NEXT: ret
-;
-; ZVFHMINLMULMAX2-RV64-LABEL: fdiv_vf_v6f16:
-; ZVFHMINLMULMAX2-RV64: # %bb.0:
-; ZVFHMINLMULMAX2-RV64-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
-; ZVFHMINLMULMAX2-RV64-NEXT: vle16.v v8, (a0)
-; ZVFHMINLMULMAX2-RV64-NEXT: fcvt.s.h fa5, fa0
-; ZVFHMINLMULMAX2-RV64-NEXT: vsetvli a1, zero, e32, m1, ta, ma
-; ZVFHMINLMULMAX2-RV64-NEXT: vfmv.v.f v9, fa5
-; ZVFHMINLMULMAX2-RV64-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
-; ZVFHMINLMULMAX2-RV64-NEXT: vfncvt.f.f.w v10, v9
-; ZVFHMINLMULMAX2-RV64-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
-; ZVFHMINLMULMAX2-RV64-NEXT: vfwcvt.f.f.v v9, v8
-; ZVFHMINLMULMAX2-RV64-NEXT: vfwcvt.f.f.v v8, v10
-; ZVFHMINLMULMAX2-RV64-NEXT: vsetvli zero, zero, e32, m1, ta, ma
-; ZVFHMINLMULMAX2-RV64-NEXT: vfdiv.vv v8, v9, v8
-; ZVFHMINLMULMAX2-RV64-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
-; ZVFHMINLMULMAX2-RV64-NEXT: vfncvt.f.f.w v9, v8
-; ZVFHMINLMULMAX2-RV64-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
-; ZVFHMINLMULMAX2-RV64-NEXT: vse64.v v9, (a0)
-; ZVFHMINLMULMAX2-RV64-NEXT: vslidedown.vi v8, v9, 2
-; ZVFHMINLMULMAX2-RV64-NEXT: addi a0, a0, 8
-; ZVFHMINLMULMAX2-RV64-NEXT: vse32.v v8, (a0)
-; ZVFHMINLMULMAX2-RV64-NEXT: ret
-;
-; ZVFHMINLMULMAX1-RV32-LABEL: fdiv_vf_v6f16:
-; ZVFHMINLMULMAX1-RV32: # %bb.0:
-; ZVFHMINLMULMAX1-RV32-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
-; ZVFHMINLMULMAX1-RV32-NEXT: vle16.v v8, (a0)
-; ZVFHMINLMULMAX1-RV32-NEXT: fcvt.s.h fa5, fa0
-; ZVFHMINLMULMAX1-RV32-NEXT: vsetvli a1, zero, e32, m1, ta, ma
-; ZVFHMINLMULMAX1-RV32-NEXT: vfmv.v.f v9, fa5
-; ZVFHMINLMULMAX1-RV32-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
-; ZVFHMINLMULMAX1-RV32-NEXT: vfncvt.f.f.w v10, v9
-; ZVFHMINLMULMAX1-RV32-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
-; ZVFHMINLMULMAX1-RV32-NEXT: vfwcvt.f.f.v v9, v8
-; ZVFHMINLMULMAX1-RV32-NEXT: vfwcvt.f.f.v v8, v10
-; ZVFHMINLMULMAX1-RV32-NEXT: vsetvli zero, zero, e32, m1, ta, ma
-; ZVFHMINLMULMAX1-RV32-NEXT: vfdiv.vv v8, v9, v8
-; ZVFHMINLMULMAX1-RV32-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
-; ZVFHMINLMULMAX1-RV32-NEXT: vfncvt.f.f.w v9, v8
-; ZVFHMINLMULMAX1-RV32-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
-; ZVFHMINLMULMAX1-RV32-NEXT: vslidedown.vi v8, v9, 2
-; ZVFHMINLMULMAX1-RV32-NEXT: addi a1, a0, 8
-; ZVFHMINLMULMAX1-RV32-NEXT: vse32.v v8, (a1)
-; ZVFHMINLMULMAX1-RV32-NEXT: vsetivli zero, 4, e16, mf4, ta, ma
-; ZVFHMINLMULMAX1-RV32-NEXT: vse16.v v9, (a0)
-; ZVFHMINLMULMAX1-RV32-NEXT: ret
-;
-; ZVFHMINLMULMAX1-RV64-LABEL: fdiv_vf_v6f16:
-; ZVFHMINLMULMAX1-RV64: # %bb.0:
-; ZVFHMINLMULMAX1-RV64-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
-; ZVFHMINLMULMAX1-RV64-NEXT: vle16.v v8, (a0)
-; ZVFHMINLMULMAX1-RV64-NEXT: fcvt.s.h fa5, fa0
-; ZVFHMINLMULMAX1-RV64-NEXT: vsetvli a1, zero, e32, m1, ta, ma
-; ZVFHMINLMULMAX1-RV64-NEXT: vfmv.v.f v9, fa5
-; ZVFHMINLMULMAX1-RV64-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
-; ZVFHMINLMULMAX1-RV64-NEXT: vfncvt.f.f.w v10, v9
-; ZVFHMINLMULMAX1-RV64-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
-; ZVFHMINLMULMAX1-RV64-NEXT: vfwcvt.f.f.v v9, v8
-; ZVFHMINLMULMAX1-RV64-NEXT: vfwcvt.f.f.v v8, v10
-; ZVFHMINLMULMAX1-RV64-NEXT: vsetvli zero, zero, e32, m1, ta, ma
-; ZVFHMINLMULMAX1-RV64-NEXT: vfdiv.vv v8, v9, v8
-; ZVFHMINLMULMAX1-RV64-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
-; ZVFHMINLMULMAX1-RV64-NEXT: vfncvt.f.f.w v9, v8
-; ZVFHMINLMULMAX1-RV64-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
-; ZVFHMINLMULMAX1-RV64-NEXT: vse64.v v9, (a0)
-; ZVFHMINLMULMAX1-RV64-NEXT: vslidedown.vi v8, v9, 2
-; ZVFHMINLMULMAX1-RV64-NEXT: addi a0, a0, 8
-; ZVFHMINLMULMAX1-RV64-NEXT: vse32.v v8, (a0)
-; ZVFHMINLMULMAX1-RV64-NEXT: ret
+; ZVFHMIN-RV32-LABEL: fdiv_vf_v6f16:
+; ZVFHMIN-RV32: # %bb.0:
+; ZVFHMIN-RV32-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
+; ZVFHMIN-RV32-NEXT: vle16.v v8, (a0)
+; ZVFHMIN-RV32-NEXT: fcvt.s.h fa5, fa0
+; ZVFHMIN-RV32-NEXT: vsetvli a1, zero, e32, m1, ta, ma
+; ZVFHMIN-RV32-NEXT: vfmv.v.f v9, fa5
+; ZVFHMIN-RV32-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
+; ZVFHMIN-RV32-NEXT: vfncvt.f.f.w v10, v9
+; ZVFHMIN-RV32-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
+; ZVFHMIN-RV32-NEXT: vfwcvt.f.f.v v9, v8
+; ZVFHMIN-RV32-NEXT: vfwcvt.f.f.v v8, v10
+; ZVFHMIN-RV32-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; ZVFHMIN-RV32-NEXT: vfdiv.vv v8, v9, v8
+; ZVFHMIN-RV32-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
+; ZVFHMIN-RV32-NEXT: vfncvt.f.f.w v9, v8
+; ZVFHMIN-RV32-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
+; ZVFHMIN-RV32-NEXT: vslidedown.vi v8, v9, 2
+; ZVFHMIN-RV32-NEXT: addi a1, a0, 8
+; ZVFHMIN-RV32-NEXT: vse32.v v8, (a1)
+; ZVFHMIN-RV32-NEXT: vsetivli zero, 4, e16, mf4, ta, ma
+; ZVFHMIN-RV32-NEXT: vse16.v v9, (a0)
+; ZVFHMIN-RV32-NEXT: ret
+;
+; ZVFHMIN-RV64-LABEL: fdiv_vf_v6f16:
+; ZVFHMIN-RV64: # %bb.0:
+; ZVFHMIN-RV64-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
+; ZVFHMIN-RV64-NEXT: vle16.v v8, (a0)
+; ZVFHMIN-RV64-NEXT: fcvt.s.h fa5, fa0
+; ZVFHMIN-RV64-NEXT: vsetvli a1, zero, e32, m1, ta, ma
+; ZVFHMIN-RV64-NEXT: vfmv.v.f v9, fa5
+; ZVFHMIN-RV64-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
+; ZVFHMIN-RV64-NEXT: vfncvt.f.f.w v10, v9
+; ZVFHMIN-RV64-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
+; ZVFHMIN-RV64-NEXT: vfwcvt.f.f.v v9, v8
+; ZVFHMIN-RV64-NEXT: vfwcvt.f.f.v v8, v10
+; ZVFHMIN-RV64-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; ZVFHMIN-RV64-NEXT: vfdiv.vv v8, v9, v8
+; ZVFHMIN-RV64-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
+; ZVFHMIN-RV64-NEXT: vfncvt.f.f.w v9, v8
+; ZVFHMIN-RV64-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
+; ZVFHMIN-RV64-NEXT: vse64.v v9, (a0)
+; ZVFHMIN-RV64-NEXT: vslidedown.vi v8, v9, 2
+; ZVFHMIN-RV64-NEXT: addi a0, a0, 8
+; ZVFHMIN-RV64-NEXT: vse32.v v8, (a0)
+; ZVFHMIN-RV64-NEXT: ret
%a = load <6 x half>, ptr %x
%b = insertelement <6 x half> poison, half %y, i32 0
%c = shufflevector <6 x half> %b, <6 x half> poison, <6 x i32> zeroinitializer
@@ -4551,99 +3232,52 @@ define void @fdiv_fv_v6f16(ptr %x, half %y) {
; ZVFH-NEXT: vse16.v v8, (a0)
; ZVFH-NEXT: ret
;
-; ZVFHMINLMULMAX2-RV32-LABEL: fdiv_fv_v6f16:
-; ZVFHMINLMULMAX2-RV32: # %bb.0:
-; ZVFHMINLMULMAX2-RV32-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
-; ZVFHMINLMULMAX2-RV32-NEXT: vle16.v v8, (a0)
-; ZVFHMINLMULMAX2-RV32-NEXT: fcvt.s.h fa5, fa0
-; ZVFHMINLMULMAX2-RV32-NEXT: vsetvli a1, zero, e32, m1, ta, ma
-; ZVFHMINLMULMAX2-RV32-NEXT: vfmv.v.f v9, fa5
-; ZVFHMINLMULMAX2-RV32-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
-; ZVFHMINLMULMAX2-RV32-NEXT: vfncvt.f.f.w v10, v9
-; ZVFHMINLMULMAX2-RV32-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
-; ZVFHMINLMULMAX2-RV32-NEXT: vfwcvt.f.f.v v9, v8
-; ZVFHMINLMULMAX2-RV32-NEXT: vfwcvt.f.f.v v8, v10
-; ZVFHMINLMULMAX2-RV32-NEXT: vsetvli zero, zero, e32, m1, ta, ma
-; ZVFHMINLMULMAX2-RV32-NEXT: vfdiv.vv v8, v8, v9
-; ZVFHMINLMULMAX2-RV32-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
-; ZVFHMINLMULMAX2-RV32-NEXT: vfncvt.f.f.w v9, v8
-; ZVFHMINLMULMAX2-RV32-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
-; ZVFHMINLMULMAX2-RV32-NEXT: vslidedown.vi v8, v9, 2
-; ZVFHMINLMULMAX2-RV32-NEXT: addi a1, a0, 8
-; ZVFHMINLMULMAX2-RV32-NEXT: vse32.v v8, (a1)
-; ZVFHMINLMULMAX2-RV32-NEXT: vsetivli zero, 4, e16, mf4, ta, ma
-; ZVFHMINLMULMAX2-RV32-NEXT: vse16.v v9, (a0)
-; ZVFHMINLMULMAX2-RV32-NEXT: ret
-;
-; ZVFHMINLMULMAX2-RV64-LABEL: fdiv_fv_v6f16:
-; ZVFHMINLMULMAX2-RV64: # %bb.0:
-; ZVFHMINLMULMAX2-RV64-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
-; ZVFHMINLMULMAX2-RV64-NEXT: vle16.v v8, (a0)
-; ZVFHMINLMULMAX2-RV64-NEXT: fcvt.s.h fa5, fa0
-; ZVFHMINLMULMAX2-RV64-NEXT: vsetvli a1, zero, e32, m1, ta, ma
-; ZVFHMINLMULMAX2-RV64-NEXT: vfmv.v.f v9, fa5
-; ZVFHMINLMULMAX2-RV64-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
-; ZVFHMINLMULMAX2-RV64-NEXT: vfncvt.f.f.w v10, v9
-; ZVFHMINLMULMAX2-RV64-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
-; ZVFHMINLMULMAX2-RV64-NEXT: vfwcvt.f.f.v v9, v8
-; ZVFHMINLMULMAX2-RV64-NEXT: vfwcvt.f.f.v v8, v10
-; ZVFHMINLMULMAX2-RV64-NEXT: vsetvli zero, zero, e32, m1, ta, ma
-; ZVFHMINLMULMAX2-RV64-NEXT: vfdiv.vv v8, v8, v9
-; ZVFHMINLMULMAX2-RV64-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
-; ZVFHMINLMULMAX2-RV64-NEXT: vfncvt.f.f.w v9, v8
-; ZVFHMINLMULMAX2-RV64-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
-; ZVFHMINLMULMAX2-RV64-NEXT: vse64.v v9, (a0)
-; ZVFHMINLMULMAX2-RV64-NEXT: vslidedown.vi v8, v9, 2
-; ZVFHMINLMULMAX2-RV64-NEXT: addi a0, a0, 8
-; ZVFHMINLMULMAX2-RV64-NEXT: vse32.v v8, (a0)
-; ZVFHMINLMULMAX2-RV64-NEXT: ret
-;
-; ZVFHMINLMULMAX1-RV32-LABEL: fdiv_fv_v6f16:
-; ZVFHMINLMULMAX1-RV32: # %bb.0:
-; ZVFHMINLMULMAX1-RV32-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
-; ZVFHMINLMULMAX1-RV32-NEXT: vle16.v v8, (a0)
-; ZVFHMINLMULMAX1-RV32-NEXT: fcvt.s.h fa5, fa0
-; ZVFHMINLMULMAX1-RV32-NEXT: vsetvli a1, zero, e32, m1, ta, ma
-; ZVFHMINLMULMAX1-RV32-NEXT: vfmv.v.f v9, fa5
-; ZVFHMINLMULMAX1-RV32-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
-; ZVFHMINLMULMAX1-RV32-NEXT: vfncvt.f.f.w v10, v9
-; ZVFHMINLMULMAX1-RV32-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
-; ZVFHMINLMULMAX1-RV32-NEXT: vfwcvt.f.f.v v9, v8
-; ZVFHMINLMULMAX1-RV32-NEXT: vfwcvt.f.f.v v8, v10
-; ZVFHMINLMULMAX1-RV32-NEXT: vsetvli zero, zero, e32, m1, ta, ma
-; ZVFHMINLMULMAX1-RV32-NEXT: vfdiv.vv v8, v8, v9
-; ZVFHMINLMULMAX1-RV32-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
-; ZVFHMINLMULMAX1-RV32-NEXT: vfncvt.f.f.w v9, v8
-; ZVFHMINLMULMAX1-RV32-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
-; ZVFHMINLMULMAX1-RV32-NEXT: vslidedown.vi v8, v9, 2
-; ZVFHMINLMULMAX1-RV32-NEXT: addi a1, a0, 8
-; ZVFHMINLMULMAX1-RV32-NEXT: vse32.v v8, (a1)
-; ZVFHMINLMULMAX1-RV32-NEXT: vsetivli zero, 4, e16, mf4, ta, ma
-; ZVFHMINLMULMAX1-RV32-NEXT: vse16.v v9, (a0)
-; ZVFHMINLMULMAX1-RV32-NEXT: ret
-;
-; ZVFHMINLMULMAX1-RV64-LABEL: fdiv_fv_v6f16:
-; ZVFHMINLMULMAX1-RV64: # %bb.0:
-; ZVFHMINLMULMAX1-RV64-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
-; ZVFHMINLMULMAX1-RV64-NEXT: vle16.v v8, (a0)
-; ZVFHMINLMULMAX1-RV64-NEXT: fcvt.s.h fa5, fa0
-; ZVFHMINLMULMAX1-RV64-NEXT: vsetvli a1, zero, e32, m1, ta, ma
-; ZVFHMINLMULMAX1-RV64-NEXT: vfmv.v.f v9, fa5
-; ZVFHMINLMULMAX1-RV64-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
-; ZVFHMINLMULMAX1-RV64-NEXT: vfncvt.f.f.w v10, v9
-; ZVFHMINLMULMAX1-RV64-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
-; ZVFHMINLMULMAX1-RV64-NEXT: vfwcvt.f.f.v v9, v8
-; ZVFHMINLMULMAX1-RV64-NEXT: vfwcvt.f.f.v v8, v10
-; ZVFHMINLMULMAX1-RV64-NEXT: vsetvli zero, zero, e32, m1, ta, ma
-; ZVFHMINLMULMAX1-RV64-NEXT: vfdiv.vv v8, v8, v9
-; ZVFHMINLMULMAX1-RV64-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
-; ZVFHMINLMULMAX1-RV64-NEXT: vfncvt.f.f.w v9, v8
-; ZVFHMINLMULMAX1-RV64-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
-; ZVFHMINLMULMAX1-RV64-NEXT: vse64.v v9, (a0)
-; ZVFHMINLMULMAX1-RV64-NEXT: vslidedown.vi v8, v9, 2
-; ZVFHMINLMULMAX1-RV64-NEXT: addi a0, a0, 8
-; ZVFHMINLMULMAX1-RV64-NEXT: vse32.v v8, (a0)
-; ZVFHMINLMULMAX1-RV64-NEXT: ret
+; ZVFHMIN-RV32-LABEL: fdiv_fv_v6f16:
+; ZVFHMIN-RV32: # %bb.0:
+; ZVFHMIN-RV32-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
+; ZVFHMIN-RV32-NEXT: vle16.v v8, (a0)
+; ZVFHMIN-RV32-NEXT: fcvt.s.h fa5, fa0
+; ZVFHMIN-RV32-NEXT: vsetvli a1, zero, e32, m1, ta, ma
+; ZVFHMIN-RV32-NEXT: vfmv.v.f v9, fa5
+; ZVFHMIN-RV32-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
+; ZVFHMIN-RV32-NEXT: vfncvt.f.f.w v10, v9
+; ZVFHMIN-RV32-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
+; ZVFHMIN-RV32-NEXT: vfwcvt.f.f.v v9, v8
+; ZVFHMIN-RV32-NEXT: vfwcvt.f.f.v v8, v10
+; ZVFHMIN-RV32-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; ZVFHMIN-RV32-NEXT: vfdiv.vv v8, v8, v9
+; ZVFHMIN-RV32-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
+; ZVFHMIN-RV32-NEXT: vfncvt.f.f.w v9, v8
+; ZVFHMIN-RV32-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
+; ZVFHMIN-RV32-NEXT: vslidedown.vi v8, v9, 2
+; ZVFHMIN-RV32-NEXT: addi a1, a0, 8
+; ZVFHMIN-RV32-NEXT: vse32.v v8, (a1)
+; ZVFHMIN-RV32-NEXT: vsetivli zero, 4, e16, mf4, ta, ma
+; ZVFHMIN-RV32-NEXT: vse16.v v9, (a0)
+; ZVFHMIN-RV32-NEXT: ret
+;
+; ZVFHMIN-RV64-LABEL: fdiv_fv_v6f16:
+; ZVFHMIN-RV64: # %bb.0:
+; ZVFHMIN-RV64-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
+; ZVFHMIN-RV64-NEXT: vle16.v v8, (a0)
+; ZVFHMIN-RV64-NEXT: fcvt.s.h fa5, fa0
+; ZVFHMIN-RV64-NEXT: vsetvli a1, zero, e32, m1, ta, ma
+; ZVFHMIN-RV64-NEXT: vfmv.v.f v9, fa5
+; ZVFHMIN-RV64-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
+; ZVFHMIN-RV64-NEXT: vfncvt.f.f.w v10, v9
+; ZVFHMIN-RV64-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
+; ZVFHMIN-RV64-NEXT: vfwcvt.f.f.v v9, v8
+; ZVFHMIN-RV64-NEXT: vfwcvt.f.f.v v8, v10
+; ZVFHMIN-RV64-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; ZVFHMIN-RV64-NEXT: vfdiv.vv v8, v8, v9
+; ZVFHMIN-RV64-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
+; ZVFHMIN-RV64-NEXT: vfncvt.f.f.w v9, v8
+; ZVFHMIN-RV64-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
+; ZVFHMIN-RV64-NEXT: vse64.v v9, (a0)
+; ZVFHMIN-RV64-NEXT: vslidedown.vi v8, v9, 2
+; ZVFHMIN-RV64-NEXT: addi a0, a0, 8
+; ZVFHMIN-RV64-NEXT: vse32.v v8, (a0)
+; ZVFHMIN-RV64-NEXT: ret
%a = load <6 x half>, ptr %x
%b = insertelement <6 x half> poison, half %y, i32 0
%c = shufflevector <6 x half> %b, <6 x half> poison, <6 x i32> zeroinitializer
@@ -4743,107 +3377,56 @@ define void @fma_vf_v6f16(ptr %x, ptr %y, half %z) {
; ZVFH-NEXT: vse16.v v9, (a0)
; ZVFH-NEXT: ret
;
-; ZVFHMINLMULMAX2-RV32-LABEL: fma_vf_v6f16:
-; ZVFHMINLMULMAX2-RV32: # %bb.0:
-; ZVFHMINLMULMAX2-RV32-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
-; ZVFHMINLMULMAX2-RV32-NEXT: vle16.v v8, (a0)
-; ZVFHMINLMULMAX2-RV32-NEXT: vle16.v v9, (a1)
-; ZVFHMINLMULMAX2-RV32-NEXT: fcvt.s.h fa5, fa0
-; ZVFHMINLMULMAX2-RV32-NEXT: vsetvli a1, zero, e32, m1, ta, ma
-; ZVFHMINLMULMAX2-RV32-NEXT: vfmv.v.f v10, fa5
-; ZVFHMINLMULMAX2-RV32-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
-; ZVFHMINLMULMAX2-RV32-NEXT: vfncvt.f.f.w v11, v10
-; ZVFHMINLMULMAX2-RV32-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
-; ZVFHMINLMULMAX2-RV32-NEXT: vfwcvt.f.f.v v10, v9
-; ZVFHMINLMULMAX2-RV32-NEXT: vfwcvt.f.f.v v9, v8
-; ZVFHMINLMULMAX2-RV32-NEXT: vfwcvt.f.f.v v8, v11
-; ZVFHMINLMULMAX2-RV32-NEXT: vsetvli zero, zero, e32, m1, ta, ma
-; ZVFHMINLMULMAX2-RV32-NEXT: vfmadd.vv v8, v9, v10
-; ZVFHMINLMULMAX2-RV32-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
-; ZVFHMINLMULMAX2-RV32-NEXT: vfncvt.f.f.w v9, v8
-; ZVFHMINLMULMAX2-RV32-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
-; ZVFHMINLMULMAX2-RV32-NEXT: vslidedown.vi v8, v9, 2
-; ZVFHMINLMULMAX2-RV32-NEXT: addi a1, a0, 8
-; ZVFHMINLMULMAX2-RV32-NEXT: vse32.v v8, (a1)
-; ZVFHMINLMULMAX2-RV32-NEXT: vsetivli zero, 4, e16, mf4, ta, ma
-; ZVFHMINLMULMAX2-RV32-NEXT: vse16.v v9, (a0)
-; ZVFHMINLMULMAX2-RV32-NEXT: ret
-;
-; ZVFHMINLMULMAX2-RV64-LABEL: fma_vf_v6f16:
-; ZVFHMINLMULMAX2-RV64: # %bb.0:
-; ZVFHMINLMULMAX2-RV64-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
-; ZVFHMINLMULMAX2-RV64-NEXT: vle16.v v8, (a0)
-; ZVFHMINLMULMAX2-RV64-NEXT: vle16.v v9, (a1)
-; ZVFHMINLMULMAX2-RV64-NEXT: fcvt.s.h fa5, fa0
-; ZVFHMINLMULMAX2-RV64-NEXT: vsetvli a1, zero, e32, m1, ta, ma
-; ZVFHMINLMULMAX2-RV64-NEXT: vfmv.v.f v10, fa5
-; ZVFHMINLMULMAX2-RV64-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
-; ZVFHMINLMULMAX2-RV64-NEXT: vfncvt.f.f.w v11, v10
-; ZVFHMINLMULMAX2-RV64-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
-; ZVFHMINLMULMAX2-RV64-NEXT: vfwcvt.f.f.v v10, v9
-; ZVFHMINLMULMAX2-RV64-NEXT: vfwcvt.f.f.v v9, v8
-; ZVFHMINLMULMAX2-RV64-NEXT: vfwcvt.f.f.v v8, v11
-; ZVFHMINLMULMAX2-RV64-NEXT: vsetvli zero, zero, e32, m1, ta, ma
-; ZVFHMINLMULMAX2-RV64-NEXT: vfmadd.vv v8, v9, v10
-; ZVFHMINLMULMAX2-RV64-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
-; ZVFHMINLMULMAX2-RV64-NEXT: vfncvt.f.f.w v9, v8
-; ZVFHMINLMULMAX2-RV64-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
-; ZVFHMINLMULMAX2-RV64-NEXT: vse64.v v9, (a0)
-; ZVFHMINLMULMAX2-RV64-NEXT: vslidedown.vi v8, v9, 2
-; ZVFHMINLMULMAX2-RV64-NEXT: addi a0, a0, 8
-; ZVFHMINLMULMAX2-RV64-NEXT: vse32.v v8, (a0)
-; ZVFHMINLMULMAX2-RV64-NEXT: ret
-;
-; ZVFHMINLMULMAX1-RV32-LABEL: fma_vf_v6f16:
-; ZVFHMINLMULMAX1-RV32: # %bb.0:
-; ZVFHMINLMULMAX1-RV32-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
-; ZVFHMINLMULMAX1-RV32-NEXT: vle16.v v8, (a0)
-; ZVFHMINLMULMAX1-RV32-NEXT: vle16.v v9, (a1)
-; ZVFHMINLMULMAX1-RV32-NEXT: fcvt.s.h fa5, fa0
-; ZVFHMINLMULMAX1-RV32-NEXT: vsetvli a1, zero, e32, m1, ta, ma
-; ZVFHMINLMULMAX1-RV32-NEXT: vfmv.v.f v10, fa5
-; ZVFHMINLMULMAX1-RV32-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
-; ZVFHMINLMULMAX1-RV32-NEXT: vfncvt.f.f.w v11, v10
-; ZVFHMINLMULMAX1-RV32-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
-; ZVFHMINLMULMAX1-RV32-NEXT: vfwcvt.f.f.v v10, v9
-; ZVFHMINLMULMAX1-RV32-NEXT: vfwcvt.f.f.v v9, v8
-; ZVFHMINLMULMAX1-RV32-NEXT: vfwcvt.f.f.v v8, v11
-; ZVFHMINLMULMAX1-RV32-NEXT: vsetvli zero, zero, e32, m1, ta, ma
-; ZVFHMINLMULMAX1-RV32-NEXT: vfmadd.vv v8, v9, v10
-; ZVFHMINLMULMAX1-RV32-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
-; ZVFHMINLMULMAX1-RV32-NEXT: vfncvt.f.f.w v9, v8
-; ZVFHMINLMULMAX1-RV32-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
-; ZVFHMINLMULMAX1-RV32-NEXT: vslidedown.vi v8, v9, 2
-; ZVFHMINLMULMAX1-RV32-NEXT: addi a1, a0, 8
-; ZVFHMINLMULMAX1-RV32-NEXT: vse32.v v8, (a1)
-; ZVFHMINLMULMAX1-RV32-NEXT: vsetivli zero, 4, e16, mf4, ta, ma
-; ZVFHMINLMULMAX1-RV32-NEXT: vse16.v v9, (a0)
-; ZVFHMINLMULMAX1-RV32-NEXT: ret
-;
-; ZVFHMINLMULMAX1-RV64-LABEL: fma_vf_v6f16:
-; ZVFHMINLMULMAX1-RV64: # %bb.0:
-; ZVFHMINLMULMAX1-RV64-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
-; ZVFHMINLMULMAX1-RV64-NEXT: vle16.v v8, (a0)
-; ZVFHMINLMULMAX1-RV64-NEXT: vle16.v v9, (a1)
-; ZVFHMINLMULMAX1-RV64-NEXT: fcvt.s.h fa5, fa0
-; ZVFHMINLMULMAX1-RV64-NEXT: vsetvli a1, zero, e32, m1, ta, ma
-; ZVFHMINLMULMAX1-RV64-NEXT: vfmv.v.f v10, fa5
-; ZVFHMINLMULMAX1-RV64-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
-; ZVFHMINLMULMAX1-RV64-NEXT: vfncvt.f.f.w v11, v10
-; ZVFHMINLMULMAX1-RV64-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
-; ZVFHMINLMULMAX1-RV64-NEXT: vfwcvt.f.f.v v10, v9
-; ZVFHMINLMULMAX1-RV64-NEXT: vfwcvt.f.f.v v9, v8
-; ZVFHMINLMULMAX1-RV64-NEXT: vfwcvt.f.f.v v8, v11
-; ZVFHMINLMULMAX1-RV64-NEXT: vsetvli zero, zero, e32, m1, ta, ma
-; ZVFHMINLMULMAX1-RV64-NEXT: vfmadd.vv v8, v9, v10
-; ZVFHMINLMULMAX1-RV64-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
-; ZVFHMINLMULMAX1-RV64-NEXT: vfncvt.f.f.w v9, v8
-; ZVFHMINLMULMAX1-RV64-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
-; ZVFHMINLMULMAX1-RV64-NEXT: vse64.v v9, (a0)
-; ZVFHMINLMULMAX1-RV64-NEXT: vslidedown.vi v8, v9, 2
-; ZVFHMINLMULMAX1-RV64-NEXT: addi a0, a0, 8
-; ZVFHMINLMULMAX1-RV64-NEXT: vse32.v v8, (a0)
-; ZVFHMINLMULMAX1-RV64-NEXT: ret
+; ZVFHMIN-RV32-LABEL: fma_vf_v6f16:
+; ZVFHMIN-RV32: # %bb.0:
+; ZVFHMIN-RV32-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
+; ZVFHMIN-RV32-NEXT: vle16.v v8, (a0)
+; ZVFHMIN-RV32-NEXT: vle16.v v9, (a1)
+; ZVFHMIN-RV32-NEXT: fcvt.s.h fa5, fa0
+; ZVFHMIN-RV32-NEXT: vsetvli a1, zero, e32, m1, ta, ma
+; ZVFHMIN-RV32-NEXT: vfmv.v.f v10, fa5
+; ZVFHMIN-RV32-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
+; ZVFHMIN-RV32-NEXT: vfncvt.f.f.w v11, v10
+; ZVFHMIN-RV32-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
+; ZVFHMIN-RV32-NEXT: vfwcvt.f.f.v v10, v9
+; ZVFHMIN-RV32-NEXT: vfwcvt.f.f.v v9, v8
+; ZVFHMIN-RV32-NEXT: vfwcvt.f.f.v v8, v11
+; ZVFHMIN-RV32-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; ZVFHMIN-RV32-NEXT: vfmadd.vv v8, v9, v10
+; ZVFHMIN-RV32-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
+; ZVFHMIN-RV32-NEXT: vfncvt.f.f.w v9, v8
+; ZVFHMIN-RV32-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
+; ZVFHMIN-RV32-NEXT: vslidedown.vi v8, v9, 2
+; ZVFHMIN-RV32-NEXT: addi a1, a0, 8
+; ZVFHMIN-RV32-NEXT: vse32.v v8, (a1)
+; ZVFHMIN-RV32-NEXT: vsetivli zero, 4, e16, mf4, ta, ma
+; ZVFHMIN-RV32-NEXT: vse16.v v9, (a0)
+; ZVFHMIN-RV32-NEXT: ret
+;
+; ZVFHMIN-RV64-LABEL: fma_vf_v6f16:
+; ZVFHMIN-RV64: # %bb.0:
+; ZVFHMIN-RV64-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
+; ZVFHMIN-RV64-NEXT: vle16.v v8, (a0)
+; ZVFHMIN-RV64-NEXT: vle16.v v9, (a1)
+; ZVFHMIN-RV64-NEXT: fcvt.s.h fa5, fa0
+; ZVFHMIN-RV64-NEXT: vsetvli a1, zero, e32, m1, ta, ma
+; ZVFHMIN-RV64-NEXT: vfmv.v.f v10, fa5
+; ZVFHMIN-RV64-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
+; ZVFHMIN-RV64-NEXT: vfncvt.f.f.w v11, v10
+; ZVFHMIN-RV64-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
+; ZVFHMIN-RV64-NEXT: vfwcvt.f.f.v v10, v9
+; ZVFHMIN-RV64-NEXT: vfwcvt.f.f.v v9, v8
+; ZVFHMIN-RV64-NEXT: vfwcvt.f.f.v v8, v11
+; ZVFHMIN-RV64-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; ZVFHMIN-RV64-NEXT: vfmadd.vv v8, v9, v10
+; ZVFHMIN-RV64-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
+; ZVFHMIN-RV64-NEXT: vfncvt.f.f.w v9, v8
+; ZVFHMIN-RV64-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
+; ZVFHMIN-RV64-NEXT: vse64.v v9, (a0)
+; ZVFHMIN-RV64-NEXT: vslidedown.vi v8, v9, 2
+; ZVFHMIN-RV64-NEXT: addi a0, a0, 8
+; ZVFHMIN-RV64-NEXT: vse32.v v8, (a0)
+; ZVFHMIN-RV64-NEXT: ret
%a = load <6 x half>, ptr %x
%b = load <6 x half>, ptr %y
%c = insertelement <6 x half> poison, half %z, i32 0
@@ -4949,107 +3532,56 @@ define void @fma_fv_v6f16(ptr %x, ptr %y, half %z) {
; ZVFH-NEXT: vse16.v v9, (a0)
; ZVFH-NEXT: ret
;
-; ZVFHMINLMULMAX2-RV32-LABEL: fma_fv_v6f16:
-; ZVFHMINLMULMAX2-RV32: # %bb.0:
-; ZVFHMINLMULMAX2-RV32-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
-; ZVFHMINLMULMAX2-RV32-NEXT: vle16.v v8, (a0)
-; ZVFHMINLMULMAX2-RV32-NEXT: vle16.v v9, (a1)
-; ZVFHMINLMULMAX2-RV32-NEXT: fcvt.s.h fa5, fa0
-; ZVFHMINLMULMAX2-RV32-NEXT: vsetvli a1, zero, e32, m1, ta, ma
-; ZVFHMINLMULMAX2-RV32-NEXT: vfmv.v.f v10, fa5
-; ZVFHMINLMULMAX2-RV32-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
-; ZVFHMINLMULMAX2-RV32-NEXT: vfncvt.f.f.w v11, v10
-; ZVFHMINLMULMAX2-RV32-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
-; ZVFHMINLMULMAX2-RV32-NEXT: vfwcvt.f.f.v v10, v9
-; ZVFHMINLMULMAX2-RV32-NEXT: vfwcvt.f.f.v v9, v8
-; ZVFHMINLMULMAX2-RV32-NEXT: vfwcvt.f.f.v v8, v11
-; ZVFHMINLMULMAX2-RV32-NEXT: vsetvli zero, zero, e32, m1, ta, ma
-; ZVFHMINLMULMAX2-RV32-NEXT: vfmadd.vv v8, v9, v10
-; ZVFHMINLMULMAX2-RV32-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
-; ZVFHMINLMULMAX2-RV32-NEXT: vfncvt.f.f.w v9, v8
-; ZVFHMINLMULMAX2-RV32-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
-; ZVFHMINLMULMAX2-RV32-NEXT: vslidedown.vi v8, v9, 2
-; ZVFHMINLMULMAX2-RV32-NEXT: addi a1, a0, 8
-; ZVFHMINLMULMAX2-RV32-NEXT: vse32.v v8, (a1)
-; ZVFHMINLMULMAX2-RV32-NEXT: vsetivli zero, 4, e16, mf4, ta, ma
-; ZVFHMINLMULMAX2-RV32-NEXT: vse16.v v9, (a0)
-; ZVFHMINLMULMAX2-RV32-NEXT: ret
-;
-; ZVFHMINLMULMAX2-RV64-LABEL: fma_fv_v6f16:
-; ZVFHMINLMULMAX2-RV64: # %bb.0:
-; ZVFHMINLMULMAX2-RV64-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
-; ZVFHMINLMULMAX2-RV64-NEXT: vle16.v v8, (a0)
-; ZVFHMINLMULMAX2-RV64-NEXT: vle16.v v9, (a1)
-; ZVFHMINLMULMAX2-RV64-NEXT: fcvt.s.h fa5, fa0
-; ZVFHMINLMULMAX2-RV64-NEXT: vsetvli a1, zero, e32, m1, ta, ma
-; ZVFHMINLMULMAX2-RV64-NEXT: vfmv.v.f v10, fa5
-; ZVFHMINLMULMAX2-RV64-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
-; ZVFHMINLMULMAX2-RV64-NEXT: vfncvt.f.f.w v11, v10
-; ZVFHMINLMULMAX2-RV64-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
-; ZVFHMINLMULMAX2-RV64-NEXT: vfwcvt.f.f.v v10, v9
-; ZVFHMINLMULMAX2-RV64-NEXT: vfwcvt.f.f.v v9, v8
-; ZVFHMINLMULMAX2-RV64-NEXT: vfwcvt.f.f.v v8, v11
-; ZVFHMINLMULMAX2-RV64-NEXT: vsetvli zero, zero, e32, m1, ta, ma
-; ZVFHMINLMULMAX2-RV64-NEXT: vfmadd.vv v8, v9, v10
-; ZVFHMINLMULMAX2-RV64-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
-; ZVFHMINLMULMAX2-RV64-NEXT: vfncvt.f.f.w v9, v8
-; ZVFHMINLMULMAX2-RV64-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
-; ZVFHMINLMULMAX2-RV64-NEXT: vse64.v v9, (a0)
-; ZVFHMINLMULMAX2-RV64-NEXT: vslidedown.vi v8, v9, 2
-; ZVFHMINLMULMAX2-RV64-NEXT: addi a0, a0, 8
-; ZVFHMINLMULMAX2-RV64-NEXT: vse32.v v8, (a0)
-; ZVFHMINLMULMAX2-RV64-NEXT: ret
-;
-; ZVFHMINLMULMAX1-RV32-LABEL: fma_fv_v6f16:
-; ZVFHMINLMULMAX1-RV32: # %bb.0:
-; ZVFHMINLMULMAX1-RV32-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
-; ZVFHMINLMULMAX1-RV32-NEXT: vle16.v v8, (a0)
-; ZVFHMINLMULMAX1-RV32-NEXT: vle16.v v9, (a1)
-; ZVFHMINLMULMAX1-RV32-NEXT: fcvt.s.h fa5, fa0
-; ZVFHMINLMULMAX1-RV32-NEXT: vsetvli a1, zero, e32, m1, ta, ma
-; ZVFHMINLMULMAX1-RV32-NEXT: vfmv.v.f v10, fa5
-; ZVFHMINLMULMAX1-RV32-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
-; ZVFHMINLMULMAX1-RV32-NEXT: vfncvt.f.f.w v11, v10
-; ZVFHMINLMULMAX1-RV32-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
-; ZVFHMINLMULMAX1-RV32-NEXT: vfwcvt.f.f.v v10, v9
-; ZVFHMINLMULMAX1-RV32-NEXT: vfwcvt.f.f.v v9, v8
-; ZVFHMINLMULMAX1-RV32-NEXT: vfwcvt.f.f.v v8, v11
-; ZVFHMINLMULMAX1-RV32-NEXT: vsetvli zero, zero, e32, m1, ta, ma
-; ZVFHMINLMULMAX1-RV32-NEXT: vfmadd.vv v8, v9, v10
-; ZVFHMINLMULMAX1-RV32-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
-; ZVFHMINLMULMAX1-RV32-NEXT: vfncvt.f.f.w v9, v8
-; ZVFHMINLMULMAX1-RV32-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
-; ZVFHMINLMULMAX1-RV32-NEXT: vslidedown.vi v8, v9, 2
-; ZVFHMINLMULMAX1-RV32-NEXT: addi a1, a0, 8
-; ZVFHMINLMULMAX1-RV32-NEXT: vse32.v v8, (a1)
-; ZVFHMINLMULMAX1-RV32-NEXT: vsetivli zero, 4, e16, mf4, ta, ma
-; ZVFHMINLMULMAX1-RV32-NEXT: vse16.v v9, (a0)
-; ZVFHMINLMULMAX1-RV32-NEXT: ret
-;
-; ZVFHMINLMULMAX1-RV64-LABEL: fma_fv_v6f16:
-; ZVFHMINLMULMAX1-RV64: # %bb.0:
-; ZVFHMINLMULMAX1-RV64-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
-; ZVFHMINLMULMAX1-RV64-NEXT: vle16.v v8, (a0)
-; ZVFHMINLMULMAX1-RV64-NEXT: vle16.v v9, (a1)
-; ZVFHMINLMULMAX1-RV64-NEXT: fcvt.s.h fa5, fa0
-; ZVFHMINLMULMAX1-RV64-NEXT: vsetvli a1, zero, e32, m1, ta, ma
-; ZVFHMINLMULMAX1-RV64-NEXT: vfmv.v.f v10, fa5
-; ZVFHMINLMULMAX1-RV64-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
-; ZVFHMINLMULMAX1-RV64-NEXT: vfncvt.f.f.w v11, v10
-; ZVFHMINLMULMAX1-RV64-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
-; ZVFHMINLMULMAX1-RV64-NEXT: vfwcvt.f.f.v v10, v9
-; ZVFHMINLMULMAX1-RV64-NEXT: vfwcvt.f.f.v v9, v8
-; ZVFHMINLMULMAX1-RV64-NEXT: vfwcvt.f.f.v v8, v11
-; ZVFHMINLMULMAX1-RV64-NEXT: vsetvli zero, zero, e32, m1, ta, ma
-; ZVFHMINLMULMAX1-RV64-NEXT: vfmadd.vv v8, v9, v10
-; ZVFHMINLMULMAX1-RV64-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
-; ZVFHMINLMULMAX1-RV64-NEXT: vfncvt.f.f.w v9, v8
-; ZVFHMINLMULMAX1-RV64-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
-; ZVFHMINLMULMAX1-RV64-NEXT: vse64.v v9, (a0)
-; ZVFHMINLMULMAX1-RV64-NEXT: vslidedown.vi v8, v9, 2
-; ZVFHMINLMULMAX1-RV64-NEXT: addi a0, a0, 8
-; ZVFHMINLMULMAX1-RV64-NEXT: vse32.v v8, (a0)
-; ZVFHMINLMULMAX1-RV64-NEXT: ret
+; ZVFHMIN-RV32-LABEL: fma_fv_v6f16:
+; ZVFHMIN-RV32: # %bb.0:
+; ZVFHMIN-RV32-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
+; ZVFHMIN-RV32-NEXT: vle16.v v8, (a0)
+; ZVFHMIN-RV32-NEXT: vle16.v v9, (a1)
+; ZVFHMIN-RV32-NEXT: fcvt.s.h fa5, fa0
+; ZVFHMIN-RV32-NEXT: vsetvli a1, zero, e32, m1, ta, ma
+; ZVFHMIN-RV32-NEXT: vfmv.v.f v10, fa5
+; ZVFHMIN-RV32-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
+; ZVFHMIN-RV32-NEXT: vfncvt.f.f.w v11, v10
+; ZVFHMIN-RV32-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
+; ZVFHMIN-RV32-NEXT: vfwcvt.f.f.v v10, v9
+; ZVFHMIN-RV32-NEXT: vfwcvt.f.f.v v9, v8
+; ZVFHMIN-RV32-NEXT: vfwcvt.f.f.v v8, v11
+; ZVFHMIN-RV32-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; ZVFHMIN-RV32-NEXT: vfmadd.vv v8, v9, v10
+; ZVFHMIN-RV32-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
+; ZVFHMIN-RV32-NEXT: vfncvt.f.f.w v9, v8
+; ZVFHMIN-RV32-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
+; ZVFHMIN-RV32-NEXT: vslidedown.vi v8, v9, 2
+; ZVFHMIN-RV32-NEXT: addi a1, a0, 8
+; ZVFHMIN-RV32-NEXT: vse32.v v8, (a1)
+; ZVFHMIN-RV32-NEXT: vsetivli zero, 4, e16, mf4, ta, ma
+; ZVFHMIN-RV32-NEXT: vse16.v v9, (a0)
+; ZVFHMIN-RV32-NEXT: ret
+;
+; ZVFHMIN-RV64-LABEL: fma_fv_v6f16:
+; ZVFHMIN-RV64: # %bb.0:
+; ZVFHMIN-RV64-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
+; ZVFHMIN-RV64-NEXT: vle16.v v8, (a0)
+; ZVFHMIN-RV64-NEXT: vle16.v v9, (a1)
+; ZVFHMIN-RV64-NEXT: fcvt.s.h fa5, fa0
+; ZVFHMIN-RV64-NEXT: vsetvli a1, zero, e32, m1, ta, ma
+; ZVFHMIN-RV64-NEXT: vfmv.v.f v10, fa5
+; ZVFHMIN-RV64-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
+; ZVFHMIN-RV64-NEXT: vfncvt.f.f.w v11, v10
+; ZVFHMIN-RV64-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
+; ZVFHMIN-RV64-NEXT: vfwcvt.f.f.v v10, v9
+; ZVFHMIN-RV64-NEXT: vfwcvt.f.f.v v9, v8
+; ZVFHMIN-RV64-NEXT: vfwcvt.f.f.v v8, v11
+; ZVFHMIN-RV64-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; ZVFHMIN-RV64-NEXT: vfmadd.vv v8, v9, v10
+; ZVFHMIN-RV64-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
+; ZVFHMIN-RV64-NEXT: vfncvt.f.f.w v9, v8
+; ZVFHMIN-RV64-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
+; ZVFHMIN-RV64-NEXT: vse64.v v9, (a0)
+; ZVFHMIN-RV64-NEXT: vslidedown.vi v8, v9, 2
+; ZVFHMIN-RV64-NEXT: addi a0, a0, 8
+; ZVFHMIN-RV64-NEXT: vse32.v v8, (a0)
+; ZVFHMIN-RV64-NEXT: ret
%a = load <6 x half>, ptr %x
%b = load <6 x half>, ptr %y
%c = insertelement <6 x half> poison, half %z, i32 0
@@ -5161,127 +3693,66 @@ define void @fmsub_vf_v6f16(ptr %x, ptr %y, half %z) {
; ZVFH-NEXT: vse16.v v9, (a0)
; ZVFH-NEXT: ret
;
-; ZVFHMINLMULMAX2-RV32-LABEL: fmsub_vf_v6f16:
-; ZVFHMINLMULMAX2-RV32: # %bb.0:
-; ZVFHMINLMULMAX2-RV32-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
-; ZVFHMINLMULMAX2-RV32-NEXT: vle16.v v8, (a0)
-; ZVFHMINLMULMAX2-RV32-NEXT: vle16.v v9, (a1)
-; ZVFHMINLMULMAX2-RV32-NEXT: fcvt.s.h fa5, fa0
-; ZVFHMINLMULMAX2-RV32-NEXT: vsetvli a1, zero, e32, m1, ta, ma
-; ZVFHMINLMULMAX2-RV32-NEXT: vfmv.v.f v10, fa5
-; ZVFHMINLMULMAX2-RV32-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
-; ZVFHMINLMULMAX2-RV32-NEXT: vfncvt.f.f.w v11, v10
-; ZVFHMINLMULMAX2-RV32-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
-; ZVFHMINLMULMAX2-RV32-NEXT: vfwcvt.f.f.v v10, v9
-; ZVFHMINLMULMAX2-RV32-NEXT: vsetvli zero, zero, e32, m1, ta, ma
-; ZVFHMINLMULMAX2-RV32-NEXT: vfneg.v v9, v10
-; ZVFHMINLMULMAX2-RV32-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
-; ZVFHMINLMULMAX2-RV32-NEXT: vfncvt.f.f.w v10, v9
-; ZVFHMINLMULMAX2-RV32-NEXT: vfwcvt.f.f.v v9, v8
-; ZVFHMINLMULMAX2-RV32-NEXT: vfwcvt.f.f.v v8, v11
-; ZVFHMINLMULMAX2-RV32-NEXT: vfwcvt.f.f.v v11, v10
-; ZVFHMINLMULMAX2-RV32-NEXT: vsetvli zero, zero, e32, m1, ta, ma
-; ZVFHMINLMULMAX2-RV32-NEXT: vfmacc.vv v11, v9, v8
-; ZVFHMINLMULMAX2-RV32-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
-; ZVFHMINLMULMAX2-RV32-NEXT: vfncvt.f.f.w v8, v11
-; ZVFHMINLMULMAX2-RV32-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
-; ZVFHMINLMULMAX2-RV32-NEXT: vslidedown.vi v9, v8, 2
-; ZVFHMINLMULMAX2-RV32-NEXT: addi a1, a0, 8
-; ZVFHMINLMULMAX2-RV32-NEXT: vse32.v v9, (a1)
-; ZVFHMINLMULMAX2-RV32-NEXT: vsetivli zero, 4, e16, mf4, ta, ma
-; ZVFHMINLMULMAX2-RV32-NEXT: vse16.v v8, (a0)
-; ZVFHMINLMULMAX2-RV32-NEXT: ret
-;
-; ZVFHMINLMULMAX2-RV64-LABEL: fmsub_vf_v6f16:
-; ZVFHMINLMULMAX2-RV64: # %bb.0:
-; ZVFHMINLMULMAX2-RV64-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
-; ZVFHMINLMULMAX2-RV64-NEXT: vle16.v v8, (a0)
-; ZVFHMINLMULMAX2-RV64-NEXT: vle16.v v9, (a1)
-; ZVFHMINLMULMAX2-RV64-NEXT: fcvt.s.h fa5, fa0
-; ZVFHMINLMULMAX2-RV64-NEXT: vsetvli a1, zero, e32, m1, ta, ma
-; ZVFHMINLMULMAX2-RV64-NEXT: vfmv.v.f v10, fa5
-; ZVFHMINLMULMAX2-RV64-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
-; ZVFHMINLMULMAX2-RV64-NEXT: vfncvt.f.f.w v11, v10
-; ZVFHMINLMULMAX2-RV64-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
-; ZVFHMINLMULMAX2-RV64-NEXT: vfwcvt.f.f.v v10, v9
-; ZVFHMINLMULMAX2-RV64-NEXT: vsetvli zero, zero, e32, m1, ta, ma
-; ZVFHMINLMULMAX2-RV64-NEXT: vfneg.v v9, v10
-; ZVFHMINLMULMAX2-RV64-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
-; ZVFHMINLMULMAX2-RV64-NEXT: vfncvt.f.f.w v10, v9
-; ZVFHMINLMULMAX2-RV64-NEXT: vfwcvt.f.f.v v9, v8
-; ZVFHMINLMULMAX2-RV64-NEXT: vfwcvt.f.f.v v8, v11
-; ZVFHMINLMULMAX2-RV64-NEXT: vfwcvt.f.f.v v11, v10
-; ZVFHMINLMULMAX2-RV64-NEXT: vsetvli zero, zero, e32, m1, ta, ma
-; ZVFHMINLMULMAX2-RV64-NEXT: vfmacc.vv v11, v9, v8
-; ZVFHMINLMULMAX2-RV64-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
-; ZVFHMINLMULMAX2-RV64-NEXT: vfncvt.f.f.w v8, v11
-; ZVFHMINLMULMAX2-RV64-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
-; ZVFHMINLMULMAX2-RV64-NEXT: vse64.v v8, (a0)
-; ZVFHMINLMULMAX2-RV64-NEXT: vslidedown.vi v8, v8, 2
-; ZVFHMINLMULMAX2-RV64-NEXT: addi a0, a0, 8
-; ZVFHMINLMULMAX2-RV64-NEXT: vse32.v v8, (a0)
-; ZVFHMINLMULMAX2-RV64-NEXT: ret
-;
-; ZVFHMINLMULMAX1-RV32-LABEL: fmsub_vf_v6f16:
-; ZVFHMINLMULMAX1-RV32: # %bb.0:
-; ZVFHMINLMULMAX1-RV32-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
-; ZVFHMINLMULMAX1-RV32-NEXT: vle16.v v8, (a0)
-; ZVFHMINLMULMAX1-RV32-NEXT: vle16.v v9, (a1)
-; ZVFHMINLMULMAX1-RV32-NEXT: fcvt.s.h fa5, fa0
-; ZVFHMINLMULMAX1-RV32-NEXT: vsetvli a1, zero, e32, m1, ta, ma
-; ZVFHMINLMULMAX1-RV32-NEXT: vfmv.v.f v10, fa5
-; ZVFHMINLMULMAX1-RV32-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
-; ZVFHMINLMULMAX1-RV32-NEXT: vfncvt.f.f.w v11, v10
-; ZVFHMINLMULMAX1-RV32-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
-; ZVFHMINLMULMAX1-RV32-NEXT: vfwcvt.f.f.v v10, v9
-; ZVFHMINLMULMAX1-RV32-NEXT: vsetvli zero, zero, e32, m1, ta, ma
-; ZVFHMINLMULMAX1-RV32-NEXT: vfneg.v v9, v10
-; ZVFHMINLMULMAX1-RV32-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
-; ZVFHMINLMULMAX1-RV32-NEXT: vfncvt.f.f.w v10, v9
-; ZVFHMINLMULMAX1-RV32-NEXT: vfwcvt.f.f.v v9, v8
-; ZVFHMINLMULMAX1-RV32-NEXT: vfwcvt.f.f.v v8, v11
-; ZVFHMINLMULMAX1-RV32-NEXT: vfwcvt.f.f.v v11, v10
-; ZVFHMINLMULMAX1-RV32-NEXT: vsetvli zero, zero, e32, m1, ta, ma
-; ZVFHMINLMULMAX1-RV32-NEXT: vfmacc.vv v11, v9, v8
-; ZVFHMINLMULMAX1-RV32-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
-; ZVFHMINLMULMAX1-RV32-NEXT: vfncvt.f.f.w v8, v11
-; ZVFHMINLMULMAX1-RV32-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
-; ZVFHMINLMULMAX1-RV32-NEXT: vslidedown.vi v9, v8, 2
-; ZVFHMINLMULMAX1-RV32-NEXT: addi a1, a0, 8
-; ZVFHMINLMULMAX1-RV32-NEXT: vse32.v v9, (a1)
-; ZVFHMINLMULMAX1-RV32-NEXT: vsetivli zero, 4, e16, mf4, ta, ma
-; ZVFHMINLMULMAX1-RV32-NEXT: vse16.v v8, (a0)
-; ZVFHMINLMULMAX1-RV32-NEXT: ret
-;
-; ZVFHMINLMULMAX1-RV64-LABEL: fmsub_vf_v6f16:
-; ZVFHMINLMULMAX1-RV64: # %bb.0:
-; ZVFHMINLMULMAX1-RV64-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
-; ZVFHMINLMULMAX1-RV64-NEXT: vle16.v v8, (a0)
-; ZVFHMINLMULMAX1-RV64-NEXT: vle16.v v9, (a1)
-; ZVFHMINLMULMAX1-RV64-NEXT: fcvt.s.h fa5, fa0
-; ZVFHMINLMULMAX1-RV64-NEXT: vsetvli a1, zero, e32, m1, ta, ma
-; ZVFHMINLMULMAX1-RV64-NEXT: vfmv.v.f v10, fa5
-; ZVFHMINLMULMAX1-RV64-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
-; ZVFHMINLMULMAX1-RV64-NEXT: vfncvt.f.f.w v11, v10
-; ZVFHMINLMULMAX1-RV64-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
-; ZVFHMINLMULMAX1-RV64-NEXT: vfwcvt.f.f.v v10, v9
-; ZVFHMINLMULMAX1-RV64-NEXT: vsetvli zero, zero, e32, m1, ta, ma
-; ZVFHMINLMULMAX1-RV64-NEXT: vfneg.v v9, v10
-; ZVFHMINLMULMAX1-RV64-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
-; ZVFHMINLMULMAX1-RV64-NEXT: vfncvt.f.f.w v10, v9
-; ZVFHMINLMULMAX1-RV64-NEXT: vfwcvt.f.f.v v9, v8
-; ZVFHMINLMULMAX1-RV64-NEXT: vfwcvt.f.f.v v8, v11
-; ZVFHMINLMULMAX1-RV64-NEXT: vfwcvt.f.f.v v11, v10
-; ZVFHMINLMULMAX1-RV64-NEXT: vsetvli zero, zero, e32, m1, ta, ma
-; ZVFHMINLMULMAX1-RV64-NEXT: vfmacc.vv v11, v9, v8
-; ZVFHMINLMULMAX1-RV64-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
-; ZVFHMINLMULMAX1-RV64-NEXT: vfncvt.f.f.w v8, v11
-; ZVFHMINLMULMAX1-RV64-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
-; ZVFHMINLMULMAX1-RV64-NEXT: vse64.v v8, (a0)
-; ZVFHMINLMULMAX1-RV64-NEXT: vslidedown.vi v8, v8, 2
-; ZVFHMINLMULMAX1-RV64-NEXT: addi a0, a0, 8
-; ZVFHMINLMULMAX1-RV64-NEXT: vse32.v v8, (a0)
-; ZVFHMINLMULMAX1-RV64-NEXT: ret
+; ZVFHMIN-RV32-LABEL: fmsub_vf_v6f16:
+; ZVFHMIN-RV32: # %bb.0:
+; ZVFHMIN-RV32-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
+; ZVFHMIN-RV32-NEXT: vle16.v v8, (a0)
+; ZVFHMIN-RV32-NEXT: vle16.v v9, (a1)
+; ZVFHMIN-RV32-NEXT: fcvt.s.h fa5, fa0
+; ZVFHMIN-RV32-NEXT: vsetvli a1, zero, e32, m1, ta, ma
+; ZVFHMIN-RV32-NEXT: vfmv.v.f v10, fa5
+; ZVFHMIN-RV32-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
+; ZVFHMIN-RV32-NEXT: vfncvt.f.f.w v11, v10
+; ZVFHMIN-RV32-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
+; ZVFHMIN-RV32-NEXT: vfwcvt.f.f.v v10, v9
+; ZVFHMIN-RV32-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; ZVFHMIN-RV32-NEXT: vfneg.v v9, v10
+; ZVFHMIN-RV32-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
+; ZVFHMIN-RV32-NEXT: vfncvt.f.f.w v10, v9
+; ZVFHMIN-RV32-NEXT: vfwcvt.f.f.v v9, v8
+; ZVFHMIN-RV32-NEXT: vfwcvt.f.f.v v8, v11
+; ZVFHMIN-RV32-NEXT: vfwcvt.f.f.v v11, v10
+; ZVFHMIN-RV32-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; ZVFHMIN-RV32-NEXT: vfmacc.vv v11, v9, v8
+; ZVFHMIN-RV32-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
+; ZVFHMIN-RV32-NEXT: vfncvt.f.f.w v8, v11
+; ZVFHMIN-RV32-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
+; ZVFHMIN-RV32-NEXT: vslidedown.vi v9, v8, 2
+; ZVFHMIN-RV32-NEXT: addi a1, a0, 8
+; ZVFHMIN-RV32-NEXT: vse32.v v9, (a1)
+; ZVFHMIN-RV32-NEXT: vsetivli zero, 4, e16, mf4, ta, ma
+; ZVFHMIN-RV32-NEXT: vse16.v v8, (a0)
+; ZVFHMIN-RV32-NEXT: ret
+;
+; ZVFHMIN-RV64-LABEL: fmsub_vf_v6f16:
+; ZVFHMIN-RV64: # %bb.0:
+; ZVFHMIN-RV64-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
+; ZVFHMIN-RV64-NEXT: vle16.v v8, (a0)
+; ZVFHMIN-RV64-NEXT: vle16.v v9, (a1)
+; ZVFHMIN-RV64-NEXT: fcvt.s.h fa5, fa0
+; ZVFHMIN-RV64-NEXT: vsetvli a1, zero, e32, m1, ta, ma
+; ZVFHMIN-RV64-NEXT: vfmv.v.f v10, fa5
+; ZVFHMIN-RV64-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
+; ZVFHMIN-RV64-NEXT: vfncvt.f.f.w v11, v10
+; ZVFHMIN-RV64-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
+; ZVFHMIN-RV64-NEXT: vfwcvt.f.f.v v10, v9
+; ZVFHMIN-RV64-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; ZVFHMIN-RV64-NEXT: vfneg.v v9, v10
+; ZVFHMIN-RV64-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
+; ZVFHMIN-RV64-NEXT: vfncvt.f.f.w v10, v9
+; ZVFHMIN-RV64-NEXT: vfwcvt.f.f.v v9, v8
+; ZVFHMIN-RV64-NEXT: vfwcvt.f.f.v v8, v11
+; ZVFHMIN-RV64-NEXT: vfwcvt.f.f.v v11, v10
+; ZVFHMIN-RV64-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; ZVFHMIN-RV64-NEXT: vfmacc.vv v11, v9, v8
+; ZVFHMIN-RV64-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
+; ZVFHMIN-RV64-NEXT: vfncvt.f.f.w v8, v11
+; ZVFHMIN-RV64-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
+; ZVFHMIN-RV64-NEXT: vse64.v v8, (a0)
+; ZVFHMIN-RV64-NEXT: vslidedown.vi v8, v8, 2
+; ZVFHMIN-RV64-NEXT: addi a0, a0, 8
+; ZVFHMIN-RV64-NEXT: vse32.v v8, (a0)
+; ZVFHMIN-RV64-NEXT: ret
%a = load <6 x half>, ptr %x
%b = load <6 x half>, ptr %y
%c = insertelement <6 x half> poison, half %z, i32 0
@@ -5558,107 +4029,56 @@ define void @ceil_v6f16(ptr %x) {
; ZVFH-NEXT: vse16.v v8, (a0)
; ZVFH-NEXT: ret
;
-; ZVFHMINLMULMAX2-RV32-LABEL: ceil_v6f16:
-; ZVFHMINLMULMAX2-RV32: # %bb.0:
-; ZVFHMINLMULMAX2-RV32-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
-; ZVFHMINLMULMAX2-RV32-NEXT: vle16.v v8, (a0)
-; ZVFHMINLMULMAX2-RV32-NEXT: vfwcvt.f.f.v v9, v8
-; ZVFHMINLMULMAX2-RV32-NEXT: vsetvli zero, zero, e32, m1, ta, ma
-; ZVFHMINLMULMAX2-RV32-NEXT: vfabs.v v8, v9
-; ZVFHMINLMULMAX2-RV32-NEXT: lui a1, 307200
-; ZVFHMINLMULMAX2-RV32-NEXT: fmv.w.x fa5, a1
-; ZVFHMINLMULMAX2-RV32-NEXT: vmflt.vf v0, v8, fa5
-; ZVFHMINLMULMAX2-RV32-NEXT: fsrmi a1, 3
-; ZVFHMINLMULMAX2-RV32-NEXT: vfcvt.x.f.v v8, v9, v0.t
-; ZVFHMINLMULMAX2-RV32-NEXT: fsrm a1
-; ZVFHMINLMULMAX2-RV32-NEXT: vfcvt.f.x.v v8, v8, v0.t
-; ZVFHMINLMULMAX2-RV32-NEXT: vsetvli zero, zero, e32, m1, ta, mu
-; ZVFHMINLMULMAX2-RV32-NEXT: vfsgnj.vv v9, v8, v9, v0.t
-; ZVFHMINLMULMAX2-RV32-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
-; ZVFHMINLMULMAX2-RV32-NEXT: vfncvt.f.f.w v8, v9
-; ZVFHMINLMULMAX2-RV32-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
-; ZVFHMINLMULMAX2-RV32-NEXT: vslidedown.vi v9, v8, 2
-; ZVFHMINLMULMAX2-RV32-NEXT: addi a1, a0, 8
-; ZVFHMINLMULMAX2-RV32-NEXT: vse32.v v9, (a1)
-; ZVFHMINLMULMAX2-RV32-NEXT: vsetivli zero, 4, e16, mf4, ta, ma
-; ZVFHMINLMULMAX2-RV32-NEXT: vse16.v v8, (a0)
-; ZVFHMINLMULMAX2-RV32-NEXT: ret
-;
-; ZVFHMINLMULMAX2-RV64-LABEL: ceil_v6f16:
-; ZVFHMINLMULMAX2-RV64: # %bb.0:
-; ZVFHMINLMULMAX2-RV64-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
-; ZVFHMINLMULMAX2-RV64-NEXT: vle16.v v8, (a0)
-; ZVFHMINLMULMAX2-RV64-NEXT: vfwcvt.f.f.v v9, v8
-; ZVFHMINLMULMAX2-RV64-NEXT: vsetvli zero, zero, e32, m1, ta, ma
-; ZVFHMINLMULMAX2-RV64-NEXT: vfabs.v v8, v9
-; ZVFHMINLMULMAX2-RV64-NEXT: lui a1, 307200
-; ZVFHMINLMULMAX2-RV64-NEXT: fmv.w.x fa5, a1
-; ZVFHMINLMULMAX2-RV64-NEXT: vmflt.vf v0, v8, fa5
-; ZVFHMINLMULMAX2-RV64-NEXT: fsrmi a1, 3
-; ZVFHMINLMULMAX2-RV64-NEXT: vfcvt.x.f.v v8, v9, v0.t
-; ZVFHMINLMULMAX2-RV64-NEXT: fsrm a1
-; ZVFHMINLMULMAX2-RV64-NEXT: vfcvt.f.x.v v8, v8, v0.t
-; ZVFHMINLMULMAX2-RV64-NEXT: vsetvli zero, zero, e32, m1, ta, mu
-; ZVFHMINLMULMAX2-RV64-NEXT: vfsgnj.vv v9, v8, v9, v0.t
-; ZVFHMINLMULMAX2-RV64-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
-; ZVFHMINLMULMAX2-RV64-NEXT: vfncvt.f.f.w v8, v9
-; ZVFHMINLMULMAX2-RV64-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
-; ZVFHMINLMULMAX2-RV64-NEXT: vse64.v v8, (a0)
-; ZVFHMINLMULMAX2-RV64-NEXT: vslidedown.vi v8, v8, 2
-; ZVFHMINLMULMAX2-RV64-NEXT: addi a0, a0, 8
-; ZVFHMINLMULMAX2-RV64-NEXT: vse32.v v8, (a0)
-; ZVFHMINLMULMAX2-RV64-NEXT: ret
-;
-; ZVFHMINLMULMAX1-RV32-LABEL: ceil_v6f16:
-; ZVFHMINLMULMAX1-RV32: # %bb.0:
-; ZVFHMINLMULMAX1-RV32-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
-; ZVFHMINLMULMAX1-RV32-NEXT: vle16.v v8, (a0)
-; ZVFHMINLMULMAX1-RV32-NEXT: vfwcvt.f.f.v v9, v8
-; ZVFHMINLMULMAX1-RV32-NEXT: vsetvli zero, zero, e32, m1, ta, ma
-; ZVFHMINLMULMAX1-RV32-NEXT: vfabs.v v8, v9
-; ZVFHMINLMULMAX1-RV32-NEXT: lui a1, 307200
-; ZVFHMINLMULMAX1-RV32-NEXT: fmv.w.x fa5, a1
-; ZVFHMINLMULMAX1-RV32-NEXT: vmflt.vf v0, v8, fa5
-; ZVFHMINLMULMAX1-RV32-NEXT: fsrmi a1, 3
-; ZVFHMINLMULMAX1-RV32-NEXT: vfcvt.x.f.v v8, v9, v0.t
-; ZVFHMINLMULMAX1-RV32-NEXT: fsrm a1
-; ZVFHMINLMULMAX1-RV32-NEXT: vfcvt.f.x.v v8, v8, v0.t
-; ZVFHMINLMULMAX1-RV32-NEXT: vsetvli zero, zero, e32, m1, ta, mu
-; ZVFHMINLMULMAX1-RV32-NEXT: vfsgnj.vv v9, v8, v9, v0.t
-; ZVFHMINLMULMAX1-RV32-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
-; ZVFHMINLMULMAX1-RV32-NEXT: vfncvt.f.f.w v8, v9
-; ZVFHMINLMULMAX1-RV32-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
-; ZVFHMINLMULMAX1-RV32-NEXT: vslidedown.vi v9, v8, 2
-; ZVFHMINLMULMAX1-RV32-NEXT: addi a1, a0, 8
-; ZVFHMINLMULMAX1-RV32-NEXT: vse32.v v9, (a1)
-; ZVFHMINLMULMAX1-RV32-NEXT: vsetivli zero, 4, e16, mf4, ta, ma
-; ZVFHMINLMULMAX1-RV32-NEXT: vse16.v v8, (a0)
-; ZVFHMINLMULMAX1-RV32-NEXT: ret
-;
-; ZVFHMINLMULMAX1-RV64-LABEL: ceil_v6f16:
-; ZVFHMINLMULMAX1-RV64: # %bb.0:
-; ZVFHMINLMULMAX1-RV64-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
-; ZVFHMINLMULMAX1-RV64-NEXT: vle16.v v8, (a0)
-; ZVFHMINLMULMAX1-RV64-NEXT: vfwcvt.f.f.v v9, v8
-; ZVFHMINLMULMAX1-RV64-NEXT: vsetvli zero, zero, e32, m1, ta, ma
-; ZVFHMINLMULMAX1-RV64-NEXT: vfabs.v v8, v9
-; ZVFHMINLMULMAX1-RV64-NEXT: lui a1, 307200
-; ZVFHMINLMULMAX1-RV64-NEXT: fmv.w.x fa5, a1
-; ZVFHMINLMULMAX1-RV64-NEXT: vmflt.vf v0, v8, fa5
-; ZVFHMINLMULMAX1-RV64-NEXT: fsrmi a1, 3
-; ZVFHMINLMULMAX1-RV64-NEXT: vfcvt.x.f.v v8, v9, v0.t
-; ZVFHMINLMULMAX1-RV64-NEXT: fsrm a1
-; ZVFHMINLMULMAX1-RV64-NEXT: vfcvt.f.x.v v8, v8, v0.t
-; ZVFHMINLMULMAX1-RV64-NEXT: vsetvli zero, zero, e32, m1, ta, mu
-; ZVFHMINLMULMAX1-RV64-NEXT: vfsgnj.vv v9, v8, v9, v0.t
-; ZVFHMINLMULMAX1-RV64-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
-; ZVFHMINLMULMAX1-RV64-NEXT: vfncvt.f.f.w v8, v9
-; ZVFHMINLMULMAX1-RV64-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
-; ZVFHMINLMULMAX1-RV64-NEXT: vse64.v v8, (a0)
-; ZVFHMINLMULMAX1-RV64-NEXT: vslidedown.vi v8, v8, 2
-; ZVFHMINLMULMAX1-RV64-NEXT: addi a0, a0, 8
-; ZVFHMINLMULMAX1-RV64-NEXT: vse32.v v8, (a0)
-; ZVFHMINLMULMAX1-RV64-NEXT: ret
+; ZVFHMIN-RV32-LABEL: ceil_v6f16:
+; ZVFHMIN-RV32: # %bb.0:
+; ZVFHMIN-RV32-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
+; ZVFHMIN-RV32-NEXT: vle16.v v8, (a0)
+; ZVFHMIN-RV32-NEXT: vfwcvt.f.f.v v9, v8
+; ZVFHMIN-RV32-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; ZVFHMIN-RV32-NEXT: vfabs.v v8, v9
+; ZVFHMIN-RV32-NEXT: lui a1, 307200
+; ZVFHMIN-RV32-NEXT: fmv.w.x fa5, a1
+; ZVFHMIN-RV32-NEXT: vmflt.vf v0, v8, fa5
+; ZVFHMIN-RV32-NEXT: fsrmi a1, 3
+; ZVFHMIN-RV32-NEXT: vfcvt.x.f.v v8, v9, v0.t
+; ZVFHMIN-RV32-NEXT: fsrm a1
+; ZVFHMIN-RV32-NEXT: vfcvt.f.x.v v8, v8, v0.t
+; ZVFHMIN-RV32-NEXT: vsetvli zero, zero, e32, m1, ta, mu
+; ZVFHMIN-RV32-NEXT: vfsgnj.vv v9, v8, v9, v0.t
+; ZVFHMIN-RV32-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
+; ZVFHMIN-RV32-NEXT: vfncvt.f.f.w v8, v9
+; ZVFHMIN-RV32-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
+; ZVFHMIN-RV32-NEXT: vslidedown.vi v9, v8, 2
+; ZVFHMIN-RV32-NEXT: addi a1, a0, 8
+; ZVFHMIN-RV32-NEXT: vse32.v v9, (a1)
+; ZVFHMIN-RV32-NEXT: vsetivli zero, 4, e16, mf4, ta, ma
+; ZVFHMIN-RV32-NEXT: vse16.v v8, (a0)
+; ZVFHMIN-RV32-NEXT: ret
+;
+; ZVFHMIN-RV64-LABEL: ceil_v6f16:
+; ZVFHMIN-RV64: # %bb.0:
+; ZVFHMIN-RV64-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
+; ZVFHMIN-RV64-NEXT: vle16.v v8, (a0)
+; ZVFHMIN-RV64-NEXT: vfwcvt.f.f.v v9, v8
+; ZVFHMIN-RV64-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; ZVFHMIN-RV64-NEXT: vfabs.v v8, v9
+; ZVFHMIN-RV64-NEXT: lui a1, 307200
+; ZVFHMIN-RV64-NEXT: fmv.w.x fa5, a1
+; ZVFHMIN-RV64-NEXT: vmflt.vf v0, v8, fa5
+; ZVFHMIN-RV64-NEXT: fsrmi a1, 3
+; ZVFHMIN-RV64-NEXT: vfcvt.x.f.v v8, v9, v0.t
+; ZVFHMIN-RV64-NEXT: fsrm a1
+; ZVFHMIN-RV64-NEXT: vfcvt.f.x.v v8, v8, v0.t
+; ZVFHMIN-RV64-NEXT: vsetvli zero, zero, e32, m1, ta, mu
+; ZVFHMIN-RV64-NEXT: vfsgnj.vv v9, v8, v9, v0.t
+; ZVFHMIN-RV64-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
+; ZVFHMIN-RV64-NEXT: vfncvt.f.f.w v8, v9
+; ZVFHMIN-RV64-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
+; ZVFHMIN-RV64-NEXT: vse64.v v8, (a0)
+; ZVFHMIN-RV64-NEXT: vslidedown.vi v8, v8, 2
+; ZVFHMIN-RV64-NEXT: addi a0, a0, 8
+; ZVFHMIN-RV64-NEXT: vse32.v v8, (a0)
+; ZVFHMIN-RV64-NEXT: ret
%a = load <6 x half>, ptr %x
%b = call <6 x half> @llvm.ceil.v6f16(<6 x half> %a)
store <6 x half> %b, ptr %x
@@ -5796,107 +4216,56 @@ define void @floor_v6f16(ptr %x) {
; ZVFH-NEXT: vse16.v v8, (a0)
; ZVFH-NEXT: ret
;
-; ZVFHMINLMULMAX2-RV32-LABEL: floor_v6f16:
-; ZVFHMINLMULMAX2-RV32: # %bb.0:
-; ZVFHMINLMULMAX2-RV32-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
-; ZVFHMINLMULMAX2-RV32-NEXT: vle16.v v8, (a0)
-; ZVFHMINLMULMAX2-RV32-NEXT: vfwcvt.f.f.v v9, v8
-; ZVFHMINLMULMAX2-RV32-NEXT: vsetvli zero, zero, e32, m1, ta, ma
-; ZVFHMINLMULMAX2-RV32-NEXT: vfabs.v v8, v9
-; ZVFHMINLMULMAX2-RV32-NEXT: lui a1, 307200
-; ZVFHMINLMULMAX2-RV32-NEXT: fmv.w.x fa5, a1
-; ZVFHMINLMULMAX2-RV32-NEXT: vmflt.vf v0, v8, fa5
-; ZVFHMINLMULMAX2-RV32-NEXT: fsrmi a1, 2
-; ZVFHMINLMULMAX2-RV32-NEXT: vfcvt.x.f.v v8, v9, v0.t
-; ZVFHMINLMULMAX2-RV32-NEXT: fsrm a1
-; ZVFHMINLMULMAX2-RV32-NEXT: vfcvt.f.x.v v8, v8, v0.t
-; ZVFHMINLMULMAX2-RV32-NEXT: vsetvli zero, zero, e32, m1, ta, mu
-; ZVFHMINLMULMAX2-RV32-NEXT: vfsgnj.vv v9, v8, v9, v0.t
-; ZVFHMINLMULMAX2-RV32-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
-; ZVFHMINLMULMAX2-RV32-NEXT: vfncvt.f.f.w v8, v9
-; ZVFHMINLMULMAX2-RV32-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
-; ZVFHMINLMULMAX2-RV32-NEXT: vslidedown.vi v9, v8, 2
-; ZVFHMINLMULMAX2-RV32-NEXT: addi a1, a0, 8
-; ZVFHMINLMULMAX2-RV32-NEXT: vse32.v v9, (a1)
-; ZVFHMINLMULMAX2-RV32-NEXT: vsetivli zero, 4, e16, mf4, ta, ma
-; ZVFHMINLMULMAX2-RV32-NEXT: vse16.v v8, (a0)
-; ZVFHMINLMULMAX2-RV32-NEXT: ret
-;
-; ZVFHMINLMULMAX2-RV64-LABEL: floor_v6f16:
-; ZVFHMINLMULMAX2-RV64: # %bb.0:
-; ZVFHMINLMULMAX2-RV64-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
-; ZVFHMINLMULMAX2-RV64-NEXT: vle16.v v8, (a0)
-; ZVFHMINLMULMAX2-RV64-NEXT: vfwcvt.f.f.v v9, v8
-; ZVFHMINLMULMAX2-RV64-NEXT: vsetvli zero, zero, e32, m1, ta, ma
-; ZVFHMINLMULMAX2-RV64-NEXT: vfabs.v v8, v9
-; ZVFHMINLMULMAX2-RV64-NEXT: lui a1, 307200
-; ZVFHMINLMULMAX2-RV64-NEXT: fmv.w.x fa5, a1
-; ZVFHMINLMULMAX2-RV64-NEXT: vmflt.vf v0, v8, fa5
-; ZVFHMINLMULMAX2-RV64-NEXT: fsrmi a1, 2
-; ZVFHMINLMULMAX2-RV64-NEXT: vfcvt.x.f.v v8, v9, v0.t
-; ZVFHMINLMULMAX2-RV64-NEXT: fsrm a1
-; ZVFHMINLMULMAX2-RV64-NEXT: vfcvt.f.x.v v8, v8, v0.t
-; ZVFHMINLMULMAX2-RV64-NEXT: vsetvli zero, zero, e32, m1, ta, mu
-; ZVFHMINLMULMAX2-RV64-NEXT: vfsgnj.vv v9, v8, v9, v0.t
-; ZVFHMINLMULMAX2-RV64-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
-; ZVFHMINLMULMAX2-RV64-NEXT: vfncvt.f.f.w v8, v9
-; ZVFHMINLMULMAX2-RV64-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
-; ZVFHMINLMULMAX2-RV64-NEXT: vse64.v v8, (a0)
-; ZVFHMINLMULMAX2-RV64-NEXT: vslidedown.vi v8, v8, 2
-; ZVFHMINLMULMAX2-RV64-NEXT: addi a0, a0, 8
-; ZVFHMINLMULMAX2-RV64-NEXT: vse32.v v8, (a0)
-; ZVFHMINLMULMAX2-RV64-NEXT: ret
-;
-; ZVFHMINLMULMAX1-RV32-LABEL: floor_v6f16:
-; ZVFHMINLMULMAX1-RV32: # %bb.0:
-; ZVFHMINLMULMAX1-RV32-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
-; ZVFHMINLMULMAX1-RV32-NEXT: vle16.v v8, (a0)
-; ZVFHMINLMULMAX1-RV32-NEXT: vfwcvt.f.f.v v9, v8
-; ZVFHMINLMULMAX1-RV32-NEXT: vsetvli zero, zero, e32, m1, ta, ma
-; ZVFHMINLMULMAX1-RV32-NEXT: vfabs.v v8, v9
-; ZVFHMINLMULMAX1-RV32-NEXT: lui a1, 307200
-; ZVFHMINLMULMAX1-RV32-NEXT: fmv.w.x fa5, a1
-; ZVFHMINLMULMAX1-RV32-NEXT: vmflt.vf v0, v8, fa5
-; ZVFHMINLMULMAX1-RV32-NEXT: fsrmi a1, 2
-; ZVFHMINLMULMAX1-RV32-NEXT: vfcvt.x.f.v v8, v9, v0.t
-; ZVFHMINLMULMAX1-RV32-NEXT: fsrm a1
-; ZVFHMINLMULMAX1-RV32-NEXT: vfcvt.f.x.v v8, v8, v0.t
-; ZVFHMINLMULMAX1-RV32-NEXT: vsetvli zero, zero, e32, m1, ta, mu
-; ZVFHMINLMULMAX1-RV32-NEXT: vfsgnj.vv v9, v8, v9, v0.t
-; ZVFHMINLMULMAX1-RV32-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
-; ZVFHMINLMULMAX1-RV32-NEXT: vfncvt.f.f.w v8, v9
-; ZVFHMINLMULMAX1-RV32-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
-; ZVFHMINLMULMAX1-RV32-NEXT: vslidedown.vi v9, v8, 2
-; ZVFHMINLMULMAX1-RV32-NEXT: addi a1, a0, 8
-; ZVFHMINLMULMAX1-RV32-NEXT: vse32.v v9, (a1)
-; ZVFHMINLMULMAX1-RV32-NEXT: vsetivli zero, 4, e16, mf4, ta, ma
-; ZVFHMINLMULMAX1-RV32-NEXT: vse16.v v8, (a0)
-; ZVFHMINLMULMAX1-RV32-NEXT: ret
-;
-; ZVFHMINLMULMAX1-RV64-LABEL: floor_v6f16:
-; ZVFHMINLMULMAX1-RV64: # %bb.0:
-; ZVFHMINLMULMAX1-RV64-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
-; ZVFHMINLMULMAX1-RV64-NEXT: vle16.v v8, (a0)
-; ZVFHMINLMULMAX1-RV64-NEXT: vfwcvt.f.f.v v9, v8
-; ZVFHMINLMULMAX1-RV64-NEXT: vsetvli zero, zero, e32, m1, ta, ma
-; ZVFHMINLMULMAX1-RV64-NEXT: vfabs.v v8, v9
-; ZVFHMINLMULMAX1-RV64-NEXT: lui a1, 307200
-; ZVFHMINLMULMAX1-RV64-NEXT: fmv.w.x fa5, a1
-; ZVFHMINLMULMAX1-RV64-NEXT: vmflt.vf v0, v8, fa5
-; ZVFHMINLMULMAX1-RV64-NEXT: fsrmi a1, 2
-; ZVFHMINLMULMAX1-RV64-NEXT: vfcvt.x.f.v v8, v9, v0.t
-; ZVFHMINLMULMAX1-RV64-NEXT: fsrm a1
-; ZVFHMINLMULMAX1-RV64-NEXT: vfcvt.f.x.v v8, v8, v0.t
-; ZVFHMINLMULMAX1-RV64-NEXT: vsetvli zero, zero, e32, m1, ta, mu
-; ZVFHMINLMULMAX1-RV64-NEXT: vfsgnj.vv v9, v8, v9, v0.t
-; ZVFHMINLMULMAX1-RV64-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
-; ZVFHMINLMULMAX1-RV64-NEXT: vfncvt.f.f.w v8, v9
-; ZVFHMINLMULMAX1-RV64-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
-; ZVFHMINLMULMAX1-RV64-NEXT: vse64.v v8, (a0)
-; ZVFHMINLMULMAX1-RV64-NEXT: vslidedown.vi v8, v8, 2
-; ZVFHMINLMULMAX1-RV64-NEXT: addi a0, a0, 8
-; ZVFHMINLMULMAX1-RV64-NEXT: vse32.v v8, (a0)
-; ZVFHMINLMULMAX1-RV64-NEXT: ret
+; ZVFHMIN-RV32-LABEL: floor_v6f16:
+; ZVFHMIN-RV32: # %bb.0:
+; ZVFHMIN-RV32-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
+; ZVFHMIN-RV32-NEXT: vle16.v v8, (a0)
+; ZVFHMIN-RV32-NEXT: vfwcvt.f.f.v v9, v8
+; ZVFHMIN-RV32-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; ZVFHMIN-RV32-NEXT: vfabs.v v8, v9
+; ZVFHMIN-RV32-NEXT: lui a1, 307200
+; ZVFHMIN-RV32-NEXT: fmv.w.x fa5, a1
+; ZVFHMIN-RV32-NEXT: vmflt.vf v0, v8, fa5
+; ZVFHMIN-RV32-NEXT: fsrmi a1, 2
+; ZVFHMIN-RV32-NEXT: vfcvt.x.f.v v8, v9, v0.t
+; ZVFHMIN-RV32-NEXT: fsrm a1
+; ZVFHMIN-RV32-NEXT: vfcvt.f.x.v v8, v8, v0.t
+; ZVFHMIN-RV32-NEXT: vsetvli zero, zero, e32, m1, ta, mu
+; ZVFHMIN-RV32-NEXT: vfsgnj.vv v9, v8, v9, v0.t
+; ZVFHMIN-RV32-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
+; ZVFHMIN-RV32-NEXT: vfncvt.f.f.w v8, v9
+; ZVFHMIN-RV32-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
+; ZVFHMIN-RV32-NEXT: vslidedown.vi v9, v8, 2
+; ZVFHMIN-RV32-NEXT: addi a1, a0, 8
+; ZVFHMIN-RV32-NEXT: vse32.v v9, (a1)
+; ZVFHMIN-RV32-NEXT: vsetivli zero, 4, e16, mf4, ta, ma
+; ZVFHMIN-RV32-NEXT: vse16.v v8, (a0)
+; ZVFHMIN-RV32-NEXT: ret
+;
+; ZVFHMIN-RV64-LABEL: floor_v6f16:
+; ZVFHMIN-RV64: # %bb.0:
+; ZVFHMIN-RV64-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
+; ZVFHMIN-RV64-NEXT: vle16.v v8, (a0)
+; ZVFHMIN-RV64-NEXT: vfwcvt.f.f.v v9, v8
+; ZVFHMIN-RV64-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; ZVFHMIN-RV64-NEXT: vfabs.v v8, v9
+; ZVFHMIN-RV64-NEXT: lui a1, 307200
+; ZVFHMIN-RV64-NEXT: fmv.w.x fa5, a1
+; ZVFHMIN-RV64-NEXT: vmflt.vf v0, v8, fa5
+; ZVFHMIN-RV64-NEXT: fsrmi a1, 2
+; ZVFHMIN-RV64-NEXT: vfcvt.x.f.v v8, v9, v0.t
+; ZVFHMIN-RV64-NEXT: fsrm a1
+; ZVFHMIN-RV64-NEXT: vfcvt.f.x.v v8, v8, v0.t
+; ZVFHMIN-RV64-NEXT: vsetvli zero, zero, e32, m1, ta, mu
+; ZVFHMIN-RV64-NEXT: vfsgnj.vv v9, v8, v9, v0.t
+; ZVFHMIN-RV64-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
+; ZVFHMIN-RV64-NEXT: vfncvt.f.f.w v8, v9
+; ZVFHMIN-RV64-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
+; ZVFHMIN-RV64-NEXT: vse64.v v8, (a0)
+; ZVFHMIN-RV64-NEXT: vslidedown.vi v8, v8, 2
+; ZVFHMIN-RV64-NEXT: addi a0, a0, 8
+; ZVFHMIN-RV64-NEXT: vse32.v v8, (a0)
+; ZVFHMIN-RV64-NEXT: ret
%a = load <6 x half>, ptr %x
%b = call <6 x half> @llvm.floor.v6f16(<6 x half> %a)
store <6 x half> %b, ptr %x
@@ -6034,107 +4403,56 @@ define void @round_v6f16(ptr %x) {
; ZVFH-NEXT: vse16.v v8, (a0)
; ZVFH-NEXT: ret
;
-; ZVFHMINLMULMAX2-RV32-LABEL: round_v6f16:
-; ZVFHMINLMULMAX2-RV32: # %bb.0:
-; ZVFHMINLMULMAX2-RV32-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
-; ZVFHMINLMULMAX2-RV32-NEXT: vle16.v v8, (a0)
-; ZVFHMINLMULMAX2-RV32-NEXT: vfwcvt.f.f.v v9, v8
-; ZVFHMINLMULMAX2-RV32-NEXT: vsetvli zero, zero, e32, m1, ta, ma
-; ZVFHMINLMULMAX2-RV32-NEXT: vfabs.v v8, v9
-; ZVFHMINLMULMAX2-RV32-NEXT: lui a1, 307200
-; ZVFHMINLMULMAX2-RV32-NEXT: fmv.w.x fa5, a1
-; ZVFHMINLMULMAX2-RV32-NEXT: vmflt.vf v0, v8, fa5
-; ZVFHMINLMULMAX2-RV32-NEXT: fsrmi a1, 4
-; ZVFHMINLMULMAX2-RV32-NEXT: vfcvt.x.f.v v8, v9, v0.t
-; ZVFHMINLMULMAX2-RV32-NEXT: fsrm a1
-; ZVFHMINLMULMAX2-RV32-NEXT: vfcvt.f.x.v v8, v8, v0.t
-; ZVFHMINLMULMAX2-RV32-NEXT: vsetvli zero, zero, e32, m1, ta, mu
-; ZVFHMINLMULMAX2-RV32-NEXT: vfsgnj.vv v9, v8, v9, v0.t
-; ZVFHMINLMULMAX2-RV32-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
-; ZVFHMINLMULMAX2-RV32-NEXT: vfncvt.f.f.w v8, v9
-; ZVFHMINLMULMAX2-RV32-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
-; ZVFHMINLMULMAX2-RV32-NEXT: vslidedown.vi v9, v8, 2
-; ZVFHMINLMULMAX2-RV32-NEXT: addi a1, a0, 8
-; ZVFHMINLMULMAX2-RV32-NEXT: vse32.v v9, (a1)
-; ZVFHMINLMULMAX2-RV32-NEXT: vsetivli zero, 4, e16, mf4, ta, ma
-; ZVFHMINLMULMAX2-RV32-NEXT: vse16.v v8, (a0)
-; ZVFHMINLMULMAX2-RV32-NEXT: ret
-;
-; ZVFHMINLMULMAX2-RV64-LABEL: round_v6f16:
-; ZVFHMINLMULMAX2-RV64: # %bb.0:
-; ZVFHMINLMULMAX2-RV64-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
-; ZVFHMINLMULMAX2-RV64-NEXT: vle16.v v8, (a0)
-; ZVFHMINLMULMAX2-RV64-NEXT: vfwcvt.f.f.v v9, v8
-; ZVFHMINLMULMAX2-RV64-NEXT: vsetvli zero, zero, e32, m1, ta, ma
-; ZVFHMINLMULMAX2-RV64-NEXT: vfabs.v v8, v9
-; ZVFHMINLMULMAX2-RV64-NEXT: lui a1, 307200
-; ZVFHMINLMULMAX2-RV64-NEXT: fmv.w.x fa5, a1
-; ZVFHMINLMULMAX2-RV64-NEXT: vmflt.vf v0, v8, fa5
-; ZVFHMINLMULMAX2-RV64-NEXT: fsrmi a1, 4
-; ZVFHMINLMULMAX2-RV64-NEXT: vfcvt.x.f.v v8, v9, v0.t
-; ZVFHMINLMULMAX2-RV64-NEXT: fsrm a1
-; ZVFHMINLMULMAX2-RV64-NEXT: vfcvt.f.x.v v8, v8, v0.t
-; ZVFHMINLMULMAX2-RV64-NEXT: vsetvli zero, zero, e32, m1, ta, mu
-; ZVFHMINLMULMAX2-RV64-NEXT: vfsgnj.vv v9, v8, v9, v0.t
-; ZVFHMINLMULMAX2-RV64-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
-; ZVFHMINLMULMAX2-RV64-NEXT: vfncvt.f.f.w v8, v9
-; ZVFHMINLMULMAX2-RV64-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
-; ZVFHMINLMULMAX2-RV64-NEXT: vse64.v v8, (a0)
-; ZVFHMINLMULMAX2-RV64-NEXT: vslidedown.vi v8, v8, 2
-; ZVFHMINLMULMAX2-RV64-NEXT: addi a0, a0, 8
-; ZVFHMINLMULMAX2-RV64-NEXT: vse32.v v8, (a0)
-; ZVFHMINLMULMAX2-RV64-NEXT: ret
-;
-; ZVFHMINLMULMAX1-RV32-LABEL: round_v6f16:
-; ZVFHMINLMULMAX1-RV32: # %bb.0:
-; ZVFHMINLMULMAX1-RV32-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
-; ZVFHMINLMULMAX1-RV32-NEXT: vle16.v v8, (a0)
-; ZVFHMINLMULMAX1-RV32-NEXT: vfwcvt.f.f.v v9, v8
-; ZVFHMINLMULMAX1-RV32-NEXT: vsetvli zero, zero, e32, m1, ta, ma
-; ZVFHMINLMULMAX1-RV32-NEXT: vfabs.v v8, v9
-; ZVFHMINLMULMAX1-RV32-NEXT: lui a1, 307200
-; ZVFHMINLMULMAX1-RV32-NEXT: fmv.w.x fa5, a1
-; ZVFHMINLMULMAX1-RV32-NEXT: vmflt.vf v0, v8, fa5
-; ZVFHMINLMULMAX1-RV32-NEXT: fsrmi a1, 4
-; ZVFHMINLMULMAX1-RV32-NEXT: vfcvt.x.f.v v8, v9, v0.t
-; ZVFHMINLMULMAX1-RV32-NEXT: fsrm a1
-; ZVFHMINLMULMAX1-RV32-NEXT: vfcvt.f.x.v v8, v8, v0.t
-; ZVFHMINLMULMAX1-RV32-NEXT: vsetvli zero, zero, e32, m1, ta, mu
-; ZVFHMINLMULMAX1-RV32-NEXT: vfsgnj.vv v9, v8, v9, v0.t
-; ZVFHMINLMULMAX1-RV32-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
-; ZVFHMINLMULMAX1-RV32-NEXT: vfncvt.f.f.w v8, v9
-; ZVFHMINLMULMAX1-RV32-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
-; ZVFHMINLMULMAX1-RV32-NEXT: vslidedown.vi v9, v8, 2
-; ZVFHMINLMULMAX1-RV32-NEXT: addi a1, a0, 8
-; ZVFHMINLMULMAX1-RV32-NEXT: vse32.v v9, (a1)
-; ZVFHMINLMULMAX1-RV32-NEXT: vsetivli zero, 4, e16, mf4, ta, ma
-; ZVFHMINLMULMAX1-RV32-NEXT: vse16.v v8, (a0)
-; ZVFHMINLMULMAX1-RV32-NEXT: ret
-;
-; ZVFHMINLMULMAX1-RV64-LABEL: round_v6f16:
-; ZVFHMINLMULMAX1-RV64: # %bb.0:
-; ZVFHMINLMULMAX1-RV64-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
-; ZVFHMINLMULMAX1-RV64-NEXT: vle16.v v8, (a0)
-; ZVFHMINLMULMAX1-RV64-NEXT: vfwcvt.f.f.v v9, v8
-; ZVFHMINLMULMAX1-RV64-NEXT: vsetvli zero, zero, e32, m1, ta, ma
-; ZVFHMINLMULMAX1-RV64-NEXT: vfabs.v v8, v9
-; ZVFHMINLMULMAX1-RV64-NEXT: lui a1, 307200
-; ZVFHMINLMULMAX1-RV64-NEXT: fmv.w.x fa5, a1
-; ZVFHMINLMULMAX1-RV64-NEXT: vmflt.vf v0, v8, fa5
-; ZVFHMINLMULMAX1-RV64-NEXT: fsrmi a1, 4
-; ZVFHMINLMULMAX1-RV64-NEXT: vfcvt.x.f.v v8, v9, v0.t
-; ZVFHMINLMULMAX1-RV64-NEXT: fsrm a1
-; ZVFHMINLMULMAX1-RV64-NEXT: vfcvt.f.x.v v8, v8, v0.t
-; ZVFHMINLMULMAX1-RV64-NEXT: vsetvli zero, zero, e32, m1, ta, mu
-; ZVFHMINLMULMAX1-RV64-NEXT: vfsgnj.vv v9, v8, v9, v0.t
-; ZVFHMINLMULMAX1-RV64-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
-; ZVFHMINLMULMAX1-RV64-NEXT: vfncvt.f.f.w v8, v9
-; ZVFHMINLMULMAX1-RV64-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
-; ZVFHMINLMULMAX1-RV64-NEXT: vse64.v v8, (a0)
-; ZVFHMINLMULMAX1-RV64-NEXT: vslidedown.vi v8, v8, 2
-; ZVFHMINLMULMAX1-RV64-NEXT: addi a0, a0, 8
-; ZVFHMINLMULMAX1-RV64-NEXT: vse32.v v8, (a0)
-; ZVFHMINLMULMAX1-RV64-NEXT: ret
+; ZVFHMIN-RV32-LABEL: round_v6f16:
+; ZVFHMIN-RV32: # %bb.0:
+; ZVFHMIN-RV32-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
+; ZVFHMIN-RV32-NEXT: vle16.v v8, (a0)
+; ZVFHMIN-RV32-NEXT: vfwcvt.f.f.v v9, v8
+; ZVFHMIN-RV32-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; ZVFHMIN-RV32-NEXT: vfabs.v v8, v9
+; ZVFHMIN-RV32-NEXT: lui a1, 307200
+; ZVFHMIN-RV32-NEXT: fmv.w.x fa5, a1
+; ZVFHMIN-RV32-NEXT: vmflt.vf v0, v8, fa5
+; ZVFHMIN-RV32-NEXT: fsrmi a1, 4
+; ZVFHMIN-RV32-NEXT: vfcvt.x.f.v v8, v9, v0.t
+; ZVFHMIN-RV32-NEXT: fsrm a1
+; ZVFHMIN-RV32-NEXT: vfcvt.f.x.v v8, v8, v0.t
+; ZVFHMIN-RV32-NEXT: vsetvli zero, zero, e32, m1, ta, mu
+; ZVFHMIN-RV32-NEXT: vfsgnj.vv v9, v8, v9, v0.t
+; ZVFHMIN-RV32-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
+; ZVFHMIN-RV32-NEXT: vfncvt.f.f.w v8, v9
+; ZVFHMIN-RV32-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
+; ZVFHMIN-RV32-NEXT: vslidedown.vi v9, v8, 2
+; ZVFHMIN-RV32-NEXT: addi a1, a0, 8
+; ZVFHMIN-RV32-NEXT: vse32.v v9, (a1)
+; ZVFHMIN-RV32-NEXT: vsetivli zero, 4, e16, mf4, ta, ma
+; ZVFHMIN-RV32-NEXT: vse16.v v8, (a0)
+; ZVFHMIN-RV32-NEXT: ret
+;
+; ZVFHMIN-RV64-LABEL: round_v6f16:
+; ZVFHMIN-RV64: # %bb.0:
+; ZVFHMIN-RV64-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
+; ZVFHMIN-RV64-NEXT: vle16.v v8, (a0)
+; ZVFHMIN-RV64-NEXT: vfwcvt.f.f.v v9, v8
+; ZVFHMIN-RV64-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; ZVFHMIN-RV64-NEXT: vfabs.v v8, v9
+; ZVFHMIN-RV64-NEXT: lui a1, 307200
+; ZVFHMIN-RV64-NEXT: fmv.w.x fa5, a1
+; ZVFHMIN-RV64-NEXT: vmflt.vf v0, v8, fa5
+; ZVFHMIN-RV64-NEXT: fsrmi a1, 4
+; ZVFHMIN-RV64-NEXT: vfcvt.x.f.v v8, v9, v0.t
+; ZVFHMIN-RV64-NEXT: fsrm a1
+; ZVFHMIN-RV64-NEXT: vfcvt.f.x.v v8, v8, v0.t
+; ZVFHMIN-RV64-NEXT: vsetvli zero, zero, e32, m1, ta, mu
+; ZVFHMIN-RV64-NEXT: vfsgnj.vv v9, v8, v9, v0.t
+; ZVFHMIN-RV64-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
+; ZVFHMIN-RV64-NEXT: vfncvt.f.f.w v8, v9
+; ZVFHMIN-RV64-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
+; ZVFHMIN-RV64-NEXT: vse64.v v8, (a0)
+; ZVFHMIN-RV64-NEXT: vslidedown.vi v8, v8, 2
+; ZVFHMIN-RV64-NEXT: addi a0, a0, 8
+; ZVFHMIN-RV64-NEXT: vse32.v v8, (a0)
+; ZVFHMIN-RV64-NEXT: ret
%a = load <6 x half>, ptr %x
%b = call <6 x half> @llvm.round.v6f16(<6 x half> %a)
store <6 x half> %b, ptr %x
@@ -6470,107 +4788,56 @@ define void @fmuladd_v6f16(ptr %x, ptr %y, ptr %z) {
; ZVFH-NEXT: vse16.v v10, (a0)
; ZVFH-NEXT: ret
;
-; ZVFHMINLMULMAX2-RV32-LABEL: fmuladd_v6f16:
-; ZVFHMINLMULMAX2-RV32: # %bb.0:
-; ZVFHMINLMULMAX2-RV32-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
-; ZVFHMINLMULMAX2-RV32-NEXT: vle16.v v8, (a1)
-; ZVFHMINLMULMAX2-RV32-NEXT: vle16.v v9, (a0)
-; ZVFHMINLMULMAX2-RV32-NEXT: vle16.v v10, (a2)
-; ZVFHMINLMULMAX2-RV32-NEXT: vfwcvt.f.f.v v11, v8
-; ZVFHMINLMULMAX2-RV32-NEXT: vfwcvt.f.f.v v8, v9
-; ZVFHMINLMULMAX2-RV32-NEXT: vsetvli zero, zero, e32, m1, ta, ma
-; ZVFHMINLMULMAX2-RV32-NEXT: vfmul.vv v8, v8, v11
-; ZVFHMINLMULMAX2-RV32-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
-; ZVFHMINLMULMAX2-RV32-NEXT: vfncvt.f.f.w v9, v8
-; ZVFHMINLMULMAX2-RV32-NEXT: vfwcvt.f.f.v v8, v9
-; ZVFHMINLMULMAX2-RV32-NEXT: vfwcvt.f.f.v v9, v10
-; ZVFHMINLMULMAX2-RV32-NEXT: vsetvli zero, zero, e32, m1, ta, ma
-; ZVFHMINLMULMAX2-RV32-NEXT: vfadd.vv v8, v8, v9
-; ZVFHMINLMULMAX2-RV32-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
-; ZVFHMINLMULMAX2-RV32-NEXT: vfncvt.f.f.w v9, v8
-; ZVFHMINLMULMAX2-RV32-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
-; ZVFHMINLMULMAX2-RV32-NEXT: vslidedown.vi v8, v9, 2
-; ZVFHMINLMULMAX2-RV32-NEXT: addi a1, a0, 8
-; ZVFHMINLMULMAX2-RV32-NEXT: vse32.v v8, (a1)
-; ZVFHMINLMULMAX2-RV32-NEXT: vsetivli zero, 4, e16, mf4, ta, ma
-; ZVFHMINLMULMAX2-RV32-NEXT: vse16.v v9, (a0)
-; ZVFHMINLMULMAX2-RV32-NEXT: ret
-;
-; ZVFHMINLMULMAX2-RV64-LABEL: fmuladd_v6f16:
-; ZVFHMINLMULMAX2-RV64: # %bb.0:
-; ZVFHMINLMULMAX2-RV64-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
-; ZVFHMINLMULMAX2-RV64-NEXT: vle16.v v8, (a1)
-; ZVFHMINLMULMAX2-RV64-NEXT: vle16.v v9, (a0)
-; ZVFHMINLMULMAX2-RV64-NEXT: vle16.v v10, (a2)
-; ZVFHMINLMULMAX2-RV64-NEXT: vfwcvt.f.f.v v11, v8
-; ZVFHMINLMULMAX2-RV64-NEXT: vfwcvt.f.f.v v8, v9
-; ZVFHMINLMULMAX2-RV64-NEXT: vsetvli zero, zero, e32, m1, ta, ma
-; ZVFHMINLMULMAX2-RV64-NEXT: vfmul.vv v8, v8, v11
-; ZVFHMINLMULMAX2-RV64-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
-; ZVFHMINLMULMAX2-RV64-NEXT: vfncvt.f.f.w v9, v8
-; ZVFHMINLMULMAX2-RV64-NEXT: vfwcvt.f.f.v v8, v9
-; ZVFHMINLMULMAX2-RV64-NEXT: vfwcvt.f.f.v v9, v10
-; ZVFHMINLMULMAX2-RV64-NEXT: vsetvli zero, zero, e32, m1, ta, ma
-; ZVFHMINLMULMAX2-RV64-NEXT: vfadd.vv v8, v8, v9
-; ZVFHMINLMULMAX2-RV64-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
-; ZVFHMINLMULMAX2-RV64-NEXT: vfncvt.f.f.w v9, v8
-; ZVFHMINLMULMAX2-RV64-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
-; ZVFHMINLMULMAX2-RV64-NEXT: vse64.v v9, (a0)
-; ZVFHMINLMULMAX2-RV64-NEXT: vslidedown.vi v8, v9, 2
-; ZVFHMINLMULMAX2-RV64-NEXT: addi a0, a0, 8
-; ZVFHMINLMULMAX2-RV64-NEXT: vse32.v v8, (a0)
-; ZVFHMINLMULMAX2-RV64-NEXT: ret
-;
-; ZVFHMINLMULMAX1-RV32-LABEL: fmuladd_v6f16:
-; ZVFHMINLMULMAX1-RV32: # %bb.0:
-; ZVFHMINLMULMAX1-RV32-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
-; ZVFHMINLMULMAX1-RV32-NEXT: vle16.v v8, (a1)
-; ZVFHMINLMULMAX1-RV32-NEXT: vle16.v v9, (a0)
-; ZVFHMINLMULMAX1-RV32-NEXT: vle16.v v10, (a2)
-; ZVFHMINLMULMAX1-RV32-NEXT: vfwcvt.f.f.v v11, v8
-; ZVFHMINLMULMAX1-RV32-NEXT: vfwcvt.f.f.v v8, v9
-; ZVFHMINLMULMAX1-RV32-NEXT: vsetvli zero, zero, e32, m1, ta, ma
-; ZVFHMINLMULMAX1-RV32-NEXT: vfmul.vv v8, v8, v11
-; ZVFHMINLMULMAX1-RV32-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
-; ZVFHMINLMULMAX1-RV32-NEXT: vfncvt.f.f.w v9, v8
-; ZVFHMINLMULMAX1-RV32-NEXT: vfwcvt.f.f.v v8, v9
-; ZVFHMINLMULMAX1-RV32-NEXT: vfwcvt.f.f.v v9, v10
-; ZVFHMINLMULMAX1-RV32-NEXT: vsetvli zero, zero, e32, m1, ta, ma
-; ZVFHMINLMULMAX1-RV32-NEXT: vfadd.vv v8, v8, v9
-; ZVFHMINLMULMAX1-RV32-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
-; ZVFHMINLMULMAX1-RV32-NEXT: vfncvt.f.f.w v9, v8
-; ZVFHMINLMULMAX1-RV32-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
-; ZVFHMINLMULMAX1-RV32-NEXT: vslidedown.vi v8, v9, 2
-; ZVFHMINLMULMAX1-RV32-NEXT: addi a1, a0, 8
-; ZVFHMINLMULMAX1-RV32-NEXT: vse32.v v8, (a1)
-; ZVFHMINLMULMAX1-RV32-NEXT: vsetivli zero, 4, e16, mf4, ta, ma
-; ZVFHMINLMULMAX1-RV32-NEXT: vse16.v v9, (a0)
-; ZVFHMINLMULMAX1-RV32-NEXT: ret
-;
-; ZVFHMINLMULMAX1-RV64-LABEL: fmuladd_v6f16:
-; ZVFHMINLMULMAX1-RV64: # %bb.0:
-; ZVFHMINLMULMAX1-RV64-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
-; ZVFHMINLMULMAX1-RV64-NEXT: vle16.v v8, (a1)
-; ZVFHMINLMULMAX1-RV64-NEXT: vle16.v v9, (a0)
-; ZVFHMINLMULMAX1-RV64-NEXT: vle16.v v10, (a2)
-; ZVFHMINLMULMAX1-RV64-NEXT: vfwcvt.f.f.v v11, v8
-; ZVFHMINLMULMAX1-RV64-NEXT: vfwcvt.f.f.v v8, v9
-; ZVFHMINLMULMAX1-RV64-NEXT: vsetvli zero, zero, e32, m1, ta, ma
-; ZVFHMINLMULMAX1-RV64-NEXT: vfmul.vv v8, v8, v11
-; ZVFHMINLMULMAX1-RV64-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
-; ZVFHMINLMULMAX1-RV64-NEXT: vfncvt.f.f.w v9, v8
-; ZVFHMINLMULMAX1-RV64-NEXT: vfwcvt.f.f.v v8, v9
-; ZVFHMINLMULMAX1-RV64-NEXT: vfwcvt.f.f.v v9, v10
-; ZVFHMINLMULMAX1-RV64-NEXT: vsetvli zero, zero, e32, m1, ta, ma
-; ZVFHMINLMULMAX1-RV64-NEXT: vfadd.vv v8, v8, v9
-; ZVFHMINLMULMAX1-RV64-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
-; ZVFHMINLMULMAX1-RV64-NEXT: vfncvt.f.f.w v9, v8
-; ZVFHMINLMULMAX1-RV64-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
-; ZVFHMINLMULMAX1-RV64-NEXT: vse64.v v9, (a0)
-; ZVFHMINLMULMAX1-RV64-NEXT: vslidedown.vi v8, v9, 2
-; ZVFHMINLMULMAX1-RV64-NEXT: addi a0, a0, 8
-; ZVFHMINLMULMAX1-RV64-NEXT: vse32.v v8, (a0)
-; ZVFHMINLMULMAX1-RV64-NEXT: ret
+; ZVFHMIN-RV32-LABEL: fmuladd_v6f16:
+; ZVFHMIN-RV32: # %bb.0:
+; ZVFHMIN-RV32-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
+; ZVFHMIN-RV32-NEXT: vle16.v v8, (a1)
+; ZVFHMIN-RV32-NEXT: vle16.v v9, (a0)
+; ZVFHMIN-RV32-NEXT: vle16.v v10, (a2)
+; ZVFHMIN-RV32-NEXT: vfwcvt.f.f.v v11, v8
+; ZVFHMIN-RV32-NEXT: vfwcvt.f.f.v v8, v9
+; ZVFHMIN-RV32-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; ZVFHMIN-RV32-NEXT: vfmul.vv v8, v8, v11
+; ZVFHMIN-RV32-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
+; ZVFHMIN-RV32-NEXT: vfncvt.f.f.w v9, v8
+; ZVFHMIN-RV32-NEXT: vfwcvt.f.f.v v8, v9
+; ZVFHMIN-RV32-NEXT: vfwcvt.f.f.v v9, v10
+; ZVFHMIN-RV32-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; ZVFHMIN-RV32-NEXT: vfadd.vv v8, v8, v9
+; ZVFHMIN-RV32-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
+; ZVFHMIN-RV32-NEXT: vfncvt.f.f.w v9, v8
+; ZVFHMIN-RV32-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
+; ZVFHMIN-RV32-NEXT: vslidedown.vi v8, v9, 2
+; ZVFHMIN-RV32-NEXT: addi a1, a0, 8
+; ZVFHMIN-RV32-NEXT: vse32.v v8, (a1)
+; ZVFHMIN-RV32-NEXT: vsetivli zero, 4, e16, mf4, ta, ma
+; ZVFHMIN-RV32-NEXT: vse16.v v9, (a0)
+; ZVFHMIN-RV32-NEXT: ret
+;
+; ZVFHMIN-RV64-LABEL: fmuladd_v6f16:
+; ZVFHMIN-RV64: # %bb.0:
+; ZVFHMIN-RV64-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
+; ZVFHMIN-RV64-NEXT: vle16.v v8, (a1)
+; ZVFHMIN-RV64-NEXT: vle16.v v9, (a0)
+; ZVFHMIN-RV64-NEXT: vle16.v v10, (a2)
+; ZVFHMIN-RV64-NEXT: vfwcvt.f.f.v v11, v8
+; ZVFHMIN-RV64-NEXT: vfwcvt.f.f.v v8, v9
+; ZVFHMIN-RV64-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; ZVFHMIN-RV64-NEXT: vfmul.vv v8, v8, v11
+; ZVFHMIN-RV64-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
+; ZVFHMIN-RV64-NEXT: vfncvt.f.f.w v9, v8
+; ZVFHMIN-RV64-NEXT: vfwcvt.f.f.v v8, v9
+; ZVFHMIN-RV64-NEXT: vfwcvt.f.f.v v9, v10
+; ZVFHMIN-RV64-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; ZVFHMIN-RV64-NEXT: vfadd.vv v8, v8, v9
+; ZVFHMIN-RV64-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
+; ZVFHMIN-RV64-NEXT: vfncvt.f.f.w v9, v8
+; ZVFHMIN-RV64-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
+; ZVFHMIN-RV64-NEXT: vse64.v v9, (a0)
+; ZVFHMIN-RV64-NEXT: vslidedown.vi v8, v9, 2
+; ZVFHMIN-RV64-NEXT: addi a0, a0, 8
+; ZVFHMIN-RV64-NEXT: vse32.v v8, (a0)
+; ZVFHMIN-RV64-NEXT: ret
%a = load <6 x half>, ptr %x
%b = load <6 x half>, ptr %y
%c = load <6 x half>, ptr %z
@@ -6681,107 +4948,56 @@ define void @fmsub_fmuladd_v6f16(ptr %x, ptr %y, ptr %z) {
; ZVFH-NEXT: vse16.v v10, (a0)
; ZVFH-NEXT: ret
;
-; ZVFHMINLMULMAX2-RV32-LABEL: fmsub_fmuladd_v6f16:
-; ZVFHMINLMULMAX2-RV32: # %bb.0:
-; ZVFHMINLMULMAX2-RV32-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
-; ZVFHMINLMULMAX2-RV32-NEXT: vle16.v v8, (a1)
-; ZVFHMINLMULMAX2-RV32-NEXT: vle16.v v9, (a0)
-; ZVFHMINLMULMAX2-RV32-NEXT: vle16.v v10, (a2)
-; ZVFHMINLMULMAX2-RV32-NEXT: vfwcvt.f.f.v v11, v8
-; ZVFHMINLMULMAX2-RV32-NEXT: vfwcvt.f.f.v v8, v9
-; ZVFHMINLMULMAX2-RV32-NEXT: vsetvli zero, zero, e32, m1, ta, ma
-; ZVFHMINLMULMAX2-RV32-NEXT: vfmul.vv v8, v8, v11
-; ZVFHMINLMULMAX2-RV32-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
-; ZVFHMINLMULMAX2-RV32-NEXT: vfncvt.f.f.w v9, v8
-; ZVFHMINLMULMAX2-RV32-NEXT: vfwcvt.f.f.v v8, v9
-; ZVFHMINLMULMAX2-RV32-NEXT: vfwcvt.f.f.v v9, v10
-; ZVFHMINLMULMAX2-RV32-NEXT: vsetvli zero, zero, e32, m1, ta, ma
-; ZVFHMINLMULMAX2-RV32-NEXT: vfsub.vv v8, v8, v9
-; ZVFHMINLMULMAX2-RV32-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
-; ZVFHMINLMULMAX2-RV32-NEXT: vfncvt.f.f.w v9, v8
-; ZVFHMINLMULMAX2-RV32-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
-; ZVFHMINLMULMAX2-RV32-NEXT: vslidedown.vi v8, v9, 2
-; ZVFHMINLMULMAX2-RV32-NEXT: addi a1, a0, 8
-; ZVFHMINLMULMAX2-RV32-NEXT: vse32.v v8, (a1)
-; ZVFHMINLMULMAX2-RV32-NEXT: vsetivli zero, 4, e16, mf4, ta, ma
-; ZVFHMINLMULMAX2-RV32-NEXT: vse16.v v9, (a0)
-; ZVFHMINLMULMAX2-RV32-NEXT: ret
-;
-; ZVFHMINLMULMAX2-RV64-LABEL: fmsub_fmuladd_v6f16:
-; ZVFHMINLMULMAX2-RV64: # %bb.0:
-; ZVFHMINLMULMAX2-RV64-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
-; ZVFHMINLMULMAX2-RV64-NEXT: vle16.v v8, (a1)
-; ZVFHMINLMULMAX2-RV64-NEXT: vle16.v v9, (a0)
-; ZVFHMINLMULMAX2-RV64-NEXT: vle16.v v10, (a2)
-; ZVFHMINLMULMAX2-RV64-NEXT: vfwcvt.f.f.v v11, v8
-; ZVFHMINLMULMAX2-RV64-NEXT: vfwcvt.f.f.v v8, v9
-; ZVFHMINLMULMAX2-RV64-NEXT: vsetvli zero, zero, e32, m1, ta, ma
-; ZVFHMINLMULMAX2-RV64-NEXT: vfmul.vv v8, v8, v11
-; ZVFHMINLMULMAX2-RV64-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
-; ZVFHMINLMULMAX2-RV64-NEXT: vfncvt.f.f.w v9, v8
-; ZVFHMINLMULMAX2-RV64-NEXT: vfwcvt.f.f.v v8, v9
-; ZVFHMINLMULMAX2-RV64-NEXT: vfwcvt.f.f.v v9, v10
-; ZVFHMINLMULMAX2-RV64-NEXT: vsetvli zero, zero, e32, m1, ta, ma
-; ZVFHMINLMULMAX2-RV64-NEXT: vfsub.vv v8, v8, v9
-; ZVFHMINLMULMAX2-RV64-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
-; ZVFHMINLMULMAX2-RV64-NEXT: vfncvt.f.f.w v9, v8
-; ZVFHMINLMULMAX2-RV64-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
-; ZVFHMINLMULMAX2-RV64-NEXT: vse64.v v9, (a0)
-; ZVFHMINLMULMAX2-RV64-NEXT: vslidedown.vi v8, v9, 2
-; ZVFHMINLMULMAX2-RV64-NEXT: addi a0, a0, 8
-; ZVFHMINLMULMAX2-RV64-NEXT: vse32.v v8, (a0)
-; ZVFHMINLMULMAX2-RV64-NEXT: ret
-;
-; ZVFHMINLMULMAX1-RV32-LABEL: fmsub_fmuladd_v6f16:
-; ZVFHMINLMULMAX1-RV32: # %bb.0:
-; ZVFHMINLMULMAX1-RV32-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
-; ZVFHMINLMULMAX1-RV32-NEXT: vle16.v v8, (a1)
-; ZVFHMINLMULMAX1-RV32-NEXT: vle16.v v9, (a0)
-; ZVFHMINLMULMAX1-RV32-NEXT: vle16.v v10, (a2)
-; ZVFHMINLMULMAX1-RV32-NEXT: vfwcvt.f.f.v v11, v8
-; ZVFHMINLMULMAX1-RV32-NEXT: vfwcvt.f.f.v v8, v9
-; ZVFHMINLMULMAX1-RV32-NEXT: vsetvli zero, zero, e32, m1, ta, ma
-; ZVFHMINLMULMAX1-RV32-NEXT: vfmul.vv v8, v8, v11
-; ZVFHMINLMULMAX1-RV32-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
-; ZVFHMINLMULMAX1-RV32-NEXT: vfncvt.f.f.w v9, v8
-; ZVFHMINLMULMAX1-RV32-NEXT: vfwcvt.f.f.v v8, v9
-; ZVFHMINLMULMAX1-RV32-NEXT: vfwcvt.f.f.v v9, v10
-; ZVFHMINLMULMAX1-RV32-NEXT: vsetvli zero, zero, e32, m1, ta, ma
-; ZVFHMINLMULMAX1-RV32-NEXT: vfsub.vv v8, v8, v9
-; ZVFHMINLMULMAX1-RV32-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
-; ZVFHMINLMULMAX1-RV32-NEXT: vfncvt.f.f.w v9, v8
-; ZVFHMINLMULMAX1-RV32-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
-; ZVFHMINLMULMAX1-RV32-NEXT: vslidedown.vi v8, v9, 2
-; ZVFHMINLMULMAX1-RV32-NEXT: addi a1, a0, 8
-; ZVFHMINLMULMAX1-RV32-NEXT: vse32.v v8, (a1)
-; ZVFHMINLMULMAX1-RV32-NEXT: vsetivli zero, 4, e16, mf4, ta, ma
-; ZVFHMINLMULMAX1-RV32-NEXT: vse16.v v9, (a0)
-; ZVFHMINLMULMAX1-RV32-NEXT: ret
-;
-; ZVFHMINLMULMAX1-RV64-LABEL: fmsub_fmuladd_v6f16:
-; ZVFHMINLMULMAX1-RV64: # %bb.0:
-; ZVFHMINLMULMAX1-RV64-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
-; ZVFHMINLMULMAX1-RV64-NEXT: vle16.v v8, (a1)
-; ZVFHMINLMULMAX1-RV64-NEXT: vle16.v v9, (a0)
-; ZVFHMINLMULMAX1-RV64-NEXT: vle16.v v10, (a2)
-; ZVFHMINLMULMAX1-RV64-NEXT: vfwcvt.f.f.v v11, v8
-; ZVFHMINLMULMAX1-RV64-NEXT: vfwcvt.f.f.v v8, v9
-; ZVFHMINLMULMAX1-RV64-NEXT: vsetvli zero, zero, e32, m1, ta, ma
-; ZVFHMINLMULMAX1-RV64-NEXT: vfmul.vv v8, v8, v11
-; ZVFHMINLMULMAX1-RV64-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
-; ZVFHMINLMULMAX1-RV64-NEXT: vfncvt.f.f.w v9, v8
-; ZVFHMINLMULMAX1-RV64-NEXT: vfwcvt.f.f.v v8, v9
-; ZVFHMINLMULMAX1-RV64-NEXT: vfwcvt.f.f.v v9, v10
-; ZVFHMINLMULMAX1-RV64-NEXT: vsetvli zero, zero, e32, m1, ta, ma
-; ZVFHMINLMULMAX1-RV64-NEXT: vfsub.vv v8, v8, v9
-; ZVFHMINLMULMAX1-RV64-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
-; ZVFHMINLMULMAX1-RV64-NEXT: vfncvt.f.f.w v9, v8
-; ZVFHMINLMULMAX1-RV64-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
-; ZVFHMINLMULMAX1-RV64-NEXT: vse64.v v9, (a0)
-; ZVFHMINLMULMAX1-RV64-NEXT: vslidedown.vi v8, v9, 2
-; ZVFHMINLMULMAX1-RV64-NEXT: addi a0, a0, 8
-; ZVFHMINLMULMAX1-RV64-NEXT: vse32.v v8, (a0)
-; ZVFHMINLMULMAX1-RV64-NEXT: ret
+; ZVFHMIN-RV32-LABEL: fmsub_fmuladd_v6f16:
+; ZVFHMIN-RV32: # %bb.0:
+; ZVFHMIN-RV32-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
+; ZVFHMIN-RV32-NEXT: vle16.v v8, (a1)
+; ZVFHMIN-RV32-NEXT: vle16.v v9, (a0)
+; ZVFHMIN-RV32-NEXT: vle16.v v10, (a2)
+; ZVFHMIN-RV32-NEXT: vfwcvt.f.f.v v11, v8
+; ZVFHMIN-RV32-NEXT: vfwcvt.f.f.v v8, v9
+; ZVFHMIN-RV32-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; ZVFHMIN-RV32-NEXT: vfmul.vv v8, v8, v11
+; ZVFHMIN-RV32-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
+; ZVFHMIN-RV32-NEXT: vfncvt.f.f.w v9, v8
+; ZVFHMIN-RV32-NEXT: vfwcvt.f.f.v v8, v9
+; ZVFHMIN-RV32-NEXT: vfwcvt.f.f.v v9, v10
+; ZVFHMIN-RV32-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; ZVFHMIN-RV32-NEXT: vfsub.vv v8, v8, v9
+; ZVFHMIN-RV32-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
+; ZVFHMIN-RV32-NEXT: vfncvt.f.f.w v9, v8
+; ZVFHMIN-RV32-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
+; ZVFHMIN-RV32-NEXT: vslidedown.vi v8, v9, 2
+; ZVFHMIN-RV32-NEXT: addi a1, a0, 8
+; ZVFHMIN-RV32-NEXT: vse32.v v8, (a1)
+; ZVFHMIN-RV32-NEXT: vsetivli zero, 4, e16, mf4, ta, ma
+; ZVFHMIN-RV32-NEXT: vse16.v v9, (a0)
+; ZVFHMIN-RV32-NEXT: ret
+;
+; ZVFHMIN-RV64-LABEL: fmsub_fmuladd_v6f16:
+; ZVFHMIN-RV64: # %bb.0:
+; ZVFHMIN-RV64-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
+; ZVFHMIN-RV64-NEXT: vle16.v v8, (a1)
+; ZVFHMIN-RV64-NEXT: vle16.v v9, (a0)
+; ZVFHMIN-RV64-NEXT: vle16.v v10, (a2)
+; ZVFHMIN-RV64-NEXT: vfwcvt.f.f.v v11, v8
+; ZVFHMIN-RV64-NEXT: vfwcvt.f.f.v v8, v9
+; ZVFHMIN-RV64-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; ZVFHMIN-RV64-NEXT: vfmul.vv v8, v8, v11
+; ZVFHMIN-RV64-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
+; ZVFHMIN-RV64-NEXT: vfncvt.f.f.w v9, v8
+; ZVFHMIN-RV64-NEXT: vfwcvt.f.f.v v8, v9
+; ZVFHMIN-RV64-NEXT: vfwcvt.f.f.v v9, v10
+; ZVFHMIN-RV64-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; ZVFHMIN-RV64-NEXT: vfsub.vv v8, v8, v9
+; ZVFHMIN-RV64-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
+; ZVFHMIN-RV64-NEXT: vfncvt.f.f.w v9, v8
+; ZVFHMIN-RV64-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
+; ZVFHMIN-RV64-NEXT: vse64.v v9, (a0)
+; ZVFHMIN-RV64-NEXT: vslidedown.vi v8, v9, 2
+; ZVFHMIN-RV64-NEXT: addi a0, a0, 8
+; ZVFHMIN-RV64-NEXT: vse32.v v8, (a0)
+; ZVFHMIN-RV64-NEXT: ret
%a = load <6 x half>, ptr %x
%b = load <6 x half>, ptr %y
%c = load <6 x half>, ptr %z
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp2i-sat.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp2i-sat.ll
index ed0b15c..85b8490 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp2i-sat.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp2i-sat.ll
@@ -359,28 +359,28 @@ define void @fp2si_v8f64_v8i8(ptr %x, ptr %y) {
; RV32-NEXT: feq.d a0, fa3, fa3
; RV32-NEXT: fmax.d fa3, fa3, fa5
; RV32-NEXT: fmin.d fa3, fa3, fa4
+; RV32-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
+; RV32-NEXT: fld fa2, 40(sp)
; RV32-NEXT: fcvt.w.d a2, fa3, rtz
-; RV32-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
-; RV32-NEXT: fld fa3, 32(sp)
; RV32-NEXT: neg a0, a0
; RV32-NEXT: and a0, a0, a2
-; RV32-NEXT: vslide1down.vx v8, v10, a0
-; RV32-NEXT: feq.d a0, fa3, fa3
-; RV32-NEXT: fmax.d fa3, fa3, fa5
+; RV32-NEXT: feq.d a2, fa2, fa2
+; RV32-NEXT: fmax.d fa3, fa2, fa5
; RV32-NEXT: fmin.d fa3, fa3, fa4
-; RV32-NEXT: fcvt.w.d a2, fa3, rtz
-; RV32-NEXT: fld fa3, 40(sp)
-; RV32-NEXT: neg a0, a0
-; RV32-NEXT: and a0, a0, a2
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: feq.d a0, fa3, fa3
+; RV32-NEXT: fcvt.w.d a3, fa3, rtz
+; RV32-NEXT: fld fa3, 32(sp)
+; RV32-NEXT: vslide1down.vx v8, v10, a0
+; RV32-NEXT: neg a0, a2
+; RV32-NEXT: and a0, a0, a3
+; RV32-NEXT: feq.d a2, fa3, fa3
+; RV32-NEXT: neg a2, a2
; RV32-NEXT: fmax.d fa3, fa3, fa5
; RV32-NEXT: fmin.d fa3, fa3, fa4
-; RV32-NEXT: fcvt.w.d a2, fa3, rtz
+; RV32-NEXT: fcvt.w.d a3, fa3, rtz
; RV32-NEXT: fld fa3, 48(sp)
-; RV32-NEXT: neg a0, a0
-; RV32-NEXT: and a0, a0, a2
-; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: and a2, a2, a3
+; RV32-NEXT: vmv.v.x v9, a2
+; RV32-NEXT: vslide1down.vx v9, v9, a0
; RV32-NEXT: feq.d a0, fa3, fa3
; RV32-NEXT: fmax.d fa3, fa3, fa5
; RV32-NEXT: fmin.d fa3, fa3, fa4
@@ -388,15 +388,17 @@ define void @fp2si_v8f64_v8i8(ptr %x, ptr %y) {
; RV32-NEXT: fld fa3, 56(sp)
; RV32-NEXT: neg a0, a0
; RV32-NEXT: and a0, a0, a2
-; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: vslide1down.vx v9, v9, a0
; RV32-NEXT: feq.d a0, fa3, fa3
; RV32-NEXT: neg a0, a0
; RV32-NEXT: fmax.d fa5, fa3, fa5
; RV32-NEXT: fmin.d fa5, fa5, fa4
; RV32-NEXT: fcvt.w.d a2, fa5, rtz
; RV32-NEXT: and a0, a0, a2
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: vse8.v v8, (a1)
+; RV32-NEXT: vmv.v.i v0, 15
+; RV32-NEXT: vslide1down.vx v9, v9, a0
+; RV32-NEXT: vslidedown.vi v9, v8, 4, v0.t
+; RV32-NEXT: vse8.v v9, (a1)
; RV32-NEXT: addi sp, s0, -128
; RV32-NEXT: lw ra, 124(sp) # 4-byte Folded Reload
; RV32-NEXT: lw s0, 120(sp) # 4-byte Folded Reload
@@ -458,28 +460,28 @@ define void @fp2si_v8f64_v8i8(ptr %x, ptr %y) {
; RV64-NEXT: feq.d a0, fa3, fa3
; RV64-NEXT: fmax.d fa3, fa3, fa5
; RV64-NEXT: fmin.d fa3, fa3, fa4
+; RV64-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
+; RV64-NEXT: fld fa2, 40(sp)
; RV64-NEXT: fcvt.l.d a2, fa3, rtz
-; RV64-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
-; RV64-NEXT: fld fa3, 32(sp)
; RV64-NEXT: neg a0, a0
; RV64-NEXT: and a0, a0, a2
-; RV64-NEXT: vslide1down.vx v8, v10, a0
-; RV64-NEXT: feq.d a0, fa3, fa3
-; RV64-NEXT: fmax.d fa3, fa3, fa5
+; RV64-NEXT: feq.d a2, fa2, fa2
+; RV64-NEXT: fmax.d fa3, fa2, fa5
; RV64-NEXT: fmin.d fa3, fa3, fa4
-; RV64-NEXT: fcvt.l.d a2, fa3, rtz
-; RV64-NEXT: fld fa3, 40(sp)
-; RV64-NEXT: neg a0, a0
-; RV64-NEXT: and a0, a0, a2
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: feq.d a0, fa3, fa3
+; RV64-NEXT: fcvt.l.d a3, fa3, rtz
+; RV64-NEXT: fld fa3, 32(sp)
+; RV64-NEXT: vslide1down.vx v8, v10, a0
+; RV64-NEXT: neg a0, a2
+; RV64-NEXT: and a0, a0, a3
+; RV64-NEXT: feq.d a2, fa3, fa3
+; RV64-NEXT: negw a2, a2
; RV64-NEXT: fmax.d fa3, fa3, fa5
; RV64-NEXT: fmin.d fa3, fa3, fa4
-; RV64-NEXT: fcvt.l.d a2, fa3, rtz
+; RV64-NEXT: fcvt.l.d a3, fa3, rtz
; RV64-NEXT: fld fa3, 48(sp)
-; RV64-NEXT: neg a0, a0
-; RV64-NEXT: and a0, a0, a2
-; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: and a2, a2, a3
+; RV64-NEXT: vmv.v.x v9, a2
+; RV64-NEXT: vslide1down.vx v9, v9, a0
; RV64-NEXT: feq.d a0, fa3, fa3
; RV64-NEXT: fmax.d fa3, fa3, fa5
; RV64-NEXT: fmin.d fa3, fa3, fa4
@@ -487,15 +489,17 @@ define void @fp2si_v8f64_v8i8(ptr %x, ptr %y) {
; RV64-NEXT: fld fa3, 56(sp)
; RV64-NEXT: neg a0, a0
; RV64-NEXT: and a0, a0, a2
-; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: vslide1down.vx v9, v9, a0
; RV64-NEXT: feq.d a0, fa3, fa3
; RV64-NEXT: neg a0, a0
; RV64-NEXT: fmax.d fa5, fa3, fa5
; RV64-NEXT: fmin.d fa5, fa5, fa4
; RV64-NEXT: fcvt.l.d a2, fa5, rtz
; RV64-NEXT: and a0, a0, a2
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: vse8.v v8, (a1)
+; RV64-NEXT: vmv.v.i v0, 15
+; RV64-NEXT: vslide1down.vx v9, v9, a0
+; RV64-NEXT: vslidedown.vi v9, v8, 4, v0.t
+; RV64-NEXT: vse8.v v9, (a1)
; RV64-NEXT: addi sp, s0, -128
; RV64-NEXT: ld ra, 120(sp) # 8-byte Folded Reload
; RV64-NEXT: ld s0, 112(sp) # 8-byte Folded Reload
@@ -553,11 +557,11 @@ define void @fp2ui_v8f64_v8i8(ptr %x, ptr %y) {
; RV32-NEXT: vslidedown.vi v8, v8, 3
; RV32-NEXT: vfmv.f.s fa4, v8
; RV32-NEXT: fmax.d fa4, fa4, fa3
-; RV32-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
-; RV32-NEXT: fld fa2, 32(sp)
+; RV32-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
+; RV32-NEXT: fld fa2, 40(sp)
; RV32-NEXT: fmin.d fa4, fa4, fa5
; RV32-NEXT: fcvt.wu.d a0, fa4, rtz
-; RV32-NEXT: fld fa4, 40(sp)
+; RV32-NEXT: fld fa4, 32(sp)
; RV32-NEXT: fmax.d fa2, fa2, fa3
; RV32-NEXT: fmin.d fa2, fa2, fa5
; RV32-NEXT: fcvt.wu.d a2, fa2, rtz
@@ -570,14 +574,16 @@ define void @fp2ui_v8f64_v8i8(ptr %x, ptr %y) {
; RV32-NEXT: fmin.d fa4, fa4, fa5
; RV32-NEXT: fcvt.wu.d a0, fa4, rtz
; RV32-NEXT: fld fa4, 56(sp)
-; RV32-NEXT: vslide1down.vx v8, v8, a2
-; RV32-NEXT: vslide1down.vx v8, v8, a3
-; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: vmv.v.x v9, a3
+; RV32-NEXT: vslide1down.vx v9, v9, a2
+; RV32-NEXT: vslide1down.vx v9, v9, a0
; RV32-NEXT: fmax.d fa4, fa4, fa3
; RV32-NEXT: fmin.d fa5, fa4, fa5
; RV32-NEXT: fcvt.wu.d a0, fa5, rtz
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: vse8.v v8, (a1)
+; RV32-NEXT: vmv.v.i v0, 15
+; RV32-NEXT: vslide1down.vx v9, v9, a0
+; RV32-NEXT: vslidedown.vi v9, v8, 4, v0.t
+; RV32-NEXT: vse8.v v9, (a1)
; RV32-NEXT: addi sp, s0, -128
; RV32-NEXT: lw ra, 124(sp) # 4-byte Folded Reload
; RV32-NEXT: lw s0, 120(sp) # 4-byte Folded Reload
@@ -627,11 +633,11 @@ define void @fp2ui_v8f64_v8i8(ptr %x, ptr %y) {
; RV64-NEXT: vslidedown.vi v8, v8, 3
; RV64-NEXT: vfmv.f.s fa4, v8
; RV64-NEXT: fmax.d fa4, fa4, fa3
-; RV64-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
-; RV64-NEXT: fld fa2, 32(sp)
+; RV64-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
+; RV64-NEXT: fld fa2, 40(sp)
; RV64-NEXT: fmin.d fa4, fa4, fa5
; RV64-NEXT: fcvt.lu.d a0, fa4, rtz
-; RV64-NEXT: fld fa4, 40(sp)
+; RV64-NEXT: fld fa4, 32(sp)
; RV64-NEXT: fmax.d fa2, fa2, fa3
; RV64-NEXT: fmin.d fa2, fa2, fa5
; RV64-NEXT: fcvt.lu.d a2, fa2, rtz
@@ -644,14 +650,16 @@ define void @fp2ui_v8f64_v8i8(ptr %x, ptr %y) {
; RV64-NEXT: fmin.d fa4, fa4, fa5
; RV64-NEXT: fcvt.lu.d a0, fa4, rtz
; RV64-NEXT: fld fa4, 56(sp)
-; RV64-NEXT: vslide1down.vx v8, v8, a2
-; RV64-NEXT: vslide1down.vx v8, v8, a3
-; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: vmv.v.x v9, a3
+; RV64-NEXT: vslide1down.vx v9, v9, a2
+; RV64-NEXT: vslide1down.vx v9, v9, a0
; RV64-NEXT: fmax.d fa4, fa4, fa3
; RV64-NEXT: fmin.d fa5, fa4, fa5
; RV64-NEXT: fcvt.lu.d a0, fa5, rtz
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: vse8.v v8, (a1)
+; RV64-NEXT: vmv.v.i v0, 15
+; RV64-NEXT: vslide1down.vx v9, v9, a0
+; RV64-NEXT: vslidedown.vi v9, v8, 4, v0.t
+; RV64-NEXT: vse8.v v9, (a1)
; RV64-NEXT: addi sp, s0, -128
; RV64-NEXT: ld ra, 120(sp) # 8-byte Folded Reload
; RV64-NEXT: ld s0, 112(sp) # 8-byte Folded Reload
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp2i.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp2i.ll
index 44b96d0..dbc6562 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp2i.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp2i.ll
@@ -1,12 +1,8 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -target-abi=ilp32d -mattr=+v,+zfh,+zvfh,+f,+d -riscv-v-fixed-length-vector-lmul-max=8 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,LMULMAX8,LMULMAX8RV32,LMULMAX8RV32ZVFH
-; RUN: llc -mtriple=riscv64 -target-abi=lp64d -mattr=+v,+zfh,+zvfh,+f,+d -riscv-v-fixed-length-vector-lmul-max=8 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,LMULMAX8,LMULMAX8RV64,LMULMAX8RV64ZVFH
-; RUN: llc -mtriple=riscv32 -target-abi=ilp32d -mattr=+v,+zfh,+zvfh,+f,+d -riscv-v-fixed-length-vector-lmul-max=1 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,LMULMAX1,LMULMAX1RV32,LMULMAX1RV32ZVFH
-; RUN: llc -mtriple=riscv64 -target-abi=lp64d -mattr=+v,+zfh,+zvfh,+f,+d -riscv-v-fixed-length-vector-lmul-max=1 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,LMULMAX1,LMULMAX1RV64,LMULMAX1RV64ZVFH
-; RUN: llc -mtriple=riscv32 -target-abi=ilp32d -mattr=+v,+zfh,+zvfhmin,+f,+d -riscv-v-fixed-length-vector-lmul-max=8 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,LMULMAX8,LMULMAX8RV32,LMULMAX8RV32ZVFHMIN
-; RUN: llc -mtriple=riscv64 -target-abi=lp64d -mattr=+v,+zfh,+zvfhmin,+f,+d -riscv-v-fixed-length-vector-lmul-max=8 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,LMULMAX8,LMULMAX8RV64,LMULMAX8RV64ZVFHMIN
-; RUN: llc -mtriple=riscv32 -target-abi=ilp32d -mattr=+v,+zfh,+zvfhmin,+f,+d -riscv-v-fixed-length-vector-lmul-max=1 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,LMULMAX1,LMULMAX1RV32,LMULMAX1RV32ZVFHMIN
-; RUN: llc -mtriple=riscv64 -target-abi=lp64d -mattr=+v,+zfh,+zvfhmin,+f,+d -riscv-v-fixed-length-vector-lmul-max=1 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,LMULMAX1,LMULMAX1RV64,LMULMAX1RV64ZVFHMIN
+; RUN: llc -mtriple=riscv32 -target-abi=ilp32d -mattr=+v,+zfh,+zvfh,+f,+d -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFH,ZVFH32
+; RUN: llc -mtriple=riscv64 -target-abi=lp64d -mattr=+v,+zfh,+zvfh,+f,+d -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFH,ZVFH64
+; RUN: llc -mtriple=riscv32 -target-abi=ilp32d -mattr=+v,+zfh,+zvfhmin,+f,+d -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN,ZVFHMIN32
+; RUN: llc -mtriple=riscv64 -target-abi=lp64d -mattr=+v,+zfh,+zvfhmin,+f,+d -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN,ZVFHMIN64
define void @fp2si_v2f32_v2i32(ptr %x, ptr %y) {
; CHECK-LABEL: fp2si_v2f32_v2i32:
@@ -128,194 +124,194 @@ define <3 x i1> @fp2si_v3f32_v3i1(<3 x float> %x) {
; FIXME: This is expanded when they could be widened + promoted
define <3 x i15> @fp2si_v3f32_v3i15(<3 x float> %x) {
-; LMULMAX8RV32-LABEL: fp2si_v3f32_v3i15:
-; LMULMAX8RV32: # %bb.0:
-; LMULMAX8RV32-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
-; LMULMAX8RV32-NEXT: vfncvt.rtz.x.f.w v9, v8
-; LMULMAX8RV32-NEXT: vslidedown.vi v8, v9, 2
-; LMULMAX8RV32-NEXT: vmv.x.s a1, v8
-; LMULMAX8RV32-NEXT: slli a2, a1, 17
-; LMULMAX8RV32-NEXT: srli a2, a2, 19
-; LMULMAX8RV32-NEXT: sh a2, 4(a0)
-; LMULMAX8RV32-NEXT: vmv.x.s a2, v9
-; LMULMAX8RV32-NEXT: lui a3, 8
-; LMULMAX8RV32-NEXT: addi a3, a3, -1
-; LMULMAX8RV32-NEXT: and a2, a2, a3
-; LMULMAX8RV32-NEXT: vslidedown.vi v8, v9, 1
-; LMULMAX8RV32-NEXT: vmv.x.s a4, v8
-; LMULMAX8RV32-NEXT: and a3, a4, a3
-; LMULMAX8RV32-NEXT: slli a3, a3, 15
-; LMULMAX8RV32-NEXT: slli a1, a1, 30
-; LMULMAX8RV32-NEXT: or a1, a2, a1
-; LMULMAX8RV32-NEXT: or a1, a1, a3
-; LMULMAX8RV32-NEXT: sw a1, 0(a0)
-; LMULMAX8RV32-NEXT: ret
+; ZVFH32-LABEL: fp2si_v3f32_v3i15:
+; ZVFH32: # %bb.0:
+; ZVFH32-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
+; ZVFH32-NEXT: vfncvt.rtz.x.f.w v9, v8
+; ZVFH32-NEXT: vslidedown.vi v8, v9, 2
+; ZVFH32-NEXT: vmv.x.s a1, v8
+; ZVFH32-NEXT: slli a2, a1, 17
+; ZVFH32-NEXT: srli a2, a2, 19
+; ZVFH32-NEXT: sh a2, 4(a0)
+; ZVFH32-NEXT: vmv.x.s a2, v9
+; ZVFH32-NEXT: lui a3, 8
+; ZVFH32-NEXT: addi a3, a3, -1
+; ZVFH32-NEXT: and a2, a2, a3
+; ZVFH32-NEXT: vslidedown.vi v8, v9, 1
+; ZVFH32-NEXT: vmv.x.s a4, v8
+; ZVFH32-NEXT: and a3, a4, a3
+; ZVFH32-NEXT: slli a3, a3, 15
+; ZVFH32-NEXT: slli a1, a1, 30
+; ZVFH32-NEXT: or a1, a2, a1
+; ZVFH32-NEXT: or a1, a1, a3
+; ZVFH32-NEXT: sw a1, 0(a0)
+; ZVFH32-NEXT: ret
;
-; LMULMAX8RV64-LABEL: fp2si_v3f32_v3i15:
-; LMULMAX8RV64: # %bb.0:
-; LMULMAX8RV64-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
-; LMULMAX8RV64-NEXT: vfncvt.rtz.x.f.w v9, v8
-; LMULMAX8RV64-NEXT: vmv.x.s a1, v9
-; LMULMAX8RV64-NEXT: lui a2, 8
-; LMULMAX8RV64-NEXT: addiw a2, a2, -1
-; LMULMAX8RV64-NEXT: and a1, a1, a2
-; LMULMAX8RV64-NEXT: vslidedown.vi v8, v9, 1
-; LMULMAX8RV64-NEXT: vmv.x.s a3, v8
-; LMULMAX8RV64-NEXT: and a2, a3, a2
-; LMULMAX8RV64-NEXT: slli a2, a2, 15
-; LMULMAX8RV64-NEXT: vslidedown.vi v8, v9, 2
-; LMULMAX8RV64-NEXT: vmv.x.s a3, v8
-; LMULMAX8RV64-NEXT: slli a3, a3, 30
-; LMULMAX8RV64-NEXT: or a1, a1, a3
-; LMULMAX8RV64-NEXT: or a1, a1, a2
-; LMULMAX8RV64-NEXT: sw a1, 0(a0)
-; LMULMAX8RV64-NEXT: slli a1, a1, 19
-; LMULMAX8RV64-NEXT: srli a1, a1, 51
-; LMULMAX8RV64-NEXT: sh a1, 4(a0)
-; LMULMAX8RV64-NEXT: ret
+; ZVFH64-LABEL: fp2si_v3f32_v3i15:
+; ZVFH64: # %bb.0:
+; ZVFH64-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
+; ZVFH64-NEXT: vfncvt.rtz.x.f.w v9, v8
+; ZVFH64-NEXT: vmv.x.s a1, v9
+; ZVFH64-NEXT: lui a2, 8
+; ZVFH64-NEXT: addiw a2, a2, -1
+; ZVFH64-NEXT: and a1, a1, a2
+; ZVFH64-NEXT: vslidedown.vi v8, v9, 1
+; ZVFH64-NEXT: vmv.x.s a3, v8
+; ZVFH64-NEXT: and a2, a3, a2
+; ZVFH64-NEXT: slli a2, a2, 15
+; ZVFH64-NEXT: vslidedown.vi v8, v9, 2
+; ZVFH64-NEXT: vmv.x.s a3, v8
+; ZVFH64-NEXT: slli a3, a3, 30
+; ZVFH64-NEXT: or a1, a1, a3
+; ZVFH64-NEXT: or a1, a1, a2
+; ZVFH64-NEXT: sw a1, 0(a0)
+; ZVFH64-NEXT: slli a1, a1, 19
+; ZVFH64-NEXT: srli a1, a1, 51
+; ZVFH64-NEXT: sh a1, 4(a0)
+; ZVFH64-NEXT: ret
;
-; LMULMAX1RV32-LABEL: fp2si_v3f32_v3i15:
-; LMULMAX1RV32: # %bb.0:
-; LMULMAX1RV32-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
-; LMULMAX1RV32-NEXT: vfncvt.rtz.x.f.w v9, v8
-; LMULMAX1RV32-NEXT: vslidedown.vi v8, v9, 2
-; LMULMAX1RV32-NEXT: vmv.x.s a1, v8
-; LMULMAX1RV32-NEXT: slli a2, a1, 17
-; LMULMAX1RV32-NEXT: srli a2, a2, 19
-; LMULMAX1RV32-NEXT: sh a2, 4(a0)
-; LMULMAX1RV32-NEXT: vmv.x.s a2, v9
-; LMULMAX1RV32-NEXT: lui a3, 8
-; LMULMAX1RV32-NEXT: addi a3, a3, -1
-; LMULMAX1RV32-NEXT: and a2, a2, a3
-; LMULMAX1RV32-NEXT: vslidedown.vi v8, v9, 1
-; LMULMAX1RV32-NEXT: vmv.x.s a4, v8
-; LMULMAX1RV32-NEXT: and a3, a4, a3
-; LMULMAX1RV32-NEXT: slli a3, a3, 15
-; LMULMAX1RV32-NEXT: slli a1, a1, 30
-; LMULMAX1RV32-NEXT: or a1, a2, a1
-; LMULMAX1RV32-NEXT: or a1, a1, a3
-; LMULMAX1RV32-NEXT: sw a1, 0(a0)
-; LMULMAX1RV32-NEXT: ret
+; ZVFHMIN32-LABEL: fp2si_v3f32_v3i15:
+; ZVFHMIN32: # %bb.0:
+; ZVFHMIN32-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
+; ZVFHMIN32-NEXT: vfncvt.rtz.x.f.w v9, v8
+; ZVFHMIN32-NEXT: vslidedown.vi v8, v9, 2
+; ZVFHMIN32-NEXT: vmv.x.s a1, v8
+; ZVFHMIN32-NEXT: slli a2, a1, 17
+; ZVFHMIN32-NEXT: srli a2, a2, 19
+; ZVFHMIN32-NEXT: sh a2, 4(a0)
+; ZVFHMIN32-NEXT: vmv.x.s a2, v9
+; ZVFHMIN32-NEXT: lui a3, 8
+; ZVFHMIN32-NEXT: addi a3, a3, -1
+; ZVFHMIN32-NEXT: and a2, a2, a3
+; ZVFHMIN32-NEXT: vslidedown.vi v8, v9, 1
+; ZVFHMIN32-NEXT: vmv.x.s a4, v8
+; ZVFHMIN32-NEXT: and a3, a4, a3
+; ZVFHMIN32-NEXT: slli a3, a3, 15
+; ZVFHMIN32-NEXT: slli a1, a1, 30
+; ZVFHMIN32-NEXT: or a1, a2, a1
+; ZVFHMIN32-NEXT: or a1, a1, a3
+; ZVFHMIN32-NEXT: sw a1, 0(a0)
+; ZVFHMIN32-NEXT: ret
;
-; LMULMAX1RV64-LABEL: fp2si_v3f32_v3i15:
-; LMULMAX1RV64: # %bb.0:
-; LMULMAX1RV64-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
-; LMULMAX1RV64-NEXT: vfncvt.rtz.x.f.w v9, v8
-; LMULMAX1RV64-NEXT: vmv.x.s a1, v9
-; LMULMAX1RV64-NEXT: lui a2, 8
-; LMULMAX1RV64-NEXT: addiw a2, a2, -1
-; LMULMAX1RV64-NEXT: and a1, a1, a2
-; LMULMAX1RV64-NEXT: vslidedown.vi v8, v9, 1
-; LMULMAX1RV64-NEXT: vmv.x.s a3, v8
-; LMULMAX1RV64-NEXT: and a2, a3, a2
-; LMULMAX1RV64-NEXT: slli a2, a2, 15
-; LMULMAX1RV64-NEXT: vslidedown.vi v8, v9, 2
-; LMULMAX1RV64-NEXT: vmv.x.s a3, v8
-; LMULMAX1RV64-NEXT: slli a3, a3, 30
-; LMULMAX1RV64-NEXT: or a1, a1, a3
-; LMULMAX1RV64-NEXT: or a1, a1, a2
-; LMULMAX1RV64-NEXT: sw a1, 0(a0)
-; LMULMAX1RV64-NEXT: slli a1, a1, 19
-; LMULMAX1RV64-NEXT: srli a1, a1, 51
-; LMULMAX1RV64-NEXT: sh a1, 4(a0)
-; LMULMAX1RV64-NEXT: ret
+; ZVFHMIN64-LABEL: fp2si_v3f32_v3i15:
+; ZVFHMIN64: # %bb.0:
+; ZVFHMIN64-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
+; ZVFHMIN64-NEXT: vfncvt.rtz.x.f.w v9, v8
+; ZVFHMIN64-NEXT: vmv.x.s a1, v9
+; ZVFHMIN64-NEXT: lui a2, 8
+; ZVFHMIN64-NEXT: addiw a2, a2, -1
+; ZVFHMIN64-NEXT: and a1, a1, a2
+; ZVFHMIN64-NEXT: vslidedown.vi v8, v9, 1
+; ZVFHMIN64-NEXT: vmv.x.s a3, v8
+; ZVFHMIN64-NEXT: and a2, a3, a2
+; ZVFHMIN64-NEXT: slli a2, a2, 15
+; ZVFHMIN64-NEXT: vslidedown.vi v8, v9, 2
+; ZVFHMIN64-NEXT: vmv.x.s a3, v8
+; ZVFHMIN64-NEXT: slli a3, a3, 30
+; ZVFHMIN64-NEXT: or a1, a1, a3
+; ZVFHMIN64-NEXT: or a1, a1, a2
+; ZVFHMIN64-NEXT: sw a1, 0(a0)
+; ZVFHMIN64-NEXT: slli a1, a1, 19
+; ZVFHMIN64-NEXT: srli a1, a1, 51
+; ZVFHMIN64-NEXT: sh a1, 4(a0)
+; ZVFHMIN64-NEXT: ret
%z = fptosi <3 x float> %x to <3 x i15>
ret <3 x i15> %z
}
; FIXME: This is expanded when they could be widened + promoted
define <3 x i15> @fp2ui_v3f32_v3i15(<3 x float> %x) {
-; LMULMAX8RV32-LABEL: fp2ui_v3f32_v3i15:
-; LMULMAX8RV32: # %bb.0:
-; LMULMAX8RV32-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
-; LMULMAX8RV32-NEXT: vfncvt.rtz.x.f.w v9, v8
-; LMULMAX8RV32-NEXT: vslidedown.vi v8, v9, 2
-; LMULMAX8RV32-NEXT: vmv.x.s a1, v8
-; LMULMAX8RV32-NEXT: slli a2, a1, 17
-; LMULMAX8RV32-NEXT: srli a2, a2, 19
-; LMULMAX8RV32-NEXT: sh a2, 4(a0)
-; LMULMAX8RV32-NEXT: vmv.x.s a2, v9
-; LMULMAX8RV32-NEXT: lui a3, 16
-; LMULMAX8RV32-NEXT: addi a3, a3, -1
-; LMULMAX8RV32-NEXT: and a2, a2, a3
-; LMULMAX8RV32-NEXT: vslidedown.vi v8, v9, 1
-; LMULMAX8RV32-NEXT: vmv.x.s a4, v8
-; LMULMAX8RV32-NEXT: and a3, a4, a3
-; LMULMAX8RV32-NEXT: slli a3, a3, 15
-; LMULMAX8RV32-NEXT: slli a1, a1, 30
-; LMULMAX8RV32-NEXT: or a1, a2, a1
-; LMULMAX8RV32-NEXT: or a1, a1, a3
-; LMULMAX8RV32-NEXT: sw a1, 0(a0)
-; LMULMAX8RV32-NEXT: ret
+; ZVFH32-LABEL: fp2ui_v3f32_v3i15:
+; ZVFH32: # %bb.0:
+; ZVFH32-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
+; ZVFH32-NEXT: vfncvt.rtz.x.f.w v9, v8
+; ZVFH32-NEXT: vslidedown.vi v8, v9, 2
+; ZVFH32-NEXT: vmv.x.s a1, v8
+; ZVFH32-NEXT: slli a2, a1, 17
+; ZVFH32-NEXT: srli a2, a2, 19
+; ZVFH32-NEXT: sh a2, 4(a0)
+; ZVFH32-NEXT: vmv.x.s a2, v9
+; ZVFH32-NEXT: lui a3, 16
+; ZVFH32-NEXT: addi a3, a3, -1
+; ZVFH32-NEXT: and a2, a2, a3
+; ZVFH32-NEXT: vslidedown.vi v8, v9, 1
+; ZVFH32-NEXT: vmv.x.s a4, v8
+; ZVFH32-NEXT: and a3, a4, a3
+; ZVFH32-NEXT: slli a3, a3, 15
+; ZVFH32-NEXT: slli a1, a1, 30
+; ZVFH32-NEXT: or a1, a2, a1
+; ZVFH32-NEXT: or a1, a1, a3
+; ZVFH32-NEXT: sw a1, 0(a0)
+; ZVFH32-NEXT: ret
;
-; LMULMAX8RV64-LABEL: fp2ui_v3f32_v3i15:
-; LMULMAX8RV64: # %bb.0:
-; LMULMAX8RV64-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
-; LMULMAX8RV64-NEXT: vfncvt.rtz.x.f.w v9, v8
-; LMULMAX8RV64-NEXT: vmv.x.s a1, v9
-; LMULMAX8RV64-NEXT: lui a2, 16
-; LMULMAX8RV64-NEXT: addiw a2, a2, -1
-; LMULMAX8RV64-NEXT: and a1, a1, a2
-; LMULMAX8RV64-NEXT: vslidedown.vi v8, v9, 1
-; LMULMAX8RV64-NEXT: vmv.x.s a3, v8
-; LMULMAX8RV64-NEXT: and a2, a3, a2
-; LMULMAX8RV64-NEXT: slli a2, a2, 15
-; LMULMAX8RV64-NEXT: vslidedown.vi v8, v9, 2
-; LMULMAX8RV64-NEXT: vmv.x.s a3, v8
-; LMULMAX8RV64-NEXT: slli a3, a3, 30
-; LMULMAX8RV64-NEXT: or a1, a1, a3
-; LMULMAX8RV64-NEXT: or a1, a1, a2
-; LMULMAX8RV64-NEXT: sw a1, 0(a0)
-; LMULMAX8RV64-NEXT: slli a1, a1, 19
-; LMULMAX8RV64-NEXT: srli a1, a1, 51
-; LMULMAX8RV64-NEXT: sh a1, 4(a0)
-; LMULMAX8RV64-NEXT: ret
+; ZVFH64-LABEL: fp2ui_v3f32_v3i15:
+; ZVFH64: # %bb.0:
+; ZVFH64-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
+; ZVFH64-NEXT: vfncvt.rtz.x.f.w v9, v8
+; ZVFH64-NEXT: vmv.x.s a1, v9
+; ZVFH64-NEXT: lui a2, 16
+; ZVFH64-NEXT: addiw a2, a2, -1
+; ZVFH64-NEXT: and a1, a1, a2
+; ZVFH64-NEXT: vslidedown.vi v8, v9, 1
+; ZVFH64-NEXT: vmv.x.s a3, v8
+; ZVFH64-NEXT: and a2, a3, a2
+; ZVFH64-NEXT: slli a2, a2, 15
+; ZVFH64-NEXT: vslidedown.vi v8, v9, 2
+; ZVFH64-NEXT: vmv.x.s a3, v8
+; ZVFH64-NEXT: slli a3, a3, 30
+; ZVFH64-NEXT: or a1, a1, a3
+; ZVFH64-NEXT: or a1, a1, a2
+; ZVFH64-NEXT: sw a1, 0(a0)
+; ZVFH64-NEXT: slli a1, a1, 19
+; ZVFH64-NEXT: srli a1, a1, 51
+; ZVFH64-NEXT: sh a1, 4(a0)
+; ZVFH64-NEXT: ret
;
-; LMULMAX1RV32-LABEL: fp2ui_v3f32_v3i15:
-; LMULMAX1RV32: # %bb.0:
-; LMULMAX1RV32-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
-; LMULMAX1RV32-NEXT: vfncvt.rtz.x.f.w v9, v8
-; LMULMAX1RV32-NEXT: vslidedown.vi v8, v9, 2
-; LMULMAX1RV32-NEXT: vmv.x.s a1, v8
-; LMULMAX1RV32-NEXT: slli a2, a1, 17
-; LMULMAX1RV32-NEXT: srli a2, a2, 19
-; LMULMAX1RV32-NEXT: sh a2, 4(a0)
-; LMULMAX1RV32-NEXT: vmv.x.s a2, v9
-; LMULMAX1RV32-NEXT: lui a3, 16
-; LMULMAX1RV32-NEXT: addi a3, a3, -1
-; LMULMAX1RV32-NEXT: and a2, a2, a3
-; LMULMAX1RV32-NEXT: vslidedown.vi v8, v9, 1
-; LMULMAX1RV32-NEXT: vmv.x.s a4, v8
-; LMULMAX1RV32-NEXT: and a3, a4, a3
-; LMULMAX1RV32-NEXT: slli a3, a3, 15
-; LMULMAX1RV32-NEXT: slli a1, a1, 30
-; LMULMAX1RV32-NEXT: or a1, a2, a1
-; LMULMAX1RV32-NEXT: or a1, a1, a3
-; LMULMAX1RV32-NEXT: sw a1, 0(a0)
-; LMULMAX1RV32-NEXT: ret
+; ZVFHMIN32-LABEL: fp2ui_v3f32_v3i15:
+; ZVFHMIN32: # %bb.0:
+; ZVFHMIN32-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
+; ZVFHMIN32-NEXT: vfncvt.rtz.x.f.w v9, v8
+; ZVFHMIN32-NEXT: vslidedown.vi v8, v9, 2
+; ZVFHMIN32-NEXT: vmv.x.s a1, v8
+; ZVFHMIN32-NEXT: slli a2, a1, 17
+; ZVFHMIN32-NEXT: srli a2, a2, 19
+; ZVFHMIN32-NEXT: sh a2, 4(a0)
+; ZVFHMIN32-NEXT: vmv.x.s a2, v9
+; ZVFHMIN32-NEXT: lui a3, 16
+; ZVFHMIN32-NEXT: addi a3, a3, -1
+; ZVFHMIN32-NEXT: and a2, a2, a3
+; ZVFHMIN32-NEXT: vslidedown.vi v8, v9, 1
+; ZVFHMIN32-NEXT: vmv.x.s a4, v8
+; ZVFHMIN32-NEXT: and a3, a4, a3
+; ZVFHMIN32-NEXT: slli a3, a3, 15
+; ZVFHMIN32-NEXT: slli a1, a1, 30
+; ZVFHMIN32-NEXT: or a1, a2, a1
+; ZVFHMIN32-NEXT: or a1, a1, a3
+; ZVFHMIN32-NEXT: sw a1, 0(a0)
+; ZVFHMIN32-NEXT: ret
;
-; LMULMAX1RV64-LABEL: fp2ui_v3f32_v3i15:
-; LMULMAX1RV64: # %bb.0:
-; LMULMAX1RV64-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
-; LMULMAX1RV64-NEXT: vfncvt.rtz.x.f.w v9, v8
-; LMULMAX1RV64-NEXT: vmv.x.s a1, v9
-; LMULMAX1RV64-NEXT: lui a2, 16
-; LMULMAX1RV64-NEXT: addiw a2, a2, -1
-; LMULMAX1RV64-NEXT: and a1, a1, a2
-; LMULMAX1RV64-NEXT: vslidedown.vi v8, v9, 1
-; LMULMAX1RV64-NEXT: vmv.x.s a3, v8
-; LMULMAX1RV64-NEXT: and a2, a3, a2
-; LMULMAX1RV64-NEXT: slli a2, a2, 15
-; LMULMAX1RV64-NEXT: vslidedown.vi v8, v9, 2
-; LMULMAX1RV64-NEXT: vmv.x.s a3, v8
-; LMULMAX1RV64-NEXT: slli a3, a3, 30
-; LMULMAX1RV64-NEXT: or a1, a1, a3
-; LMULMAX1RV64-NEXT: or a1, a1, a2
-; LMULMAX1RV64-NEXT: sw a1, 0(a0)
-; LMULMAX1RV64-NEXT: slli a1, a1, 19
-; LMULMAX1RV64-NEXT: srli a1, a1, 51
-; LMULMAX1RV64-NEXT: sh a1, 4(a0)
-; LMULMAX1RV64-NEXT: ret
+; ZVFHMIN64-LABEL: fp2ui_v3f32_v3i15:
+; ZVFHMIN64: # %bb.0:
+; ZVFHMIN64-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
+; ZVFHMIN64-NEXT: vfncvt.rtz.x.f.w v9, v8
+; ZVFHMIN64-NEXT: vmv.x.s a1, v9
+; ZVFHMIN64-NEXT: lui a2, 16
+; ZVFHMIN64-NEXT: addiw a2, a2, -1
+; ZVFHMIN64-NEXT: and a1, a1, a2
+; ZVFHMIN64-NEXT: vslidedown.vi v8, v9, 1
+; ZVFHMIN64-NEXT: vmv.x.s a3, v8
+; ZVFHMIN64-NEXT: and a2, a3, a2
+; ZVFHMIN64-NEXT: slli a2, a2, 15
+; ZVFHMIN64-NEXT: vslidedown.vi v8, v9, 2
+; ZVFHMIN64-NEXT: vmv.x.s a3, v8
+; ZVFHMIN64-NEXT: slli a3, a3, 30
+; ZVFHMIN64-NEXT: or a1, a1, a3
+; ZVFHMIN64-NEXT: or a1, a1, a2
+; ZVFHMIN64-NEXT: sw a1, 0(a0)
+; ZVFHMIN64-NEXT: slli a1, a1, 19
+; ZVFHMIN64-NEXT: srli a1, a1, 51
+; ZVFHMIN64-NEXT: sh a1, 4(a0)
+; ZVFHMIN64-NEXT: ret
%z = fptoui <3 x float> %x to <3 x i15>
ret <3 x i15> %z
}
@@ -333,26 +329,13 @@ define <3 x i1> @fp2ui_v3f32_v3i1(<3 x float> %x) {
}
define void @fp2si_v8f32_v8i32(ptr %x, ptr %y) {
-; LMULMAX8-LABEL: fp2si_v8f32_v8i32:
-; LMULMAX8: # %bb.0:
-; LMULMAX8-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; LMULMAX8-NEXT: vle32.v v8, (a0)
-; LMULMAX8-NEXT: vfcvt.rtz.x.f.v v8, v8
-; LMULMAX8-NEXT: vse32.v v8, (a1)
-; LMULMAX8-NEXT: ret
-;
-; LMULMAX1-LABEL: fp2si_v8f32_v8i32:
-; LMULMAX1: # %bb.0:
-; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; LMULMAX1-NEXT: addi a2, a0, 16
-; LMULMAX1-NEXT: vle32.v v8, (a2)
-; LMULMAX1-NEXT: vle32.v v9, (a0)
-; LMULMAX1-NEXT: vfcvt.rtz.x.f.v v8, v8
-; LMULMAX1-NEXT: vfcvt.rtz.x.f.v v9, v9
-; LMULMAX1-NEXT: vse32.v v9, (a1)
-; LMULMAX1-NEXT: addi a1, a1, 16
-; LMULMAX1-NEXT: vse32.v v8, (a1)
-; LMULMAX1-NEXT: ret
+; CHECK-LABEL: fp2si_v8f32_v8i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; CHECK-NEXT: vle32.v v8, (a0)
+; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v8
+; CHECK-NEXT: vse32.v v8, (a1)
+; CHECK-NEXT: ret
%a = load <8 x float>, ptr %x
%d = fptosi <8 x float> %a to <8 x i32>
store <8 x i32> %d, ptr %y
@@ -360,26 +343,13 @@ define void @fp2si_v8f32_v8i32(ptr %x, ptr %y) {
}
define void @fp2ui_v8f32_v8i32(ptr %x, ptr %y) {
-; LMULMAX8-LABEL: fp2ui_v8f32_v8i32:
-; LMULMAX8: # %bb.0:
-; LMULMAX8-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; LMULMAX8-NEXT: vle32.v v8, (a0)
-; LMULMAX8-NEXT: vfcvt.rtz.xu.f.v v8, v8
-; LMULMAX8-NEXT: vse32.v v8, (a1)
-; LMULMAX8-NEXT: ret
-;
-; LMULMAX1-LABEL: fp2ui_v8f32_v8i32:
-; LMULMAX1: # %bb.0:
-; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; LMULMAX1-NEXT: addi a2, a0, 16
-; LMULMAX1-NEXT: vle32.v v8, (a2)
-; LMULMAX1-NEXT: vle32.v v9, (a0)
-; LMULMAX1-NEXT: vfcvt.rtz.xu.f.v v8, v8
-; LMULMAX1-NEXT: vfcvt.rtz.xu.f.v v9, v9
-; LMULMAX1-NEXT: vse32.v v9, (a1)
-; LMULMAX1-NEXT: addi a1, a1, 16
-; LMULMAX1-NEXT: vse32.v v8, (a1)
-; LMULMAX1-NEXT: ret
+; CHECK-LABEL: fp2ui_v8f32_v8i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; CHECK-NEXT: vle32.v v8, (a0)
+; CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v8
+; CHECK-NEXT: vse32.v v8, (a1)
+; CHECK-NEXT: ret
%a = load <8 x float>, ptr %x
%d = fptoui <8 x float> %a to <8 x i32>
store <8 x i32> %d, ptr %y
@@ -387,67 +357,25 @@ define void @fp2ui_v8f32_v8i32(ptr %x, ptr %y) {
}
define <8 x i1> @fp2si_v8f32_v8i1(<8 x float> %x) {
-; LMULMAX8-LABEL: fp2si_v8f32_v8i1:
-; LMULMAX8: # %bb.0:
-; LMULMAX8-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; LMULMAX8-NEXT: vfncvt.rtz.x.f.w v10, v8
-; LMULMAX8-NEXT: vand.vi v8, v10, 1
-; LMULMAX8-NEXT: vmsne.vi v0, v8, 0
-; LMULMAX8-NEXT: ret
-;
-; LMULMAX1-LABEL: fp2si_v8f32_v8i1:
-; LMULMAX1: # %bb.0:
-; LMULMAX1-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
-; LMULMAX1-NEXT: vfncvt.rtz.x.f.w v10, v8
-; LMULMAX1-NEXT: vand.vi v8, v10, 1
-; LMULMAX1-NEXT: vmsne.vi v0, v8, 0
-; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
-; LMULMAX1-NEXT: vmv.v.i v8, 0
-; LMULMAX1-NEXT: vmerge.vim v8, v8, 1, v0
-; LMULMAX1-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
-; LMULMAX1-NEXT: vfncvt.rtz.x.f.w v10, v9
-; LMULMAX1-NEXT: vand.vi v9, v10, 1
-; LMULMAX1-NEXT: vmsne.vi v0, v9, 0
-; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf4, ta, ma
-; LMULMAX1-NEXT: vmv.v.i v9, 0
-; LMULMAX1-NEXT: vmerge.vim v9, v9, 1, v0
-; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
-; LMULMAX1-NEXT: vslideup.vi v8, v9, 4
-; LMULMAX1-NEXT: vmsne.vi v0, v8, 0
-; LMULMAX1-NEXT: ret
+; CHECK-LABEL: fp2si_v8f32_v8i1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT: vfncvt.rtz.x.f.w v10, v8
+; CHECK-NEXT: vand.vi v8, v10, 1
+; CHECK-NEXT: vmsne.vi v0, v8, 0
+; CHECK-NEXT: ret
%z = fptosi <8 x float> %x to <8 x i1>
ret <8 x i1> %z
}
define <8 x i1> @fp2ui_v8f32_v8i1(<8 x float> %x) {
-; LMULMAX8-LABEL: fp2ui_v8f32_v8i1:
-; LMULMAX8: # %bb.0:
-; LMULMAX8-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; LMULMAX8-NEXT: vfncvt.rtz.xu.f.w v10, v8
-; LMULMAX8-NEXT: vand.vi v8, v10, 1
-; LMULMAX8-NEXT: vmsne.vi v0, v8, 0
-; LMULMAX8-NEXT: ret
-;
-; LMULMAX1-LABEL: fp2ui_v8f32_v8i1:
-; LMULMAX1: # %bb.0:
-; LMULMAX1-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
-; LMULMAX1-NEXT: vfncvt.rtz.xu.f.w v10, v8
-; LMULMAX1-NEXT: vand.vi v8, v10, 1
-; LMULMAX1-NEXT: vmsne.vi v0, v8, 0
-; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
-; LMULMAX1-NEXT: vmv.v.i v8, 0
-; LMULMAX1-NEXT: vmerge.vim v8, v8, 1, v0
-; LMULMAX1-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
-; LMULMAX1-NEXT: vfncvt.rtz.xu.f.w v10, v9
-; LMULMAX1-NEXT: vand.vi v9, v10, 1
-; LMULMAX1-NEXT: vmsne.vi v0, v9, 0
-; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf4, ta, ma
-; LMULMAX1-NEXT: vmv.v.i v9, 0
-; LMULMAX1-NEXT: vmerge.vim v9, v9, 1, v0
-; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
-; LMULMAX1-NEXT: vslideup.vi v8, v9, 4
-; LMULMAX1-NEXT: vmsne.vi v0, v8, 0
-; LMULMAX1-NEXT: ret
+; CHECK-LABEL: fp2ui_v8f32_v8i1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT: vfncvt.rtz.xu.f.w v10, v8
+; CHECK-NEXT: vand.vi v8, v10, 1
+; CHECK-NEXT: vmsne.vi v0, v8, 0
+; CHECK-NEXT: ret
%z = fptoui <8 x float> %x to <8 x i1>
ret <8 x i1> %z
}
@@ -481,39 +409,13 @@ define void @fp2ui_v2f32_v2i64(ptr %x, ptr %y) {
}
define void @fp2si_v8f32_v8i64(ptr %x, ptr %y) {
-; LMULMAX8-LABEL: fp2si_v8f32_v8i64:
-; LMULMAX8: # %bb.0:
-; LMULMAX8-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; LMULMAX8-NEXT: vle32.v v8, (a0)
-; LMULMAX8-NEXT: vfwcvt.rtz.x.f.v v12, v8
-; LMULMAX8-NEXT: vse64.v v12, (a1)
-; LMULMAX8-NEXT: ret
-;
-; LMULMAX1-LABEL: fp2si_v8f32_v8i64:
-; LMULMAX1: # %bb.0:
-; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; LMULMAX1-NEXT: addi a2, a0, 16
-; LMULMAX1-NEXT: vle32.v v8, (a2)
-; LMULMAX1-NEXT: vle32.v v9, (a0)
-; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
-; LMULMAX1-NEXT: vfwcvt.rtz.x.f.v v10, v8
-; LMULMAX1-NEXT: vfwcvt.rtz.x.f.v v11, v9
-; LMULMAX1-NEXT: vsetivli zero, 2, e32, m1, ta, ma
-; LMULMAX1-NEXT: vslidedown.vi v8, v8, 2
-; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
-; LMULMAX1-NEXT: vfwcvt.rtz.x.f.v v12, v8
-; LMULMAX1-NEXT: vsetivli zero, 2, e32, m1, ta, ma
-; LMULMAX1-NEXT: vslidedown.vi v8, v9, 2
-; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
-; LMULMAX1-NEXT: vfwcvt.rtz.x.f.v v9, v8
-; LMULMAX1-NEXT: addi a0, a1, 16
-; LMULMAX1-NEXT: vse64.v v9, (a0)
-; LMULMAX1-NEXT: addi a0, a1, 48
-; LMULMAX1-NEXT: vse64.v v12, (a0)
-; LMULMAX1-NEXT: vse64.v v11, (a1)
-; LMULMAX1-NEXT: addi a0, a1, 32
-; LMULMAX1-NEXT: vse64.v v10, (a0)
-; LMULMAX1-NEXT: ret
+; CHECK-LABEL: fp2si_v8f32_v8i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; CHECK-NEXT: vle32.v v8, (a0)
+; CHECK-NEXT: vfwcvt.rtz.x.f.v v12, v8
+; CHECK-NEXT: vse64.v v12, (a1)
+; CHECK-NEXT: ret
%a = load <8 x float>, ptr %x
%d = fptosi <8 x float> %a to <8 x i64>
store <8 x i64> %d, ptr %y
@@ -521,39 +423,13 @@ define void @fp2si_v8f32_v8i64(ptr %x, ptr %y) {
}
define void @fp2ui_v8f32_v8i64(ptr %x, ptr %y) {
-; LMULMAX8-LABEL: fp2ui_v8f32_v8i64:
-; LMULMAX8: # %bb.0:
-; LMULMAX8-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; LMULMAX8-NEXT: vle32.v v8, (a0)
-; LMULMAX8-NEXT: vfwcvt.rtz.xu.f.v v12, v8
-; LMULMAX8-NEXT: vse64.v v12, (a1)
-; LMULMAX8-NEXT: ret
-;
-; LMULMAX1-LABEL: fp2ui_v8f32_v8i64:
-; LMULMAX1: # %bb.0:
-; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; LMULMAX1-NEXT: addi a2, a0, 16
-; LMULMAX1-NEXT: vle32.v v8, (a2)
-; LMULMAX1-NEXT: vle32.v v9, (a0)
-; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
-; LMULMAX1-NEXT: vfwcvt.rtz.xu.f.v v10, v8
-; LMULMAX1-NEXT: vfwcvt.rtz.xu.f.v v11, v9
-; LMULMAX1-NEXT: vsetivli zero, 2, e32, m1, ta, ma
-; LMULMAX1-NEXT: vslidedown.vi v8, v8, 2
-; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
-; LMULMAX1-NEXT: vfwcvt.rtz.xu.f.v v12, v8
-; LMULMAX1-NEXT: vsetivli zero, 2, e32, m1, ta, ma
-; LMULMAX1-NEXT: vslidedown.vi v8, v9, 2
-; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
-; LMULMAX1-NEXT: vfwcvt.rtz.xu.f.v v9, v8
-; LMULMAX1-NEXT: addi a0, a1, 16
-; LMULMAX1-NEXT: vse64.v v9, (a0)
-; LMULMAX1-NEXT: addi a0, a1, 48
-; LMULMAX1-NEXT: vse64.v v12, (a0)
-; LMULMAX1-NEXT: vse64.v v11, (a1)
-; LMULMAX1-NEXT: addi a0, a1, 32
-; LMULMAX1-NEXT: vse64.v v10, (a0)
-; LMULMAX1-NEXT: ret
+; CHECK-LABEL: fp2ui_v8f32_v8i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; CHECK-NEXT: vle32.v v8, (a0)
+; CHECK-NEXT: vfwcvt.rtz.xu.f.v v12, v8
+; CHECK-NEXT: vse64.v v12, (a1)
+; CHECK-NEXT: ret
%a = load <8 x float>, ptr %x
%d = fptoui <8 x float> %a to <8 x i64>
store <8 x i64> %d, ptr %y
@@ -593,145 +469,43 @@ define void @fp2ui_v2f16_v2i64(ptr %x, ptr %y) {
}
define <2 x i1> @fp2si_v2f16_v2i1(<2 x half> %x) {
-; LMULMAX8RV32ZVFH-LABEL: fp2si_v2f16_v2i1:
-; LMULMAX8RV32ZVFH: # %bb.0:
-; LMULMAX8RV32ZVFH-NEXT: vsetivli zero, 2, e8, mf8, ta, ma
-; LMULMAX8RV32ZVFH-NEXT: vfncvt.rtz.x.f.w v9, v8
-; LMULMAX8RV32ZVFH-NEXT: vand.vi v8, v9, 1
-; LMULMAX8RV32ZVFH-NEXT: vmsne.vi v0, v8, 0
-; LMULMAX8RV32ZVFH-NEXT: ret
-;
-; LMULMAX8RV64ZVFH-LABEL: fp2si_v2f16_v2i1:
-; LMULMAX8RV64ZVFH: # %bb.0:
-; LMULMAX8RV64ZVFH-NEXT: vsetivli zero, 2, e8, mf8, ta, ma
-; LMULMAX8RV64ZVFH-NEXT: vfncvt.rtz.x.f.w v9, v8
-; LMULMAX8RV64ZVFH-NEXT: vand.vi v8, v9, 1
-; LMULMAX8RV64ZVFH-NEXT: vmsne.vi v0, v8, 0
-; LMULMAX8RV64ZVFH-NEXT: ret
-;
-; LMULMAX1RV32ZVFH-LABEL: fp2si_v2f16_v2i1:
-; LMULMAX1RV32ZVFH: # %bb.0:
-; LMULMAX1RV32ZVFH-NEXT: vsetivli zero, 2, e8, mf8, ta, ma
-; LMULMAX1RV32ZVFH-NEXT: vfncvt.rtz.x.f.w v9, v8
-; LMULMAX1RV32ZVFH-NEXT: vand.vi v8, v9, 1
-; LMULMAX1RV32ZVFH-NEXT: vmsne.vi v0, v8, 0
-; LMULMAX1RV32ZVFH-NEXT: ret
+; ZVFH-LABEL: fp2si_v2f16_v2i1:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: vsetivli zero, 2, e8, mf8, ta, ma
+; ZVFH-NEXT: vfncvt.rtz.x.f.w v9, v8
+; ZVFH-NEXT: vand.vi v8, v9, 1
+; ZVFH-NEXT: vmsne.vi v0, v8, 0
+; ZVFH-NEXT: ret
;
-; LMULMAX1RV64ZVFH-LABEL: fp2si_v2f16_v2i1:
-; LMULMAX1RV64ZVFH: # %bb.0:
-; LMULMAX1RV64ZVFH-NEXT: vsetivli zero, 2, e8, mf8, ta, ma
-; LMULMAX1RV64ZVFH-NEXT: vfncvt.rtz.x.f.w v9, v8
-; LMULMAX1RV64ZVFH-NEXT: vand.vi v8, v9, 1
-; LMULMAX1RV64ZVFH-NEXT: vmsne.vi v0, v8, 0
-; LMULMAX1RV64ZVFH-NEXT: ret
-;
-; LMULMAX8RV32ZVFHMIN-LABEL: fp2si_v2f16_v2i1:
-; LMULMAX8RV32ZVFHMIN: # %bb.0:
-; LMULMAX8RV32ZVFHMIN-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
-; LMULMAX8RV32ZVFHMIN-NEXT: vfwcvt.f.f.v v9, v8
-; LMULMAX8RV32ZVFHMIN-NEXT: vfncvt.rtz.x.f.w v8, v9
-; LMULMAX8RV32ZVFHMIN-NEXT: vand.vi v8, v8, 1
-; LMULMAX8RV32ZVFHMIN-NEXT: vmsne.vi v0, v8, 0
-; LMULMAX8RV32ZVFHMIN-NEXT: ret
-;
-; LMULMAX8RV64ZVFHMIN-LABEL: fp2si_v2f16_v2i1:
-; LMULMAX8RV64ZVFHMIN: # %bb.0:
-; LMULMAX8RV64ZVFHMIN-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
-; LMULMAX8RV64ZVFHMIN-NEXT: vfwcvt.f.f.v v9, v8
-; LMULMAX8RV64ZVFHMIN-NEXT: vfncvt.rtz.x.f.w v8, v9
-; LMULMAX8RV64ZVFHMIN-NEXT: vand.vi v8, v8, 1
-; LMULMAX8RV64ZVFHMIN-NEXT: vmsne.vi v0, v8, 0
-; LMULMAX8RV64ZVFHMIN-NEXT: ret
-;
-; LMULMAX1RV32ZVFHMIN-LABEL: fp2si_v2f16_v2i1:
-; LMULMAX1RV32ZVFHMIN: # %bb.0:
-; LMULMAX1RV32ZVFHMIN-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
-; LMULMAX1RV32ZVFHMIN-NEXT: vfwcvt.f.f.v v9, v8
-; LMULMAX1RV32ZVFHMIN-NEXT: vfncvt.rtz.x.f.w v8, v9
-; LMULMAX1RV32ZVFHMIN-NEXT: vand.vi v8, v8, 1
-; LMULMAX1RV32ZVFHMIN-NEXT: vmsne.vi v0, v8, 0
-; LMULMAX1RV32ZVFHMIN-NEXT: ret
-;
-; LMULMAX1RV64ZVFHMIN-LABEL: fp2si_v2f16_v2i1:
-; LMULMAX1RV64ZVFHMIN: # %bb.0:
-; LMULMAX1RV64ZVFHMIN-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
-; LMULMAX1RV64ZVFHMIN-NEXT: vfwcvt.f.f.v v9, v8
-; LMULMAX1RV64ZVFHMIN-NEXT: vfncvt.rtz.x.f.w v8, v9
-; LMULMAX1RV64ZVFHMIN-NEXT: vand.vi v8, v8, 1
-; LMULMAX1RV64ZVFHMIN-NEXT: vmsne.vi v0, v8, 0
-; LMULMAX1RV64ZVFHMIN-NEXT: ret
+; ZVFHMIN-LABEL: fp2si_v2f16_v2i1:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v9, v8
+; ZVFHMIN-NEXT: vfncvt.rtz.x.f.w v8, v9
+; ZVFHMIN-NEXT: vand.vi v8, v8, 1
+; ZVFHMIN-NEXT: vmsne.vi v0, v8, 0
+; ZVFHMIN-NEXT: ret
%z = fptosi <2 x half> %x to <2 x i1>
ret <2 x i1> %z
}
define <2 x i1> @fp2ui_v2f16_v2i1(<2 x half> %x) {
-; LMULMAX8RV32ZVFH-LABEL: fp2ui_v2f16_v2i1:
-; LMULMAX8RV32ZVFH: # %bb.0:
-; LMULMAX8RV32ZVFH-NEXT: vsetivli zero, 2, e8, mf8, ta, ma
-; LMULMAX8RV32ZVFH-NEXT: vfncvt.rtz.xu.f.w v9, v8
-; LMULMAX8RV32ZVFH-NEXT: vand.vi v8, v9, 1
-; LMULMAX8RV32ZVFH-NEXT: vmsne.vi v0, v8, 0
-; LMULMAX8RV32ZVFH-NEXT: ret
-;
-; LMULMAX8RV64ZVFH-LABEL: fp2ui_v2f16_v2i1:
-; LMULMAX8RV64ZVFH: # %bb.0:
-; LMULMAX8RV64ZVFH-NEXT: vsetivli zero, 2, e8, mf8, ta, ma
-; LMULMAX8RV64ZVFH-NEXT: vfncvt.rtz.xu.f.w v9, v8
-; LMULMAX8RV64ZVFH-NEXT: vand.vi v8, v9, 1
-; LMULMAX8RV64ZVFH-NEXT: vmsne.vi v0, v8, 0
-; LMULMAX8RV64ZVFH-NEXT: ret
-;
-; LMULMAX1RV32ZVFH-LABEL: fp2ui_v2f16_v2i1:
-; LMULMAX1RV32ZVFH: # %bb.0:
-; LMULMAX1RV32ZVFH-NEXT: vsetivli zero, 2, e8, mf8, ta, ma
-; LMULMAX1RV32ZVFH-NEXT: vfncvt.rtz.xu.f.w v9, v8
-; LMULMAX1RV32ZVFH-NEXT: vand.vi v8, v9, 1
-; LMULMAX1RV32ZVFH-NEXT: vmsne.vi v0, v8, 0
-; LMULMAX1RV32ZVFH-NEXT: ret
-;
-; LMULMAX1RV64ZVFH-LABEL: fp2ui_v2f16_v2i1:
-; LMULMAX1RV64ZVFH: # %bb.0:
-; LMULMAX1RV64ZVFH-NEXT: vsetivli zero, 2, e8, mf8, ta, ma
-; LMULMAX1RV64ZVFH-NEXT: vfncvt.rtz.xu.f.w v9, v8
-; LMULMAX1RV64ZVFH-NEXT: vand.vi v8, v9, 1
-; LMULMAX1RV64ZVFH-NEXT: vmsne.vi v0, v8, 0
-; LMULMAX1RV64ZVFH-NEXT: ret
-;
-; LMULMAX8RV32ZVFHMIN-LABEL: fp2ui_v2f16_v2i1:
-; LMULMAX8RV32ZVFHMIN: # %bb.0:
-; LMULMAX8RV32ZVFHMIN-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
-; LMULMAX8RV32ZVFHMIN-NEXT: vfwcvt.f.f.v v9, v8
-; LMULMAX8RV32ZVFHMIN-NEXT: vfncvt.rtz.xu.f.w v8, v9
-; LMULMAX8RV32ZVFHMIN-NEXT: vand.vi v8, v8, 1
-; LMULMAX8RV32ZVFHMIN-NEXT: vmsne.vi v0, v8, 0
-; LMULMAX8RV32ZVFHMIN-NEXT: ret
+; ZVFH-LABEL: fp2ui_v2f16_v2i1:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: vsetivli zero, 2, e8, mf8, ta, ma
+; ZVFH-NEXT: vfncvt.rtz.xu.f.w v9, v8
+; ZVFH-NEXT: vand.vi v8, v9, 1
+; ZVFH-NEXT: vmsne.vi v0, v8, 0
+; ZVFH-NEXT: ret
;
-; LMULMAX8RV64ZVFHMIN-LABEL: fp2ui_v2f16_v2i1:
-; LMULMAX8RV64ZVFHMIN: # %bb.0:
-; LMULMAX8RV64ZVFHMIN-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
-; LMULMAX8RV64ZVFHMIN-NEXT: vfwcvt.f.f.v v9, v8
-; LMULMAX8RV64ZVFHMIN-NEXT: vfncvt.rtz.xu.f.w v8, v9
-; LMULMAX8RV64ZVFHMIN-NEXT: vand.vi v8, v8, 1
-; LMULMAX8RV64ZVFHMIN-NEXT: vmsne.vi v0, v8, 0
-; LMULMAX8RV64ZVFHMIN-NEXT: ret
-;
-; LMULMAX1RV32ZVFHMIN-LABEL: fp2ui_v2f16_v2i1:
-; LMULMAX1RV32ZVFHMIN: # %bb.0:
-; LMULMAX1RV32ZVFHMIN-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
-; LMULMAX1RV32ZVFHMIN-NEXT: vfwcvt.f.f.v v9, v8
-; LMULMAX1RV32ZVFHMIN-NEXT: vfncvt.rtz.xu.f.w v8, v9
-; LMULMAX1RV32ZVFHMIN-NEXT: vand.vi v8, v8, 1
-; LMULMAX1RV32ZVFHMIN-NEXT: vmsne.vi v0, v8, 0
-; LMULMAX1RV32ZVFHMIN-NEXT: ret
-;
-; LMULMAX1RV64ZVFHMIN-LABEL: fp2ui_v2f16_v2i1:
-; LMULMAX1RV64ZVFHMIN: # %bb.0:
-; LMULMAX1RV64ZVFHMIN-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
-; LMULMAX1RV64ZVFHMIN-NEXT: vfwcvt.f.f.v v9, v8
-; LMULMAX1RV64ZVFHMIN-NEXT: vfncvt.rtz.xu.f.w v8, v9
-; LMULMAX1RV64ZVFHMIN-NEXT: vand.vi v8, v8, 1
-; LMULMAX1RV64ZVFHMIN-NEXT: vmsne.vi v0, v8, 0
-; LMULMAX1RV64ZVFHMIN-NEXT: ret
+; ZVFHMIN-LABEL: fp2ui_v2f16_v2i1:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v9, v8
+; ZVFHMIN-NEXT: vfncvt.rtz.xu.f.w v8, v9
+; ZVFHMIN-NEXT: vand.vi v8, v8, 1
+; ZVFHMIN-NEXT: vmsne.vi v0, v8, 0
+; ZVFHMIN-NEXT: ret
%z = fptoui <2 x half> %x to <2 x i1>
ret <2 x i1> %z
}
@@ -797,59 +571,17 @@ define <2 x i1> @fp2ui_v2f64_v2i1(<2 x double> %x) {
}
define void @fp2si_v8f64_v8i8(ptr %x, ptr %y) {
-; LMULMAX8-LABEL: fp2si_v8f64_v8i8:
-; LMULMAX8: # %bb.0:
-; LMULMAX8-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; LMULMAX8-NEXT: vle64.v v8, (a0)
-; LMULMAX8-NEXT: vfncvt.rtz.x.f.w v12, v8
-; LMULMAX8-NEXT: vsetvli zero, zero, e16, m1, ta, ma
-; LMULMAX8-NEXT: vnsrl.wi v8, v12, 0
-; LMULMAX8-NEXT: vsetvli zero, zero, e8, mf2, ta, ma
-; LMULMAX8-NEXT: vnsrl.wi v8, v8, 0
-; LMULMAX8-NEXT: vse8.v v8, (a1)
-; LMULMAX8-NEXT: ret
-;
-; LMULMAX1-LABEL: fp2si_v8f64_v8i8:
-; LMULMAX1: # %bb.0:
-; LMULMAX1-NEXT: addi a2, a0, 48
-; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
-; LMULMAX1-NEXT: vle64.v v8, (a2)
-; LMULMAX1-NEXT: addi a2, a0, 32
-; LMULMAX1-NEXT: vle64.v v9, (a0)
-; LMULMAX1-NEXT: vle64.v v10, (a2)
-; LMULMAX1-NEXT: addi a0, a0, 16
-; LMULMAX1-NEXT: vle64.v v11, (a0)
-; LMULMAX1-NEXT: vfncvt.rtz.x.f.w v12, v9
-; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
-; LMULMAX1-NEXT: vnsrl.wi v9, v12, 0
-; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, ma
-; LMULMAX1-NEXT: vnsrl.wi v9, v9, 0
-; LMULMAX1-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
-; LMULMAX1-NEXT: vfncvt.rtz.x.f.w v12, v11
-; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
-; LMULMAX1-NEXT: vnsrl.wi v11, v12, 0
-; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, ma
-; LMULMAX1-NEXT: vnsrl.wi v11, v11, 0
-; LMULMAX1-NEXT: vsetivli zero, 4, e8, mf2, tu, ma
-; LMULMAX1-NEXT: vslideup.vi v9, v11, 2
-; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
-; LMULMAX1-NEXT: vfncvt.rtz.x.f.w v11, v10
-; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
-; LMULMAX1-NEXT: vnsrl.wi v10, v11, 0
-; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, ma
-; LMULMAX1-NEXT: vnsrl.wi v10, v10, 0
-; LMULMAX1-NEXT: vsetivli zero, 6, e8, mf2, tu, ma
-; LMULMAX1-NEXT: vslideup.vi v9, v10, 4
-; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
-; LMULMAX1-NEXT: vfncvt.rtz.x.f.w v10, v8
-; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
-; LMULMAX1-NEXT: vnsrl.wi v8, v10, 0
-; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, ma
-; LMULMAX1-NEXT: vnsrl.wi v8, v8, 0
-; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
-; LMULMAX1-NEXT: vslideup.vi v9, v8, 6
-; LMULMAX1-NEXT: vse8.v v9, (a1)
-; LMULMAX1-NEXT: ret
+; CHECK-LABEL: fp2si_v8f64_v8i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; CHECK-NEXT: vle64.v v8, (a0)
+; CHECK-NEXT: vfncvt.rtz.x.f.w v12, v8
+; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, ma
+; CHECK-NEXT: vnsrl.wi v8, v12, 0
+; CHECK-NEXT: vsetvli zero, zero, e8, mf2, ta, ma
+; CHECK-NEXT: vnsrl.wi v8, v8, 0
+; CHECK-NEXT: vse8.v v8, (a1)
+; CHECK-NEXT: ret
%a = load <8 x double>, ptr %x
%d = fptosi <8 x double> %a to <8 x i8>
store <8 x i8> %d, ptr %y
@@ -857,59 +589,17 @@ define void @fp2si_v8f64_v8i8(ptr %x, ptr %y) {
}
define void @fp2ui_v8f64_v8i8(ptr %x, ptr %y) {
-; LMULMAX8-LABEL: fp2ui_v8f64_v8i8:
-; LMULMAX8: # %bb.0:
-; LMULMAX8-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; LMULMAX8-NEXT: vle64.v v8, (a0)
-; LMULMAX8-NEXT: vfncvt.rtz.xu.f.w v12, v8
-; LMULMAX8-NEXT: vsetvli zero, zero, e16, m1, ta, ma
-; LMULMAX8-NEXT: vnsrl.wi v8, v12, 0
-; LMULMAX8-NEXT: vsetvli zero, zero, e8, mf2, ta, ma
-; LMULMAX8-NEXT: vnsrl.wi v8, v8, 0
-; LMULMAX8-NEXT: vse8.v v8, (a1)
-; LMULMAX8-NEXT: ret
-;
-; LMULMAX1-LABEL: fp2ui_v8f64_v8i8:
-; LMULMAX1: # %bb.0:
-; LMULMAX1-NEXT: addi a2, a0, 48
-; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
-; LMULMAX1-NEXT: vle64.v v8, (a2)
-; LMULMAX1-NEXT: addi a2, a0, 32
-; LMULMAX1-NEXT: vle64.v v9, (a0)
-; LMULMAX1-NEXT: vle64.v v10, (a2)
-; LMULMAX1-NEXT: addi a0, a0, 16
-; LMULMAX1-NEXT: vle64.v v11, (a0)
-; LMULMAX1-NEXT: vfncvt.rtz.xu.f.w v12, v9
-; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
-; LMULMAX1-NEXT: vnsrl.wi v9, v12, 0
-; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, ma
-; LMULMAX1-NEXT: vnsrl.wi v9, v9, 0
-; LMULMAX1-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
-; LMULMAX1-NEXT: vfncvt.rtz.xu.f.w v12, v11
-; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
-; LMULMAX1-NEXT: vnsrl.wi v11, v12, 0
-; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, ma
-; LMULMAX1-NEXT: vnsrl.wi v11, v11, 0
-; LMULMAX1-NEXT: vsetivli zero, 4, e8, mf2, tu, ma
-; LMULMAX1-NEXT: vslideup.vi v9, v11, 2
-; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
-; LMULMAX1-NEXT: vfncvt.rtz.xu.f.w v11, v10
-; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
-; LMULMAX1-NEXT: vnsrl.wi v10, v11, 0
-; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, ma
-; LMULMAX1-NEXT: vnsrl.wi v10, v10, 0
-; LMULMAX1-NEXT: vsetivli zero, 6, e8, mf2, tu, ma
-; LMULMAX1-NEXT: vslideup.vi v9, v10, 4
-; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
-; LMULMAX1-NEXT: vfncvt.rtz.xu.f.w v10, v8
-; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
-; LMULMAX1-NEXT: vnsrl.wi v8, v10, 0
-; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, ma
-; LMULMAX1-NEXT: vnsrl.wi v8, v8, 0
-; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
-; LMULMAX1-NEXT: vslideup.vi v9, v8, 6
-; LMULMAX1-NEXT: vse8.v v9, (a1)
-; LMULMAX1-NEXT: ret
+; CHECK-LABEL: fp2ui_v8f64_v8i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; CHECK-NEXT: vle64.v v8, (a0)
+; CHECK-NEXT: vfncvt.rtz.xu.f.w v12, v8
+; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, ma
+; CHECK-NEXT: vnsrl.wi v8, v12, 0
+; CHECK-NEXT: vsetvli zero, zero, e8, mf2, ta, ma
+; CHECK-NEXT: vnsrl.wi v8, v8, 0
+; CHECK-NEXT: vse8.v v8, (a1)
+; CHECK-NEXT: ret
%a = load <8 x double>, ptr %x
%d = fptoui <8 x double> %a to <8 x i8>
store <8 x i8> %d, ptr %y
@@ -917,111 +607,25 @@ define void @fp2ui_v8f64_v8i8(ptr %x, ptr %y) {
}
define <8 x i1> @fp2si_v8f64_v8i1(<8 x double> %x) {
-; LMULMAX8-LABEL: fp2si_v8f64_v8i1:
-; LMULMAX8: # %bb.0:
-; LMULMAX8-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; LMULMAX8-NEXT: vfncvt.rtz.x.f.w v12, v8
-; LMULMAX8-NEXT: vand.vi v8, v12, 1
-; LMULMAX8-NEXT: vmsne.vi v0, v8, 0
-; LMULMAX8-NEXT: ret
-;
-; LMULMAX1-LABEL: fp2si_v8f64_v8i1:
-; LMULMAX1: # %bb.0:
-; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
-; LMULMAX1-NEXT: vfncvt.rtz.x.f.w v12, v8
-; LMULMAX1-NEXT: vand.vi v8, v12, 1
-; LMULMAX1-NEXT: vmsne.vi v0, v8, 0
-; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
-; LMULMAX1-NEXT: vmv.v.i v8, 0
-; LMULMAX1-NEXT: vmerge.vim v12, v8, 1, v0
-; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
-; LMULMAX1-NEXT: vfncvt.rtz.x.f.w v13, v9
-; LMULMAX1-NEXT: vand.vi v9, v13, 1
-; LMULMAX1-NEXT: vmsne.vi v0, v9, 0
-; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, ma
-; LMULMAX1-NEXT: vmv.v.i v9, 0
-; LMULMAX1-NEXT: vmerge.vim v13, v9, 1, v0
-; LMULMAX1-NEXT: vsetivli zero, 4, e8, mf2, tu, ma
-; LMULMAX1-NEXT: vslideup.vi v12, v13, 2
-; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
-; LMULMAX1-NEXT: vmsne.vi v0, v12, 0
-; LMULMAX1-NEXT: vmerge.vim v12, v8, 1, v0
-; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
-; LMULMAX1-NEXT: vfncvt.rtz.x.f.w v13, v10
-; LMULMAX1-NEXT: vand.vi v10, v13, 1
-; LMULMAX1-NEXT: vmsne.vi v0, v10, 0
-; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, ma
-; LMULMAX1-NEXT: vmerge.vim v10, v9, 1, v0
-; LMULMAX1-NEXT: vsetivli zero, 6, e8, mf2, tu, ma
-; LMULMAX1-NEXT: vslideup.vi v12, v10, 4
-; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
-; LMULMAX1-NEXT: vmsne.vi v0, v12, 0
-; LMULMAX1-NEXT: vmerge.vim v8, v8, 1, v0
-; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
-; LMULMAX1-NEXT: vfncvt.rtz.x.f.w v10, v11
-; LMULMAX1-NEXT: vand.vi v10, v10, 1
-; LMULMAX1-NEXT: vmsne.vi v0, v10, 0
-; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, ma
-; LMULMAX1-NEXT: vmerge.vim v9, v9, 1, v0
-; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
-; LMULMAX1-NEXT: vslideup.vi v8, v9, 6
-; LMULMAX1-NEXT: vmsne.vi v0, v8, 0
-; LMULMAX1-NEXT: ret
+; CHECK-LABEL: fp2si_v8f64_v8i1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; CHECK-NEXT: vfncvt.rtz.x.f.w v12, v8
+; CHECK-NEXT: vand.vi v8, v12, 1
+; CHECK-NEXT: vmsne.vi v0, v8, 0
+; CHECK-NEXT: ret
%z = fptosi <8 x double> %x to <8 x i1>
ret <8 x i1> %z
}
define <8 x i1> @fp2ui_v8f64_v8i1(<8 x double> %x) {
-; LMULMAX8-LABEL: fp2ui_v8f64_v8i1:
-; LMULMAX8: # %bb.0:
-; LMULMAX8-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; LMULMAX8-NEXT: vfncvt.rtz.xu.f.w v12, v8
-; LMULMAX8-NEXT: vand.vi v8, v12, 1
-; LMULMAX8-NEXT: vmsne.vi v0, v8, 0
-; LMULMAX8-NEXT: ret
-;
-; LMULMAX1-LABEL: fp2ui_v8f64_v8i1:
-; LMULMAX1: # %bb.0:
-; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
-; LMULMAX1-NEXT: vfncvt.rtz.xu.f.w v12, v8
-; LMULMAX1-NEXT: vand.vi v8, v12, 1
-; LMULMAX1-NEXT: vmsne.vi v0, v8, 0
-; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
-; LMULMAX1-NEXT: vmv.v.i v8, 0
-; LMULMAX1-NEXT: vmerge.vim v12, v8, 1, v0
-; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
-; LMULMAX1-NEXT: vfncvt.rtz.xu.f.w v13, v9
-; LMULMAX1-NEXT: vand.vi v9, v13, 1
-; LMULMAX1-NEXT: vmsne.vi v0, v9, 0
-; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, ma
-; LMULMAX1-NEXT: vmv.v.i v9, 0
-; LMULMAX1-NEXT: vmerge.vim v13, v9, 1, v0
-; LMULMAX1-NEXT: vsetivli zero, 4, e8, mf2, tu, ma
-; LMULMAX1-NEXT: vslideup.vi v12, v13, 2
-; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
-; LMULMAX1-NEXT: vmsne.vi v0, v12, 0
-; LMULMAX1-NEXT: vmerge.vim v12, v8, 1, v0
-; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
-; LMULMAX1-NEXT: vfncvt.rtz.xu.f.w v13, v10
-; LMULMAX1-NEXT: vand.vi v10, v13, 1
-; LMULMAX1-NEXT: vmsne.vi v0, v10, 0
-; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, ma
-; LMULMAX1-NEXT: vmerge.vim v10, v9, 1, v0
-; LMULMAX1-NEXT: vsetivli zero, 6, e8, mf2, tu, ma
-; LMULMAX1-NEXT: vslideup.vi v12, v10, 4
-; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
-; LMULMAX1-NEXT: vmsne.vi v0, v12, 0
-; LMULMAX1-NEXT: vmerge.vim v8, v8, 1, v0
-; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
-; LMULMAX1-NEXT: vfncvt.rtz.xu.f.w v10, v11
-; LMULMAX1-NEXT: vand.vi v10, v10, 1
-; LMULMAX1-NEXT: vmsne.vi v0, v10, 0
-; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, ma
-; LMULMAX1-NEXT: vmerge.vim v9, v9, 1, v0
-; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
-; LMULMAX1-NEXT: vslideup.vi v8, v9, 6
-; LMULMAX1-NEXT: vmsne.vi v0, v8, 0
-; LMULMAX1-NEXT: ret
+; CHECK-LABEL: fp2ui_v8f64_v8i1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; CHECK-NEXT: vfncvt.rtz.xu.f.w v12, v8
+; CHECK-NEXT: vand.vi v8, v12, 1
+; CHECK-NEXT: vmsne.vi v0, v8, 0
+; CHECK-NEXT: ret
%z = fptoui <8 x double> %x to <8 x i1>
ret <8 x i1> %z
}
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-i2fp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-i2fp.ll
index ec11ada..6ffa6ac 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-i2fp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-i2fp.ll
@@ -1,10 +1,8 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -target-abi=ilp32d -mattr=+v,+zfh,+zvfh,+f,+d -riscv-v-fixed-length-vector-lmul-max=8 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,LMULMAX8,LMULMAX8ZVFH,LMULMAX8RV32
-; RUN: llc -mtriple=riscv64 -target-abi=lp64d -mattr=+v,+zfh,+zvfh,+f,+d -riscv-v-fixed-length-vector-lmul-max=8 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,LMULMAX8,LMULMAX8ZVFH,LMULMAX8RV64
-; RUN: llc -mtriple=riscv32 -target-abi=ilp32d -mattr=+v,+zfh,+zvfh,+f,+d -riscv-v-fixed-length-vector-lmul-max=1 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,LMULMAX1,LMULMAX1RV32
-; RUN: llc -mtriple=riscv64 -target-abi=lp64d -mattr=+v,+zfh,+zvfh,+f,+d -riscv-v-fixed-length-vector-lmul-max=1 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,LMULMAX1,LMULMAX1RV64
-; RUN: llc -mtriple=riscv32 -target-abi=ilp32d -mattr=+v,+zfh,+zvfhmin,+f,+d -riscv-v-fixed-length-vector-lmul-max=8 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,LMULMAX8,LMULMAX8ZVFHMIN,LMULMAX8RV32ZVFHMIN
-; RUN: llc -mtriple=riscv64 -target-abi=lp64d -mattr=+v,+zfh,+zvfhmin,+f,+d -riscv-v-fixed-length-vector-lmul-max=8 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,LMULMAX8,LMULMAX8ZVFHMIN,LMULMAX8RV64ZVFHMIN
+; RUN: llc -mtriple=riscv32 -target-abi=ilp32d -mattr=+v,+zfh,+zvfh,+f,+d -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFH,ZVFH32
+; RUN: llc -mtriple=riscv64 -target-abi=lp64d -mattr=+v,+zfh,+zvfh,+f,+d -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFH,ZVFH64
+; RUN: llc -mtriple=riscv32 -target-abi=ilp32d -mattr=+v,+zfh,+zvfhmin,+f,+d -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN,ZVFHMIN32
+; RUN: llc -mtriple=riscv64 -target-abi=lp64d -mattr=+v,+zfh,+zvfhmin,+f,+d -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN,ZVFHMIN64
define void @si2fp_v2i32_v2f32(ptr %x, ptr %y) {
; CHECK-LABEL: si2fp_v2i32_v2f32:
@@ -132,214 +130,146 @@ define <3 x float> @si2fp_v3i1_v3f32(<3 x i1> %x) {
; FIXME: This gets expanded instead of widened + promoted
define <3 x float> @si2fp_v3i7_v3f32(<3 x i7> %x) {
-; LMULMAX8RV32-LABEL: si2fp_v3i7_v3f32:
-; LMULMAX8RV32: # %bb.0:
-; LMULMAX8RV32-NEXT: lw a1, 4(a0)
-; LMULMAX8RV32-NEXT: lw a2, 0(a0)
-; LMULMAX8RV32-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
-; LMULMAX8RV32-NEXT: lw a0, 8(a0)
-; LMULMAX8RV32-NEXT: vmv.v.x v8, a2
-; LMULMAX8RV32-NEXT: vslide1down.vx v8, v8, a1
-; LMULMAX8RV32-NEXT: vslide1down.vx v8, v8, a0
-; LMULMAX8RV32-NEXT: vslidedown.vi v8, v8, 1
-; LMULMAX8RV32-NEXT: vadd.vv v8, v8, v8
-; LMULMAX8RV32-NEXT: vsra.vi v8, v8, 1
-; LMULMAX8RV32-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
-; LMULMAX8RV32-NEXT: vsext.vf2 v9, v8
-; LMULMAX8RV32-NEXT: vfwcvt.f.x.v v8, v9
-; LMULMAX8RV32-NEXT: ret
+; ZVFH32-LABEL: si2fp_v3i7_v3f32:
+; ZVFH32: # %bb.0:
+; ZVFH32-NEXT: lw a1, 4(a0)
+; ZVFH32-NEXT: lw a2, 0(a0)
+; ZVFH32-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
+; ZVFH32-NEXT: lw a0, 8(a0)
+; ZVFH32-NEXT: vmv.v.x v8, a2
+; ZVFH32-NEXT: vslide1down.vx v8, v8, a1
+; ZVFH32-NEXT: vslide1down.vx v8, v8, a0
+; ZVFH32-NEXT: vslidedown.vi v8, v8, 1
+; ZVFH32-NEXT: vadd.vv v8, v8, v8
+; ZVFH32-NEXT: vsra.vi v8, v8, 1
+; ZVFH32-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
+; ZVFH32-NEXT: vsext.vf2 v9, v8
+; ZVFH32-NEXT: vfwcvt.f.x.v v8, v9
+; ZVFH32-NEXT: ret
;
-; LMULMAX8RV64-LABEL: si2fp_v3i7_v3f32:
-; LMULMAX8RV64: # %bb.0:
-; LMULMAX8RV64-NEXT: ld a1, 8(a0)
-; LMULMAX8RV64-NEXT: ld a2, 0(a0)
-; LMULMAX8RV64-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
-; LMULMAX8RV64-NEXT: ld a0, 16(a0)
-; LMULMAX8RV64-NEXT: vmv.v.x v8, a2
-; LMULMAX8RV64-NEXT: vslide1down.vx v8, v8, a1
-; LMULMAX8RV64-NEXT: vslide1down.vx v8, v8, a0
-; LMULMAX8RV64-NEXT: vslidedown.vi v8, v8, 1
-; LMULMAX8RV64-NEXT: vadd.vv v8, v8, v8
-; LMULMAX8RV64-NEXT: vsra.vi v8, v8, 1
-; LMULMAX8RV64-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
-; LMULMAX8RV64-NEXT: vsext.vf2 v9, v8
-; LMULMAX8RV64-NEXT: vfwcvt.f.x.v v8, v9
-; LMULMAX8RV64-NEXT: ret
+; ZVFH64-LABEL: si2fp_v3i7_v3f32:
+; ZVFH64: # %bb.0:
+; ZVFH64-NEXT: ld a1, 8(a0)
+; ZVFH64-NEXT: ld a2, 0(a0)
+; ZVFH64-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
+; ZVFH64-NEXT: ld a0, 16(a0)
+; ZVFH64-NEXT: vmv.v.x v8, a2
+; ZVFH64-NEXT: vslide1down.vx v8, v8, a1
+; ZVFH64-NEXT: vslide1down.vx v8, v8, a0
+; ZVFH64-NEXT: vslidedown.vi v8, v8, 1
+; ZVFH64-NEXT: vadd.vv v8, v8, v8
+; ZVFH64-NEXT: vsra.vi v8, v8, 1
+; ZVFH64-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
+; ZVFH64-NEXT: vsext.vf2 v9, v8
+; ZVFH64-NEXT: vfwcvt.f.x.v v8, v9
+; ZVFH64-NEXT: ret
;
-; LMULMAX1RV32-LABEL: si2fp_v3i7_v3f32:
-; LMULMAX1RV32: # %bb.0:
-; LMULMAX1RV32-NEXT: lw a1, 4(a0)
-; LMULMAX1RV32-NEXT: lw a2, 0(a0)
-; LMULMAX1RV32-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
-; LMULMAX1RV32-NEXT: lw a0, 8(a0)
-; LMULMAX1RV32-NEXT: vmv.v.x v8, a2
-; LMULMAX1RV32-NEXT: vslide1down.vx v8, v8, a1
-; LMULMAX1RV32-NEXT: vslide1down.vx v8, v8, a0
-; LMULMAX1RV32-NEXT: vslidedown.vi v8, v8, 1
-; LMULMAX1RV32-NEXT: vadd.vv v8, v8, v8
-; LMULMAX1RV32-NEXT: vsra.vi v8, v8, 1
-; LMULMAX1RV32-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
-; LMULMAX1RV32-NEXT: vsext.vf2 v9, v8
-; LMULMAX1RV32-NEXT: vfwcvt.f.x.v v8, v9
-; LMULMAX1RV32-NEXT: ret
+; ZVFHMIN32-LABEL: si2fp_v3i7_v3f32:
+; ZVFHMIN32: # %bb.0:
+; ZVFHMIN32-NEXT: lw a1, 4(a0)
+; ZVFHMIN32-NEXT: lw a2, 0(a0)
+; ZVFHMIN32-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
+; ZVFHMIN32-NEXT: lw a0, 8(a0)
+; ZVFHMIN32-NEXT: vmv.v.x v8, a2
+; ZVFHMIN32-NEXT: vslide1down.vx v8, v8, a1
+; ZVFHMIN32-NEXT: vslide1down.vx v8, v8, a0
+; ZVFHMIN32-NEXT: vslidedown.vi v8, v8, 1
+; ZVFHMIN32-NEXT: vadd.vv v8, v8, v8
+; ZVFHMIN32-NEXT: vsra.vi v8, v8, 1
+; ZVFHMIN32-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
+; ZVFHMIN32-NEXT: vsext.vf2 v9, v8
+; ZVFHMIN32-NEXT: vfwcvt.f.x.v v8, v9
+; ZVFHMIN32-NEXT: ret
;
-; LMULMAX1RV64-LABEL: si2fp_v3i7_v3f32:
-; LMULMAX1RV64: # %bb.0:
-; LMULMAX1RV64-NEXT: ld a1, 8(a0)
-; LMULMAX1RV64-NEXT: ld a2, 0(a0)
-; LMULMAX1RV64-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
-; LMULMAX1RV64-NEXT: ld a0, 16(a0)
-; LMULMAX1RV64-NEXT: vmv.v.x v8, a2
-; LMULMAX1RV64-NEXT: vslide1down.vx v8, v8, a1
-; LMULMAX1RV64-NEXT: vslide1down.vx v8, v8, a0
-; LMULMAX1RV64-NEXT: vslidedown.vi v8, v8, 1
-; LMULMAX1RV64-NEXT: vadd.vv v8, v8, v8
-; LMULMAX1RV64-NEXT: vsra.vi v8, v8, 1
-; LMULMAX1RV64-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
-; LMULMAX1RV64-NEXT: vsext.vf2 v9, v8
-; LMULMAX1RV64-NEXT: vfwcvt.f.x.v v8, v9
-; LMULMAX1RV64-NEXT: ret
-;
-; LMULMAX8RV32ZVFHMIN-LABEL: si2fp_v3i7_v3f32:
-; LMULMAX8RV32ZVFHMIN: # %bb.0:
-; LMULMAX8RV32ZVFHMIN-NEXT: lw a1, 4(a0)
-; LMULMAX8RV32ZVFHMIN-NEXT: lw a2, 0(a0)
-; LMULMAX8RV32ZVFHMIN-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
-; LMULMAX8RV32ZVFHMIN-NEXT: lw a0, 8(a0)
-; LMULMAX8RV32ZVFHMIN-NEXT: vmv.v.x v8, a2
-; LMULMAX8RV32ZVFHMIN-NEXT: vslide1down.vx v8, v8, a1
-; LMULMAX8RV32ZVFHMIN-NEXT: vslide1down.vx v8, v8, a0
-; LMULMAX8RV32ZVFHMIN-NEXT: vslidedown.vi v8, v8, 1
-; LMULMAX8RV32ZVFHMIN-NEXT: vadd.vv v8, v8, v8
-; LMULMAX8RV32ZVFHMIN-NEXT: vsra.vi v8, v8, 1
-; LMULMAX8RV32ZVFHMIN-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
-; LMULMAX8RV32ZVFHMIN-NEXT: vsext.vf2 v9, v8
-; LMULMAX8RV32ZVFHMIN-NEXT: vfwcvt.f.x.v v8, v9
-; LMULMAX8RV32ZVFHMIN-NEXT: ret
-;
-; LMULMAX8RV64ZVFHMIN-LABEL: si2fp_v3i7_v3f32:
-; LMULMAX8RV64ZVFHMIN: # %bb.0:
-; LMULMAX8RV64ZVFHMIN-NEXT: ld a1, 8(a0)
-; LMULMAX8RV64ZVFHMIN-NEXT: ld a2, 0(a0)
-; LMULMAX8RV64ZVFHMIN-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
-; LMULMAX8RV64ZVFHMIN-NEXT: ld a0, 16(a0)
-; LMULMAX8RV64ZVFHMIN-NEXT: vmv.v.x v8, a2
-; LMULMAX8RV64ZVFHMIN-NEXT: vslide1down.vx v8, v8, a1
-; LMULMAX8RV64ZVFHMIN-NEXT: vslide1down.vx v8, v8, a0
-; LMULMAX8RV64ZVFHMIN-NEXT: vslidedown.vi v8, v8, 1
-; LMULMAX8RV64ZVFHMIN-NEXT: vadd.vv v8, v8, v8
-; LMULMAX8RV64ZVFHMIN-NEXT: vsra.vi v8, v8, 1
-; LMULMAX8RV64ZVFHMIN-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
-; LMULMAX8RV64ZVFHMIN-NEXT: vsext.vf2 v9, v8
-; LMULMAX8RV64ZVFHMIN-NEXT: vfwcvt.f.x.v v8, v9
-; LMULMAX8RV64ZVFHMIN-NEXT: ret
+; ZVFHMIN64-LABEL: si2fp_v3i7_v3f32:
+; ZVFHMIN64: # %bb.0:
+; ZVFHMIN64-NEXT: ld a1, 8(a0)
+; ZVFHMIN64-NEXT: ld a2, 0(a0)
+; ZVFHMIN64-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
+; ZVFHMIN64-NEXT: ld a0, 16(a0)
+; ZVFHMIN64-NEXT: vmv.v.x v8, a2
+; ZVFHMIN64-NEXT: vslide1down.vx v8, v8, a1
+; ZVFHMIN64-NEXT: vslide1down.vx v8, v8, a0
+; ZVFHMIN64-NEXT: vslidedown.vi v8, v8, 1
+; ZVFHMIN64-NEXT: vadd.vv v8, v8, v8
+; ZVFHMIN64-NEXT: vsra.vi v8, v8, 1
+; ZVFHMIN64-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
+; ZVFHMIN64-NEXT: vsext.vf2 v9, v8
+; ZVFHMIN64-NEXT: vfwcvt.f.x.v v8, v9
+; ZVFHMIN64-NEXT: ret
%z = sitofp <3 x i7> %x to <3 x float>
ret <3 x float> %z
}
; FIXME: This gets expanded instead of widened + promoted
define <3 x float> @ui2fp_v3i7_v3f32(<3 x i7> %x) {
-; LMULMAX8RV32-LABEL: ui2fp_v3i7_v3f32:
-; LMULMAX8RV32: # %bb.0:
-; LMULMAX8RV32-NEXT: lw a1, 4(a0)
-; LMULMAX8RV32-NEXT: lw a2, 0(a0)
-; LMULMAX8RV32-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
-; LMULMAX8RV32-NEXT: lw a0, 8(a0)
-; LMULMAX8RV32-NEXT: vmv.v.x v8, a2
-; LMULMAX8RV32-NEXT: vslide1down.vx v8, v8, a1
-; LMULMAX8RV32-NEXT: vslide1down.vx v8, v8, a0
-; LMULMAX8RV32-NEXT: vslidedown.vi v8, v8, 1
-; LMULMAX8RV32-NEXT: li a0, 127
-; LMULMAX8RV32-NEXT: vand.vx v8, v8, a0
-; LMULMAX8RV32-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
-; LMULMAX8RV32-NEXT: vzext.vf2 v9, v8
-; LMULMAX8RV32-NEXT: vfwcvt.f.xu.v v8, v9
-; LMULMAX8RV32-NEXT: ret
-;
-; LMULMAX8RV64-LABEL: ui2fp_v3i7_v3f32:
-; LMULMAX8RV64: # %bb.0:
-; LMULMAX8RV64-NEXT: ld a1, 8(a0)
-; LMULMAX8RV64-NEXT: ld a2, 0(a0)
-; LMULMAX8RV64-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
-; LMULMAX8RV64-NEXT: ld a0, 16(a0)
-; LMULMAX8RV64-NEXT: vmv.v.x v8, a2
-; LMULMAX8RV64-NEXT: vslide1down.vx v8, v8, a1
-; LMULMAX8RV64-NEXT: vslide1down.vx v8, v8, a0
-; LMULMAX8RV64-NEXT: vslidedown.vi v8, v8, 1
-; LMULMAX8RV64-NEXT: li a0, 127
-; LMULMAX8RV64-NEXT: vand.vx v8, v8, a0
-; LMULMAX8RV64-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
-; LMULMAX8RV64-NEXT: vzext.vf2 v9, v8
-; LMULMAX8RV64-NEXT: vfwcvt.f.xu.v v8, v9
-; LMULMAX8RV64-NEXT: ret
-;
-; LMULMAX1RV32-LABEL: ui2fp_v3i7_v3f32:
-; LMULMAX1RV32: # %bb.0:
-; LMULMAX1RV32-NEXT: lw a1, 4(a0)
-; LMULMAX1RV32-NEXT: lw a2, 0(a0)
-; LMULMAX1RV32-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
-; LMULMAX1RV32-NEXT: lw a0, 8(a0)
-; LMULMAX1RV32-NEXT: vmv.v.x v8, a2
-; LMULMAX1RV32-NEXT: vslide1down.vx v8, v8, a1
-; LMULMAX1RV32-NEXT: vslide1down.vx v8, v8, a0
-; LMULMAX1RV32-NEXT: vslidedown.vi v8, v8, 1
-; LMULMAX1RV32-NEXT: li a0, 127
-; LMULMAX1RV32-NEXT: vand.vx v8, v8, a0
-; LMULMAX1RV32-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
-; LMULMAX1RV32-NEXT: vzext.vf2 v9, v8
-; LMULMAX1RV32-NEXT: vfwcvt.f.xu.v v8, v9
-; LMULMAX1RV32-NEXT: ret
+; ZVFH32-LABEL: ui2fp_v3i7_v3f32:
+; ZVFH32: # %bb.0:
+; ZVFH32-NEXT: lw a1, 4(a0)
+; ZVFH32-NEXT: lw a2, 0(a0)
+; ZVFH32-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
+; ZVFH32-NEXT: lw a0, 8(a0)
+; ZVFH32-NEXT: vmv.v.x v8, a2
+; ZVFH32-NEXT: vslide1down.vx v8, v8, a1
+; ZVFH32-NEXT: vslide1down.vx v8, v8, a0
+; ZVFH32-NEXT: vslidedown.vi v8, v8, 1
+; ZVFH32-NEXT: li a0, 127
+; ZVFH32-NEXT: vand.vx v8, v8, a0
+; ZVFH32-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
+; ZVFH32-NEXT: vzext.vf2 v9, v8
+; ZVFH32-NEXT: vfwcvt.f.xu.v v8, v9
+; ZVFH32-NEXT: ret
;
-; LMULMAX1RV64-LABEL: ui2fp_v3i7_v3f32:
-; LMULMAX1RV64: # %bb.0:
-; LMULMAX1RV64-NEXT: ld a1, 8(a0)
-; LMULMAX1RV64-NEXT: ld a2, 0(a0)
-; LMULMAX1RV64-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
-; LMULMAX1RV64-NEXT: ld a0, 16(a0)
-; LMULMAX1RV64-NEXT: vmv.v.x v8, a2
-; LMULMAX1RV64-NEXT: vslide1down.vx v8, v8, a1
-; LMULMAX1RV64-NEXT: vslide1down.vx v8, v8, a0
-; LMULMAX1RV64-NEXT: vslidedown.vi v8, v8, 1
-; LMULMAX1RV64-NEXT: li a0, 127
-; LMULMAX1RV64-NEXT: vand.vx v8, v8, a0
-; LMULMAX1RV64-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
-; LMULMAX1RV64-NEXT: vzext.vf2 v9, v8
-; LMULMAX1RV64-NEXT: vfwcvt.f.xu.v v8, v9
-; LMULMAX1RV64-NEXT: ret
+; ZVFH64-LABEL: ui2fp_v3i7_v3f32:
+; ZVFH64: # %bb.0:
+; ZVFH64-NEXT: ld a1, 8(a0)
+; ZVFH64-NEXT: ld a2, 0(a0)
+; ZVFH64-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
+; ZVFH64-NEXT: ld a0, 16(a0)
+; ZVFH64-NEXT: vmv.v.x v8, a2
+; ZVFH64-NEXT: vslide1down.vx v8, v8, a1
+; ZVFH64-NEXT: vslide1down.vx v8, v8, a0
+; ZVFH64-NEXT: vslidedown.vi v8, v8, 1
+; ZVFH64-NEXT: li a0, 127
+; ZVFH64-NEXT: vand.vx v8, v8, a0
+; ZVFH64-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
+; ZVFH64-NEXT: vzext.vf2 v9, v8
+; ZVFH64-NEXT: vfwcvt.f.xu.v v8, v9
+; ZVFH64-NEXT: ret
;
-; LMULMAX8RV32ZVFHMIN-LABEL: ui2fp_v3i7_v3f32:
-; LMULMAX8RV32ZVFHMIN: # %bb.0:
-; LMULMAX8RV32ZVFHMIN-NEXT: lw a1, 4(a0)
-; LMULMAX8RV32ZVFHMIN-NEXT: lw a2, 0(a0)
-; LMULMAX8RV32ZVFHMIN-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
-; LMULMAX8RV32ZVFHMIN-NEXT: lw a0, 8(a0)
-; LMULMAX8RV32ZVFHMIN-NEXT: vmv.v.x v8, a2
-; LMULMAX8RV32ZVFHMIN-NEXT: vslide1down.vx v8, v8, a1
-; LMULMAX8RV32ZVFHMIN-NEXT: vslide1down.vx v8, v8, a0
-; LMULMAX8RV32ZVFHMIN-NEXT: vslidedown.vi v8, v8, 1
-; LMULMAX8RV32ZVFHMIN-NEXT: li a0, 127
-; LMULMAX8RV32ZVFHMIN-NEXT: vand.vx v8, v8, a0
-; LMULMAX8RV32ZVFHMIN-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
-; LMULMAX8RV32ZVFHMIN-NEXT: vzext.vf2 v9, v8
-; LMULMAX8RV32ZVFHMIN-NEXT: vfwcvt.f.xu.v v8, v9
-; LMULMAX8RV32ZVFHMIN-NEXT: ret
+; ZVFHMIN32-LABEL: ui2fp_v3i7_v3f32:
+; ZVFHMIN32: # %bb.0:
+; ZVFHMIN32-NEXT: lw a1, 4(a0)
+; ZVFHMIN32-NEXT: lw a2, 0(a0)
+; ZVFHMIN32-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
+; ZVFHMIN32-NEXT: lw a0, 8(a0)
+; ZVFHMIN32-NEXT: vmv.v.x v8, a2
+; ZVFHMIN32-NEXT: vslide1down.vx v8, v8, a1
+; ZVFHMIN32-NEXT: vslide1down.vx v8, v8, a0
+; ZVFHMIN32-NEXT: vslidedown.vi v8, v8, 1
+; ZVFHMIN32-NEXT: li a0, 127
+; ZVFHMIN32-NEXT: vand.vx v8, v8, a0
+; ZVFHMIN32-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
+; ZVFHMIN32-NEXT: vzext.vf2 v9, v8
+; ZVFHMIN32-NEXT: vfwcvt.f.xu.v v8, v9
+; ZVFHMIN32-NEXT: ret
;
-; LMULMAX8RV64ZVFHMIN-LABEL: ui2fp_v3i7_v3f32:
-; LMULMAX8RV64ZVFHMIN: # %bb.0:
-; LMULMAX8RV64ZVFHMIN-NEXT: ld a1, 8(a0)
-; LMULMAX8RV64ZVFHMIN-NEXT: ld a2, 0(a0)
-; LMULMAX8RV64ZVFHMIN-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
-; LMULMAX8RV64ZVFHMIN-NEXT: ld a0, 16(a0)
-; LMULMAX8RV64ZVFHMIN-NEXT: vmv.v.x v8, a2
-; LMULMAX8RV64ZVFHMIN-NEXT: vslide1down.vx v8, v8, a1
-; LMULMAX8RV64ZVFHMIN-NEXT: vslide1down.vx v8, v8, a0
-; LMULMAX8RV64ZVFHMIN-NEXT: vslidedown.vi v8, v8, 1
-; LMULMAX8RV64ZVFHMIN-NEXT: li a0, 127
-; LMULMAX8RV64ZVFHMIN-NEXT: vand.vx v8, v8, a0
-; LMULMAX8RV64ZVFHMIN-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
-; LMULMAX8RV64ZVFHMIN-NEXT: vzext.vf2 v9, v8
-; LMULMAX8RV64ZVFHMIN-NEXT: vfwcvt.f.xu.v v8, v9
-; LMULMAX8RV64ZVFHMIN-NEXT: ret
+; ZVFHMIN64-LABEL: ui2fp_v3i7_v3f32:
+; ZVFHMIN64: # %bb.0:
+; ZVFHMIN64-NEXT: ld a1, 8(a0)
+; ZVFHMIN64-NEXT: ld a2, 0(a0)
+; ZVFHMIN64-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
+; ZVFHMIN64-NEXT: ld a0, 16(a0)
+; ZVFHMIN64-NEXT: vmv.v.x v8, a2
+; ZVFHMIN64-NEXT: vslide1down.vx v8, v8, a1
+; ZVFHMIN64-NEXT: vslide1down.vx v8, v8, a0
+; ZVFHMIN64-NEXT: vslidedown.vi v8, v8, 1
+; ZVFHMIN64-NEXT: li a0, 127
+; ZVFHMIN64-NEXT: vand.vx v8, v8, a0
+; ZVFHMIN64-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
+; ZVFHMIN64-NEXT: vzext.vf2 v9, v8
+; ZVFHMIN64-NEXT: vfwcvt.f.xu.v v8, v9
+; ZVFHMIN64-NEXT: ret
%z = uitofp <3 x i7> %x to <3 x float>
ret <3 x float> %z
}
@@ -357,26 +287,13 @@ define <3 x float> @ui2fp_v3i1_v3f32(<3 x i1> %x) {
}
define void @si2fp_v8i32_v8f32(ptr %x, ptr %y) {
-; LMULMAX8-LABEL: si2fp_v8i32_v8f32:
-; LMULMAX8: # %bb.0:
-; LMULMAX8-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; LMULMAX8-NEXT: vle32.v v8, (a0)
-; LMULMAX8-NEXT: vfcvt.f.x.v v8, v8
-; LMULMAX8-NEXT: vse32.v v8, (a1)
-; LMULMAX8-NEXT: ret
-;
-; LMULMAX1-LABEL: si2fp_v8i32_v8f32:
-; LMULMAX1: # %bb.0:
-; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; LMULMAX1-NEXT: addi a2, a0, 16
-; LMULMAX1-NEXT: vle32.v v8, (a2)
-; LMULMAX1-NEXT: vle32.v v9, (a0)
-; LMULMAX1-NEXT: vfcvt.f.x.v v8, v8
-; LMULMAX1-NEXT: vfcvt.f.x.v v9, v9
-; LMULMAX1-NEXT: vse32.v v9, (a1)
-; LMULMAX1-NEXT: addi a1, a1, 16
-; LMULMAX1-NEXT: vse32.v v8, (a1)
-; LMULMAX1-NEXT: ret
+; CHECK-LABEL: si2fp_v8i32_v8f32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; CHECK-NEXT: vle32.v v8, (a0)
+; CHECK-NEXT: vfcvt.f.x.v v8, v8
+; CHECK-NEXT: vse32.v v8, (a1)
+; CHECK-NEXT: ret
%a = load <8 x i32>, ptr %x
%d = sitofp <8 x i32> %a to <8 x float>
store <8 x float> %d, ptr %y
@@ -384,26 +301,13 @@ define void @si2fp_v8i32_v8f32(ptr %x, ptr %y) {
}
define void @ui2fp_v8i32_v8f32(ptr %x, ptr %y) {
-; LMULMAX8-LABEL: ui2fp_v8i32_v8f32:
-; LMULMAX8: # %bb.0:
-; LMULMAX8-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; LMULMAX8-NEXT: vle32.v v8, (a0)
-; LMULMAX8-NEXT: vfcvt.f.xu.v v8, v8
-; LMULMAX8-NEXT: vse32.v v8, (a1)
-; LMULMAX8-NEXT: ret
-;
-; LMULMAX1-LABEL: ui2fp_v8i32_v8f32:
-; LMULMAX1: # %bb.0:
-; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; LMULMAX1-NEXT: addi a2, a0, 16
-; LMULMAX1-NEXT: vle32.v v8, (a2)
-; LMULMAX1-NEXT: vle32.v v9, (a0)
-; LMULMAX1-NEXT: vfcvt.f.xu.v v8, v8
-; LMULMAX1-NEXT: vfcvt.f.xu.v v9, v9
-; LMULMAX1-NEXT: vse32.v v9, (a1)
-; LMULMAX1-NEXT: addi a1, a1, 16
-; LMULMAX1-NEXT: vse32.v v8, (a1)
-; LMULMAX1-NEXT: ret
+; CHECK-LABEL: ui2fp_v8i32_v8f32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; CHECK-NEXT: vle32.v v8, (a0)
+; CHECK-NEXT: vfcvt.f.xu.v v8, v8
+; CHECK-NEXT: vse32.v v8, (a1)
+; CHECK-NEXT: ret
%a = load <8 x i32>, ptr %x
%d = uitofp <8 x i32> %a to <8 x float>
store <8 x float> %d, ptr %y
@@ -411,61 +315,25 @@ define void @ui2fp_v8i32_v8f32(ptr %x, ptr %y) {
}
define <8 x float> @si2fp_v8i1_v8f32(<8 x i1> %x) {
-; LMULMAX8-LABEL: si2fp_v8i1_v8f32:
-; LMULMAX8: # %bb.0:
-; LMULMAX8-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; LMULMAX8-NEXT: vmv.v.i v8, 0
-; LMULMAX8-NEXT: vmerge.vim v10, v8, -1, v0
-; LMULMAX8-NEXT: vfwcvt.f.x.v v8, v10
-; LMULMAX8-NEXT: ret
-;
-; LMULMAX1-LABEL: si2fp_v8i1_v8f32:
-; LMULMAX1: # %bb.0:
-; LMULMAX1-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
-; LMULMAX1-NEXT: vmv.v.i v9, 0
-; LMULMAX1-NEXT: vmerge.vim v10, v9, -1, v0
-; LMULMAX1-NEXT: vfwcvt.f.x.v v8, v10
-; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
-; LMULMAX1-NEXT: vmv.v.i v10, 0
-; LMULMAX1-NEXT: vmerge.vim v10, v10, 1, v0
-; LMULMAX1-NEXT: vsetivli zero, 4, e8, mf2, ta, ma
-; LMULMAX1-NEXT: vslidedown.vi v10, v10, 4
-; LMULMAX1-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
-; LMULMAX1-NEXT: vmsne.vi v0, v10, 0
-; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
-; LMULMAX1-NEXT: vmerge.vim v10, v9, -1, v0
-; LMULMAX1-NEXT: vfwcvt.f.x.v v9, v10
-; LMULMAX1-NEXT: ret
+; CHECK-LABEL: si2fp_v8i1_v8f32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT: vmv.v.i v8, 0
+; CHECK-NEXT: vmerge.vim v10, v8, -1, v0
+; CHECK-NEXT: vfwcvt.f.x.v v8, v10
+; CHECK-NEXT: ret
%z = sitofp <8 x i1> %x to <8 x float>
ret <8 x float> %z
}
define <8 x float> @ui2fp_v8i1_v8f32(<8 x i1> %x) {
-; LMULMAX8-LABEL: ui2fp_v8i1_v8f32:
-; LMULMAX8: # %bb.0:
-; LMULMAX8-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; LMULMAX8-NEXT: vmv.v.i v8, 0
-; LMULMAX8-NEXT: vmerge.vim v10, v8, 1, v0
-; LMULMAX8-NEXT: vfwcvt.f.xu.v v8, v10
-; LMULMAX8-NEXT: ret
-;
-; LMULMAX1-LABEL: ui2fp_v8i1_v8f32:
-; LMULMAX1: # %bb.0:
-; LMULMAX1-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
-; LMULMAX1-NEXT: vmv.v.i v9, 0
-; LMULMAX1-NEXT: vmerge.vim v10, v9, 1, v0
-; LMULMAX1-NEXT: vfwcvt.f.xu.v v8, v10
-; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
-; LMULMAX1-NEXT: vmv.v.i v10, 0
-; LMULMAX1-NEXT: vmerge.vim v10, v10, 1, v0
-; LMULMAX1-NEXT: vsetivli zero, 4, e8, mf2, ta, ma
-; LMULMAX1-NEXT: vslidedown.vi v10, v10, 4
-; LMULMAX1-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
-; LMULMAX1-NEXT: vmsne.vi v0, v10, 0
-; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
-; LMULMAX1-NEXT: vmerge.vim v10, v9, 1, v0
-; LMULMAX1-NEXT: vfwcvt.f.xu.v v9, v10
-; LMULMAX1-NEXT: ret
+; CHECK-LABEL: ui2fp_v8i1_v8f32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT: vmv.v.i v8, 0
+; CHECK-NEXT: vmerge.vim v10, v8, 1, v0
+; CHECK-NEXT: vfwcvt.f.xu.v v8, v10
+; CHECK-NEXT: ret
%z = uitofp <8 x i1> %x to <8 x float>
ret <8 x float> %z
}
@@ -501,44 +369,14 @@ define void @ui2fp_v2i16_v2f64(ptr %x, ptr %y) {
}
define void @si2fp_v8i16_v8f64(ptr %x, ptr %y) {
-; LMULMAX8-LABEL: si2fp_v8i16_v8f64:
-; LMULMAX8: # %bb.0:
-; LMULMAX8-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; LMULMAX8-NEXT: vle16.v v8, (a0)
-; LMULMAX8-NEXT: vsext.vf2 v10, v8
-; LMULMAX8-NEXT: vfwcvt.f.x.v v12, v10
-; LMULMAX8-NEXT: vse64.v v12, (a1)
-; LMULMAX8-NEXT: ret
-;
-; LMULMAX1-LABEL: si2fp_v8i16_v8f64:
-; LMULMAX1: # %bb.0:
-; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; LMULMAX1-NEXT: vle16.v v8, (a0)
-; LMULMAX1-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
-; LMULMAX1-NEXT: vslidedown.vi v9, v8, 2
-; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
-; LMULMAX1-NEXT: vsext.vf2 v10, v9
-; LMULMAX1-NEXT: vfwcvt.f.x.v v9, v10
-; LMULMAX1-NEXT: vsext.vf2 v10, v8
-; LMULMAX1-NEXT: vfwcvt.f.x.v v11, v10
-; LMULMAX1-NEXT: vsetivli zero, 4, e16, m1, ta, ma
-; LMULMAX1-NEXT: vslidedown.vi v8, v8, 4
-; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
-; LMULMAX1-NEXT: vsext.vf2 v10, v8
-; LMULMAX1-NEXT: vfwcvt.f.x.v v12, v10
-; LMULMAX1-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
-; LMULMAX1-NEXT: vslidedown.vi v8, v8, 2
-; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
-; LMULMAX1-NEXT: vsext.vf2 v10, v8
-; LMULMAX1-NEXT: vfwcvt.f.x.v v8, v10
-; LMULMAX1-NEXT: addi a0, a1, 48
-; LMULMAX1-NEXT: vse64.v v8, (a0)
-; LMULMAX1-NEXT: addi a0, a1, 32
-; LMULMAX1-NEXT: vse64.v v12, (a0)
-; LMULMAX1-NEXT: vse64.v v11, (a1)
-; LMULMAX1-NEXT: addi a1, a1, 16
-; LMULMAX1-NEXT: vse64.v v9, (a1)
-; LMULMAX1-NEXT: ret
+; CHECK-LABEL: si2fp_v8i16_v8f64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; CHECK-NEXT: vle16.v v8, (a0)
+; CHECK-NEXT: vsext.vf2 v10, v8
+; CHECK-NEXT: vfwcvt.f.x.v v12, v10
+; CHECK-NEXT: vse64.v v12, (a1)
+; CHECK-NEXT: ret
%a = load <8 x i16>, ptr %x
%d = sitofp <8 x i16> %a to <8 x double>
store <8 x double> %d, ptr %y
@@ -546,44 +384,14 @@ define void @si2fp_v8i16_v8f64(ptr %x, ptr %y) {
}
define void @ui2fp_v8i16_v8f64(ptr %x, ptr %y) {
-; LMULMAX8-LABEL: ui2fp_v8i16_v8f64:
-; LMULMAX8: # %bb.0:
-; LMULMAX8-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; LMULMAX8-NEXT: vle16.v v8, (a0)
-; LMULMAX8-NEXT: vzext.vf2 v10, v8
-; LMULMAX8-NEXT: vfwcvt.f.xu.v v12, v10
-; LMULMAX8-NEXT: vse64.v v12, (a1)
-; LMULMAX8-NEXT: ret
-;
-; LMULMAX1-LABEL: ui2fp_v8i16_v8f64:
-; LMULMAX1: # %bb.0:
-; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; LMULMAX1-NEXT: vle16.v v8, (a0)
-; LMULMAX1-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
-; LMULMAX1-NEXT: vslidedown.vi v9, v8, 2
-; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
-; LMULMAX1-NEXT: vzext.vf2 v10, v9
-; LMULMAX1-NEXT: vfwcvt.f.xu.v v9, v10
-; LMULMAX1-NEXT: vzext.vf2 v10, v8
-; LMULMAX1-NEXT: vfwcvt.f.xu.v v11, v10
-; LMULMAX1-NEXT: vsetivli zero, 4, e16, m1, ta, ma
-; LMULMAX1-NEXT: vslidedown.vi v8, v8, 4
-; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
-; LMULMAX1-NEXT: vzext.vf2 v10, v8
-; LMULMAX1-NEXT: vfwcvt.f.xu.v v12, v10
-; LMULMAX1-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
-; LMULMAX1-NEXT: vslidedown.vi v8, v8, 2
-; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
-; LMULMAX1-NEXT: vzext.vf2 v10, v8
-; LMULMAX1-NEXT: vfwcvt.f.xu.v v8, v10
-; LMULMAX1-NEXT: addi a0, a1, 48
-; LMULMAX1-NEXT: vse64.v v8, (a0)
-; LMULMAX1-NEXT: addi a0, a1, 32
-; LMULMAX1-NEXT: vse64.v v12, (a0)
-; LMULMAX1-NEXT: vse64.v v11, (a1)
-; LMULMAX1-NEXT: addi a1, a1, 16
-; LMULMAX1-NEXT: vse64.v v9, (a1)
-; LMULMAX1-NEXT: ret
+; CHECK-LABEL: ui2fp_v8i16_v8f64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; CHECK-NEXT: vle16.v v8, (a0)
+; CHECK-NEXT: vzext.vf2 v10, v8
+; CHECK-NEXT: vfwcvt.f.xu.v v12, v10
+; CHECK-NEXT: vse64.v v12, (a1)
+; CHECK-NEXT: ret
%a = load <8 x i16>, ptr %x
%d = uitofp <8 x i16> %a to <8 x double>
store <8 x double> %d, ptr %y
@@ -591,103 +399,25 @@ define void @ui2fp_v8i16_v8f64(ptr %x, ptr %y) {
}
define <8 x double> @si2fp_v8i1_v8f64(<8 x i1> %x) {
-; LMULMAX8-LABEL: si2fp_v8i1_v8f64:
-; LMULMAX8: # %bb.0:
-; LMULMAX8-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; LMULMAX8-NEXT: vmv.v.i v8, 0
-; LMULMAX8-NEXT: vmerge.vim v12, v8, -1, v0
-; LMULMAX8-NEXT: vfwcvt.f.x.v v8, v12
-; LMULMAX8-NEXT: ret
-;
-; LMULMAX1-LABEL: si2fp_v8i1_v8f64:
-; LMULMAX1: # %bb.0:
-; LMULMAX1-NEXT: vmv1r.v v10, v0
-; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
-; LMULMAX1-NEXT: vmv.v.i v11, 0
-; LMULMAX1-NEXT: vmerge.vim v9, v11, -1, v0
-; LMULMAX1-NEXT: vfwcvt.f.x.v v8, v9
-; LMULMAX1-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
-; LMULMAX1-NEXT: vmv.v.i v12, 0
-; LMULMAX1-NEXT: vmerge.vim v9, v12, 1, v0
-; LMULMAX1-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
-; LMULMAX1-NEXT: vslidedown.vi v9, v9, 2
-; LMULMAX1-NEXT: vsetivli zero, 2, e8, mf8, ta, ma
-; LMULMAX1-NEXT: vmsne.vi v0, v9, 0
-; LMULMAX1-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
-; LMULMAX1-NEXT: vmerge.vim v13, v11, -1, v0
-; LMULMAX1-NEXT: vfwcvt.f.x.v v9, v13
-; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
-; LMULMAX1-NEXT: vmv.v.i v13, 0
-; LMULMAX1-NEXT: vmv1r.v v0, v10
-; LMULMAX1-NEXT: vmerge.vim v10, v13, 1, v0
-; LMULMAX1-NEXT: vsetivli zero, 4, e8, mf2, ta, ma
-; LMULMAX1-NEXT: vslidedown.vi v10, v10, 4
-; LMULMAX1-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
-; LMULMAX1-NEXT: vmsne.vi v0, v10, 0
-; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
-; LMULMAX1-NEXT: vmerge.vim v13, v11, -1, v0
-; LMULMAX1-NEXT: vfwcvt.f.x.v v10, v13
-; LMULMAX1-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
-; LMULMAX1-NEXT: vmerge.vim v12, v12, 1, v0
-; LMULMAX1-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
-; LMULMAX1-NEXT: vslidedown.vi v12, v12, 2
-; LMULMAX1-NEXT: vsetivli zero, 2, e8, mf8, ta, ma
-; LMULMAX1-NEXT: vmsne.vi v0, v12, 0
-; LMULMAX1-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
-; LMULMAX1-NEXT: vmerge.vim v12, v11, -1, v0
-; LMULMAX1-NEXT: vfwcvt.f.x.v v11, v12
-; LMULMAX1-NEXT: ret
+; CHECK-LABEL: si2fp_v8i1_v8f64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; CHECK-NEXT: vmv.v.i v8, 0
+; CHECK-NEXT: vmerge.vim v12, v8, -1, v0
+; CHECK-NEXT: vfwcvt.f.x.v v8, v12
+; CHECK-NEXT: ret
%z = sitofp <8 x i1> %x to <8 x double>
ret <8 x double> %z
}
define <8 x double> @ui2fp_v8i1_v8f64(<8 x i1> %x) {
-; LMULMAX8-LABEL: ui2fp_v8i1_v8f64:
-; LMULMAX8: # %bb.0:
-; LMULMAX8-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; LMULMAX8-NEXT: vmv.v.i v8, 0
-; LMULMAX8-NEXT: vmerge.vim v12, v8, 1, v0
-; LMULMAX8-NEXT: vfwcvt.f.xu.v v8, v12
-; LMULMAX8-NEXT: ret
-;
-; LMULMAX1-LABEL: ui2fp_v8i1_v8f64:
-; LMULMAX1: # %bb.0:
-; LMULMAX1-NEXT: vmv1r.v v10, v0
-; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
-; LMULMAX1-NEXT: vmv.v.i v11, 0
-; LMULMAX1-NEXT: vmerge.vim v9, v11, 1, v0
-; LMULMAX1-NEXT: vfwcvt.f.xu.v v8, v9
-; LMULMAX1-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
-; LMULMAX1-NEXT: vmv.v.i v12, 0
-; LMULMAX1-NEXT: vmerge.vim v9, v12, 1, v0
-; LMULMAX1-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
-; LMULMAX1-NEXT: vslidedown.vi v9, v9, 2
-; LMULMAX1-NEXT: vsetivli zero, 2, e8, mf8, ta, ma
-; LMULMAX1-NEXT: vmsne.vi v0, v9, 0
-; LMULMAX1-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
-; LMULMAX1-NEXT: vmerge.vim v13, v11, 1, v0
-; LMULMAX1-NEXT: vfwcvt.f.xu.v v9, v13
-; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
-; LMULMAX1-NEXT: vmv.v.i v13, 0
-; LMULMAX1-NEXT: vmv1r.v v0, v10
-; LMULMAX1-NEXT: vmerge.vim v10, v13, 1, v0
-; LMULMAX1-NEXT: vsetivli zero, 4, e8, mf2, ta, ma
-; LMULMAX1-NEXT: vslidedown.vi v10, v10, 4
-; LMULMAX1-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
-; LMULMAX1-NEXT: vmsne.vi v0, v10, 0
-; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
-; LMULMAX1-NEXT: vmerge.vim v13, v11, 1, v0
-; LMULMAX1-NEXT: vfwcvt.f.xu.v v10, v13
-; LMULMAX1-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
-; LMULMAX1-NEXT: vmerge.vim v12, v12, 1, v0
-; LMULMAX1-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
-; LMULMAX1-NEXT: vslidedown.vi v12, v12, 2
-; LMULMAX1-NEXT: vsetivli zero, 2, e8, mf8, ta, ma
-; LMULMAX1-NEXT: vmsne.vi v0, v12, 0
-; LMULMAX1-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
-; LMULMAX1-NEXT: vmerge.vim v12, v11, 1, v0
-; LMULMAX1-NEXT: vfwcvt.f.xu.v v11, v12
-; LMULMAX1-NEXT: ret
+; CHECK-LABEL: ui2fp_v8i1_v8f64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; CHECK-NEXT: vmv.v.i v8, 0
+; CHECK-NEXT: vmerge.vim v12, v8, 1, v0
+; CHECK-NEXT: vfwcvt.f.xu.v v8, v12
+; CHECK-NEXT: ret
%z = uitofp <8 x i1> %x to <8 x double>
ret <8 x double> %z
}
@@ -725,107 +455,57 @@ define void @ui2fp_v2i64_v2f16(ptr %x, ptr %y) {
}
define <2 x half> @si2fp_v2i1_v2f16(<2 x i1> %x) {
-; LMULMAX8ZVFH-LABEL: si2fp_v2i1_v2f16:
-; LMULMAX8ZVFH: # %bb.0:
-; LMULMAX8ZVFH-NEXT: vsetivli zero, 2, e8, mf8, ta, ma
-; LMULMAX8ZVFH-NEXT: vmv.v.i v8, 0
-; LMULMAX8ZVFH-NEXT: vmerge.vim v9, v8, -1, v0
-; LMULMAX8ZVFH-NEXT: vfwcvt.f.x.v v8, v9
-; LMULMAX8ZVFH-NEXT: ret
-;
-; LMULMAX1-LABEL: si2fp_v2i1_v2f16:
-; LMULMAX1: # %bb.0:
-; LMULMAX1-NEXT: vsetivli zero, 2, e8, mf8, ta, ma
-; LMULMAX1-NEXT: vmv.v.i v8, 0
-; LMULMAX1-NEXT: vmerge.vim v9, v8, -1, v0
-; LMULMAX1-NEXT: vfwcvt.f.x.v v8, v9
-; LMULMAX1-NEXT: ret
+; ZVFH-LABEL: si2fp_v2i1_v2f16:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: vsetivli zero, 2, e8, mf8, ta, ma
+; ZVFH-NEXT: vmv.v.i v8, 0
+; ZVFH-NEXT: vmerge.vim v9, v8, -1, v0
+; ZVFH-NEXT: vfwcvt.f.x.v v8, v9
+; ZVFH-NEXT: ret
;
-; LMULMAX8ZVFHMIN-LABEL: si2fp_v2i1_v2f16:
-; LMULMAX8ZVFHMIN: # %bb.0:
-; LMULMAX8ZVFHMIN-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
-; LMULMAX8ZVFHMIN-NEXT: vmv.v.i v8, 0
-; LMULMAX8ZVFHMIN-NEXT: vmerge.vim v8, v8, -1, v0
-; LMULMAX8ZVFHMIN-NEXT: vfwcvt.f.x.v v9, v8
-; LMULMAX8ZVFHMIN-NEXT: vfncvt.f.f.w v8, v9
-; LMULMAX8ZVFHMIN-NEXT: ret
+; ZVFHMIN-LABEL: si2fp_v2i1_v2f16:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vmv.v.i v8, 0
+; ZVFHMIN-NEXT: vmerge.vim v8, v8, -1, v0
+; ZVFHMIN-NEXT: vfwcvt.f.x.v v9, v8
+; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v9
+; ZVFHMIN-NEXT: ret
%z = sitofp <2 x i1> %x to <2 x half>
ret <2 x half> %z
}
define <2 x half> @ui2fp_v2i1_v2f16(<2 x i1> %x) {
-; LMULMAX8ZVFH-LABEL: ui2fp_v2i1_v2f16:
-; LMULMAX8ZVFH: # %bb.0:
-; LMULMAX8ZVFH-NEXT: vsetivli zero, 2, e8, mf8, ta, ma
-; LMULMAX8ZVFH-NEXT: vmv.v.i v8, 0
-; LMULMAX8ZVFH-NEXT: vmerge.vim v9, v8, 1, v0
-; LMULMAX8ZVFH-NEXT: vfwcvt.f.xu.v v8, v9
-; LMULMAX8ZVFH-NEXT: ret
+; ZVFH-LABEL: ui2fp_v2i1_v2f16:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: vsetivli zero, 2, e8, mf8, ta, ma
+; ZVFH-NEXT: vmv.v.i v8, 0
+; ZVFH-NEXT: vmerge.vim v9, v8, 1, v0
+; ZVFH-NEXT: vfwcvt.f.xu.v v8, v9
+; ZVFH-NEXT: ret
;
-; LMULMAX1-LABEL: ui2fp_v2i1_v2f16:
-; LMULMAX1: # %bb.0:
-; LMULMAX1-NEXT: vsetivli zero, 2, e8, mf8, ta, ma
-; LMULMAX1-NEXT: vmv.v.i v8, 0
-; LMULMAX1-NEXT: vmerge.vim v9, v8, 1, v0
-; LMULMAX1-NEXT: vfwcvt.f.xu.v v8, v9
-; LMULMAX1-NEXT: ret
-;
-; LMULMAX8ZVFHMIN-LABEL: ui2fp_v2i1_v2f16:
-; LMULMAX8ZVFHMIN: # %bb.0:
-; LMULMAX8ZVFHMIN-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
-; LMULMAX8ZVFHMIN-NEXT: vmv.v.i v8, 0
-; LMULMAX8ZVFHMIN-NEXT: vmerge.vim v8, v8, 1, v0
-; LMULMAX8ZVFHMIN-NEXT: vfwcvt.f.xu.v v9, v8
-; LMULMAX8ZVFHMIN-NEXT: vfncvt.f.f.w v8, v9
-; LMULMAX8ZVFHMIN-NEXT: ret
+; ZVFHMIN-LABEL: ui2fp_v2i1_v2f16:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vmv.v.i v8, 0
+; ZVFHMIN-NEXT: vmerge.vim v8, v8, 1, v0
+; ZVFHMIN-NEXT: vfwcvt.f.xu.v v9, v8
+; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v9
+; ZVFHMIN-NEXT: ret
%z = uitofp <2 x i1> %x to <2 x half>
ret <2 x half> %z
}
define void @si2fp_v8i64_v8f16(ptr %x, ptr %y) {
-; LMULMAX8-LABEL: si2fp_v8i64_v8f16:
-; LMULMAX8: # %bb.0:
-; LMULMAX8-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; LMULMAX8-NEXT: vle64.v v8, (a0)
-; LMULMAX8-NEXT: vfncvt.f.x.w v12, v8
-; LMULMAX8-NEXT: vsetvli zero, zero, e16, m1, ta, ma
-; LMULMAX8-NEXT: vfncvt.f.f.w v8, v12
-; LMULMAX8-NEXT: vse16.v v8, (a1)
-; LMULMAX8-NEXT: ret
-;
-; LMULMAX1-LABEL: si2fp_v8i64_v8f16:
-; LMULMAX1: # %bb.0:
-; LMULMAX1-NEXT: addi a2, a0, 48
-; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
-; LMULMAX1-NEXT: vle64.v v8, (a2)
-; LMULMAX1-NEXT: addi a2, a0, 32
-; LMULMAX1-NEXT: vle64.v v9, (a0)
-; LMULMAX1-NEXT: vle64.v v10, (a2)
-; LMULMAX1-NEXT: addi a0, a0, 16
-; LMULMAX1-NEXT: vle64.v v11, (a0)
-; LMULMAX1-NEXT: vfncvt.f.x.w v12, v9
-; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
-; LMULMAX1-NEXT: vfncvt.f.f.w v9, v12
-; LMULMAX1-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
-; LMULMAX1-NEXT: vfncvt.f.x.w v12, v11
-; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
-; LMULMAX1-NEXT: vfncvt.f.f.w v11, v12
-; LMULMAX1-NEXT: vsetivli zero, 4, e16, m1, tu, ma
-; LMULMAX1-NEXT: vslideup.vi v9, v11, 2
-; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
-; LMULMAX1-NEXT: vfncvt.f.x.w v11, v10
-; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
-; LMULMAX1-NEXT: vfncvt.f.f.w v10, v11
-; LMULMAX1-NEXT: vsetivli zero, 6, e16, m1, tu, ma
-; LMULMAX1-NEXT: vslideup.vi v9, v10, 4
-; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
-; LMULMAX1-NEXT: vfncvt.f.x.w v10, v8
-; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
-; LMULMAX1-NEXT: vfncvt.f.f.w v8, v10
-; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; LMULMAX1-NEXT: vslideup.vi v9, v8, 6
-; LMULMAX1-NEXT: vse16.v v9, (a1)
-; LMULMAX1-NEXT: ret
+; CHECK-LABEL: si2fp_v8i64_v8f16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; CHECK-NEXT: vle64.v v8, (a0)
+; CHECK-NEXT: vfncvt.f.x.w v12, v8
+; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, ma
+; CHECK-NEXT: vfncvt.f.f.w v8, v12
+; CHECK-NEXT: vse16.v v8, (a1)
+; CHECK-NEXT: ret
%a = load <8 x i64>, ptr %x
%d = sitofp <8 x i64> %a to <8 x half>
store <8 x half> %d, ptr %y
@@ -833,49 +513,15 @@ define void @si2fp_v8i64_v8f16(ptr %x, ptr %y) {
}
define void @ui2fp_v8i64_v8f16(ptr %x, ptr %y) {
-; LMULMAX8-LABEL: ui2fp_v8i64_v8f16:
-; LMULMAX8: # %bb.0:
-; LMULMAX8-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; LMULMAX8-NEXT: vle64.v v8, (a0)
-; LMULMAX8-NEXT: vfncvt.f.xu.w v12, v8
-; LMULMAX8-NEXT: vsetvli zero, zero, e16, m1, ta, ma
-; LMULMAX8-NEXT: vfncvt.f.f.w v8, v12
-; LMULMAX8-NEXT: vse16.v v8, (a1)
-; LMULMAX8-NEXT: ret
-;
-; LMULMAX1-LABEL: ui2fp_v8i64_v8f16:
-; LMULMAX1: # %bb.0:
-; LMULMAX1-NEXT: addi a2, a0, 48
-; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
-; LMULMAX1-NEXT: vle64.v v8, (a2)
-; LMULMAX1-NEXT: addi a2, a0, 32
-; LMULMAX1-NEXT: vle64.v v9, (a0)
-; LMULMAX1-NEXT: vle64.v v10, (a2)
-; LMULMAX1-NEXT: addi a0, a0, 16
-; LMULMAX1-NEXT: vle64.v v11, (a0)
-; LMULMAX1-NEXT: vfncvt.f.xu.w v12, v9
-; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
-; LMULMAX1-NEXT: vfncvt.f.f.w v9, v12
-; LMULMAX1-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
-; LMULMAX1-NEXT: vfncvt.f.xu.w v12, v11
-; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
-; LMULMAX1-NEXT: vfncvt.f.f.w v11, v12
-; LMULMAX1-NEXT: vsetivli zero, 4, e16, m1, tu, ma
-; LMULMAX1-NEXT: vslideup.vi v9, v11, 2
-; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
-; LMULMAX1-NEXT: vfncvt.f.xu.w v11, v10
-; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
-; LMULMAX1-NEXT: vfncvt.f.f.w v10, v11
-; LMULMAX1-NEXT: vsetivli zero, 6, e16, m1, tu, ma
-; LMULMAX1-NEXT: vslideup.vi v9, v10, 4
-; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
-; LMULMAX1-NEXT: vfncvt.f.xu.w v10, v8
-; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
-; LMULMAX1-NEXT: vfncvt.f.f.w v8, v10
-; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; LMULMAX1-NEXT: vslideup.vi v9, v8, 6
-; LMULMAX1-NEXT: vse16.v v9, (a1)
-; LMULMAX1-NEXT: ret
+; CHECK-LABEL: ui2fp_v8i64_v8f16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; CHECK-NEXT: vle64.v v8, (a0)
+; CHECK-NEXT: vfncvt.f.xu.w v12, v8
+; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, ma
+; CHECK-NEXT: vfncvt.f.f.w v8, v12
+; CHECK-NEXT: vse16.v v8, (a1)
+; CHECK-NEXT: ret
%a = load <8 x i64>, ptr %x
%d = uitofp <8 x i64> %a to <8 x half>
store <8 x half> %d, ptr %y
@@ -883,59 +529,43 @@ define void @ui2fp_v8i64_v8f16(ptr %x, ptr %y) {
}
define <8 x half> @si2fp_v8i1_v8f16(<8 x i1> %x) {
-; LMULMAX8ZVFH-LABEL: si2fp_v8i1_v8f16:
-; LMULMAX8ZVFH: # %bb.0:
-; LMULMAX8ZVFH-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
-; LMULMAX8ZVFH-NEXT: vmv.v.i v8, 0
-; LMULMAX8ZVFH-NEXT: vmerge.vim v9, v8, -1, v0
-; LMULMAX8ZVFH-NEXT: vfwcvt.f.x.v v8, v9
-; LMULMAX8ZVFH-NEXT: ret
-;
-; LMULMAX1-LABEL: si2fp_v8i1_v8f16:
-; LMULMAX1: # %bb.0:
-; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
-; LMULMAX1-NEXT: vmv.v.i v8, 0
-; LMULMAX1-NEXT: vmerge.vim v9, v8, -1, v0
-; LMULMAX1-NEXT: vfwcvt.f.x.v v8, v9
-; LMULMAX1-NEXT: ret
+; ZVFH-LABEL: si2fp_v8i1_v8f16:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
+; ZVFH-NEXT: vmv.v.i v8, 0
+; ZVFH-NEXT: vmerge.vim v9, v8, -1, v0
+; ZVFH-NEXT: vfwcvt.f.x.v v8, v9
+; ZVFH-NEXT: ret
;
-; LMULMAX8ZVFHMIN-LABEL: si2fp_v8i1_v8f16:
-; LMULMAX8ZVFHMIN: # %bb.0:
-; LMULMAX8ZVFHMIN-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; LMULMAX8ZVFHMIN-NEXT: vmv.v.i v8, 0
-; LMULMAX8ZVFHMIN-NEXT: vmerge.vim v8, v8, -1, v0
-; LMULMAX8ZVFHMIN-NEXT: vfwcvt.f.x.v v10, v8
-; LMULMAX8ZVFHMIN-NEXT: vfncvt.f.f.w v8, v10
-; LMULMAX8ZVFHMIN-NEXT: ret
+; ZVFHMIN-LABEL: si2fp_v8i1_v8f16:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; ZVFHMIN-NEXT: vmv.v.i v8, 0
+; ZVFHMIN-NEXT: vmerge.vim v8, v8, -1, v0
+; ZVFHMIN-NEXT: vfwcvt.f.x.v v10, v8
+; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v10
+; ZVFHMIN-NEXT: ret
%z = sitofp <8 x i1> %x to <8 x half>
ret <8 x half> %z
}
define <8 x half> @ui2fp_v8i1_v8f16(<8 x i1> %x) {
-; LMULMAX8ZVFH-LABEL: ui2fp_v8i1_v8f16:
-; LMULMAX8ZVFH: # %bb.0:
-; LMULMAX8ZVFH-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
-; LMULMAX8ZVFH-NEXT: vmv.v.i v8, 0
-; LMULMAX8ZVFH-NEXT: vmerge.vim v9, v8, 1, v0
-; LMULMAX8ZVFH-NEXT: vfwcvt.f.xu.v v8, v9
-; LMULMAX8ZVFH-NEXT: ret
-;
-; LMULMAX1-LABEL: ui2fp_v8i1_v8f16:
-; LMULMAX1: # %bb.0:
-; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
-; LMULMAX1-NEXT: vmv.v.i v8, 0
-; LMULMAX1-NEXT: vmerge.vim v9, v8, 1, v0
-; LMULMAX1-NEXT: vfwcvt.f.xu.v v8, v9
-; LMULMAX1-NEXT: ret
+; ZVFH-LABEL: ui2fp_v8i1_v8f16:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
+; ZVFH-NEXT: vmv.v.i v8, 0
+; ZVFH-NEXT: vmerge.vim v9, v8, 1, v0
+; ZVFH-NEXT: vfwcvt.f.xu.v v8, v9
+; ZVFH-NEXT: ret
;
-; LMULMAX8ZVFHMIN-LABEL: ui2fp_v8i1_v8f16:
-; LMULMAX8ZVFHMIN: # %bb.0:
-; LMULMAX8ZVFHMIN-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; LMULMAX8ZVFHMIN-NEXT: vmv.v.i v8, 0
-; LMULMAX8ZVFHMIN-NEXT: vmerge.vim v8, v8, 1, v0
-; LMULMAX8ZVFHMIN-NEXT: vfwcvt.f.xu.v v10, v8
-; LMULMAX8ZVFHMIN-NEXT: vfncvt.f.f.w v8, v10
-; LMULMAX8ZVFHMIN-NEXT: ret
+; ZVFHMIN-LABEL: ui2fp_v8i1_v8f16:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; ZVFHMIN-NEXT: vmv.v.i v8, 0
+; ZVFHMIN-NEXT: vmerge.vim v8, v8, 1, v0
+; ZVFHMIN-NEXT: vfwcvt.f.xu.v v10, v8
+; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v10
+; ZVFHMIN-NEXT: ret
%z = uitofp <8 x i1> %x to <8 x half>
ret <8 x half> %z
}
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert-subvector.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert-subvector.ll
index 5d04587..efb1f72 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert-subvector.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert-subvector.ll
@@ -1,13 +1,9 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+m,+v -riscv-v-fixed-length-vector-lmul-max=2 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32,LMULMAX2
-; RUN: llc -mtriple=riscv32 -mattr=+m,+v -riscv-v-fixed-length-vector-lmul-max=1 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32,LMULMAX1
-; RUN: llc -mtriple=riscv64 -mattr=+m,+v -riscv-v-fixed-length-vector-lmul-max=2 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64,LMULMAX2
-; RUN: llc -mtriple=riscv64 -mattr=+m,+v -riscv-v-fixed-length-vector-lmul-max=1 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64,LMULMAX1
+; RUN: llc -mtriple=riscv32 -mattr=+m,+v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32
+; RUN: llc -mtriple=riscv64 -mattr=+m,+v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64
-; RUN: llc -mtriple=riscv32 -mattr=+m,+v -riscv-v-fixed-length-vector-lmul-max=2 -early-live-intervals -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32,LMULMAX2
-; RUN: llc -mtriple=riscv32 -mattr=+m,+v -riscv-v-fixed-length-vector-lmul-max=1 -early-live-intervals -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32,LMULMAX1
-; RUN: llc -mtriple=riscv64 -mattr=+m,+v -riscv-v-fixed-length-vector-lmul-max=2 -early-live-intervals -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64,LMULMAX2
-; RUN: llc -mtriple=riscv64 -mattr=+m,+v -riscv-v-fixed-length-vector-lmul-max=1 -early-live-intervals -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64,LMULMAX1
+; RUN: llc -mtriple=riscv32 -mattr=+m,+v -early-live-intervals -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32
+; RUN: llc -mtriple=riscv64 -mattr=+m,+v -early-live-intervals -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64
define <vscale x 8 x i32> @insert_nxv8i32_v2i32_0(<vscale x 8 x i32> %vec, ptr %svp) {
; CHECK-LABEL: insert_nxv8i32_v2i32_0:
@@ -49,50 +45,26 @@ define <vscale x 8 x i32> @insert_nxv8i32_v2i32_6(<vscale x 8 x i32> %vec, ptr %
}
define <vscale x 8 x i32> @insert_nxv8i32_v8i32_0(<vscale x 8 x i32> %vec, ptr %svp) {
-; LMULMAX2-LABEL: insert_nxv8i32_v8i32_0:
-; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; LMULMAX2-NEXT: vle32.v v12, (a0)
-; LMULMAX2-NEXT: vsetivli zero, 8, e32, m4, tu, ma
-; LMULMAX2-NEXT: vmv.v.v v8, v12
-; LMULMAX2-NEXT: ret
-;
-; LMULMAX1-LABEL: insert_nxv8i32_v8i32_0:
-; LMULMAX1: # %bb.0:
-; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; LMULMAX1-NEXT: vle32.v v12, (a0)
-; LMULMAX1-NEXT: addi a0, a0, 16
-; LMULMAX1-NEXT: vle32.v v16, (a0)
-; LMULMAX1-NEXT: vsetivli zero, 4, e32, m4, tu, ma
-; LMULMAX1-NEXT: vmv.v.v v8, v12
-; LMULMAX1-NEXT: vsetivli zero, 8, e32, m4, tu, ma
-; LMULMAX1-NEXT: vslideup.vi v8, v16, 4
-; LMULMAX1-NEXT: ret
+; CHECK-LABEL: insert_nxv8i32_v8i32_0:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; CHECK-NEXT: vle32.v v12, (a0)
+; CHECK-NEXT: vsetivli zero, 8, e32, m4, tu, ma
+; CHECK-NEXT: vmv.v.v v8, v12
+; CHECK-NEXT: ret
%sv = load <8 x i32>, ptr %svp
%v = call <vscale x 8 x i32> @llvm.vector.insert.v8i32.nxv8i32(<vscale x 8 x i32> %vec, <8 x i32> %sv, i64 0)
ret <vscale x 8 x i32> %v
}
define <vscale x 8 x i32> @insert_nxv8i32_v8i32_8(<vscale x 8 x i32> %vec, ptr %svp) {
-; LMULMAX2-LABEL: insert_nxv8i32_v8i32_8:
-; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; LMULMAX2-NEXT: vle32.v v12, (a0)
-; LMULMAX2-NEXT: vsetivli zero, 16, e32, m4, tu, ma
-; LMULMAX2-NEXT: vslideup.vi v8, v12, 8
-; LMULMAX2-NEXT: ret
-;
-; LMULMAX1-LABEL: insert_nxv8i32_v8i32_8:
-; LMULMAX1: # %bb.0:
-; LMULMAX1-NEXT: addi a1, a0, 16
-; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; LMULMAX1-NEXT: vle32.v v12, (a1)
-; LMULMAX1-NEXT: vle32.v v16, (a0)
-; LMULMAX1-NEXT: vsetivli zero, 12, e32, m4, tu, ma
-; LMULMAX1-NEXT: vslideup.vi v8, v16, 8
-; LMULMAX1-NEXT: vsetivli zero, 16, e32, m4, tu, ma
-; LMULMAX1-NEXT: vslideup.vi v8, v12, 12
-; LMULMAX1-NEXT: ret
+; CHECK-LABEL: insert_nxv8i32_v8i32_8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; CHECK-NEXT: vle32.v v12, (a0)
+; CHECK-NEXT: vsetivli zero, 16, e32, m4, tu, ma
+; CHECK-NEXT: vslideup.vi v8, v12, 8
+; CHECK-NEXT: ret
%sv = load <8 x i32>, ptr %svp
%v = call <vscale x 8 x i32> @llvm.vector.insert.v8i32.nxv8i32(<vscale x 8 x i32> %vec, <8 x i32> %sv, i64 8)
ret <vscale x 8 x i32> %v
@@ -160,29 +132,17 @@ define void @insert_v4i32_undef_v2i32_0(ptr %vp, ptr %svp) {
}
define void @insert_v8i32_v2i32_0(ptr %vp, ptr %svp) {
-; LMULMAX2-LABEL: insert_v8i32_v2i32_0:
-; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
-; LMULMAX2-NEXT: vle32.v v8, (a1)
-; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; LMULMAX2-NEXT: vle32.v v10, (a0)
-; LMULMAX2-NEXT: vsetivli zero, 2, e32, m2, tu, ma
-; LMULMAX2-NEXT: vmv.v.v v10, v8
-; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; LMULMAX2-NEXT: vse32.v v10, (a0)
-; LMULMAX2-NEXT: ret
-;
-; LMULMAX1-LABEL: insert_v8i32_v2i32_0:
-; LMULMAX1: # %bb.0:
-; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
-; LMULMAX1-NEXT: vle32.v v8, (a1)
-; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; LMULMAX1-NEXT: vle32.v v9, (a0)
-; LMULMAX1-NEXT: vsetivli zero, 2, e32, m1, tu, ma
-; LMULMAX1-NEXT: vmv.v.v v9, v8
-; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; LMULMAX1-NEXT: vse32.v v9, (a0)
-; LMULMAX1-NEXT: ret
+; CHECK-LABEL: insert_v8i32_v2i32_0:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
+; CHECK-NEXT: vle32.v v8, (a1)
+; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; CHECK-NEXT: vle32.v v10, (a0)
+; CHECK-NEXT: vsetivli zero, 2, e32, m2, tu, ma
+; CHECK-NEXT: vmv.v.v v10, v8
+; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; CHECK-NEXT: vse32.v v10, (a0)
+; CHECK-NEXT: ret
%sv = load <2 x i32>, ptr %svp
%vec = load <8 x i32>, ptr %vp
%v = call <8 x i32> @llvm.vector.insert.v2i32.v8i32(<8 x i32> %vec, <2 x i32> %sv, i64 0)
@@ -191,27 +151,17 @@ define void @insert_v8i32_v2i32_0(ptr %vp, ptr %svp) {
}
define void @insert_v8i32_v2i32_2(ptr %vp, ptr %svp) {
-; LMULMAX2-LABEL: insert_v8i32_v2i32_2:
-; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
-; LMULMAX2-NEXT: vle32.v v8, (a1)
-; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; LMULMAX2-NEXT: vle32.v v10, (a0)
-; LMULMAX2-NEXT: vsetivli zero, 4, e32, m2, tu, ma
-; LMULMAX2-NEXT: vslideup.vi v10, v8, 2
-; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; LMULMAX2-NEXT: vse32.v v10, (a0)
-; LMULMAX2-NEXT: ret
-;
-; LMULMAX1-LABEL: insert_v8i32_v2i32_2:
-; LMULMAX1: # %bb.0:
-; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
-; LMULMAX1-NEXT: vle32.v v8, (a1)
-; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; LMULMAX1-NEXT: vle32.v v9, (a0)
-; LMULMAX1-NEXT: vslideup.vi v9, v8, 2
-; LMULMAX1-NEXT: vse32.v v9, (a0)
-; LMULMAX1-NEXT: ret
+; CHECK-LABEL: insert_v8i32_v2i32_2:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
+; CHECK-NEXT: vle32.v v8, (a1)
+; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; CHECK-NEXT: vle32.v v10, (a0)
+; CHECK-NEXT: vsetivli zero, 4, e32, m2, tu, ma
+; CHECK-NEXT: vslideup.vi v10, v8, 2
+; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; CHECK-NEXT: vse32.v v10, (a0)
+; CHECK-NEXT: ret
%sv = load <2 x i32>, ptr %svp
%vec = load <8 x i32>, ptr %vp
%v = call <8 x i32> @llvm.vector.insert.v2i32.v8i32(<8 x i32> %vec, <2 x i32> %sv, i64 2)
@@ -220,26 +170,15 @@ define void @insert_v8i32_v2i32_2(ptr %vp, ptr %svp) {
}
define void @insert_v8i32_v2i32_6(ptr %vp, ptr %svp) {
-; LMULMAX2-LABEL: insert_v8i32_v2i32_6:
-; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
-; LMULMAX2-NEXT: vle32.v v8, (a1)
-; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; LMULMAX2-NEXT: vle32.v v10, (a0)
-; LMULMAX2-NEXT: vslideup.vi v10, v8, 6
-; LMULMAX2-NEXT: vse32.v v10, (a0)
-; LMULMAX2-NEXT: ret
-;
-; LMULMAX1-LABEL: insert_v8i32_v2i32_6:
-; LMULMAX1: # %bb.0:
-; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
-; LMULMAX1-NEXT: vle32.v v8, (a1)
-; LMULMAX1-NEXT: addi a0, a0, 16
-; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; LMULMAX1-NEXT: vle32.v v9, (a0)
-; LMULMAX1-NEXT: vslideup.vi v9, v8, 2
-; LMULMAX1-NEXT: vse32.v v9, (a0)
-; LMULMAX1-NEXT: ret
+; CHECK-LABEL: insert_v8i32_v2i32_6:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
+; CHECK-NEXT: vle32.v v8, (a1)
+; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; CHECK-NEXT: vle32.v v10, (a0)
+; CHECK-NEXT: vslideup.vi v10, v8, 6
+; CHECK-NEXT: vse32.v v10, (a0)
+; CHECK-NEXT: ret
%sv = load <2 x i32>, ptr %svp
%vec = load <8 x i32>, ptr %vp
%v = call <8 x i32> @llvm.vector.insert.v2i32.v8i32(<8 x i32> %vec, <2 x i32> %sv, i64 6)
@@ -248,24 +187,14 @@ define void @insert_v8i32_v2i32_6(ptr %vp, ptr %svp) {
}
define void @insert_v8i32_undef_v2i32_6(ptr %vp, ptr %svp) {
-; LMULMAX2-LABEL: insert_v8i32_undef_v2i32_6:
-; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
-; LMULMAX2-NEXT: vle32.v v8, (a1)
-; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; LMULMAX2-NEXT: vslideup.vi v10, v8, 6
-; LMULMAX2-NEXT: vse32.v v10, (a0)
-; LMULMAX2-NEXT: ret
-;
-; LMULMAX1-LABEL: insert_v8i32_undef_v2i32_6:
-; LMULMAX1: # %bb.0:
-; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
-; LMULMAX1-NEXT: vle32.v v8, (a1)
-; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; LMULMAX1-NEXT: vslideup.vi v9, v8, 2
-; LMULMAX1-NEXT: addi a0, a0, 16
-; LMULMAX1-NEXT: vse32.v v9, (a0)
-; LMULMAX1-NEXT: ret
+; CHECK-LABEL: insert_v8i32_undef_v2i32_6:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
+; CHECK-NEXT: vle32.v v8, (a1)
+; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; CHECK-NEXT: vslideup.vi v10, v8, 6
+; CHECK-NEXT: vse32.v v10, (a0)
+; CHECK-NEXT: ret
%sv = load <2 x i32>, ptr %svp
%v = call <8 x i32> @llvm.vector.insert.v2i32.v8i32(<8 x i32> undef, <2 x i32> %sv, i64 6)
store <8 x i32> %v, ptr %vp
@@ -310,30 +239,18 @@ define void @insert_v4i16_v2i16_2(ptr %vp, ptr %svp) {
}
define void @insert_v32i1_v8i1_0(ptr %vp, ptr %svp) {
-; LMULMAX2-LABEL: insert_v32i1_v8i1_0:
-; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: li a2, 32
-; LMULMAX2-NEXT: vsetvli zero, a2, e8, m2, ta, ma
-; LMULMAX2-NEXT: vlm.v v8, (a0)
-; LMULMAX2-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
-; LMULMAX2-NEXT: vlm.v v9, (a1)
-; LMULMAX2-NEXT: vsetivli zero, 1, e8, mf4, tu, ma
-; LMULMAX2-NEXT: vmv.v.v v8, v9
-; LMULMAX2-NEXT: vsetvli zero, a2, e8, m2, ta, ma
-; LMULMAX2-NEXT: vsm.v v8, (a0)
-; LMULMAX2-NEXT: ret
-;
-; LMULMAX1-LABEL: insert_v32i1_v8i1_0:
-; LMULMAX1: # %bb.0:
-; LMULMAX1-NEXT: vsetivli zero, 16, e8, m1, ta, ma
-; LMULMAX1-NEXT: vlm.v v8, (a0)
-; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
-; LMULMAX1-NEXT: vlm.v v9, (a1)
-; LMULMAX1-NEXT: vsetivli zero, 1, e8, mf8, tu, ma
-; LMULMAX1-NEXT: vmv.v.v v8, v9
-; LMULMAX1-NEXT: vsetivli zero, 16, e8, m1, ta, ma
-; LMULMAX1-NEXT: vsm.v v8, (a0)
-; LMULMAX1-NEXT: ret
+; CHECK-LABEL: insert_v32i1_v8i1_0:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a2, 32
+; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, ma
+; CHECK-NEXT: vlm.v v8, (a0)
+; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
+; CHECK-NEXT: vlm.v v9, (a1)
+; CHECK-NEXT: vsetivli zero, 1, e8, mf4, tu, ma
+; CHECK-NEXT: vmv.v.v v8, v9
+; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, ma
+; CHECK-NEXT: vsm.v v8, (a0)
+; CHECK-NEXT: ret
%v = load <32 x i1>, ptr %vp
%sv = load <8 x i1>, ptr %svp
%c = call <32 x i1> @llvm.vector.insert.v8i1.v32i1(<32 x i1> %v, <8 x i1> %sv, i64 0)
@@ -342,31 +259,18 @@ define void @insert_v32i1_v8i1_0(ptr %vp, ptr %svp) {
}
define void @insert_v32i1_v8i1_16(ptr %vp, ptr %svp) {
-; LMULMAX2-LABEL: insert_v32i1_v8i1_16:
-; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: li a2, 32
-; LMULMAX2-NEXT: vsetvli zero, a2, e8, m2, ta, ma
-; LMULMAX2-NEXT: vlm.v v8, (a0)
-; LMULMAX2-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
-; LMULMAX2-NEXT: vlm.v v9, (a1)
-; LMULMAX2-NEXT: vsetivli zero, 3, e8, mf4, tu, ma
-; LMULMAX2-NEXT: vslideup.vi v8, v9, 2
-; LMULMAX2-NEXT: vsetvli zero, a2, e8, m2, ta, ma
-; LMULMAX2-NEXT: vsm.v v8, (a0)
-; LMULMAX2-NEXT: ret
-;
-; LMULMAX1-LABEL: insert_v32i1_v8i1_16:
-; LMULMAX1: # %bb.0:
-; LMULMAX1-NEXT: addi a0, a0, 2
-; LMULMAX1-NEXT: vsetivli zero, 16, e8, m1, ta, ma
-; LMULMAX1-NEXT: vlm.v v8, (a0)
-; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
-; LMULMAX1-NEXT: vlm.v v9, (a1)
-; LMULMAX1-NEXT: vsetivli zero, 1, e8, mf8, tu, ma
-; LMULMAX1-NEXT: vmv.v.v v8, v9
-; LMULMAX1-NEXT: vsetivli zero, 16, e8, m1, ta, ma
-; LMULMAX1-NEXT: vsm.v v8, (a0)
-; LMULMAX1-NEXT: ret
+; CHECK-LABEL: insert_v32i1_v8i1_16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a2, 32
+; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, ma
+; CHECK-NEXT: vlm.v v8, (a0)
+; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
+; CHECK-NEXT: vlm.v v9, (a1)
+; CHECK-NEXT: vsetivli zero, 3, e8, mf4, tu, ma
+; CHECK-NEXT: vslideup.vi v8, v9, 2
+; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, ma
+; CHECK-NEXT: vsm.v v8, (a0)
+; CHECK-NEXT: ret
%v = load <32 x i1>, ptr %vp
%sv = load <8 x i1>, ptr %svp
%c = call <32 x i1> @llvm.vector.insert.v8i1.v32i1(<32 x i1> %v, <8 x i1> %sv, i64 16)
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-buildvec.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-buildvec.ll
index e691e63..ed6c01a 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-buildvec.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-buildvec.ll
@@ -1181,89 +1181,46 @@ define <8 x i64> @v8xi64_exact_undef_prefix(i64 %a, i64 %b, i64 %c, i64 %d) vsca
define <16 x i8> @buildvec_v16i8_loads_contigous(ptr %p) {
-; RV32-LABEL: buildvec_v16i8_loads_contigous:
-; RV32: # %bb.0:
-; RV32-NEXT: addi sp, sp, -16
-; RV32-NEXT: .cfi_def_cfa_offset 16
-; RV32-NEXT: sw s0, 12(sp) # 4-byte Folded Spill
-; RV32-NEXT: .cfi_offset s0, -4
-; RV32-NEXT: lbu a1, 1(a0)
-; RV32-NEXT: lbu a2, 2(a0)
-; RV32-NEXT: lbu a3, 3(a0)
-; RV32-NEXT: lbu a4, 4(a0)
-; RV32-NEXT: lbu a5, 5(a0)
-; RV32-NEXT: lbu a6, 6(a0)
-; RV32-NEXT: lbu a7, 7(a0)
-; RV32-NEXT: lbu t0, 8(a0)
-; RV32-NEXT: lbu t1, 9(a0)
-; RV32-NEXT: lbu t2, 10(a0)
-; RV32-NEXT: lbu t3, 11(a0)
-; RV32-NEXT: lbu t4, 12(a0)
-; RV32-NEXT: lbu t5, 13(a0)
-; RV32-NEXT: lbu t6, 14(a0)
-; RV32-NEXT: lbu s0, 15(a0)
-; RV32-NEXT: vsetivli zero, 16, e8, m1, ta, ma
-; RV32-NEXT: vlse8.v v8, (a0), zero
-; RV32-NEXT: vslide1down.vx v8, v8, a1
-; RV32-NEXT: vslide1down.vx v8, v8, a2
-; RV32-NEXT: vslide1down.vx v8, v8, a3
-; RV32-NEXT: vslide1down.vx v8, v8, a4
-; RV32-NEXT: vslide1down.vx v8, v8, a5
-; RV32-NEXT: vslide1down.vx v8, v8, a6
-; RV32-NEXT: vslide1down.vx v8, v8, a7
-; RV32-NEXT: vslide1down.vx v8, v8, t0
-; RV32-NEXT: vslide1down.vx v8, v8, t1
-; RV32-NEXT: vslide1down.vx v8, v8, t2
-; RV32-NEXT: vslide1down.vx v8, v8, t3
-; RV32-NEXT: vslide1down.vx v8, v8, t4
-; RV32-NEXT: vslide1down.vx v8, v8, t5
-; RV32-NEXT: vslide1down.vx v8, v8, t6
-; RV32-NEXT: vslide1down.vx v8, v8, s0
-; RV32-NEXT: lw s0, 12(sp) # 4-byte Folded Reload
-; RV32-NEXT: addi sp, sp, 16
-; RV32-NEXT: ret
-;
-; RV64-LABEL: buildvec_v16i8_loads_contigous:
-; RV64: # %bb.0:
-; RV64-NEXT: addi sp, sp, -16
-; RV64-NEXT: .cfi_def_cfa_offset 16
-; RV64-NEXT: sd s0, 8(sp) # 8-byte Folded Spill
-; RV64-NEXT: .cfi_offset s0, -8
-; RV64-NEXT: lbu a1, 1(a0)
-; RV64-NEXT: lbu a2, 2(a0)
-; RV64-NEXT: lbu a3, 3(a0)
-; RV64-NEXT: lbu a4, 4(a0)
-; RV64-NEXT: lbu a5, 5(a0)
-; RV64-NEXT: lbu a6, 6(a0)
-; RV64-NEXT: lbu a7, 7(a0)
-; RV64-NEXT: lbu t0, 8(a0)
-; RV64-NEXT: lbu t1, 9(a0)
-; RV64-NEXT: lbu t2, 10(a0)
-; RV64-NEXT: lbu t3, 11(a0)
-; RV64-NEXT: lbu t4, 12(a0)
-; RV64-NEXT: lbu t5, 13(a0)
-; RV64-NEXT: lbu t6, 14(a0)
-; RV64-NEXT: lbu s0, 15(a0)
-; RV64-NEXT: vsetivli zero, 16, e8, m1, ta, ma
-; RV64-NEXT: vlse8.v v8, (a0), zero
-; RV64-NEXT: vslide1down.vx v8, v8, a1
-; RV64-NEXT: vslide1down.vx v8, v8, a2
-; RV64-NEXT: vslide1down.vx v8, v8, a3
-; RV64-NEXT: vslide1down.vx v8, v8, a4
-; RV64-NEXT: vslide1down.vx v8, v8, a5
-; RV64-NEXT: vslide1down.vx v8, v8, a6
-; RV64-NEXT: vslide1down.vx v8, v8, a7
-; RV64-NEXT: vslide1down.vx v8, v8, t0
-; RV64-NEXT: vslide1down.vx v8, v8, t1
-; RV64-NEXT: vslide1down.vx v8, v8, t2
-; RV64-NEXT: vslide1down.vx v8, v8, t3
-; RV64-NEXT: vslide1down.vx v8, v8, t4
-; RV64-NEXT: vslide1down.vx v8, v8, t5
-; RV64-NEXT: vslide1down.vx v8, v8, t6
-; RV64-NEXT: vslide1down.vx v8, v8, s0
-; RV64-NEXT: ld s0, 8(sp) # 8-byte Folded Reload
-; RV64-NEXT: addi sp, sp, 16
-; RV64-NEXT: ret
+; CHECK-LABEL: buildvec_v16i8_loads_contigous:
+; CHECK: # %bb.0:
+; CHECK-NEXT: lbu a1, 1(a0)
+; CHECK-NEXT: lbu a2, 2(a0)
+; CHECK-NEXT: lbu a3, 3(a0)
+; CHECK-NEXT: lbu a4, 4(a0)
+; CHECK-NEXT: lbu a5, 5(a0)
+; CHECK-NEXT: lbu a6, 6(a0)
+; CHECK-NEXT: lbu a7, 7(a0)
+; CHECK-NEXT: lbu t0, 9(a0)
+; CHECK-NEXT: lbu t1, 10(a0)
+; CHECK-NEXT: lbu t2, 11(a0)
+; CHECK-NEXT: lbu t3, 12(a0)
+; CHECK-NEXT: lbu t4, 13(a0)
+; CHECK-NEXT: lbu t5, 14(a0)
+; CHECK-NEXT: lbu t6, 15(a0)
+; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma
+; CHECK-NEXT: vlse8.v v8, (a0), zero
+; CHECK-NEXT: addi a0, a0, 8
+; CHECK-NEXT: vslide1down.vx v8, v8, a1
+; CHECK-NEXT: vslide1down.vx v8, v8, a2
+; CHECK-NEXT: vslide1down.vx v8, v8, a3
+; CHECK-NEXT: vslide1down.vx v8, v8, a4
+; CHECK-NEXT: vlse8.v v9, (a0), zero
+; CHECK-NEXT: vslide1down.vx v8, v8, a5
+; CHECK-NEXT: vslide1down.vx v8, v8, a6
+; CHECK-NEXT: vslide1down.vx v10, v8, a7
+; CHECK-NEXT: vslide1down.vx v8, v9, t0
+; CHECK-NEXT: vslide1down.vx v8, v8, t1
+; CHECK-NEXT: vslide1down.vx v8, v8, t2
+; CHECK-NEXT: vslide1down.vx v8, v8, t3
+; CHECK-NEXT: vslide1down.vx v8, v8, t4
+; CHECK-NEXT: vslide1down.vx v8, v8, t5
+; CHECK-NEXT: vslide1down.vx v8, v8, t6
+; CHECK-NEXT: li a0, 255
+; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma
+; CHECK-NEXT: vmv.s.x v0, a0
+; CHECK-NEXT: vsetvli zero, zero, e8, m1, ta, mu
+; CHECK-NEXT: vslidedown.vi v8, v10, 8, v0.t
+; CHECK-NEXT: ret
%p2 = getelementptr i8, ptr %p, i32 1
%p3 = getelementptr i8, ptr %p, i32 2
%p4 = getelementptr i8, ptr %p, i32 3
@@ -1318,89 +1275,46 @@ define <16 x i8> @buildvec_v16i8_loads_contigous(ptr %p) {
define <16 x i8> @buildvec_v16i8_loads_gather(ptr %p) {
-; RV32-LABEL: buildvec_v16i8_loads_gather:
-; RV32: # %bb.0:
-; RV32-NEXT: addi sp, sp, -16
-; RV32-NEXT: .cfi_def_cfa_offset 16
-; RV32-NEXT: sw s0, 12(sp) # 4-byte Folded Spill
-; RV32-NEXT: .cfi_offset s0, -4
-; RV32-NEXT: lbu a1, 1(a0)
-; RV32-NEXT: lbu a2, 22(a0)
-; RV32-NEXT: lbu a3, 31(a0)
-; RV32-NEXT: lbu a4, 44(a0)
-; RV32-NEXT: lbu a5, 55(a0)
-; RV32-NEXT: lbu a6, 623(a0)
-; RV32-NEXT: lbu a7, 75(a0)
-; RV32-NEXT: lbu t0, 82(a0)
-; RV32-NEXT: lbu t1, 93(a0)
-; RV32-NEXT: lbu t2, 105(a0)
-; RV32-NEXT: lbu t3, 161(a0)
-; RV32-NEXT: lbu t4, 124(a0)
-; RV32-NEXT: lbu t5, 163(a0)
-; RV32-NEXT: lbu t6, 144(a0)
-; RV32-NEXT: lbu s0, 154(a0)
-; RV32-NEXT: vsetivli zero, 16, e8, m1, ta, ma
-; RV32-NEXT: vlse8.v v8, (a0), zero
-; RV32-NEXT: vslide1down.vx v8, v8, a1
-; RV32-NEXT: vslide1down.vx v8, v8, a2
-; RV32-NEXT: vslide1down.vx v8, v8, a3
-; RV32-NEXT: vslide1down.vx v8, v8, a4
-; RV32-NEXT: vslide1down.vx v8, v8, a5
-; RV32-NEXT: vslide1down.vx v8, v8, a6
-; RV32-NEXT: vslide1down.vx v8, v8, a7
-; RV32-NEXT: vslide1down.vx v8, v8, t0
-; RV32-NEXT: vslide1down.vx v8, v8, t1
-; RV32-NEXT: vslide1down.vx v8, v8, t2
-; RV32-NEXT: vslide1down.vx v8, v8, t3
-; RV32-NEXT: vslide1down.vx v8, v8, t4
-; RV32-NEXT: vslide1down.vx v8, v8, t5
-; RV32-NEXT: vslide1down.vx v8, v8, t6
-; RV32-NEXT: vslide1down.vx v8, v8, s0
-; RV32-NEXT: lw s0, 12(sp) # 4-byte Folded Reload
-; RV32-NEXT: addi sp, sp, 16
-; RV32-NEXT: ret
-;
-; RV64-LABEL: buildvec_v16i8_loads_gather:
-; RV64: # %bb.0:
-; RV64-NEXT: addi sp, sp, -16
-; RV64-NEXT: .cfi_def_cfa_offset 16
-; RV64-NEXT: sd s0, 8(sp) # 8-byte Folded Spill
-; RV64-NEXT: .cfi_offset s0, -8
-; RV64-NEXT: lbu a1, 1(a0)
-; RV64-NEXT: lbu a2, 22(a0)
-; RV64-NEXT: lbu a3, 31(a0)
-; RV64-NEXT: lbu a4, 44(a0)
-; RV64-NEXT: lbu a5, 55(a0)
-; RV64-NEXT: lbu a6, 623(a0)
-; RV64-NEXT: lbu a7, 75(a0)
-; RV64-NEXT: lbu t0, 82(a0)
-; RV64-NEXT: lbu t1, 93(a0)
-; RV64-NEXT: lbu t2, 105(a0)
-; RV64-NEXT: lbu t3, 161(a0)
-; RV64-NEXT: lbu t4, 124(a0)
-; RV64-NEXT: lbu t5, 163(a0)
-; RV64-NEXT: lbu t6, 144(a0)
-; RV64-NEXT: lbu s0, 154(a0)
-; RV64-NEXT: vsetivli zero, 16, e8, m1, ta, ma
-; RV64-NEXT: vlse8.v v8, (a0), zero
-; RV64-NEXT: vslide1down.vx v8, v8, a1
-; RV64-NEXT: vslide1down.vx v8, v8, a2
-; RV64-NEXT: vslide1down.vx v8, v8, a3
-; RV64-NEXT: vslide1down.vx v8, v8, a4
-; RV64-NEXT: vslide1down.vx v8, v8, a5
-; RV64-NEXT: vslide1down.vx v8, v8, a6
-; RV64-NEXT: vslide1down.vx v8, v8, a7
-; RV64-NEXT: vslide1down.vx v8, v8, t0
-; RV64-NEXT: vslide1down.vx v8, v8, t1
-; RV64-NEXT: vslide1down.vx v8, v8, t2
-; RV64-NEXT: vslide1down.vx v8, v8, t3
-; RV64-NEXT: vslide1down.vx v8, v8, t4
-; RV64-NEXT: vslide1down.vx v8, v8, t5
-; RV64-NEXT: vslide1down.vx v8, v8, t6
-; RV64-NEXT: vslide1down.vx v8, v8, s0
-; RV64-NEXT: ld s0, 8(sp) # 8-byte Folded Reload
-; RV64-NEXT: addi sp, sp, 16
-; RV64-NEXT: ret
+; CHECK-LABEL: buildvec_v16i8_loads_gather:
+; CHECK: # %bb.0:
+; CHECK-NEXT: lbu a1, 1(a0)
+; CHECK-NEXT: lbu a2, 22(a0)
+; CHECK-NEXT: lbu a3, 31(a0)
+; CHECK-NEXT: lbu a4, 44(a0)
+; CHECK-NEXT: lbu a5, 55(a0)
+; CHECK-NEXT: lbu a6, 623(a0)
+; CHECK-NEXT: lbu a7, 75(a0)
+; CHECK-NEXT: lbu t0, 93(a0)
+; CHECK-NEXT: lbu t1, 105(a0)
+; CHECK-NEXT: lbu t2, 161(a0)
+; CHECK-NEXT: lbu t3, 124(a0)
+; CHECK-NEXT: lbu t4, 163(a0)
+; CHECK-NEXT: lbu t5, 144(a0)
+; CHECK-NEXT: lbu t6, 154(a0)
+; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma
+; CHECK-NEXT: vlse8.v v8, (a0), zero
+; CHECK-NEXT: addi a0, a0, 82
+; CHECK-NEXT: vslide1down.vx v8, v8, a1
+; CHECK-NEXT: vslide1down.vx v8, v8, a2
+; CHECK-NEXT: vslide1down.vx v8, v8, a3
+; CHECK-NEXT: vslide1down.vx v8, v8, a4
+; CHECK-NEXT: vlse8.v v9, (a0), zero
+; CHECK-NEXT: vslide1down.vx v8, v8, a5
+; CHECK-NEXT: vslide1down.vx v8, v8, a6
+; CHECK-NEXT: vslide1down.vx v10, v8, a7
+; CHECK-NEXT: vslide1down.vx v8, v9, t0
+; CHECK-NEXT: vslide1down.vx v8, v8, t1
+; CHECK-NEXT: vslide1down.vx v8, v8, t2
+; CHECK-NEXT: vslide1down.vx v8, v8, t3
+; CHECK-NEXT: vslide1down.vx v8, v8, t4
+; CHECK-NEXT: vslide1down.vx v8, v8, t5
+; CHECK-NEXT: vslide1down.vx v8, v8, t6
+; CHECK-NEXT: li a0, 255
+; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma
+; CHECK-NEXT: vmv.s.x v0, a0
+; CHECK-NEXT: vsetvli zero, zero, e8, m1, ta, mu
+; CHECK-NEXT: vslidedown.vi v8, v10, 8, v0.t
+; CHECK-NEXT: ret
%p2 = getelementptr i8, ptr %p, i32 1
%p3 = getelementptr i8, ptr %p, i32 22
%p4 = getelementptr i8, ptr %p, i32 31
@@ -1560,21 +1474,26 @@ define <16 x i8> @buildvec_v16i8_undef_edges(ptr %p) {
; CHECK-NEXT: lbu a3, 55(a0)
; CHECK-NEXT: lbu a4, 623(a0)
; CHECK-NEXT: lbu a5, 75(a0)
-; CHECK-NEXT: lbu a6, 82(a0)
-; CHECK-NEXT: lbu a7, 93(a0)
-; CHECK-NEXT: lbu t0, 105(a0)
-; CHECK-NEXT: lbu a0, 161(a0)
+; CHECK-NEXT: lbu a6, 93(a0)
+; CHECK-NEXT: lbu a7, 105(a0)
+; CHECK-NEXT: lbu t0, 161(a0)
; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma
; CHECK-NEXT: vlse8.v v8, (a1), zero
+; CHECK-NEXT: addi a0, a0, 82
; CHECK-NEXT: vslide1down.vx v8, v8, a2
+; CHECK-NEXT: vlse8.v v9, (a0), zero
; CHECK-NEXT: vslide1down.vx v8, v8, a3
; CHECK-NEXT: vslide1down.vx v8, v8, a4
-; CHECK-NEXT: vslide1down.vx v8, v8, a5
-; CHECK-NEXT: vslide1down.vx v8, v8, a6
+; CHECK-NEXT: vslide1down.vx v10, v8, a5
+; CHECK-NEXT: vslide1down.vx v8, v9, a6
; CHECK-NEXT: vslide1down.vx v8, v8, a7
; CHECK-NEXT: vslide1down.vx v8, v8, t0
-; CHECK-NEXT: vslide1down.vx v8, v8, a0
; CHECK-NEXT: vslidedown.vi v8, v8, 4
+; CHECK-NEXT: li a0, 255
+; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma
+; CHECK-NEXT: vmv.s.x v0, a0
+; CHECK-NEXT: vsetvli zero, zero, e8, m1, ta, mu
+; CHECK-NEXT: vslidedown.vi v8, v10, 8, v0.t
; CHECK-NEXT: ret
%p4 = getelementptr i8, ptr %p, i32 31
%p5 = getelementptr i8, ptr %p, i32 44
@@ -1615,26 +1534,31 @@ define <16 x i8> @buildvec_v16i8_loads_undef_scattered(ptr %p) {
; CHECK-NEXT: lbu a2, 44(a0)
; CHECK-NEXT: lbu a3, 55(a0)
; CHECK-NEXT: lbu a4, 75(a0)
-; CHECK-NEXT: lbu a5, 82(a0)
-; CHECK-NEXT: lbu a6, 93(a0)
-; CHECK-NEXT: lbu a7, 124(a0)
-; CHECK-NEXT: lbu t0, 144(a0)
-; CHECK-NEXT: lbu t1, 154(a0)
+; CHECK-NEXT: lbu a5, 93(a0)
+; CHECK-NEXT: lbu a6, 124(a0)
+; CHECK-NEXT: lbu a7, 144(a0)
+; CHECK-NEXT: lbu t0, 154(a0)
; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma
; CHECK-NEXT: vlse8.v v8, (a0), zero
+; CHECK-NEXT: addi a0, a0, 82
; CHECK-NEXT: vslide1down.vx v8, v8, a1
; CHECK-NEXT: vslidedown.vi v8, v8, 2
; CHECK-NEXT: vslide1down.vx v8, v8, a2
+; CHECK-NEXT: vlse8.v v9, (a0), zero
; CHECK-NEXT: vslide1down.vx v8, v8, a3
; CHECK-NEXT: vslidedown.vi v8, v8, 1
-; CHECK-NEXT: vslide1down.vx v8, v8, a4
-; CHECK-NEXT: vslide1down.vx v8, v8, a5
-; CHECK-NEXT: vslide1down.vx v8, v8, a6
+; CHECK-NEXT: vslide1down.vx v10, v8, a4
+; CHECK-NEXT: vslide1down.vx v8, v9, a5
; CHECK-NEXT: vslidedown.vi v8, v8, 2
-; CHECK-NEXT: vslide1down.vx v8, v8, a7
+; CHECK-NEXT: vslide1down.vx v8, v8, a6
; CHECK-NEXT: vslidedown.vi v8, v8, 1
+; CHECK-NEXT: vslide1down.vx v8, v8, a7
; CHECK-NEXT: vslide1down.vx v8, v8, t0
-; CHECK-NEXT: vslide1down.vx v8, v8, t1
+; CHECK-NEXT: li a0, 255
+; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma
+; CHECK-NEXT: vmv.s.x v0, a0
+; CHECK-NEXT: vsetvli zero, zero, e8, m1, ta, mu
+; CHECK-NEXT: vslidedown.vi v8, v10, 8, v0.t
; CHECK-NEXT: ret
%p2 = getelementptr i8, ptr %p, i32 1
%p3 = getelementptr i8, ptr %p, i32 22
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-exttrunc.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-exttrunc.ll
index 2c3bc2ef..c65f6e5 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-exttrunc.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-exttrunc.ll
@@ -1,10 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+m,+v -riscv-v-fixed-length-vector-lmul-max=8 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,LMULMAX8
-; RUN: llc -mtriple=riscv64 -mattr=+m,+v -riscv-v-fixed-length-vector-lmul-max=8 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,LMULMAX8
-; RUN: llc -mtriple=riscv32 -mattr=+m,+v -riscv-v-fixed-length-vector-lmul-max=2 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,LMULMAX2
-; RUN: llc -mtriple=riscv64 -mattr=+m,+v -riscv-v-fixed-length-vector-lmul-max=2 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,LMULMAX2
-; RUN: llc -mtriple=riscv32 -mattr=+m,+v -riscv-v-fixed-length-vector-lmul-max=1 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,LMULMAX1
-; RUN: llc -mtriple=riscv64 -mattr=+m,+v -riscv-v-fixed-length-vector-lmul-max=1 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,LMULMAX1
+; RUN: llc -mtriple=riscv32 -mattr=+m,+v -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -mtriple=riscv64 -mattr=+m,+v -verify-machineinstrs < %s | FileCheck %s
define void @sext_v4i8_v4i32(ptr %x, ptr %z) {
; CHECK-LABEL: sext_v4i8_v4i32:
@@ -35,36 +31,13 @@ define void @zext_v4i8_v4i32(ptr %x, ptr %z) {
}
define void @sext_v8i8_v8i32(ptr %x, ptr %z) {
-; LMULMAX8-LABEL: sext_v8i8_v8i32:
-; LMULMAX8: # %bb.0:
-; LMULMAX8-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; LMULMAX8-NEXT: vle8.v v8, (a0)
-; LMULMAX8-NEXT: vsext.vf4 v10, v8
-; LMULMAX8-NEXT: vse32.v v10, (a1)
-; LMULMAX8-NEXT: ret
-;
-; LMULMAX2-LABEL: sext_v8i8_v8i32:
-; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; LMULMAX2-NEXT: vle8.v v8, (a0)
-; LMULMAX2-NEXT: vsext.vf4 v10, v8
-; LMULMAX2-NEXT: vse32.v v10, (a1)
-; LMULMAX2-NEXT: ret
-;
-; LMULMAX1-LABEL: sext_v8i8_v8i32:
-; LMULMAX1: # %bb.0:
-; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
-; LMULMAX1-NEXT: vle8.v v8, (a0)
-; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; LMULMAX1-NEXT: vsext.vf4 v9, v8
-; LMULMAX1-NEXT: vsetivli zero, 4, e8, mf2, ta, ma
-; LMULMAX1-NEXT: vslidedown.vi v8, v8, 4
-; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; LMULMAX1-NEXT: vsext.vf4 v10, v8
-; LMULMAX1-NEXT: addi a0, a1, 16
-; LMULMAX1-NEXT: vse32.v v10, (a0)
-; LMULMAX1-NEXT: vse32.v v9, (a1)
-; LMULMAX1-NEXT: ret
+; CHECK-LABEL: sext_v8i8_v8i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; CHECK-NEXT: vle8.v v8, (a0)
+; CHECK-NEXT: vsext.vf4 v10, v8
+; CHECK-NEXT: vse32.v v10, (a1)
+; CHECK-NEXT: ret
%a = load <8 x i8>, ptr %x
%b = sext <8 x i8> %a to <8 x i32>
store <8 x i32> %b, ptr %z
@@ -72,90 +45,14 @@ define void @sext_v8i8_v8i32(ptr %x, ptr %z) {
}
define void @sext_v32i8_v32i32(ptr %x, ptr %z) {
-; LMULMAX8-LABEL: sext_v32i8_v32i32:
-; LMULMAX8: # %bb.0:
-; LMULMAX8-NEXT: li a2, 32
-; LMULMAX8-NEXT: vsetvli zero, a2, e32, m8, ta, ma
-; LMULMAX8-NEXT: vle8.v v8, (a0)
-; LMULMAX8-NEXT: vsext.vf4 v16, v8
-; LMULMAX8-NEXT: vse32.v v16, (a1)
-; LMULMAX8-NEXT: ret
-;
-; LMULMAX2-LABEL: sext_v32i8_v32i32:
-; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: li a2, 32
-; LMULMAX2-NEXT: vsetvli zero, a2, e8, m2, ta, ma
-; LMULMAX2-NEXT: vle8.v v8, (a0)
-; LMULMAX2-NEXT: vsetivli zero, 8, e8, m1, ta, ma
-; LMULMAX2-NEXT: vslidedown.vi v10, v8, 8
-; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; LMULMAX2-NEXT: vsext.vf4 v12, v10
-; LMULMAX2-NEXT: vsext.vf4 v10, v8
-; LMULMAX2-NEXT: vsetivli zero, 16, e8, m2, ta, ma
-; LMULMAX2-NEXT: vslidedown.vi v8, v8, 16
-; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; LMULMAX2-NEXT: vsext.vf4 v14, v8
-; LMULMAX2-NEXT: vsetivli zero, 8, e8, m1, ta, ma
-; LMULMAX2-NEXT: vslidedown.vi v8, v8, 8
-; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; LMULMAX2-NEXT: vsext.vf4 v16, v8
-; LMULMAX2-NEXT: addi a0, a1, 96
-; LMULMAX2-NEXT: vse32.v v16, (a0)
-; LMULMAX2-NEXT: addi a0, a1, 64
-; LMULMAX2-NEXT: vse32.v v14, (a0)
-; LMULMAX2-NEXT: vse32.v v10, (a1)
-; LMULMAX2-NEXT: addi a0, a1, 32
-; LMULMAX2-NEXT: vse32.v v12, (a0)
-; LMULMAX2-NEXT: ret
-;
-; LMULMAX1-LABEL: sext_v32i8_v32i32:
-; LMULMAX1: # %bb.0:
-; LMULMAX1-NEXT: vsetivli zero, 16, e8, m1, ta, ma
-; LMULMAX1-NEXT: addi a2, a0, 16
-; LMULMAX1-NEXT: vle8.v v8, (a2)
-; LMULMAX1-NEXT: vle8.v v9, (a0)
-; LMULMAX1-NEXT: vsetivli zero, 4, e8, mf2, ta, ma
-; LMULMAX1-NEXT: vslidedown.vi v10, v8, 4
-; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; LMULMAX1-NEXT: vsext.vf4 v11, v10
-; LMULMAX1-NEXT: vsetivli zero, 4, e8, mf2, ta, ma
-; LMULMAX1-NEXT: vslidedown.vi v10, v9, 4
-; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; LMULMAX1-NEXT: vsext.vf4 v12, v10
-; LMULMAX1-NEXT: vsext.vf4 v10, v8
-; LMULMAX1-NEXT: vsetivli zero, 8, e8, m1, ta, ma
-; LMULMAX1-NEXT: vslidedown.vi v8, v8, 8
-; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; LMULMAX1-NEXT: vsext.vf4 v13, v8
-; LMULMAX1-NEXT: vsetivli zero, 4, e8, mf2, ta, ma
-; LMULMAX1-NEXT: vslidedown.vi v8, v8, 4
-; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; LMULMAX1-NEXT: vsext.vf4 v14, v8
-; LMULMAX1-NEXT: vsext.vf4 v8, v9
-; LMULMAX1-NEXT: vsetivli zero, 8, e8, m1, ta, ma
-; LMULMAX1-NEXT: vslidedown.vi v9, v9, 8
-; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; LMULMAX1-NEXT: vsext.vf4 v15, v9
-; LMULMAX1-NEXT: vsetivli zero, 4, e8, mf2, ta, ma
-; LMULMAX1-NEXT: vslidedown.vi v9, v9, 4
-; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; LMULMAX1-NEXT: vsext.vf4 v16, v9
-; LMULMAX1-NEXT: addi a0, a1, 48
-; LMULMAX1-NEXT: vse32.v v16, (a0)
-; LMULMAX1-NEXT: addi a0, a1, 32
-; LMULMAX1-NEXT: vse32.v v15, (a0)
-; LMULMAX1-NEXT: vse32.v v8, (a1)
-; LMULMAX1-NEXT: addi a0, a1, 112
-; LMULMAX1-NEXT: vse32.v v14, (a0)
-; LMULMAX1-NEXT: addi a0, a1, 96
-; LMULMAX1-NEXT: vse32.v v13, (a0)
-; LMULMAX1-NEXT: addi a0, a1, 64
-; LMULMAX1-NEXT: vse32.v v10, (a0)
-; LMULMAX1-NEXT: addi a0, a1, 16
-; LMULMAX1-NEXT: vse32.v v12, (a0)
-; LMULMAX1-NEXT: addi a0, a1, 80
-; LMULMAX1-NEXT: vse32.v v11, (a0)
-; LMULMAX1-NEXT: ret
+; CHECK-LABEL: sext_v32i8_v32i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a2, 32
+; CHECK-NEXT: vsetvli zero, a2, e32, m8, ta, ma
+; CHECK-NEXT: vle8.v v8, (a0)
+; CHECK-NEXT: vsext.vf4 v16, v8
+; CHECK-NEXT: vse32.v v16, (a1)
+; CHECK-NEXT: ret
%a = load <32 x i8>, ptr %x
%b = sext <32 x i8> %a to <32 x i32>
store <32 x i32> %b, ptr %z
@@ -179,43 +76,15 @@ define void @trunc_v4i8_v4i32(ptr %x, ptr %z) {
}
define void @trunc_v8i8_v8i32(ptr %x, ptr %z) {
-; LMULMAX8-LABEL: trunc_v8i8_v8i32:
-; LMULMAX8: # %bb.0:
-; LMULMAX8-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; LMULMAX8-NEXT: vle32.v v8, (a0)
-; LMULMAX8-NEXT: vnsrl.wi v10, v8, 0
-; LMULMAX8-NEXT: vsetvli zero, zero, e8, mf2, ta, ma
-; LMULMAX8-NEXT: vnsrl.wi v8, v10, 0
-; LMULMAX8-NEXT: vse8.v v8, (a1)
-; LMULMAX8-NEXT: ret
-;
-; LMULMAX2-LABEL: trunc_v8i8_v8i32:
-; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; LMULMAX2-NEXT: vle32.v v8, (a0)
-; LMULMAX2-NEXT: vnsrl.wi v10, v8, 0
-; LMULMAX2-NEXT: vsetvli zero, zero, e8, mf2, ta, ma
-; LMULMAX2-NEXT: vnsrl.wi v8, v10, 0
-; LMULMAX2-NEXT: vse8.v v8, (a1)
-; LMULMAX2-NEXT: ret
-;
-; LMULMAX1-LABEL: trunc_v8i8_v8i32:
-; LMULMAX1: # %bb.0:
-; LMULMAX1-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
-; LMULMAX1-NEXT: vle32.v v8, (a0)
-; LMULMAX1-NEXT: addi a0, a0, 16
-; LMULMAX1-NEXT: vle32.v v9, (a0)
-; LMULMAX1-NEXT: vnsrl.wi v8, v8, 0
-; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf4, ta, ma
-; LMULMAX1-NEXT: vnsrl.wi v8, v8, 0
-; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
-; LMULMAX1-NEXT: vnsrl.wi v9, v9, 0
-; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf4, ta, ma
-; LMULMAX1-NEXT: vnsrl.wi v9, v9, 0
-; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
-; LMULMAX1-NEXT: vslideup.vi v8, v9, 4
-; LMULMAX1-NEXT: vse8.v v8, (a1)
-; LMULMAX1-NEXT: ret
+; CHECK-LABEL: trunc_v8i8_v8i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT: vle32.v v8, (a0)
+; CHECK-NEXT: vnsrl.wi v10, v8, 0
+; CHECK-NEXT: vsetvli zero, zero, e8, mf2, ta, ma
+; CHECK-NEXT: vnsrl.wi v8, v10, 0
+; CHECK-NEXT: vse8.v v8, (a1)
+; CHECK-NEXT: ret
%a = load <8 x i32>, ptr %x
%b = trunc <8 x i32> %a to <8 x i8>
store <8 x i8> %b, ptr %z
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-splat.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-splat.ll
index df7a989..60202cf 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-splat.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-splat.ll
@@ -1,10 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v -riscv-v-fixed-length-vector-lmul-max=8 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,LMULMAX8,LMULMAX8-RV32
-; RUN: llc -mtriple=riscv32 -mattr=+v -riscv-v-fixed-length-vector-lmul-max=2 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,LMULMAX2,LMULMAX2-RV32
-; RUN: llc -mtriple=riscv32 -mattr=+v -riscv-v-fixed-length-vector-lmul-max=1 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,LMULMAX1,LMULMAX1-RV32
-; RUN: llc -mtriple=riscv64 -mattr=+v -riscv-v-fixed-length-vector-lmul-max=8 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,LMULMAX8,LMULMAX8-RV64
-; RUN: llc -mtriple=riscv64 -mattr=+v -riscv-v-fixed-length-vector-lmul-max=2 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,LMULMAX2,LMULMAX2-RV64
-; RUN: llc -mtriple=riscv64 -mattr=+v -riscv-v-fixed-length-vector-lmul-max=1 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,LMULMAX1,LMULMAX1-RV64
+; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32
+; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64
define void @splat_v16i8(ptr %x, i8 %y) {
; CHECK-LABEL: splat_v16i8:
@@ -46,65 +42,25 @@ define void @splat_v4i32(ptr %x, i32 %y) {
}
define void @splat_v2i64(ptr %x, i64 %y) {
-; LMULMAX8-RV32-LABEL: splat_v2i64:
-; LMULMAX8-RV32: # %bb.0:
-; LMULMAX8-RV32-NEXT: addi sp, sp, -16
-; LMULMAX8-RV32-NEXT: .cfi_def_cfa_offset 16
-; LMULMAX8-RV32-NEXT: sw a2, 12(sp)
-; LMULMAX8-RV32-NEXT: sw a1, 8(sp)
-; LMULMAX8-RV32-NEXT: addi a1, sp, 8
-; LMULMAX8-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; LMULMAX8-RV32-NEXT: vlse64.v v8, (a1), zero
-; LMULMAX8-RV32-NEXT: vse64.v v8, (a0)
-; LMULMAX8-RV32-NEXT: addi sp, sp, 16
-; LMULMAX8-RV32-NEXT: ret
-;
-; LMULMAX2-RV32-LABEL: splat_v2i64:
-; LMULMAX2-RV32: # %bb.0:
-; LMULMAX2-RV32-NEXT: addi sp, sp, -16
-; LMULMAX2-RV32-NEXT: .cfi_def_cfa_offset 16
-; LMULMAX2-RV32-NEXT: sw a2, 12(sp)
-; LMULMAX2-RV32-NEXT: sw a1, 8(sp)
-; LMULMAX2-RV32-NEXT: addi a1, sp, 8
-; LMULMAX2-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; LMULMAX2-RV32-NEXT: vlse64.v v8, (a1), zero
-; LMULMAX2-RV32-NEXT: vse64.v v8, (a0)
-; LMULMAX2-RV32-NEXT: addi sp, sp, 16
-; LMULMAX2-RV32-NEXT: ret
-;
-; LMULMAX1-RV32-LABEL: splat_v2i64:
-; LMULMAX1-RV32: # %bb.0:
-; LMULMAX1-RV32-NEXT: addi sp, sp, -16
-; LMULMAX1-RV32-NEXT: .cfi_def_cfa_offset 16
-; LMULMAX1-RV32-NEXT: sw a2, 12(sp)
-; LMULMAX1-RV32-NEXT: sw a1, 8(sp)
-; LMULMAX1-RV32-NEXT: addi a1, sp, 8
-; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; LMULMAX1-RV32-NEXT: vlse64.v v8, (a1), zero
-; LMULMAX1-RV32-NEXT: vse64.v v8, (a0)
-; LMULMAX1-RV32-NEXT: addi sp, sp, 16
-; LMULMAX1-RV32-NEXT: ret
-;
-; LMULMAX8-RV64-LABEL: splat_v2i64:
-; LMULMAX8-RV64: # %bb.0:
-; LMULMAX8-RV64-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; LMULMAX8-RV64-NEXT: vmv.v.x v8, a1
-; LMULMAX8-RV64-NEXT: vse64.v v8, (a0)
-; LMULMAX8-RV64-NEXT: ret
-;
-; LMULMAX2-RV64-LABEL: splat_v2i64:
-; LMULMAX2-RV64: # %bb.0:
-; LMULMAX2-RV64-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; LMULMAX2-RV64-NEXT: vmv.v.x v8, a1
-; LMULMAX2-RV64-NEXT: vse64.v v8, (a0)
-; LMULMAX2-RV64-NEXT: ret
-;
-; LMULMAX1-RV64-LABEL: splat_v2i64:
-; LMULMAX1-RV64: # %bb.0:
-; LMULMAX1-RV64-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; LMULMAX1-RV64-NEXT: vmv.v.x v8, a1
-; LMULMAX1-RV64-NEXT: vse64.v v8, (a0)
-; LMULMAX1-RV64-NEXT: ret
+; RV32-LABEL: splat_v2i64:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: .cfi_def_cfa_offset 16
+; RV32-NEXT: sw a2, 12(sp)
+; RV32-NEXT: sw a1, 8(sp)
+; RV32-NEXT: addi a1, sp, 8
+; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; RV32-NEXT: vlse64.v v8, (a1), zero
+; RV32-NEXT: vse64.v v8, (a0)
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: splat_v2i64:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; RV64-NEXT: vmv.v.x v8, a1
+; RV64-NEXT: vse64.v v8, (a0)
+; RV64-NEXT: ret
%a = insertelement <2 x i64> poison, i64 %y, i32 0
%b = shufflevector <2 x i64> %a, <2 x i64> poison, <2 x i32> zeroinitializer
store <2 x i64> %b, ptr %x
@@ -112,30 +68,13 @@ define void @splat_v2i64(ptr %x, i64 %y) {
}
define void @splat_v32i8(ptr %x, i8 %y) {
-; LMULMAX8-LABEL: splat_v32i8:
-; LMULMAX8: # %bb.0:
-; LMULMAX8-NEXT: li a2, 32
-; LMULMAX8-NEXT: vsetvli zero, a2, e8, m2, ta, ma
-; LMULMAX8-NEXT: vmv.v.x v8, a1
-; LMULMAX8-NEXT: vse8.v v8, (a0)
-; LMULMAX8-NEXT: ret
-;
-; LMULMAX2-LABEL: splat_v32i8:
-; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: li a2, 32
-; LMULMAX2-NEXT: vsetvli zero, a2, e8, m2, ta, ma
-; LMULMAX2-NEXT: vmv.v.x v8, a1
-; LMULMAX2-NEXT: vse8.v v8, (a0)
-; LMULMAX2-NEXT: ret
-;
-; LMULMAX1-LABEL: splat_v32i8:
-; LMULMAX1: # %bb.0:
-; LMULMAX1-NEXT: vsetivli zero, 16, e8, m1, ta, ma
-; LMULMAX1-NEXT: vmv.v.x v8, a1
-; LMULMAX1-NEXT: addi a1, a0, 16
-; LMULMAX1-NEXT: vse8.v v8, (a1)
-; LMULMAX1-NEXT: vse8.v v8, (a0)
-; LMULMAX1-NEXT: ret
+; CHECK-LABEL: splat_v32i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a2, 32
+; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, ma
+; CHECK-NEXT: vmv.v.x v8, a1
+; CHECK-NEXT: vse8.v v8, (a0)
+; CHECK-NEXT: ret
%a = insertelement <32 x i8> poison, i8 %y, i32 0
%b = shufflevector <32 x i8> %a, <32 x i8> poison, <32 x i32> zeroinitializer
store <32 x i8> %b, ptr %x
@@ -143,28 +82,12 @@ define void @splat_v32i8(ptr %x, i8 %y) {
}
define void @splat_v16i16(ptr %x, i16 %y) {
-; LMULMAX8-LABEL: splat_v16i16:
-; LMULMAX8: # %bb.0:
-; LMULMAX8-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; LMULMAX8-NEXT: vmv.v.x v8, a1
-; LMULMAX8-NEXT: vse16.v v8, (a0)
-; LMULMAX8-NEXT: ret
-;
-; LMULMAX2-LABEL: splat_v16i16:
-; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; LMULMAX2-NEXT: vmv.v.x v8, a1
-; LMULMAX2-NEXT: vse16.v v8, (a0)
-; LMULMAX2-NEXT: ret
-;
-; LMULMAX1-LABEL: splat_v16i16:
-; LMULMAX1: # %bb.0:
-; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; LMULMAX1-NEXT: vmv.v.x v8, a1
-; LMULMAX1-NEXT: addi a1, a0, 16
-; LMULMAX1-NEXT: vse16.v v8, (a1)
-; LMULMAX1-NEXT: vse16.v v8, (a0)
-; LMULMAX1-NEXT: ret
+; CHECK-LABEL: splat_v16i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; CHECK-NEXT: vmv.v.x v8, a1
+; CHECK-NEXT: vse16.v v8, (a0)
+; CHECK-NEXT: ret
%a = insertelement <16 x i16> poison, i16 %y, i32 0
%b = shufflevector <16 x i16> %a, <16 x i16> poison, <16 x i32> zeroinitializer
store <16 x i16> %b, ptr %x
@@ -172,28 +95,12 @@ define void @splat_v16i16(ptr %x, i16 %y) {
}
define void @splat_v8i32(ptr %x, i32 %y) {
-; LMULMAX8-LABEL: splat_v8i32:
-; LMULMAX8: # %bb.0:
-; LMULMAX8-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; LMULMAX8-NEXT: vmv.v.x v8, a1
-; LMULMAX8-NEXT: vse32.v v8, (a0)
-; LMULMAX8-NEXT: ret
-;
-; LMULMAX2-LABEL: splat_v8i32:
-; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; LMULMAX2-NEXT: vmv.v.x v8, a1
-; LMULMAX2-NEXT: vse32.v v8, (a0)
-; LMULMAX2-NEXT: ret
-;
-; LMULMAX1-LABEL: splat_v8i32:
-; LMULMAX1: # %bb.0:
-; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; LMULMAX1-NEXT: vmv.v.x v8, a1
-; LMULMAX1-NEXT: addi a1, a0, 16
-; LMULMAX1-NEXT: vse32.v v8, (a1)
-; LMULMAX1-NEXT: vse32.v v8, (a0)
-; LMULMAX1-NEXT: ret
+; CHECK-LABEL: splat_v8i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; CHECK-NEXT: vmv.v.x v8, a1
+; CHECK-NEXT: vse32.v v8, (a0)
+; CHECK-NEXT: ret
%a = insertelement <8 x i32> poison, i32 %y, i32 0
%b = shufflevector <8 x i32> %a, <8 x i32> poison, <8 x i32> zeroinitializer
store <8 x i32> %b, ptr %x
@@ -201,65 +108,25 @@ define void @splat_v8i32(ptr %x, i32 %y) {
}
define void @splat_v4i64(ptr %x, i64 %y) {
-; LMULMAX8-RV32-LABEL: splat_v4i64:
-; LMULMAX8-RV32: # %bb.0:
-; LMULMAX8-RV32-NEXT: addi sp, sp, -16
-; LMULMAX8-RV32-NEXT: .cfi_def_cfa_offset 16
-; LMULMAX8-RV32-NEXT: sw a2, 12(sp)
-; LMULMAX8-RV32-NEXT: sw a1, 8(sp)
-; LMULMAX8-RV32-NEXT: addi a1, sp, 8
-; LMULMAX8-RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; LMULMAX8-RV32-NEXT: vlse64.v v8, (a1), zero
-; LMULMAX8-RV32-NEXT: vse64.v v8, (a0)
-; LMULMAX8-RV32-NEXT: addi sp, sp, 16
-; LMULMAX8-RV32-NEXT: ret
-;
-; LMULMAX2-RV32-LABEL: splat_v4i64:
-; LMULMAX2-RV32: # %bb.0:
-; LMULMAX2-RV32-NEXT: addi sp, sp, -16
-; LMULMAX2-RV32-NEXT: .cfi_def_cfa_offset 16
-; LMULMAX2-RV32-NEXT: sw a2, 12(sp)
-; LMULMAX2-RV32-NEXT: sw a1, 8(sp)
-; LMULMAX2-RV32-NEXT: addi a1, sp, 8
-; LMULMAX2-RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; LMULMAX2-RV32-NEXT: vlse64.v v8, (a1), zero
-; LMULMAX2-RV32-NEXT: vse64.v v8, (a0)
-; LMULMAX2-RV32-NEXT: addi sp, sp, 16
-; LMULMAX2-RV32-NEXT: ret
-;
-; LMULMAX1-RV32-LABEL: splat_v4i64:
-; LMULMAX1-RV32: # %bb.0:
-; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; LMULMAX1-RV32-NEXT: vmv.v.i v0, 5
-; LMULMAX1-RV32-NEXT: vmv.v.x v8, a2
-; LMULMAX1-RV32-NEXT: vmerge.vxm v8, v8, a1, v0
-; LMULMAX1-RV32-NEXT: addi a1, a0, 16
-; LMULMAX1-RV32-NEXT: vse32.v v8, (a1)
-; LMULMAX1-RV32-NEXT: vse32.v v8, (a0)
-; LMULMAX1-RV32-NEXT: ret
-;
-; LMULMAX8-RV64-LABEL: splat_v4i64:
-; LMULMAX8-RV64: # %bb.0:
-; LMULMAX8-RV64-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; LMULMAX8-RV64-NEXT: vmv.v.x v8, a1
-; LMULMAX8-RV64-NEXT: vse64.v v8, (a0)
-; LMULMAX8-RV64-NEXT: ret
-;
-; LMULMAX2-RV64-LABEL: splat_v4i64:
-; LMULMAX2-RV64: # %bb.0:
-; LMULMAX2-RV64-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; LMULMAX2-RV64-NEXT: vmv.v.x v8, a1
-; LMULMAX2-RV64-NEXT: vse64.v v8, (a0)
-; LMULMAX2-RV64-NEXT: ret
-;
-; LMULMAX1-RV64-LABEL: splat_v4i64:
-; LMULMAX1-RV64: # %bb.0:
-; LMULMAX1-RV64-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; LMULMAX1-RV64-NEXT: vmv.v.x v8, a1
-; LMULMAX1-RV64-NEXT: addi a1, a0, 16
-; LMULMAX1-RV64-NEXT: vse64.v v8, (a1)
-; LMULMAX1-RV64-NEXT: vse64.v v8, (a0)
-; LMULMAX1-RV64-NEXT: ret
+; RV32-LABEL: splat_v4i64:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: .cfi_def_cfa_offset 16
+; RV32-NEXT: sw a2, 12(sp)
+; RV32-NEXT: sw a1, 8(sp)
+; RV32-NEXT: addi a1, sp, 8
+; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; RV32-NEXT: vlse64.v v8, (a1), zero
+; RV32-NEXT: vse64.v v8, (a0)
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: splat_v4i64:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; RV64-NEXT: vmv.v.x v8, a1
+; RV64-NEXT: vse64.v v8, (a0)
+; RV64-NEXT: ret
%a = insertelement <4 x i64> poison, i64 %y, i32 0
%b = shufflevector <4 x i64> %a, <4 x i64> poison, <4 x i32> zeroinitializer
store <4 x i64> %b, ptr %x
@@ -319,30 +186,13 @@ define void @splat_zero_v2i64(ptr %x) {
}
define void @splat_zero_v32i8(ptr %x) {
-; LMULMAX8-LABEL: splat_zero_v32i8:
-; LMULMAX8: # %bb.0:
-; LMULMAX8-NEXT: li a1, 32
-; LMULMAX8-NEXT: vsetvli zero, a1, e8, m2, ta, ma
-; LMULMAX8-NEXT: vmv.v.i v8, 0
-; LMULMAX8-NEXT: vse8.v v8, (a0)
-; LMULMAX8-NEXT: ret
-;
-; LMULMAX2-LABEL: splat_zero_v32i8:
-; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: li a1, 32
-; LMULMAX2-NEXT: vsetvli zero, a1, e8, m2, ta, ma
-; LMULMAX2-NEXT: vmv.v.i v8, 0
-; LMULMAX2-NEXT: vse8.v v8, (a0)
-; LMULMAX2-NEXT: ret
-;
-; LMULMAX1-LABEL: splat_zero_v32i8:
-; LMULMAX1: # %bb.0:
-; LMULMAX1-NEXT: vsetivli zero, 16, e8, m1, ta, ma
-; LMULMAX1-NEXT: vmv.v.i v8, 0
-; LMULMAX1-NEXT: vse8.v v8, (a0)
-; LMULMAX1-NEXT: addi a0, a0, 16
-; LMULMAX1-NEXT: vse8.v v8, (a0)
-; LMULMAX1-NEXT: ret
+; CHECK-LABEL: splat_zero_v32i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a1, 32
+; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
+; CHECK-NEXT: vmv.v.i v8, 0
+; CHECK-NEXT: vse8.v v8, (a0)
+; CHECK-NEXT: ret
%a = insertelement <32 x i8> poison, i8 0, i32 0
%b = shufflevector <32 x i8> %a, <32 x i8> poison, <32 x i32> zeroinitializer
store <32 x i8> %b, ptr %x
@@ -350,28 +200,12 @@ define void @splat_zero_v32i8(ptr %x) {
}
define void @splat_zero_v16i16(ptr %x) {
-; LMULMAX8-LABEL: splat_zero_v16i16:
-; LMULMAX8: # %bb.0:
-; LMULMAX8-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; LMULMAX8-NEXT: vmv.v.i v8, 0
-; LMULMAX8-NEXT: vse16.v v8, (a0)
-; LMULMAX8-NEXT: ret
-;
-; LMULMAX2-LABEL: splat_zero_v16i16:
-; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; LMULMAX2-NEXT: vmv.v.i v8, 0
-; LMULMAX2-NEXT: vse16.v v8, (a0)
-; LMULMAX2-NEXT: ret
-;
-; LMULMAX1-LABEL: splat_zero_v16i16:
-; LMULMAX1: # %bb.0:
-; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; LMULMAX1-NEXT: vmv.v.i v8, 0
-; LMULMAX1-NEXT: vse16.v v8, (a0)
-; LMULMAX1-NEXT: addi a0, a0, 16
-; LMULMAX1-NEXT: vse16.v v8, (a0)
-; LMULMAX1-NEXT: ret
+; CHECK-LABEL: splat_zero_v16i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; CHECK-NEXT: vmv.v.i v8, 0
+; CHECK-NEXT: vse16.v v8, (a0)
+; CHECK-NEXT: ret
%a = insertelement <16 x i16> poison, i16 0, i32 0
%b = shufflevector <16 x i16> %a, <16 x i16> poison, <16 x i32> zeroinitializer
store <16 x i16> %b, ptr %x
@@ -379,28 +213,12 @@ define void @splat_zero_v16i16(ptr %x) {
}
define void @splat_zero_v8i32(ptr %x) {
-; LMULMAX8-LABEL: splat_zero_v8i32:
-; LMULMAX8: # %bb.0:
-; LMULMAX8-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; LMULMAX8-NEXT: vmv.v.i v8, 0
-; LMULMAX8-NEXT: vse32.v v8, (a0)
-; LMULMAX8-NEXT: ret
-;
-; LMULMAX2-LABEL: splat_zero_v8i32:
-; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; LMULMAX2-NEXT: vmv.v.i v8, 0
-; LMULMAX2-NEXT: vse32.v v8, (a0)
-; LMULMAX2-NEXT: ret
-;
-; LMULMAX1-LABEL: splat_zero_v8i32:
-; LMULMAX1: # %bb.0:
-; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; LMULMAX1-NEXT: vmv.v.i v8, 0
-; LMULMAX1-NEXT: vse32.v v8, (a0)
-; LMULMAX1-NEXT: addi a0, a0, 16
-; LMULMAX1-NEXT: vse32.v v8, (a0)
-; LMULMAX1-NEXT: ret
+; CHECK-LABEL: splat_zero_v8i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; CHECK-NEXT: vmv.v.i v8, 0
+; CHECK-NEXT: vse32.v v8, (a0)
+; CHECK-NEXT: ret
%a = insertelement <8 x i32> poison, i32 0, i32 0
%b = shufflevector <8 x i32> %a, <8 x i32> poison, <8 x i32> zeroinitializer
store <8 x i32> %b, ptr %x
@@ -408,37 +226,12 @@ define void @splat_zero_v8i32(ptr %x) {
}
define void @splat_zero_v4i64(ptr %x) {
-; LMULMAX8-LABEL: splat_zero_v4i64:
-; LMULMAX8: # %bb.0:
-; LMULMAX8-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; LMULMAX8-NEXT: vmv.v.i v8, 0
-; LMULMAX8-NEXT: vse64.v v8, (a0)
-; LMULMAX8-NEXT: ret
-;
-; LMULMAX2-LABEL: splat_zero_v4i64:
-; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; LMULMAX2-NEXT: vmv.v.i v8, 0
-; LMULMAX2-NEXT: vse64.v v8, (a0)
-; LMULMAX2-NEXT: ret
-;
-; LMULMAX1-RV32-LABEL: splat_zero_v4i64:
-; LMULMAX1-RV32: # %bb.0:
-; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; LMULMAX1-RV32-NEXT: vmv.v.i v8, 0
-; LMULMAX1-RV32-NEXT: vse32.v v8, (a0)
-; LMULMAX1-RV32-NEXT: addi a0, a0, 16
-; LMULMAX1-RV32-NEXT: vse32.v v8, (a0)
-; LMULMAX1-RV32-NEXT: ret
-;
-; LMULMAX1-RV64-LABEL: splat_zero_v4i64:
-; LMULMAX1-RV64: # %bb.0:
-; LMULMAX1-RV64-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; LMULMAX1-RV64-NEXT: vmv.v.i v8, 0
-; LMULMAX1-RV64-NEXT: vse64.v v8, (a0)
-; LMULMAX1-RV64-NEXT: addi a0, a0, 16
-; LMULMAX1-RV64-NEXT: vse64.v v8, (a0)
-; LMULMAX1-RV64-NEXT: ret
+; CHECK-LABEL: splat_zero_v4i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; CHECK-NEXT: vmv.v.i v8, 0
+; CHECK-NEXT: vse64.v v8, (a0)
+; CHECK-NEXT: ret
%a = insertelement <4 x i64> poison, i64 0, i32 0
%b = shufflevector <4 x i64> %a, <4 x i64> poison, <4 x i32> zeroinitializer
store <4 x i64> %b, ptr %x
@@ -467,81 +260,33 @@ define void @splat_zero_v2i16_unaligned(ptr %p) {
}
define void @splat_zero_v4i16(ptr %p) {
-; LMULMAX8-RV32-LABEL: splat_zero_v4i16:
-; LMULMAX8-RV32: # %bb.0:
-; LMULMAX8-RV32-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
-; LMULMAX8-RV32-NEXT: vmv.v.i v8, 0
-; LMULMAX8-RV32-NEXT: vse16.v v8, (a0)
-; LMULMAX8-RV32-NEXT: ret
-;
-; LMULMAX2-RV32-LABEL: splat_zero_v4i16:
-; LMULMAX2-RV32: # %bb.0:
-; LMULMAX2-RV32-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
-; LMULMAX2-RV32-NEXT: vmv.v.i v8, 0
-; LMULMAX2-RV32-NEXT: vse16.v v8, (a0)
-; LMULMAX2-RV32-NEXT: ret
-;
-; LMULMAX1-RV32-LABEL: splat_zero_v4i16:
-; LMULMAX1-RV32: # %bb.0:
-; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
-; LMULMAX1-RV32-NEXT: vmv.v.i v8, 0
-; LMULMAX1-RV32-NEXT: vse16.v v8, (a0)
-; LMULMAX1-RV32-NEXT: ret
-;
-; LMULMAX8-RV64-LABEL: splat_zero_v4i16:
-; LMULMAX8-RV64: # %bb.0:
-; LMULMAX8-RV64-NEXT: sd zero, 0(a0)
-; LMULMAX8-RV64-NEXT: ret
-;
-; LMULMAX2-RV64-LABEL: splat_zero_v4i16:
-; LMULMAX2-RV64: # %bb.0:
-; LMULMAX2-RV64-NEXT: sd zero, 0(a0)
-; LMULMAX2-RV64-NEXT: ret
-;
-; LMULMAX1-RV64-LABEL: splat_zero_v4i16:
-; LMULMAX1-RV64: # %bb.0:
-; LMULMAX1-RV64-NEXT: sd zero, 0(a0)
-; LMULMAX1-RV64-NEXT: ret
+; RV32-LABEL: splat_zero_v4i16:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
+; RV32-NEXT: vmv.v.i v8, 0
+; RV32-NEXT: vse16.v v8, (a0)
+; RV32-NEXT: ret
+;
+; RV64-LABEL: splat_zero_v4i16:
+; RV64: # %bb.0:
+; RV64-NEXT: sd zero, 0(a0)
+; RV64-NEXT: ret
store <4 x i16> zeroinitializer, ptr %p
ret void
}
define void @splat_zero_v2i32(ptr %p) {
-; LMULMAX8-RV32-LABEL: splat_zero_v2i32:
-; LMULMAX8-RV32: # %bb.0:
-; LMULMAX8-RV32-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
-; LMULMAX8-RV32-NEXT: vmv.v.i v8, 0
-; LMULMAX8-RV32-NEXT: vse32.v v8, (a0)
-; LMULMAX8-RV32-NEXT: ret
-;
-; LMULMAX2-RV32-LABEL: splat_zero_v2i32:
-; LMULMAX2-RV32: # %bb.0:
-; LMULMAX2-RV32-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
-; LMULMAX2-RV32-NEXT: vmv.v.i v8, 0
-; LMULMAX2-RV32-NEXT: vse32.v v8, (a0)
-; LMULMAX2-RV32-NEXT: ret
-;
-; LMULMAX1-RV32-LABEL: splat_zero_v2i32:
-; LMULMAX1-RV32: # %bb.0:
-; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
-; LMULMAX1-RV32-NEXT: vmv.v.i v8, 0
-; LMULMAX1-RV32-NEXT: vse32.v v8, (a0)
-; LMULMAX1-RV32-NEXT: ret
-;
-; LMULMAX8-RV64-LABEL: splat_zero_v2i32:
-; LMULMAX8-RV64: # %bb.0:
-; LMULMAX8-RV64-NEXT: sd zero, 0(a0)
-; LMULMAX8-RV64-NEXT: ret
-;
-; LMULMAX2-RV64-LABEL: splat_zero_v2i32:
-; LMULMAX2-RV64: # %bb.0:
-; LMULMAX2-RV64-NEXT: sd zero, 0(a0)
-; LMULMAX2-RV64-NEXT: ret
-;
-; LMULMAX1-RV64-LABEL: splat_zero_v2i32:
-; LMULMAX1-RV64: # %bb.0:
-; LMULMAX1-RV64-NEXT: sd zero, 0(a0)
-; LMULMAX1-RV64-NEXT: ret
+; RV32-LABEL: splat_zero_v2i32:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
+; RV32-NEXT: vmv.v.i v8, 0
+; RV32-NEXT: vse32.v v8, (a0)
+; RV32-NEXT: ret
+;
+; RV64-LABEL: splat_zero_v2i32:
+; RV64: # %bb.0:
+; RV64-NEXT: sd zero, 0(a0)
+; RV64-NEXT: ret
store <2 x i32> zeroinitializer, ptr %p
ret void
}
@@ -612,30 +357,13 @@ define void @splat_allones_v2i64(ptr %x) {
}
define void @splat_allones_v32i8(ptr %x) {
-; LMULMAX8-LABEL: splat_allones_v32i8:
-; LMULMAX8: # %bb.0:
-; LMULMAX8-NEXT: li a1, 32
-; LMULMAX8-NEXT: vsetvli zero, a1, e8, m2, ta, ma
-; LMULMAX8-NEXT: vmv.v.i v8, -1
-; LMULMAX8-NEXT: vse8.v v8, (a0)
-; LMULMAX8-NEXT: ret
-;
-; LMULMAX2-LABEL: splat_allones_v32i8:
-; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: li a1, 32
-; LMULMAX2-NEXT: vsetvli zero, a1, e8, m2, ta, ma
-; LMULMAX2-NEXT: vmv.v.i v8, -1
-; LMULMAX2-NEXT: vse8.v v8, (a0)
-; LMULMAX2-NEXT: ret
-;
-; LMULMAX1-LABEL: splat_allones_v32i8:
-; LMULMAX1: # %bb.0:
-; LMULMAX1-NEXT: vsetivli zero, 16, e8, m1, ta, ma
-; LMULMAX1-NEXT: vmv.v.i v8, -1
-; LMULMAX1-NEXT: vse8.v v8, (a0)
-; LMULMAX1-NEXT: addi a0, a0, 16
-; LMULMAX1-NEXT: vse8.v v8, (a0)
-; LMULMAX1-NEXT: ret
+; CHECK-LABEL: splat_allones_v32i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a1, 32
+; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
+; CHECK-NEXT: vmv.v.i v8, -1
+; CHECK-NEXT: vse8.v v8, (a0)
+; CHECK-NEXT: ret
%a = insertelement <32 x i8> poison, i8 -1, i32 0
%b = shufflevector <32 x i8> %a, <32 x i8> poison, <32 x i32> zeroinitializer
store <32 x i8> %b, ptr %x
@@ -643,28 +371,12 @@ define void @splat_allones_v32i8(ptr %x) {
}
define void @splat_allones_v16i16(ptr %x) {
-; LMULMAX8-LABEL: splat_allones_v16i16:
-; LMULMAX8: # %bb.0:
-; LMULMAX8-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; LMULMAX8-NEXT: vmv.v.i v8, -1
-; LMULMAX8-NEXT: vse16.v v8, (a0)
-; LMULMAX8-NEXT: ret
-;
-; LMULMAX2-LABEL: splat_allones_v16i16:
-; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; LMULMAX2-NEXT: vmv.v.i v8, -1
-; LMULMAX2-NEXT: vse16.v v8, (a0)
-; LMULMAX2-NEXT: ret
-;
-; LMULMAX1-LABEL: splat_allones_v16i16:
-; LMULMAX1: # %bb.0:
-; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; LMULMAX1-NEXT: vmv.v.i v8, -1
-; LMULMAX1-NEXT: vse16.v v8, (a0)
-; LMULMAX1-NEXT: addi a0, a0, 16
-; LMULMAX1-NEXT: vse16.v v8, (a0)
-; LMULMAX1-NEXT: ret
+; CHECK-LABEL: splat_allones_v16i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; CHECK-NEXT: vmv.v.i v8, -1
+; CHECK-NEXT: vse16.v v8, (a0)
+; CHECK-NEXT: ret
%a = insertelement <16 x i16> poison, i16 -1, i32 0
%b = shufflevector <16 x i16> %a, <16 x i16> poison, <16 x i32> zeroinitializer
store <16 x i16> %b, ptr %x
@@ -672,28 +384,12 @@ define void @splat_allones_v16i16(ptr %x) {
}
define void @splat_allones_v8i32(ptr %x) {
-; LMULMAX8-LABEL: splat_allones_v8i32:
-; LMULMAX8: # %bb.0:
-; LMULMAX8-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; LMULMAX8-NEXT: vmv.v.i v8, -1
-; LMULMAX8-NEXT: vse32.v v8, (a0)
-; LMULMAX8-NEXT: ret
-;
-; LMULMAX2-LABEL: splat_allones_v8i32:
-; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; LMULMAX2-NEXT: vmv.v.i v8, -1
-; LMULMAX2-NEXT: vse32.v v8, (a0)
-; LMULMAX2-NEXT: ret
-;
-; LMULMAX1-LABEL: splat_allones_v8i32:
-; LMULMAX1: # %bb.0:
-; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; LMULMAX1-NEXT: vmv.v.i v8, -1
-; LMULMAX1-NEXT: vse32.v v8, (a0)
-; LMULMAX1-NEXT: addi a0, a0, 16
-; LMULMAX1-NEXT: vse32.v v8, (a0)
-; LMULMAX1-NEXT: ret
+; CHECK-LABEL: splat_allones_v8i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; CHECK-NEXT: vmv.v.i v8, -1
+; CHECK-NEXT: vse32.v v8, (a0)
+; CHECK-NEXT: ret
%a = insertelement <8 x i32> poison, i32 -1, i32 0
%b = shufflevector <8 x i32> %a, <8 x i32> poison, <8 x i32> zeroinitializer
store <8 x i32> %b, ptr %x
@@ -701,37 +397,12 @@ define void @splat_allones_v8i32(ptr %x) {
}
define void @splat_allones_v4i64(ptr %x) {
-; LMULMAX8-LABEL: splat_allones_v4i64:
-; LMULMAX8: # %bb.0:
-; LMULMAX8-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; LMULMAX8-NEXT: vmv.v.i v8, -1
-; LMULMAX8-NEXT: vse64.v v8, (a0)
-; LMULMAX8-NEXT: ret
-;
-; LMULMAX2-LABEL: splat_allones_v4i64:
-; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; LMULMAX2-NEXT: vmv.v.i v8, -1
-; LMULMAX2-NEXT: vse64.v v8, (a0)
-; LMULMAX2-NEXT: ret
-;
-; LMULMAX1-RV32-LABEL: splat_allones_v4i64:
-; LMULMAX1-RV32: # %bb.0:
-; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; LMULMAX1-RV32-NEXT: vmv.v.i v8, -1
-; LMULMAX1-RV32-NEXT: vse32.v v8, (a0)
-; LMULMAX1-RV32-NEXT: addi a0, a0, 16
-; LMULMAX1-RV32-NEXT: vse32.v v8, (a0)
-; LMULMAX1-RV32-NEXT: ret
-;
-; LMULMAX1-RV64-LABEL: splat_allones_v4i64:
-; LMULMAX1-RV64: # %bb.0:
-; LMULMAX1-RV64-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; LMULMAX1-RV64-NEXT: vmv.v.i v8, -1
-; LMULMAX1-RV64-NEXT: vse64.v v8, (a0)
-; LMULMAX1-RV64-NEXT: addi a0, a0, 16
-; LMULMAX1-RV64-NEXT: vse64.v v8, (a0)
-; LMULMAX1-RV64-NEXT: ret
+; CHECK-LABEL: splat_allones_v4i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; CHECK-NEXT: vmv.v.i v8, -1
+; CHECK-NEXT: vse64.v v8, (a0)
+; CHECK-NEXT: ret
%a = insertelement <4 x i64> poison, i64 -1, i32 0
%b = shufflevector <4 x i64> %a, <4 x i64> poison, <4 x i32> zeroinitializer
store <4 x i64> %b, ptr %x
@@ -743,48 +414,13 @@ define void @splat_allones_v4i64(ptr %x) {
; FIXME: We should prevent this and use the implicit sign extension of vmv.v.x
; with SEW=64 on RV32.
define void @splat_allones_with_use_v4i64(ptr %x) {
-; LMULMAX8-LABEL: splat_allones_with_use_v4i64:
-; LMULMAX8: # %bb.0:
-; LMULMAX8-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; LMULMAX8-NEXT: vle64.v v8, (a0)
-; LMULMAX8-NEXT: vadd.vi v8, v8, -1
-; LMULMAX8-NEXT: vse64.v v8, (a0)
-; LMULMAX8-NEXT: ret
-;
-; LMULMAX2-LABEL: splat_allones_with_use_v4i64:
-; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; LMULMAX2-NEXT: vle64.v v8, (a0)
-; LMULMAX2-NEXT: vadd.vi v8, v8, -1
-; LMULMAX2-NEXT: vse64.v v8, (a0)
-; LMULMAX2-NEXT: ret
-;
-; LMULMAX1-RV32-LABEL: splat_allones_with_use_v4i64:
-; LMULMAX1-RV32: # %bb.0:
-; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; LMULMAX1-RV32-NEXT: vle64.v v8, (a0)
-; LMULMAX1-RV32-NEXT: addi a1, a0, 16
-; LMULMAX1-RV32-NEXT: vle64.v v9, (a1)
-; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; LMULMAX1-RV32-NEXT: vmv.v.i v10, -1
-; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; LMULMAX1-RV32-NEXT: vadd.vv v9, v9, v10
-; LMULMAX1-RV32-NEXT: vadd.vv v8, v8, v10
-; LMULMAX1-RV32-NEXT: vse64.v v8, (a0)
-; LMULMAX1-RV32-NEXT: vse64.v v9, (a1)
-; LMULMAX1-RV32-NEXT: ret
-;
-; LMULMAX1-RV64-LABEL: splat_allones_with_use_v4i64:
-; LMULMAX1-RV64: # %bb.0:
-; LMULMAX1-RV64-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; LMULMAX1-RV64-NEXT: addi a1, a0, 16
-; LMULMAX1-RV64-NEXT: vle64.v v8, (a1)
-; LMULMAX1-RV64-NEXT: vle64.v v9, (a0)
-; LMULMAX1-RV64-NEXT: vadd.vi v8, v8, -1
-; LMULMAX1-RV64-NEXT: vadd.vi v9, v9, -1
-; LMULMAX1-RV64-NEXT: vse64.v v9, (a0)
-; LMULMAX1-RV64-NEXT: vse64.v v8, (a1)
-; LMULMAX1-RV64-NEXT: ret
+; CHECK-LABEL: splat_allones_with_use_v4i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; CHECK-NEXT: vle64.v v8, (a0)
+; CHECK-NEXT: vadd.vi v8, v8, -1
+; CHECK-NEXT: vse64.v v8, (a0)
+; CHECK-NEXT: ret
%a = load <4 x i64>, ptr %x
%b = add <4 x i64> %a, <i64 -1, i64 -1, i64 -1, i64 -1>
store <4 x i64> %b, ptr %x
@@ -796,171 +432,28 @@ define void @splat_allones_with_use_v4i64(ptr %x) {
; which exceeded maximum-expected size of 512. The scalable container type of
; nxv8i64 should have been used instead.
define void @vadd_vx_v16i64(ptr %a, i64 %b, ptr %c) {
-; LMULMAX8-RV32-LABEL: vadd_vx_v16i64:
-; LMULMAX8-RV32: # %bb.0:
-; LMULMAX8-RV32-NEXT: addi sp, sp, -16
-; LMULMAX8-RV32-NEXT: .cfi_def_cfa_offset 16
-; LMULMAX8-RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; LMULMAX8-RV32-NEXT: vle64.v v8, (a0)
-; LMULMAX8-RV32-NEXT: sw a2, 12(sp)
-; LMULMAX8-RV32-NEXT: sw a1, 8(sp)
-; LMULMAX8-RV32-NEXT: addi a0, sp, 8
-; LMULMAX8-RV32-NEXT: vlse64.v v16, (a0), zero
-; LMULMAX8-RV32-NEXT: vadd.vv v8, v8, v16
-; LMULMAX8-RV32-NEXT: vse64.v v8, (a3)
-; LMULMAX8-RV32-NEXT: addi sp, sp, 16
-; LMULMAX8-RV32-NEXT: ret
-;
-; LMULMAX2-RV32-LABEL: vadd_vx_v16i64:
-; LMULMAX2-RV32: # %bb.0:
-; LMULMAX2-RV32-NEXT: addi a4, a0, 64
-; LMULMAX2-RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; LMULMAX2-RV32-NEXT: vle64.v v8, (a4)
-; LMULMAX2-RV32-NEXT: addi a4, a0, 96
-; LMULMAX2-RV32-NEXT: vle64.v v10, (a4)
-; LMULMAX2-RV32-NEXT: vle64.v v12, (a0)
-; LMULMAX2-RV32-NEXT: addi a0, a0, 32
-; LMULMAX2-RV32-NEXT: vle64.v v14, (a0)
-; LMULMAX2-RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; LMULMAX2-RV32-NEXT: li a0, 85
-; LMULMAX2-RV32-NEXT: vmv.s.x v0, a0
-; LMULMAX2-RV32-NEXT: vmv.v.x v16, a2
-; LMULMAX2-RV32-NEXT: vmerge.vxm v16, v16, a1, v0
-; LMULMAX2-RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; LMULMAX2-RV32-NEXT: vadd.vv v14, v14, v16
-; LMULMAX2-RV32-NEXT: vadd.vv v12, v12, v16
-; LMULMAX2-RV32-NEXT: vadd.vv v10, v10, v16
-; LMULMAX2-RV32-NEXT: vadd.vv v8, v8, v16
-; LMULMAX2-RV32-NEXT: addi a0, a3, 64
-; LMULMAX2-RV32-NEXT: vse64.v v8, (a0)
-; LMULMAX2-RV32-NEXT: addi a0, a3, 96
-; LMULMAX2-RV32-NEXT: vse64.v v10, (a0)
-; LMULMAX2-RV32-NEXT: vse64.v v12, (a3)
-; LMULMAX2-RV32-NEXT: addi a0, a3, 32
-; LMULMAX2-RV32-NEXT: vse64.v v14, (a0)
-; LMULMAX2-RV32-NEXT: ret
-;
-; LMULMAX1-RV32-LABEL: vadd_vx_v16i64:
-; LMULMAX1-RV32: # %bb.0:
-; LMULMAX1-RV32-NEXT: addi a4, a0, 96
-; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; LMULMAX1-RV32-NEXT: vle64.v v8, (a4)
-; LMULMAX1-RV32-NEXT: addi a4, a0, 112
-; LMULMAX1-RV32-NEXT: vle64.v v9, (a4)
-; LMULMAX1-RV32-NEXT: addi a4, a0, 64
-; LMULMAX1-RV32-NEXT: vle64.v v10, (a4)
-; LMULMAX1-RV32-NEXT: addi a4, a0, 80
-; LMULMAX1-RV32-NEXT: vle64.v v11, (a4)
-; LMULMAX1-RV32-NEXT: addi a4, a0, 32
-; LMULMAX1-RV32-NEXT: vle64.v v12, (a4)
-; LMULMAX1-RV32-NEXT: addi a4, a0, 48
-; LMULMAX1-RV32-NEXT: vle64.v v13, (a4)
-; LMULMAX1-RV32-NEXT: vle64.v v14, (a0)
-; LMULMAX1-RV32-NEXT: addi a0, a0, 16
-; LMULMAX1-RV32-NEXT: vle64.v v15, (a0)
-; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; LMULMAX1-RV32-NEXT: vmv.v.i v0, 5
-; LMULMAX1-RV32-NEXT: vmv.v.x v16, a2
-; LMULMAX1-RV32-NEXT: vmerge.vxm v16, v16, a1, v0
-; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; LMULMAX1-RV32-NEXT: vadd.vv v15, v15, v16
-; LMULMAX1-RV32-NEXT: vadd.vv v14, v14, v16
-; LMULMAX1-RV32-NEXT: vadd.vv v13, v13, v16
-; LMULMAX1-RV32-NEXT: vadd.vv v12, v12, v16
-; LMULMAX1-RV32-NEXT: vadd.vv v11, v11, v16
-; LMULMAX1-RV32-NEXT: vadd.vv v10, v10, v16
-; LMULMAX1-RV32-NEXT: vadd.vv v9, v9, v16
-; LMULMAX1-RV32-NEXT: vadd.vv v8, v8, v16
-; LMULMAX1-RV32-NEXT: addi a0, a3, 96
-; LMULMAX1-RV32-NEXT: vse64.v v8, (a0)
-; LMULMAX1-RV32-NEXT: addi a0, a3, 112
-; LMULMAX1-RV32-NEXT: vse64.v v9, (a0)
-; LMULMAX1-RV32-NEXT: addi a0, a3, 64
-; LMULMAX1-RV32-NEXT: vse64.v v10, (a0)
-; LMULMAX1-RV32-NEXT: addi a0, a3, 80
-; LMULMAX1-RV32-NEXT: vse64.v v11, (a0)
-; LMULMAX1-RV32-NEXT: addi a0, a3, 32
-; LMULMAX1-RV32-NEXT: vse64.v v12, (a0)
-; LMULMAX1-RV32-NEXT: addi a0, a3, 48
-; LMULMAX1-RV32-NEXT: vse64.v v13, (a0)
-; LMULMAX1-RV32-NEXT: vse64.v v14, (a3)
-; LMULMAX1-RV32-NEXT: addi a3, a3, 16
-; LMULMAX1-RV32-NEXT: vse64.v v15, (a3)
-; LMULMAX1-RV32-NEXT: ret
-;
-; LMULMAX8-RV64-LABEL: vadd_vx_v16i64:
-; LMULMAX8-RV64: # %bb.0:
-; LMULMAX8-RV64-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; LMULMAX8-RV64-NEXT: vle64.v v8, (a0)
-; LMULMAX8-RV64-NEXT: vadd.vx v8, v8, a1
-; LMULMAX8-RV64-NEXT: vse64.v v8, (a2)
-; LMULMAX8-RV64-NEXT: ret
-;
-; LMULMAX2-RV64-LABEL: vadd_vx_v16i64:
-; LMULMAX2-RV64: # %bb.0:
-; LMULMAX2-RV64-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; LMULMAX2-RV64-NEXT: addi a3, a0, 96
-; LMULMAX2-RV64-NEXT: vle64.v v8, (a3)
-; LMULMAX2-RV64-NEXT: addi a3, a0, 32
-; LMULMAX2-RV64-NEXT: vle64.v v10, (a3)
-; LMULMAX2-RV64-NEXT: addi a3, a0, 64
-; LMULMAX2-RV64-NEXT: vle64.v v12, (a3)
-; LMULMAX2-RV64-NEXT: vle64.v v14, (a0)
-; LMULMAX2-RV64-NEXT: vadd.vx v10, v10, a1
-; LMULMAX2-RV64-NEXT: vadd.vx v8, v8, a1
-; LMULMAX2-RV64-NEXT: vadd.vx v12, v12, a1
-; LMULMAX2-RV64-NEXT: vadd.vx v14, v14, a1
-; LMULMAX2-RV64-NEXT: vse64.v v14, (a2)
-; LMULMAX2-RV64-NEXT: addi a0, a2, 64
-; LMULMAX2-RV64-NEXT: vse64.v v12, (a0)
-; LMULMAX2-RV64-NEXT: addi a0, a2, 96
-; LMULMAX2-RV64-NEXT: vse64.v v8, (a0)
-; LMULMAX2-RV64-NEXT: addi a0, a2, 32
-; LMULMAX2-RV64-NEXT: vse64.v v10, (a0)
-; LMULMAX2-RV64-NEXT: ret
-;
-; LMULMAX1-RV64-LABEL: vadd_vx_v16i64:
-; LMULMAX1-RV64: # %bb.0:
-; LMULMAX1-RV64-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; LMULMAX1-RV64-NEXT: vle64.v v8, (a0)
-; LMULMAX1-RV64-NEXT: addi a3, a0, 96
-; LMULMAX1-RV64-NEXT: vle64.v v9, (a3)
-; LMULMAX1-RV64-NEXT: addi a3, a0, 112
-; LMULMAX1-RV64-NEXT: vle64.v v10, (a3)
-; LMULMAX1-RV64-NEXT: addi a3, a0, 64
-; LMULMAX1-RV64-NEXT: vle64.v v11, (a3)
-; LMULMAX1-RV64-NEXT: addi a3, a0, 48
-; LMULMAX1-RV64-NEXT: vle64.v v12, (a3)
-; LMULMAX1-RV64-NEXT: addi a3, a0, 16
-; LMULMAX1-RV64-NEXT: vle64.v v13, (a3)
-; LMULMAX1-RV64-NEXT: addi a3, a0, 80
-; LMULMAX1-RV64-NEXT: addi a0, a0, 32
-; LMULMAX1-RV64-NEXT: vle64.v v14, (a0)
-; LMULMAX1-RV64-NEXT: vle64.v v15, (a3)
-; LMULMAX1-RV64-NEXT: vadd.vx v13, v13, a1
-; LMULMAX1-RV64-NEXT: vadd.vx v12, v12, a1
-; LMULMAX1-RV64-NEXT: vadd.vx v14, v14, a1
-; LMULMAX1-RV64-NEXT: vadd.vx v15, v15, a1
-; LMULMAX1-RV64-NEXT: vadd.vx v11, v11, a1
-; LMULMAX1-RV64-NEXT: vadd.vx v10, v10, a1
-; LMULMAX1-RV64-NEXT: vadd.vx v9, v9, a1
-; LMULMAX1-RV64-NEXT: vadd.vx v8, v8, a1
-; LMULMAX1-RV64-NEXT: vse64.v v8, (a2)
-; LMULMAX1-RV64-NEXT: addi a0, a2, 96
-; LMULMAX1-RV64-NEXT: vse64.v v9, (a0)
-; LMULMAX1-RV64-NEXT: addi a0, a2, 112
-; LMULMAX1-RV64-NEXT: vse64.v v10, (a0)
-; LMULMAX1-RV64-NEXT: addi a0, a2, 64
-; LMULMAX1-RV64-NEXT: vse64.v v11, (a0)
-; LMULMAX1-RV64-NEXT: addi a0, a2, 80
-; LMULMAX1-RV64-NEXT: vse64.v v15, (a0)
-; LMULMAX1-RV64-NEXT: addi a0, a2, 32
-; LMULMAX1-RV64-NEXT: vse64.v v14, (a0)
-; LMULMAX1-RV64-NEXT: addi a0, a2, 48
-; LMULMAX1-RV64-NEXT: vse64.v v12, (a0)
-; LMULMAX1-RV64-NEXT: addi a2, a2, 16
-; LMULMAX1-RV64-NEXT: vse64.v v13, (a2)
-; LMULMAX1-RV64-NEXT: ret
+; RV32-LABEL: vadd_vx_v16i64:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: .cfi_def_cfa_offset 16
+; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
+; RV32-NEXT: vle64.v v8, (a0)
+; RV32-NEXT: sw a2, 12(sp)
+; RV32-NEXT: sw a1, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vlse64.v v16, (a0), zero
+; RV32-NEXT: vadd.vv v8, v8, v16
+; RV32-NEXT: vse64.v v8, (a3)
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vadd_vx_v16i64:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, ma
+; RV64-NEXT: vle64.v v8, (a0)
+; RV64-NEXT: vadd.vx v8, v8, a1
+; RV64-NEXT: vse64.v v8, (a2)
+; RV64-NEXT: ret
%va = load <16 x i64>, ptr %a
%head = insertelement <16 x i64> poison, i64 %b, i32 0
%splat = shufflevector <16 x i64> %head, <16 x i64> poison, <16 x i32> zeroinitializer
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-vrgather.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-vrgather.ll
index 841e72f..2c0b1d0 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-vrgather.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-vrgather.ll
@@ -1,8 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v -riscv-v-fixed-length-vector-lmul-max=4 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,LMULMAX4
-; RUN: llc -mtriple=riscv64 -mattr=+v -riscv-v-fixed-length-vector-lmul-max=4 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,LMULMAX4
-; RUN: llc -mtriple=riscv32 -mattr=+v -riscv-v-fixed-length-vector-lmul-max=1 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,LMULMAX1
-; RUN: llc -mtriple=riscv64 -mattr=+v -riscv-v-fixed-length-vector-lmul-max=1 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,LMULMAX1
+; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s | FileCheck %s
define void @gather_const_v16i8(ptr %x) {
; CHECK-LABEL: gather_const_v16i8:
@@ -69,27 +67,14 @@ define void @gather_const_v2i64(ptr %x) {
}
define void @gather_const_v64i8(ptr %x) {
-; LMULMAX4-LABEL: gather_const_v64i8:
-; LMULMAX4: # %bb.0:
-; LMULMAX4-NEXT: li a1, 64
-; LMULMAX4-NEXT: addi a2, a0, 32
-; LMULMAX4-NEXT: vsetvli zero, a1, e8, m4, ta, ma
-; LMULMAX4-NEXT: vlse8.v v8, (a2), zero
-; LMULMAX4-NEXT: vse8.v v8, (a0)
-; LMULMAX4-NEXT: ret
-;
-; LMULMAX1-LABEL: gather_const_v64i8:
-; LMULMAX1: # %bb.0:
-; LMULMAX1-NEXT: addi a1, a0, 32
-; LMULMAX1-NEXT: vsetivli zero, 16, e8, m1, ta, ma
-; LMULMAX1-NEXT: vlse8.v v8, (a1), zero
-; LMULMAX1-NEXT: addi a2, a0, 16
-; LMULMAX1-NEXT: addi a3, a0, 48
-; LMULMAX1-NEXT: vse8.v v8, (a1)
-; LMULMAX1-NEXT: vse8.v v8, (a3)
-; LMULMAX1-NEXT: vse8.v v8, (a0)
-; LMULMAX1-NEXT: vse8.v v8, (a2)
-; LMULMAX1-NEXT: ret
+; CHECK-LABEL: gather_const_v64i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a1, 64
+; CHECK-NEXT: addi a2, a0, 32
+; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
+; CHECK-NEXT: vlse8.v v8, (a2), zero
+; CHECK-NEXT: vse8.v v8, (a0)
+; CHECK-NEXT: ret
%a = load <64 x i8>, ptr %x
%b = extractelement <64 x i8> %a, i32 32
%c = insertelement <64 x i8> poison, i8 %b, i32 0
@@ -99,28 +84,14 @@ define void @gather_const_v64i8(ptr %x) {
}
define void @gather_const_v16i16(ptr %x) {
-; LMULMAX4-LABEL: gather_const_v16i16:
-; LMULMAX4: # %bb.0:
-; LMULMAX4-NEXT: li a1, 32
-; LMULMAX4-NEXT: addi a2, a0, 50
-; LMULMAX4-NEXT: vsetvli zero, a1, e16, m4, ta, ma
-; LMULMAX4-NEXT: vlse16.v v8, (a2), zero
-; LMULMAX4-NEXT: vse16.v v8, (a0)
-; LMULMAX4-NEXT: ret
-;
-; LMULMAX1-LABEL: gather_const_v16i16:
-; LMULMAX1: # %bb.0:
-; LMULMAX1-NEXT: addi a1, a0, 50
-; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; LMULMAX1-NEXT: vlse16.v v8, (a1), zero
-; LMULMAX1-NEXT: addi a1, a0, 48
-; LMULMAX1-NEXT: addi a2, a0, 16
-; LMULMAX1-NEXT: addi a3, a0, 32
-; LMULMAX1-NEXT: vse16.v v8, (a3)
-; LMULMAX1-NEXT: vse16.v v8, (a1)
-; LMULMAX1-NEXT: vse16.v v8, (a0)
-; LMULMAX1-NEXT: vse16.v v8, (a2)
-; LMULMAX1-NEXT: ret
+; CHECK-LABEL: gather_const_v16i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a1, 32
+; CHECK-NEXT: addi a2, a0, 50
+; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
+; CHECK-NEXT: vlse16.v v8, (a2), zero
+; CHECK-NEXT: vse16.v v8, (a0)
+; CHECK-NEXT: ret
%a = load <32 x i16>, ptr %x
%b = extractelement <32 x i16> %a, i32 25
%c = insertelement <32 x i16> poison, i16 %b, i32 0
@@ -130,27 +101,13 @@ define void @gather_const_v16i16(ptr %x) {
}
define void @gather_const_v16i32(ptr %x) {
-; LMULMAX4-LABEL: gather_const_v16i32:
-; LMULMAX4: # %bb.0:
-; LMULMAX4-NEXT: addi a1, a0, 36
-; LMULMAX4-NEXT: vsetivli zero, 16, e32, m4, ta, ma
-; LMULMAX4-NEXT: vlse32.v v8, (a1), zero
-; LMULMAX4-NEXT: vse32.v v8, (a0)
-; LMULMAX4-NEXT: ret
-;
-; LMULMAX1-LABEL: gather_const_v16i32:
-; LMULMAX1: # %bb.0:
-; LMULMAX1-NEXT: addi a1, a0, 36
-; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; LMULMAX1-NEXT: vlse32.v v8, (a1), zero
-; LMULMAX1-NEXT: addi a1, a0, 32
-; LMULMAX1-NEXT: addi a2, a0, 16
-; LMULMAX1-NEXT: addi a3, a0, 48
-; LMULMAX1-NEXT: vse32.v v8, (a1)
-; LMULMAX1-NEXT: vse32.v v8, (a3)
-; LMULMAX1-NEXT: vse32.v v8, (a0)
-; LMULMAX1-NEXT: vse32.v v8, (a2)
-; LMULMAX1-NEXT: ret
+; CHECK-LABEL: gather_const_v16i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a1, a0, 36
+; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma
+; CHECK-NEXT: vlse32.v v8, (a1), zero
+; CHECK-NEXT: vse32.v v8, (a0)
+; CHECK-NEXT: ret
%a = load <16 x i32>, ptr %x
%b = extractelement <16 x i32> %a, i32 9
%c = insertelement <16 x i32> poison, i32 %b, i32 0
@@ -160,27 +117,13 @@ define void @gather_const_v16i32(ptr %x) {
}
define void @gather_const_v8i64(ptr %x) {
-; LMULMAX4-LABEL: gather_const_v8i64:
-; LMULMAX4: # %bb.0:
-; LMULMAX4-NEXT: addi a1, a0, 24
-; LMULMAX4-NEXT: vsetivli zero, 8, e64, m4, ta, ma
-; LMULMAX4-NEXT: vlse64.v v8, (a1), zero
-; LMULMAX4-NEXT: vse64.v v8, (a0)
-; LMULMAX4-NEXT: ret
-;
-; LMULMAX1-LABEL: gather_const_v8i64:
-; LMULMAX1: # %bb.0:
-; LMULMAX1-NEXT: addi a1, a0, 24
-; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; LMULMAX1-NEXT: vlse64.v v8, (a1), zero
-; LMULMAX1-NEXT: addi a1, a0, 16
-; LMULMAX1-NEXT: addi a2, a0, 48
-; LMULMAX1-NEXT: addi a3, a0, 32
-; LMULMAX1-NEXT: vse64.v v8, (a3)
-; LMULMAX1-NEXT: vse64.v v8, (a2)
-; LMULMAX1-NEXT: vse64.v v8, (a0)
-; LMULMAX1-NEXT: vse64.v v8, (a1)
-; LMULMAX1-NEXT: ret
+; CHECK-LABEL: gather_const_v8i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a1, a0, 24
+; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, ma
+; CHECK-NEXT: vlse64.v v8, (a1), zero
+; CHECK-NEXT: vse64.v v8, (a0)
+; CHECK-NEXT: ret
%a = load <8 x i64>, ptr %x
%b = extractelement <8 x i64> %a, i32 3
%c = insertelement <8 x i64> poison, i64 %b, i32 0
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int.ll
index 7a4620a..175b110 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int.ll
@@ -1,8 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v -riscv-v-fixed-length-vector-lmul-max=2 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32,LMULMAX2,LMULMAX2-RV32
-; RUN: llc -mtriple=riscv64 -mattr=+v -riscv-v-fixed-length-vector-lmul-max=2 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64,LMULMAX2,LMULMAX2-RV64
-; RUN: llc -mtriple=riscv32 -mattr=+v -riscv-v-fixed-length-vector-lmul-max=1 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32,LMULMAX1,LMULMAX1-RV32
-; RUN: llc -mtriple=riscv64 -mattr=+v -riscv-v-fixed-length-vector-lmul-max=1 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64,LMULMAX1,LMULMAX1-RV64
+; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32
+; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64
define void @add_v16i8(ptr %x, ptr %y) {
; CHECK-LABEL: add_v16i8:
@@ -2377,45 +2375,15 @@ define void @umax_xv_v4i32(ptr %x, i32 %y) {
}
define void @add_v32i8(ptr %x, ptr %y) {
-; LMULMAX2-LABEL: add_v32i8:
-; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: li a2, 32
-; LMULMAX2-NEXT: vsetvli zero, a2, e8, m2, ta, ma
-; LMULMAX2-NEXT: vle8.v v8, (a0)
-; LMULMAX2-NEXT: vle8.v v10, (a1)
-; LMULMAX2-NEXT: vadd.vv v8, v8, v10
-; LMULMAX2-NEXT: vse8.v v8, (a0)
-; LMULMAX2-NEXT: ret
-;
-; LMULMAX1-RV32-LABEL: add_v32i8:
-; LMULMAX1-RV32: # %bb.0:
-; LMULMAX1-RV32-NEXT: vsetivli zero, 16, e8, m1, ta, ma
-; LMULMAX1-RV32-NEXT: vle8.v v8, (a0)
-; LMULMAX1-RV32-NEXT: addi a2, a0, 16
-; LMULMAX1-RV32-NEXT: vle8.v v9, (a2)
-; LMULMAX1-RV32-NEXT: addi a3, a1, 16
-; LMULMAX1-RV32-NEXT: vle8.v v10, (a3)
-; LMULMAX1-RV32-NEXT: vle8.v v11, (a1)
-; LMULMAX1-RV32-NEXT: vadd.vv v9, v9, v10
-; LMULMAX1-RV32-NEXT: vadd.vv v8, v8, v11
-; LMULMAX1-RV32-NEXT: vse8.v v8, (a0)
-; LMULMAX1-RV32-NEXT: vse8.v v9, (a2)
-; LMULMAX1-RV32-NEXT: ret
-;
-; LMULMAX1-RV64-LABEL: add_v32i8:
-; LMULMAX1-RV64: # %bb.0:
-; LMULMAX1-RV64-NEXT: vsetivli zero, 16, e8, m1, ta, ma
-; LMULMAX1-RV64-NEXT: vle8.v v8, (a0)
-; LMULMAX1-RV64-NEXT: addi a2, a1, 16
-; LMULMAX1-RV64-NEXT: vle8.v v9, (a2)
-; LMULMAX1-RV64-NEXT: addi a2, a0, 16
-; LMULMAX1-RV64-NEXT: vle8.v v10, (a2)
-; LMULMAX1-RV64-NEXT: vle8.v v11, (a1)
-; LMULMAX1-RV64-NEXT: vadd.vv v9, v10, v9
-; LMULMAX1-RV64-NEXT: vadd.vv v8, v8, v11
-; LMULMAX1-RV64-NEXT: vse8.v v8, (a0)
-; LMULMAX1-RV64-NEXT: vse8.v v9, (a2)
-; LMULMAX1-RV64-NEXT: ret
+; CHECK-LABEL: add_v32i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a2, 32
+; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, ma
+; CHECK-NEXT: vle8.v v8, (a0)
+; CHECK-NEXT: vle8.v v10, (a1)
+; CHECK-NEXT: vadd.vv v8, v8, v10
+; CHECK-NEXT: vse8.v v8, (a0)
+; CHECK-NEXT: ret
%a = load <32 x i8>, ptr %x
%b = load <32 x i8>, ptr %y
%c = add <32 x i8> %a, %b
@@ -2424,44 +2392,14 @@ define void @add_v32i8(ptr %x, ptr %y) {
}
define void @add_v16i16(ptr %x, ptr %y) {
-; LMULMAX2-LABEL: add_v16i16:
-; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; LMULMAX2-NEXT: vle16.v v8, (a0)
-; LMULMAX2-NEXT: vle16.v v10, (a1)
-; LMULMAX2-NEXT: vadd.vv v8, v8, v10
-; LMULMAX2-NEXT: vse16.v v8, (a0)
-; LMULMAX2-NEXT: ret
-;
-; LMULMAX1-RV32-LABEL: add_v16i16:
-; LMULMAX1-RV32: # %bb.0:
-; LMULMAX1-RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; LMULMAX1-RV32-NEXT: vle16.v v8, (a0)
-; LMULMAX1-RV32-NEXT: addi a2, a0, 16
-; LMULMAX1-RV32-NEXT: vle16.v v9, (a2)
-; LMULMAX1-RV32-NEXT: addi a3, a1, 16
-; LMULMAX1-RV32-NEXT: vle16.v v10, (a3)
-; LMULMAX1-RV32-NEXT: vle16.v v11, (a1)
-; LMULMAX1-RV32-NEXT: vadd.vv v9, v9, v10
-; LMULMAX1-RV32-NEXT: vadd.vv v8, v8, v11
-; LMULMAX1-RV32-NEXT: vse16.v v8, (a0)
-; LMULMAX1-RV32-NEXT: vse16.v v9, (a2)
-; LMULMAX1-RV32-NEXT: ret
-;
-; LMULMAX1-RV64-LABEL: add_v16i16:
-; LMULMAX1-RV64: # %bb.0:
-; LMULMAX1-RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; LMULMAX1-RV64-NEXT: vle16.v v8, (a0)
-; LMULMAX1-RV64-NEXT: addi a2, a1, 16
-; LMULMAX1-RV64-NEXT: vle16.v v9, (a2)
-; LMULMAX1-RV64-NEXT: addi a2, a0, 16
-; LMULMAX1-RV64-NEXT: vle16.v v10, (a2)
-; LMULMAX1-RV64-NEXT: vle16.v v11, (a1)
-; LMULMAX1-RV64-NEXT: vadd.vv v9, v10, v9
-; LMULMAX1-RV64-NEXT: vadd.vv v8, v8, v11
-; LMULMAX1-RV64-NEXT: vse16.v v8, (a0)
-; LMULMAX1-RV64-NEXT: vse16.v v9, (a2)
-; LMULMAX1-RV64-NEXT: ret
+; CHECK-LABEL: add_v16i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; CHECK-NEXT: vle16.v v8, (a0)
+; CHECK-NEXT: vle16.v v10, (a1)
+; CHECK-NEXT: vadd.vv v8, v8, v10
+; CHECK-NEXT: vse16.v v8, (a0)
+; CHECK-NEXT: ret
%a = load <16 x i16>, ptr %x
%b = load <16 x i16>, ptr %y
%c = add <16 x i16> %a, %b
@@ -2470,44 +2408,14 @@ define void @add_v16i16(ptr %x, ptr %y) {
}
define void @add_v8i32(ptr %x, ptr %y) {
-; LMULMAX2-LABEL: add_v8i32:
-; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; LMULMAX2-NEXT: vle32.v v8, (a0)
-; LMULMAX2-NEXT: vle32.v v10, (a1)
-; LMULMAX2-NEXT: vadd.vv v8, v8, v10
-; LMULMAX2-NEXT: vse32.v v8, (a0)
-; LMULMAX2-NEXT: ret
-;
-; LMULMAX1-RV32-LABEL: add_v8i32:
-; LMULMAX1-RV32: # %bb.0:
-; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; LMULMAX1-RV32-NEXT: vle32.v v8, (a0)
-; LMULMAX1-RV32-NEXT: addi a2, a0, 16
-; LMULMAX1-RV32-NEXT: vle32.v v9, (a2)
-; LMULMAX1-RV32-NEXT: addi a3, a1, 16
-; LMULMAX1-RV32-NEXT: vle32.v v10, (a3)
-; LMULMAX1-RV32-NEXT: vle32.v v11, (a1)
-; LMULMAX1-RV32-NEXT: vadd.vv v9, v9, v10
-; LMULMAX1-RV32-NEXT: vadd.vv v8, v8, v11
-; LMULMAX1-RV32-NEXT: vse32.v v8, (a0)
-; LMULMAX1-RV32-NEXT: vse32.v v9, (a2)
-; LMULMAX1-RV32-NEXT: ret
-;
-; LMULMAX1-RV64-LABEL: add_v8i32:
-; LMULMAX1-RV64: # %bb.0:
-; LMULMAX1-RV64-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; LMULMAX1-RV64-NEXT: vle32.v v8, (a0)
-; LMULMAX1-RV64-NEXT: addi a2, a1, 16
-; LMULMAX1-RV64-NEXT: vle32.v v9, (a2)
-; LMULMAX1-RV64-NEXT: addi a2, a0, 16
-; LMULMAX1-RV64-NEXT: vle32.v v10, (a2)
-; LMULMAX1-RV64-NEXT: vle32.v v11, (a1)
-; LMULMAX1-RV64-NEXT: vadd.vv v9, v10, v9
-; LMULMAX1-RV64-NEXT: vadd.vv v8, v8, v11
-; LMULMAX1-RV64-NEXT: vse32.v v8, (a0)
-; LMULMAX1-RV64-NEXT: vse32.v v9, (a2)
-; LMULMAX1-RV64-NEXT: ret
+; CHECK-LABEL: add_v8i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; CHECK-NEXT: vle32.v v8, (a0)
+; CHECK-NEXT: vle32.v v10, (a1)
+; CHECK-NEXT: vadd.vv v8, v8, v10
+; CHECK-NEXT: vse32.v v8, (a0)
+; CHECK-NEXT: ret
%a = load <8 x i32>, ptr %x
%b = load <8 x i32>, ptr %y
%c = add <8 x i32> %a, %b
@@ -2516,48 +2424,16 @@ define void @add_v8i32(ptr %x, ptr %y) {
}
define void @add_v6i32(ptr %x, ptr %y) {
-; LMULMAX2-LABEL: add_v6i32:
-; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: vsetivli zero, 6, e32, m2, ta, ma
-; LMULMAX2-NEXT: vle32.v v8, (a0)
-; LMULMAX2-NEXT: vle32.v v10, (a1)
-; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; LMULMAX2-NEXT: vadd.vv v8, v8, v10
-; LMULMAX2-NEXT: vsetivli zero, 6, e32, m2, ta, ma
-; LMULMAX2-NEXT: vse32.v v8, (a0)
-; LMULMAX2-NEXT: ret
-;
-; LMULMAX1-RV32-LABEL: add_v6i32:
-; LMULMAX1-RV32: # %bb.0:
-; LMULMAX1-RV32-NEXT: addi a2, a0, 16
-; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; LMULMAX1-RV32-NEXT: vle32.v v8, (a0)
-; LMULMAX1-RV32-NEXT: vle32.v v9, (a1)
-; LMULMAX1-RV32-NEXT: vle32.v v10, (a2)
-; LMULMAX1-RV32-NEXT: addi a1, a1, 16
-; LMULMAX1-RV32-NEXT: vle32.v v11, (a1)
-; LMULMAX1-RV32-NEXT: vadd.vv v8, v8, v9
-; LMULMAX1-RV32-NEXT: vse32.v v8, (a0)
-; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
-; LMULMAX1-RV32-NEXT: vadd.vv v8, v10, v11
-; LMULMAX1-RV32-NEXT: vse32.v v8, (a2)
-; LMULMAX1-RV32-NEXT: ret
-;
-; LMULMAX1-RV64-LABEL: add_v6i32:
-; LMULMAX1-RV64: # %bb.0:
-; LMULMAX1-RV64-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; LMULMAX1-RV64-NEXT: vle32.v v8, (a0)
-; LMULMAX1-RV64-NEXT: addi a2, a1, 16
-; LMULMAX1-RV64-NEXT: vle32.v v9, (a2)
-; LMULMAX1-RV64-NEXT: addi a2, a0, 16
-; LMULMAX1-RV64-NEXT: vle32.v v10, (a2)
-; LMULMAX1-RV64-NEXT: vle32.v v11, (a1)
-; LMULMAX1-RV64-NEXT: vadd.vv v9, v10, v9
-; LMULMAX1-RV64-NEXT: vadd.vv v8, v8, v11
-; LMULMAX1-RV64-NEXT: vse32.v v8, (a0)
-; LMULMAX1-RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma
-; LMULMAX1-RV64-NEXT: vse64.v v9, (a2)
-; LMULMAX1-RV64-NEXT: ret
+; CHECK-LABEL: add_v6i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 6, e32, m2, ta, ma
+; CHECK-NEXT: vle32.v v8, (a0)
+; CHECK-NEXT: vle32.v v10, (a1)
+; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; CHECK-NEXT: vadd.vv v8, v8, v10
+; CHECK-NEXT: vsetivli zero, 6, e32, m2, ta, ma
+; CHECK-NEXT: vse32.v v8, (a0)
+; CHECK-NEXT: ret
%a = load <6 x i32>, ptr %x
%b = load <6 x i32>, ptr %y
%c = add <6 x i32> %a, %b
@@ -2566,44 +2442,14 @@ define void @add_v6i32(ptr %x, ptr %y) {
}
define void @add_v4i64(ptr %x, ptr %y) {
-; LMULMAX2-LABEL: add_v4i64:
-; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; LMULMAX2-NEXT: vle64.v v8, (a0)
-; LMULMAX2-NEXT: vle64.v v10, (a1)
-; LMULMAX2-NEXT: vadd.vv v8, v8, v10
-; LMULMAX2-NEXT: vse64.v v8, (a0)
-; LMULMAX2-NEXT: ret
-;
-; LMULMAX1-RV32-LABEL: add_v4i64:
-; LMULMAX1-RV32: # %bb.0:
-; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; LMULMAX1-RV32-NEXT: vle64.v v8, (a0)
-; LMULMAX1-RV32-NEXT: addi a2, a0, 16
-; LMULMAX1-RV32-NEXT: vle64.v v9, (a2)
-; LMULMAX1-RV32-NEXT: addi a3, a1, 16
-; LMULMAX1-RV32-NEXT: vle64.v v10, (a3)
-; LMULMAX1-RV32-NEXT: vle64.v v11, (a1)
-; LMULMAX1-RV32-NEXT: vadd.vv v9, v9, v10
-; LMULMAX1-RV32-NEXT: vadd.vv v8, v8, v11
-; LMULMAX1-RV32-NEXT: vse64.v v8, (a0)
-; LMULMAX1-RV32-NEXT: vse64.v v9, (a2)
-; LMULMAX1-RV32-NEXT: ret
-;
-; LMULMAX1-RV64-LABEL: add_v4i64:
-; LMULMAX1-RV64: # %bb.0:
-; LMULMAX1-RV64-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; LMULMAX1-RV64-NEXT: vle64.v v8, (a0)
-; LMULMAX1-RV64-NEXT: addi a2, a1, 16
-; LMULMAX1-RV64-NEXT: vle64.v v9, (a2)
-; LMULMAX1-RV64-NEXT: addi a2, a0, 16
-; LMULMAX1-RV64-NEXT: vle64.v v10, (a2)
-; LMULMAX1-RV64-NEXT: vle64.v v11, (a1)
-; LMULMAX1-RV64-NEXT: vadd.vv v9, v10, v9
-; LMULMAX1-RV64-NEXT: vadd.vv v8, v8, v11
-; LMULMAX1-RV64-NEXT: vse64.v v8, (a0)
-; LMULMAX1-RV64-NEXT: vse64.v v9, (a2)
-; LMULMAX1-RV64-NEXT: ret
+; CHECK-LABEL: add_v4i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; CHECK-NEXT: vle64.v v8, (a0)
+; CHECK-NEXT: vle64.v v10, (a1)
+; CHECK-NEXT: vadd.vv v8, v8, v10
+; CHECK-NEXT: vse64.v v8, (a0)
+; CHECK-NEXT: ret
%a = load <4 x i64>, ptr %x
%b = load <4 x i64>, ptr %y
%c = add <4 x i64> %a, %b
@@ -2612,45 +2458,15 @@ define void @add_v4i64(ptr %x, ptr %y) {
}
define void @sub_v32i8(ptr %x, ptr %y) {
-; LMULMAX2-LABEL: sub_v32i8:
-; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: li a2, 32
-; LMULMAX2-NEXT: vsetvli zero, a2, e8, m2, ta, ma
-; LMULMAX2-NEXT: vle8.v v8, (a0)
-; LMULMAX2-NEXT: vle8.v v10, (a1)
-; LMULMAX2-NEXT: vsub.vv v8, v8, v10
-; LMULMAX2-NEXT: vse8.v v8, (a0)
-; LMULMAX2-NEXT: ret
-;
-; LMULMAX1-RV32-LABEL: sub_v32i8:
-; LMULMAX1-RV32: # %bb.0:
-; LMULMAX1-RV32-NEXT: vsetivli zero, 16, e8, m1, ta, ma
-; LMULMAX1-RV32-NEXT: vle8.v v8, (a0)
-; LMULMAX1-RV32-NEXT: addi a2, a0, 16
-; LMULMAX1-RV32-NEXT: vle8.v v9, (a2)
-; LMULMAX1-RV32-NEXT: addi a3, a1, 16
-; LMULMAX1-RV32-NEXT: vle8.v v10, (a3)
-; LMULMAX1-RV32-NEXT: vle8.v v11, (a1)
-; LMULMAX1-RV32-NEXT: vsub.vv v9, v9, v10
-; LMULMAX1-RV32-NEXT: vsub.vv v8, v8, v11
-; LMULMAX1-RV32-NEXT: vse8.v v8, (a0)
-; LMULMAX1-RV32-NEXT: vse8.v v9, (a2)
-; LMULMAX1-RV32-NEXT: ret
-;
-; LMULMAX1-RV64-LABEL: sub_v32i8:
-; LMULMAX1-RV64: # %bb.0:
-; LMULMAX1-RV64-NEXT: vsetivli zero, 16, e8, m1, ta, ma
-; LMULMAX1-RV64-NEXT: vle8.v v8, (a0)
-; LMULMAX1-RV64-NEXT: addi a2, a1, 16
-; LMULMAX1-RV64-NEXT: vle8.v v9, (a2)
-; LMULMAX1-RV64-NEXT: addi a2, a0, 16
-; LMULMAX1-RV64-NEXT: vle8.v v10, (a2)
-; LMULMAX1-RV64-NEXT: vle8.v v11, (a1)
-; LMULMAX1-RV64-NEXT: vsub.vv v9, v10, v9
-; LMULMAX1-RV64-NEXT: vsub.vv v8, v8, v11
-; LMULMAX1-RV64-NEXT: vse8.v v8, (a0)
-; LMULMAX1-RV64-NEXT: vse8.v v9, (a2)
-; LMULMAX1-RV64-NEXT: ret
+; CHECK-LABEL: sub_v32i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a2, 32
+; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, ma
+; CHECK-NEXT: vle8.v v8, (a0)
+; CHECK-NEXT: vle8.v v10, (a1)
+; CHECK-NEXT: vsub.vv v8, v8, v10
+; CHECK-NEXT: vse8.v v8, (a0)
+; CHECK-NEXT: ret
%a = load <32 x i8>, ptr %x
%b = load <32 x i8>, ptr %y
%c = sub <32 x i8> %a, %b
@@ -2659,44 +2475,14 @@ define void @sub_v32i8(ptr %x, ptr %y) {
}
define void @sub_v16i16(ptr %x, ptr %y) {
-; LMULMAX2-LABEL: sub_v16i16:
-; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; LMULMAX2-NEXT: vle16.v v8, (a0)
-; LMULMAX2-NEXT: vle16.v v10, (a1)
-; LMULMAX2-NEXT: vsub.vv v8, v8, v10
-; LMULMAX2-NEXT: vse16.v v8, (a0)
-; LMULMAX2-NEXT: ret
-;
-; LMULMAX1-RV32-LABEL: sub_v16i16:
-; LMULMAX1-RV32: # %bb.0:
-; LMULMAX1-RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; LMULMAX1-RV32-NEXT: vle16.v v8, (a0)
-; LMULMAX1-RV32-NEXT: addi a2, a0, 16
-; LMULMAX1-RV32-NEXT: vle16.v v9, (a2)
-; LMULMAX1-RV32-NEXT: addi a3, a1, 16
-; LMULMAX1-RV32-NEXT: vle16.v v10, (a3)
-; LMULMAX1-RV32-NEXT: vle16.v v11, (a1)
-; LMULMAX1-RV32-NEXT: vsub.vv v9, v9, v10
-; LMULMAX1-RV32-NEXT: vsub.vv v8, v8, v11
-; LMULMAX1-RV32-NEXT: vse16.v v8, (a0)
-; LMULMAX1-RV32-NEXT: vse16.v v9, (a2)
-; LMULMAX1-RV32-NEXT: ret
-;
-; LMULMAX1-RV64-LABEL: sub_v16i16:
-; LMULMAX1-RV64: # %bb.0:
-; LMULMAX1-RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; LMULMAX1-RV64-NEXT: vle16.v v8, (a0)
-; LMULMAX1-RV64-NEXT: addi a2, a1, 16
-; LMULMAX1-RV64-NEXT: vle16.v v9, (a2)
-; LMULMAX1-RV64-NEXT: addi a2, a0, 16
-; LMULMAX1-RV64-NEXT: vle16.v v10, (a2)
-; LMULMAX1-RV64-NEXT: vle16.v v11, (a1)
-; LMULMAX1-RV64-NEXT: vsub.vv v9, v10, v9
-; LMULMAX1-RV64-NEXT: vsub.vv v8, v8, v11
-; LMULMAX1-RV64-NEXT: vse16.v v8, (a0)
-; LMULMAX1-RV64-NEXT: vse16.v v9, (a2)
-; LMULMAX1-RV64-NEXT: ret
+; CHECK-LABEL: sub_v16i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; CHECK-NEXT: vle16.v v8, (a0)
+; CHECK-NEXT: vle16.v v10, (a1)
+; CHECK-NEXT: vsub.vv v8, v8, v10
+; CHECK-NEXT: vse16.v v8, (a0)
+; CHECK-NEXT: ret
%a = load <16 x i16>, ptr %x
%b = load <16 x i16>, ptr %y
%c = sub <16 x i16> %a, %b
@@ -2705,44 +2491,14 @@ define void @sub_v16i16(ptr %x, ptr %y) {
}
define void @sub_v8i32(ptr %x, ptr %y) {
-; LMULMAX2-LABEL: sub_v8i32:
-; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; LMULMAX2-NEXT: vle32.v v8, (a0)
-; LMULMAX2-NEXT: vle32.v v10, (a1)
-; LMULMAX2-NEXT: vsub.vv v8, v8, v10
-; LMULMAX2-NEXT: vse32.v v8, (a0)
-; LMULMAX2-NEXT: ret
-;
-; LMULMAX1-RV32-LABEL: sub_v8i32:
-; LMULMAX1-RV32: # %bb.0:
-; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; LMULMAX1-RV32-NEXT: vle32.v v8, (a0)
-; LMULMAX1-RV32-NEXT: addi a2, a0, 16
-; LMULMAX1-RV32-NEXT: vle32.v v9, (a2)
-; LMULMAX1-RV32-NEXT: addi a3, a1, 16
-; LMULMAX1-RV32-NEXT: vle32.v v10, (a3)
-; LMULMAX1-RV32-NEXT: vle32.v v11, (a1)
-; LMULMAX1-RV32-NEXT: vsub.vv v9, v9, v10
-; LMULMAX1-RV32-NEXT: vsub.vv v8, v8, v11
-; LMULMAX1-RV32-NEXT: vse32.v v8, (a0)
-; LMULMAX1-RV32-NEXT: vse32.v v9, (a2)
-; LMULMAX1-RV32-NEXT: ret
-;
-; LMULMAX1-RV64-LABEL: sub_v8i32:
-; LMULMAX1-RV64: # %bb.0:
-; LMULMAX1-RV64-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; LMULMAX1-RV64-NEXT: vle32.v v8, (a0)
-; LMULMAX1-RV64-NEXT: addi a2, a1, 16
-; LMULMAX1-RV64-NEXT: vle32.v v9, (a2)
-; LMULMAX1-RV64-NEXT: addi a2, a0, 16
-; LMULMAX1-RV64-NEXT: vle32.v v10, (a2)
-; LMULMAX1-RV64-NEXT: vle32.v v11, (a1)
-; LMULMAX1-RV64-NEXT: vsub.vv v9, v10, v9
-; LMULMAX1-RV64-NEXT: vsub.vv v8, v8, v11
-; LMULMAX1-RV64-NEXT: vse32.v v8, (a0)
-; LMULMAX1-RV64-NEXT: vse32.v v9, (a2)
-; LMULMAX1-RV64-NEXT: ret
+; CHECK-LABEL: sub_v8i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; CHECK-NEXT: vle32.v v8, (a0)
+; CHECK-NEXT: vle32.v v10, (a1)
+; CHECK-NEXT: vsub.vv v8, v8, v10
+; CHECK-NEXT: vse32.v v8, (a0)
+; CHECK-NEXT: ret
%a = load <8 x i32>, ptr %x
%b = load <8 x i32>, ptr %y
%c = sub <8 x i32> %a, %b
@@ -2751,44 +2507,14 @@ define void @sub_v8i32(ptr %x, ptr %y) {
}
define void @sub_v4i64(ptr %x, ptr %y) {
-; LMULMAX2-LABEL: sub_v4i64:
-; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; LMULMAX2-NEXT: vle64.v v8, (a0)
-; LMULMAX2-NEXT: vle64.v v10, (a1)
-; LMULMAX2-NEXT: vsub.vv v8, v8, v10
-; LMULMAX2-NEXT: vse64.v v8, (a0)
-; LMULMAX2-NEXT: ret
-;
-; LMULMAX1-RV32-LABEL: sub_v4i64:
-; LMULMAX1-RV32: # %bb.0:
-; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; LMULMAX1-RV32-NEXT: vle64.v v8, (a0)
-; LMULMAX1-RV32-NEXT: addi a2, a0, 16
-; LMULMAX1-RV32-NEXT: vle64.v v9, (a2)
-; LMULMAX1-RV32-NEXT: addi a3, a1, 16
-; LMULMAX1-RV32-NEXT: vle64.v v10, (a3)
-; LMULMAX1-RV32-NEXT: vle64.v v11, (a1)
-; LMULMAX1-RV32-NEXT: vsub.vv v9, v9, v10
-; LMULMAX1-RV32-NEXT: vsub.vv v8, v8, v11
-; LMULMAX1-RV32-NEXT: vse64.v v8, (a0)
-; LMULMAX1-RV32-NEXT: vse64.v v9, (a2)
-; LMULMAX1-RV32-NEXT: ret
-;
-; LMULMAX1-RV64-LABEL: sub_v4i64:
-; LMULMAX1-RV64: # %bb.0:
-; LMULMAX1-RV64-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; LMULMAX1-RV64-NEXT: vle64.v v8, (a0)
-; LMULMAX1-RV64-NEXT: addi a2, a1, 16
-; LMULMAX1-RV64-NEXT: vle64.v v9, (a2)
-; LMULMAX1-RV64-NEXT: addi a2, a0, 16
-; LMULMAX1-RV64-NEXT: vle64.v v10, (a2)
-; LMULMAX1-RV64-NEXT: vle64.v v11, (a1)
-; LMULMAX1-RV64-NEXT: vsub.vv v9, v10, v9
-; LMULMAX1-RV64-NEXT: vsub.vv v8, v8, v11
-; LMULMAX1-RV64-NEXT: vse64.v v8, (a0)
-; LMULMAX1-RV64-NEXT: vse64.v v9, (a2)
-; LMULMAX1-RV64-NEXT: ret
+; CHECK-LABEL: sub_v4i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; CHECK-NEXT: vle64.v v8, (a0)
+; CHECK-NEXT: vle64.v v10, (a1)
+; CHECK-NEXT: vsub.vv v8, v8, v10
+; CHECK-NEXT: vse64.v v8, (a0)
+; CHECK-NEXT: ret
%a = load <4 x i64>, ptr %x
%b = load <4 x i64>, ptr %y
%c = sub <4 x i64> %a, %b
@@ -2797,45 +2523,15 @@ define void @sub_v4i64(ptr %x, ptr %y) {
}
define void @mul_v32i8(ptr %x, ptr %y) {
-; LMULMAX2-LABEL: mul_v32i8:
-; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: li a2, 32
-; LMULMAX2-NEXT: vsetvli zero, a2, e8, m2, ta, ma
-; LMULMAX2-NEXT: vle8.v v8, (a0)
-; LMULMAX2-NEXT: vle8.v v10, (a1)
-; LMULMAX2-NEXT: vmul.vv v8, v8, v10
-; LMULMAX2-NEXT: vse8.v v8, (a0)
-; LMULMAX2-NEXT: ret
-;
-; LMULMAX1-RV32-LABEL: mul_v32i8:
-; LMULMAX1-RV32: # %bb.0:
-; LMULMAX1-RV32-NEXT: vsetivli zero, 16, e8, m1, ta, ma
-; LMULMAX1-RV32-NEXT: vle8.v v8, (a0)
-; LMULMAX1-RV32-NEXT: addi a2, a0, 16
-; LMULMAX1-RV32-NEXT: vle8.v v9, (a2)
-; LMULMAX1-RV32-NEXT: addi a3, a1, 16
-; LMULMAX1-RV32-NEXT: vle8.v v10, (a3)
-; LMULMAX1-RV32-NEXT: vle8.v v11, (a1)
-; LMULMAX1-RV32-NEXT: vmul.vv v9, v9, v10
-; LMULMAX1-RV32-NEXT: vmul.vv v8, v8, v11
-; LMULMAX1-RV32-NEXT: vse8.v v8, (a0)
-; LMULMAX1-RV32-NEXT: vse8.v v9, (a2)
-; LMULMAX1-RV32-NEXT: ret
-;
-; LMULMAX1-RV64-LABEL: mul_v32i8:
-; LMULMAX1-RV64: # %bb.0:
-; LMULMAX1-RV64-NEXT: vsetivli zero, 16, e8, m1, ta, ma
-; LMULMAX1-RV64-NEXT: vle8.v v8, (a0)
-; LMULMAX1-RV64-NEXT: addi a2, a1, 16
-; LMULMAX1-RV64-NEXT: vle8.v v9, (a2)
-; LMULMAX1-RV64-NEXT: addi a2, a0, 16
-; LMULMAX1-RV64-NEXT: vle8.v v10, (a2)
-; LMULMAX1-RV64-NEXT: vle8.v v11, (a1)
-; LMULMAX1-RV64-NEXT: vmul.vv v9, v10, v9
-; LMULMAX1-RV64-NEXT: vmul.vv v8, v8, v11
-; LMULMAX1-RV64-NEXT: vse8.v v8, (a0)
-; LMULMAX1-RV64-NEXT: vse8.v v9, (a2)
-; LMULMAX1-RV64-NEXT: ret
+; CHECK-LABEL: mul_v32i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a2, 32
+; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, ma
+; CHECK-NEXT: vle8.v v8, (a0)
+; CHECK-NEXT: vle8.v v10, (a1)
+; CHECK-NEXT: vmul.vv v8, v8, v10
+; CHECK-NEXT: vse8.v v8, (a0)
+; CHECK-NEXT: ret
%a = load <32 x i8>, ptr %x
%b = load <32 x i8>, ptr %y
%c = mul <32 x i8> %a, %b
@@ -2844,44 +2540,14 @@ define void @mul_v32i8(ptr %x, ptr %y) {
}
define void @mul_v16i16(ptr %x, ptr %y) {
-; LMULMAX2-LABEL: mul_v16i16:
-; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; LMULMAX2-NEXT: vle16.v v8, (a0)
-; LMULMAX2-NEXT: vle16.v v10, (a1)
-; LMULMAX2-NEXT: vmul.vv v8, v8, v10
-; LMULMAX2-NEXT: vse16.v v8, (a0)
-; LMULMAX2-NEXT: ret
-;
-; LMULMAX1-RV32-LABEL: mul_v16i16:
-; LMULMAX1-RV32: # %bb.0:
-; LMULMAX1-RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; LMULMAX1-RV32-NEXT: vle16.v v8, (a0)
-; LMULMAX1-RV32-NEXT: addi a2, a0, 16
-; LMULMAX1-RV32-NEXT: vle16.v v9, (a2)
-; LMULMAX1-RV32-NEXT: addi a3, a1, 16
-; LMULMAX1-RV32-NEXT: vle16.v v10, (a3)
-; LMULMAX1-RV32-NEXT: vle16.v v11, (a1)
-; LMULMAX1-RV32-NEXT: vmul.vv v9, v9, v10
-; LMULMAX1-RV32-NEXT: vmul.vv v8, v8, v11
-; LMULMAX1-RV32-NEXT: vse16.v v8, (a0)
-; LMULMAX1-RV32-NEXT: vse16.v v9, (a2)
-; LMULMAX1-RV32-NEXT: ret
-;
-; LMULMAX1-RV64-LABEL: mul_v16i16:
-; LMULMAX1-RV64: # %bb.0:
-; LMULMAX1-RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; LMULMAX1-RV64-NEXT: vle16.v v8, (a0)
-; LMULMAX1-RV64-NEXT: addi a2, a1, 16
-; LMULMAX1-RV64-NEXT: vle16.v v9, (a2)
-; LMULMAX1-RV64-NEXT: addi a2, a0, 16
-; LMULMAX1-RV64-NEXT: vle16.v v10, (a2)
-; LMULMAX1-RV64-NEXT: vle16.v v11, (a1)
-; LMULMAX1-RV64-NEXT: vmul.vv v9, v10, v9
-; LMULMAX1-RV64-NEXT: vmul.vv v8, v8, v11
-; LMULMAX1-RV64-NEXT: vse16.v v8, (a0)
-; LMULMAX1-RV64-NEXT: vse16.v v9, (a2)
-; LMULMAX1-RV64-NEXT: ret
+; CHECK-LABEL: mul_v16i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; CHECK-NEXT: vle16.v v8, (a0)
+; CHECK-NEXT: vle16.v v10, (a1)
+; CHECK-NEXT: vmul.vv v8, v8, v10
+; CHECK-NEXT: vse16.v v8, (a0)
+; CHECK-NEXT: ret
%a = load <16 x i16>, ptr %x
%b = load <16 x i16>, ptr %y
%c = mul <16 x i16> %a, %b
@@ -2890,44 +2556,14 @@ define void @mul_v16i16(ptr %x, ptr %y) {
}
define void @mul_v8i32(ptr %x, ptr %y) {
-; LMULMAX2-LABEL: mul_v8i32:
-; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; LMULMAX2-NEXT: vle32.v v8, (a0)
-; LMULMAX2-NEXT: vle32.v v10, (a1)
-; LMULMAX2-NEXT: vmul.vv v8, v8, v10
-; LMULMAX2-NEXT: vse32.v v8, (a0)
-; LMULMAX2-NEXT: ret
-;
-; LMULMAX1-RV32-LABEL: mul_v8i32:
-; LMULMAX1-RV32: # %bb.0:
-; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; LMULMAX1-RV32-NEXT: vle32.v v8, (a0)
-; LMULMAX1-RV32-NEXT: addi a2, a0, 16
-; LMULMAX1-RV32-NEXT: vle32.v v9, (a2)
-; LMULMAX1-RV32-NEXT: addi a3, a1, 16
-; LMULMAX1-RV32-NEXT: vle32.v v10, (a3)
-; LMULMAX1-RV32-NEXT: vle32.v v11, (a1)
-; LMULMAX1-RV32-NEXT: vmul.vv v9, v9, v10
-; LMULMAX1-RV32-NEXT: vmul.vv v8, v8, v11
-; LMULMAX1-RV32-NEXT: vse32.v v8, (a0)
-; LMULMAX1-RV32-NEXT: vse32.v v9, (a2)
-; LMULMAX1-RV32-NEXT: ret
-;
-; LMULMAX1-RV64-LABEL: mul_v8i32:
-; LMULMAX1-RV64: # %bb.0:
-; LMULMAX1-RV64-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; LMULMAX1-RV64-NEXT: vle32.v v8, (a0)
-; LMULMAX1-RV64-NEXT: addi a2, a1, 16
-; LMULMAX1-RV64-NEXT: vle32.v v9, (a2)
-; LMULMAX1-RV64-NEXT: addi a2, a0, 16
-; LMULMAX1-RV64-NEXT: vle32.v v10, (a2)
-; LMULMAX1-RV64-NEXT: vle32.v v11, (a1)
-; LMULMAX1-RV64-NEXT: vmul.vv v9, v10, v9
-; LMULMAX1-RV64-NEXT: vmul.vv v8, v8, v11
-; LMULMAX1-RV64-NEXT: vse32.v v8, (a0)
-; LMULMAX1-RV64-NEXT: vse32.v v9, (a2)
-; LMULMAX1-RV64-NEXT: ret
+; CHECK-LABEL: mul_v8i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; CHECK-NEXT: vle32.v v8, (a0)
+; CHECK-NEXT: vle32.v v10, (a1)
+; CHECK-NEXT: vmul.vv v8, v8, v10
+; CHECK-NEXT: vse32.v v8, (a0)
+; CHECK-NEXT: ret
%a = load <8 x i32>, ptr %x
%b = load <8 x i32>, ptr %y
%c = mul <8 x i32> %a, %b
@@ -2936,44 +2572,14 @@ define void @mul_v8i32(ptr %x, ptr %y) {
}
define void @mul_v4i64(ptr %x, ptr %y) {
-; LMULMAX2-LABEL: mul_v4i64:
-; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; LMULMAX2-NEXT: vle64.v v8, (a0)
-; LMULMAX2-NEXT: vle64.v v10, (a1)
-; LMULMAX2-NEXT: vmul.vv v8, v8, v10
-; LMULMAX2-NEXT: vse64.v v8, (a0)
-; LMULMAX2-NEXT: ret
-;
-; LMULMAX1-RV32-LABEL: mul_v4i64:
-; LMULMAX1-RV32: # %bb.0:
-; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; LMULMAX1-RV32-NEXT: vle64.v v8, (a0)
-; LMULMAX1-RV32-NEXT: addi a2, a0, 16
-; LMULMAX1-RV32-NEXT: vle64.v v9, (a2)
-; LMULMAX1-RV32-NEXT: addi a3, a1, 16
-; LMULMAX1-RV32-NEXT: vle64.v v10, (a3)
-; LMULMAX1-RV32-NEXT: vle64.v v11, (a1)
-; LMULMAX1-RV32-NEXT: vmul.vv v9, v9, v10
-; LMULMAX1-RV32-NEXT: vmul.vv v8, v8, v11
-; LMULMAX1-RV32-NEXT: vse64.v v8, (a0)
-; LMULMAX1-RV32-NEXT: vse64.v v9, (a2)
-; LMULMAX1-RV32-NEXT: ret
-;
-; LMULMAX1-RV64-LABEL: mul_v4i64:
-; LMULMAX1-RV64: # %bb.0:
-; LMULMAX1-RV64-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; LMULMAX1-RV64-NEXT: vle64.v v8, (a0)
-; LMULMAX1-RV64-NEXT: addi a2, a1, 16
-; LMULMAX1-RV64-NEXT: vle64.v v9, (a2)
-; LMULMAX1-RV64-NEXT: addi a2, a0, 16
-; LMULMAX1-RV64-NEXT: vle64.v v10, (a2)
-; LMULMAX1-RV64-NEXT: vle64.v v11, (a1)
-; LMULMAX1-RV64-NEXT: vmul.vv v9, v10, v9
-; LMULMAX1-RV64-NEXT: vmul.vv v8, v8, v11
-; LMULMAX1-RV64-NEXT: vse64.v v8, (a0)
-; LMULMAX1-RV64-NEXT: vse64.v v9, (a2)
-; LMULMAX1-RV64-NEXT: ret
+; CHECK-LABEL: mul_v4i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; CHECK-NEXT: vle64.v v8, (a0)
+; CHECK-NEXT: vle64.v v10, (a1)
+; CHECK-NEXT: vmul.vv v8, v8, v10
+; CHECK-NEXT: vse64.v v8, (a0)
+; CHECK-NEXT: ret
%a = load <4 x i64>, ptr %x
%b = load <4 x i64>, ptr %y
%c = mul <4 x i64> %a, %b
@@ -2982,45 +2588,15 @@ define void @mul_v4i64(ptr %x, ptr %y) {
}
define void @and_v32i8(ptr %x, ptr %y) {
-; LMULMAX2-LABEL: and_v32i8:
-; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: li a2, 32
-; LMULMAX2-NEXT: vsetvli zero, a2, e8, m2, ta, ma
-; LMULMAX2-NEXT: vle8.v v8, (a0)
-; LMULMAX2-NEXT: vle8.v v10, (a1)
-; LMULMAX2-NEXT: vand.vv v8, v8, v10
-; LMULMAX2-NEXT: vse8.v v8, (a0)
-; LMULMAX2-NEXT: ret
-;
-; LMULMAX1-RV32-LABEL: and_v32i8:
-; LMULMAX1-RV32: # %bb.0:
-; LMULMAX1-RV32-NEXT: vsetivli zero, 16, e8, m1, ta, ma
-; LMULMAX1-RV32-NEXT: vle8.v v8, (a0)
-; LMULMAX1-RV32-NEXT: addi a2, a0, 16
-; LMULMAX1-RV32-NEXT: vle8.v v9, (a2)
-; LMULMAX1-RV32-NEXT: addi a3, a1, 16
-; LMULMAX1-RV32-NEXT: vle8.v v10, (a3)
-; LMULMAX1-RV32-NEXT: vle8.v v11, (a1)
-; LMULMAX1-RV32-NEXT: vand.vv v9, v9, v10
-; LMULMAX1-RV32-NEXT: vand.vv v8, v8, v11
-; LMULMAX1-RV32-NEXT: vse8.v v8, (a0)
-; LMULMAX1-RV32-NEXT: vse8.v v9, (a2)
-; LMULMAX1-RV32-NEXT: ret
-;
-; LMULMAX1-RV64-LABEL: and_v32i8:
-; LMULMAX1-RV64: # %bb.0:
-; LMULMAX1-RV64-NEXT: vsetivli zero, 16, e8, m1, ta, ma
-; LMULMAX1-RV64-NEXT: vle8.v v8, (a0)
-; LMULMAX1-RV64-NEXT: addi a2, a1, 16
-; LMULMAX1-RV64-NEXT: vle8.v v9, (a2)
-; LMULMAX1-RV64-NEXT: addi a2, a0, 16
-; LMULMAX1-RV64-NEXT: vle8.v v10, (a2)
-; LMULMAX1-RV64-NEXT: vle8.v v11, (a1)
-; LMULMAX1-RV64-NEXT: vand.vv v9, v10, v9
-; LMULMAX1-RV64-NEXT: vand.vv v8, v8, v11
-; LMULMAX1-RV64-NEXT: vse8.v v8, (a0)
-; LMULMAX1-RV64-NEXT: vse8.v v9, (a2)
-; LMULMAX1-RV64-NEXT: ret
+; CHECK-LABEL: and_v32i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a2, 32
+; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, ma
+; CHECK-NEXT: vle8.v v8, (a0)
+; CHECK-NEXT: vle8.v v10, (a1)
+; CHECK-NEXT: vand.vv v8, v8, v10
+; CHECK-NEXT: vse8.v v8, (a0)
+; CHECK-NEXT: ret
%a = load <32 x i8>, ptr %x
%b = load <32 x i8>, ptr %y
%c = and <32 x i8> %a, %b
@@ -3029,44 +2605,14 @@ define void @and_v32i8(ptr %x, ptr %y) {
}
define void @and_v16i16(ptr %x, ptr %y) {
-; LMULMAX2-LABEL: and_v16i16:
-; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; LMULMAX2-NEXT: vle16.v v8, (a0)
-; LMULMAX2-NEXT: vle16.v v10, (a1)
-; LMULMAX2-NEXT: vand.vv v8, v8, v10
-; LMULMAX2-NEXT: vse16.v v8, (a0)
-; LMULMAX2-NEXT: ret
-;
-; LMULMAX1-RV32-LABEL: and_v16i16:
-; LMULMAX1-RV32: # %bb.0:
-; LMULMAX1-RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; LMULMAX1-RV32-NEXT: vle16.v v8, (a0)
-; LMULMAX1-RV32-NEXT: addi a2, a0, 16
-; LMULMAX1-RV32-NEXT: vle16.v v9, (a2)
-; LMULMAX1-RV32-NEXT: addi a3, a1, 16
-; LMULMAX1-RV32-NEXT: vle16.v v10, (a3)
-; LMULMAX1-RV32-NEXT: vle16.v v11, (a1)
-; LMULMAX1-RV32-NEXT: vand.vv v9, v9, v10
-; LMULMAX1-RV32-NEXT: vand.vv v8, v8, v11
-; LMULMAX1-RV32-NEXT: vse16.v v8, (a0)
-; LMULMAX1-RV32-NEXT: vse16.v v9, (a2)
-; LMULMAX1-RV32-NEXT: ret
-;
-; LMULMAX1-RV64-LABEL: and_v16i16:
-; LMULMAX1-RV64: # %bb.0:
-; LMULMAX1-RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; LMULMAX1-RV64-NEXT: vle16.v v8, (a0)
-; LMULMAX1-RV64-NEXT: addi a2, a1, 16
-; LMULMAX1-RV64-NEXT: vle16.v v9, (a2)
-; LMULMAX1-RV64-NEXT: addi a2, a0, 16
-; LMULMAX1-RV64-NEXT: vle16.v v10, (a2)
-; LMULMAX1-RV64-NEXT: vle16.v v11, (a1)
-; LMULMAX1-RV64-NEXT: vand.vv v9, v10, v9
-; LMULMAX1-RV64-NEXT: vand.vv v8, v8, v11
-; LMULMAX1-RV64-NEXT: vse16.v v8, (a0)
-; LMULMAX1-RV64-NEXT: vse16.v v9, (a2)
-; LMULMAX1-RV64-NEXT: ret
+; CHECK-LABEL: and_v16i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; CHECK-NEXT: vle16.v v8, (a0)
+; CHECK-NEXT: vle16.v v10, (a1)
+; CHECK-NEXT: vand.vv v8, v8, v10
+; CHECK-NEXT: vse16.v v8, (a0)
+; CHECK-NEXT: ret
%a = load <16 x i16>, ptr %x
%b = load <16 x i16>, ptr %y
%c = and <16 x i16> %a, %b
@@ -3075,44 +2621,14 @@ define void @and_v16i16(ptr %x, ptr %y) {
}
define void @and_v8i32(ptr %x, ptr %y) {
-; LMULMAX2-LABEL: and_v8i32:
-; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; LMULMAX2-NEXT: vle32.v v8, (a0)
-; LMULMAX2-NEXT: vle32.v v10, (a1)
-; LMULMAX2-NEXT: vand.vv v8, v8, v10
-; LMULMAX2-NEXT: vse32.v v8, (a0)
-; LMULMAX2-NEXT: ret
-;
-; LMULMAX1-RV32-LABEL: and_v8i32:
-; LMULMAX1-RV32: # %bb.0:
-; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; LMULMAX1-RV32-NEXT: vle32.v v8, (a0)
-; LMULMAX1-RV32-NEXT: addi a2, a0, 16
-; LMULMAX1-RV32-NEXT: vle32.v v9, (a2)
-; LMULMAX1-RV32-NEXT: addi a3, a1, 16
-; LMULMAX1-RV32-NEXT: vle32.v v10, (a3)
-; LMULMAX1-RV32-NEXT: vle32.v v11, (a1)
-; LMULMAX1-RV32-NEXT: vand.vv v9, v9, v10
-; LMULMAX1-RV32-NEXT: vand.vv v8, v8, v11
-; LMULMAX1-RV32-NEXT: vse32.v v8, (a0)
-; LMULMAX1-RV32-NEXT: vse32.v v9, (a2)
-; LMULMAX1-RV32-NEXT: ret
-;
-; LMULMAX1-RV64-LABEL: and_v8i32:
-; LMULMAX1-RV64: # %bb.0:
-; LMULMAX1-RV64-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; LMULMAX1-RV64-NEXT: vle32.v v8, (a0)
-; LMULMAX1-RV64-NEXT: addi a2, a1, 16
-; LMULMAX1-RV64-NEXT: vle32.v v9, (a2)
-; LMULMAX1-RV64-NEXT: addi a2, a0, 16
-; LMULMAX1-RV64-NEXT: vle32.v v10, (a2)
-; LMULMAX1-RV64-NEXT: vle32.v v11, (a1)
-; LMULMAX1-RV64-NEXT: vand.vv v9, v10, v9
-; LMULMAX1-RV64-NEXT: vand.vv v8, v8, v11
-; LMULMAX1-RV64-NEXT: vse32.v v8, (a0)
-; LMULMAX1-RV64-NEXT: vse32.v v9, (a2)
-; LMULMAX1-RV64-NEXT: ret
+; CHECK-LABEL: and_v8i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; CHECK-NEXT: vle32.v v8, (a0)
+; CHECK-NEXT: vle32.v v10, (a1)
+; CHECK-NEXT: vand.vv v8, v8, v10
+; CHECK-NEXT: vse32.v v8, (a0)
+; CHECK-NEXT: ret
%a = load <8 x i32>, ptr %x
%b = load <8 x i32>, ptr %y
%c = and <8 x i32> %a, %b
@@ -3121,44 +2637,14 @@ define void @and_v8i32(ptr %x, ptr %y) {
}
define void @and_v4i64(ptr %x, ptr %y) {
-; LMULMAX2-LABEL: and_v4i64:
-; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; LMULMAX2-NEXT: vle64.v v8, (a0)
-; LMULMAX2-NEXT: vle64.v v10, (a1)
-; LMULMAX2-NEXT: vand.vv v8, v8, v10
-; LMULMAX2-NEXT: vse64.v v8, (a0)
-; LMULMAX2-NEXT: ret
-;
-; LMULMAX1-RV32-LABEL: and_v4i64:
-; LMULMAX1-RV32: # %bb.0:
-; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; LMULMAX1-RV32-NEXT: vle64.v v8, (a0)
-; LMULMAX1-RV32-NEXT: addi a2, a0, 16
-; LMULMAX1-RV32-NEXT: vle64.v v9, (a2)
-; LMULMAX1-RV32-NEXT: addi a3, a1, 16
-; LMULMAX1-RV32-NEXT: vle64.v v10, (a3)
-; LMULMAX1-RV32-NEXT: vle64.v v11, (a1)
-; LMULMAX1-RV32-NEXT: vand.vv v9, v9, v10
-; LMULMAX1-RV32-NEXT: vand.vv v8, v8, v11
-; LMULMAX1-RV32-NEXT: vse64.v v8, (a0)
-; LMULMAX1-RV32-NEXT: vse64.v v9, (a2)
-; LMULMAX1-RV32-NEXT: ret
-;
-; LMULMAX1-RV64-LABEL: and_v4i64:
-; LMULMAX1-RV64: # %bb.0:
-; LMULMAX1-RV64-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; LMULMAX1-RV64-NEXT: vle64.v v8, (a0)
-; LMULMAX1-RV64-NEXT: addi a2, a1, 16
-; LMULMAX1-RV64-NEXT: vle64.v v9, (a2)
-; LMULMAX1-RV64-NEXT: addi a2, a0, 16
-; LMULMAX1-RV64-NEXT: vle64.v v10, (a2)
-; LMULMAX1-RV64-NEXT: vle64.v v11, (a1)
-; LMULMAX1-RV64-NEXT: vand.vv v9, v10, v9
-; LMULMAX1-RV64-NEXT: vand.vv v8, v8, v11
-; LMULMAX1-RV64-NEXT: vse64.v v8, (a0)
-; LMULMAX1-RV64-NEXT: vse64.v v9, (a2)
-; LMULMAX1-RV64-NEXT: ret
+; CHECK-LABEL: and_v4i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; CHECK-NEXT: vle64.v v8, (a0)
+; CHECK-NEXT: vle64.v v10, (a1)
+; CHECK-NEXT: vand.vv v8, v8, v10
+; CHECK-NEXT: vse64.v v8, (a0)
+; CHECK-NEXT: ret
%a = load <4 x i64>, ptr %x
%b = load <4 x i64>, ptr %y
%c = and <4 x i64> %a, %b
@@ -3167,45 +2653,15 @@ define void @and_v4i64(ptr %x, ptr %y) {
}
define void @or_v32i8(ptr %x, ptr %y) {
-; LMULMAX2-LABEL: or_v32i8:
-; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: li a2, 32
-; LMULMAX2-NEXT: vsetvli zero, a2, e8, m2, ta, ma
-; LMULMAX2-NEXT: vle8.v v8, (a0)
-; LMULMAX2-NEXT: vle8.v v10, (a1)
-; LMULMAX2-NEXT: vor.vv v8, v8, v10
-; LMULMAX2-NEXT: vse8.v v8, (a0)
-; LMULMAX2-NEXT: ret
-;
-; LMULMAX1-RV32-LABEL: or_v32i8:
-; LMULMAX1-RV32: # %bb.0:
-; LMULMAX1-RV32-NEXT: vsetivli zero, 16, e8, m1, ta, ma
-; LMULMAX1-RV32-NEXT: vle8.v v8, (a0)
-; LMULMAX1-RV32-NEXT: addi a2, a0, 16
-; LMULMAX1-RV32-NEXT: vle8.v v9, (a2)
-; LMULMAX1-RV32-NEXT: addi a3, a1, 16
-; LMULMAX1-RV32-NEXT: vle8.v v10, (a3)
-; LMULMAX1-RV32-NEXT: vle8.v v11, (a1)
-; LMULMAX1-RV32-NEXT: vor.vv v9, v9, v10
-; LMULMAX1-RV32-NEXT: vor.vv v8, v8, v11
-; LMULMAX1-RV32-NEXT: vse8.v v8, (a0)
-; LMULMAX1-RV32-NEXT: vse8.v v9, (a2)
-; LMULMAX1-RV32-NEXT: ret
-;
-; LMULMAX1-RV64-LABEL: or_v32i8:
-; LMULMAX1-RV64: # %bb.0:
-; LMULMAX1-RV64-NEXT: vsetivli zero, 16, e8, m1, ta, ma
-; LMULMAX1-RV64-NEXT: vle8.v v8, (a0)
-; LMULMAX1-RV64-NEXT: addi a2, a1, 16
-; LMULMAX1-RV64-NEXT: vle8.v v9, (a2)
-; LMULMAX1-RV64-NEXT: addi a2, a0, 16
-; LMULMAX1-RV64-NEXT: vle8.v v10, (a2)
-; LMULMAX1-RV64-NEXT: vle8.v v11, (a1)
-; LMULMAX1-RV64-NEXT: vor.vv v9, v10, v9
-; LMULMAX1-RV64-NEXT: vor.vv v8, v8, v11
-; LMULMAX1-RV64-NEXT: vse8.v v8, (a0)
-; LMULMAX1-RV64-NEXT: vse8.v v9, (a2)
-; LMULMAX1-RV64-NEXT: ret
+; CHECK-LABEL: or_v32i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a2, 32
+; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, ma
+; CHECK-NEXT: vle8.v v8, (a0)
+; CHECK-NEXT: vle8.v v10, (a1)
+; CHECK-NEXT: vor.vv v8, v8, v10
+; CHECK-NEXT: vse8.v v8, (a0)
+; CHECK-NEXT: ret
%a = load <32 x i8>, ptr %x
%b = load <32 x i8>, ptr %y
%c = or <32 x i8> %a, %b
@@ -3214,44 +2670,14 @@ define void @or_v32i8(ptr %x, ptr %y) {
}
define void @or_v16i16(ptr %x, ptr %y) {
-; LMULMAX2-LABEL: or_v16i16:
-; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; LMULMAX2-NEXT: vle16.v v8, (a0)
-; LMULMAX2-NEXT: vle16.v v10, (a1)
-; LMULMAX2-NEXT: vor.vv v8, v8, v10
-; LMULMAX2-NEXT: vse16.v v8, (a0)
-; LMULMAX2-NEXT: ret
-;
-; LMULMAX1-RV32-LABEL: or_v16i16:
-; LMULMAX1-RV32: # %bb.0:
-; LMULMAX1-RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; LMULMAX1-RV32-NEXT: vle16.v v8, (a0)
-; LMULMAX1-RV32-NEXT: addi a2, a0, 16
-; LMULMAX1-RV32-NEXT: vle16.v v9, (a2)
-; LMULMAX1-RV32-NEXT: addi a3, a1, 16
-; LMULMAX1-RV32-NEXT: vle16.v v10, (a3)
-; LMULMAX1-RV32-NEXT: vle16.v v11, (a1)
-; LMULMAX1-RV32-NEXT: vor.vv v9, v9, v10
-; LMULMAX1-RV32-NEXT: vor.vv v8, v8, v11
-; LMULMAX1-RV32-NEXT: vse16.v v8, (a0)
-; LMULMAX1-RV32-NEXT: vse16.v v9, (a2)
-; LMULMAX1-RV32-NEXT: ret
-;
-; LMULMAX1-RV64-LABEL: or_v16i16:
-; LMULMAX1-RV64: # %bb.0:
-; LMULMAX1-RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; LMULMAX1-RV64-NEXT: vle16.v v8, (a0)
-; LMULMAX1-RV64-NEXT: addi a2, a1, 16
-; LMULMAX1-RV64-NEXT: vle16.v v9, (a2)
-; LMULMAX1-RV64-NEXT: addi a2, a0, 16
-; LMULMAX1-RV64-NEXT: vle16.v v10, (a2)
-; LMULMAX1-RV64-NEXT: vle16.v v11, (a1)
-; LMULMAX1-RV64-NEXT: vor.vv v9, v10, v9
-; LMULMAX1-RV64-NEXT: vor.vv v8, v8, v11
-; LMULMAX1-RV64-NEXT: vse16.v v8, (a0)
-; LMULMAX1-RV64-NEXT: vse16.v v9, (a2)
-; LMULMAX1-RV64-NEXT: ret
+; CHECK-LABEL: or_v16i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; CHECK-NEXT: vle16.v v8, (a0)
+; CHECK-NEXT: vle16.v v10, (a1)
+; CHECK-NEXT: vor.vv v8, v8, v10
+; CHECK-NEXT: vse16.v v8, (a0)
+; CHECK-NEXT: ret
%a = load <16 x i16>, ptr %x
%b = load <16 x i16>, ptr %y
%c = or <16 x i16> %a, %b
@@ -3260,44 +2686,14 @@ define void @or_v16i16(ptr %x, ptr %y) {
}
define void @or_v8i32(ptr %x, ptr %y) {
-; LMULMAX2-LABEL: or_v8i32:
-; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; LMULMAX2-NEXT: vle32.v v8, (a0)
-; LMULMAX2-NEXT: vle32.v v10, (a1)
-; LMULMAX2-NEXT: vor.vv v8, v8, v10
-; LMULMAX2-NEXT: vse32.v v8, (a0)
-; LMULMAX2-NEXT: ret
-;
-; LMULMAX1-RV32-LABEL: or_v8i32:
-; LMULMAX1-RV32: # %bb.0:
-; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; LMULMAX1-RV32-NEXT: vle32.v v8, (a0)
-; LMULMAX1-RV32-NEXT: addi a2, a0, 16
-; LMULMAX1-RV32-NEXT: vle32.v v9, (a2)
-; LMULMAX1-RV32-NEXT: addi a3, a1, 16
-; LMULMAX1-RV32-NEXT: vle32.v v10, (a3)
-; LMULMAX1-RV32-NEXT: vle32.v v11, (a1)
-; LMULMAX1-RV32-NEXT: vor.vv v9, v9, v10
-; LMULMAX1-RV32-NEXT: vor.vv v8, v8, v11
-; LMULMAX1-RV32-NEXT: vse32.v v8, (a0)
-; LMULMAX1-RV32-NEXT: vse32.v v9, (a2)
-; LMULMAX1-RV32-NEXT: ret
-;
-; LMULMAX1-RV64-LABEL: or_v8i32:
-; LMULMAX1-RV64: # %bb.0:
-; LMULMAX1-RV64-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; LMULMAX1-RV64-NEXT: vle32.v v8, (a0)
-; LMULMAX1-RV64-NEXT: addi a2, a1, 16
-; LMULMAX1-RV64-NEXT: vle32.v v9, (a2)
-; LMULMAX1-RV64-NEXT: addi a2, a0, 16
-; LMULMAX1-RV64-NEXT: vle32.v v10, (a2)
-; LMULMAX1-RV64-NEXT: vle32.v v11, (a1)
-; LMULMAX1-RV64-NEXT: vor.vv v9, v10, v9
-; LMULMAX1-RV64-NEXT: vor.vv v8, v8, v11
-; LMULMAX1-RV64-NEXT: vse32.v v8, (a0)
-; LMULMAX1-RV64-NEXT: vse32.v v9, (a2)
-; LMULMAX1-RV64-NEXT: ret
+; CHECK-LABEL: or_v8i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; CHECK-NEXT: vle32.v v8, (a0)
+; CHECK-NEXT: vle32.v v10, (a1)
+; CHECK-NEXT: vor.vv v8, v8, v10
+; CHECK-NEXT: vse32.v v8, (a0)
+; CHECK-NEXT: ret
%a = load <8 x i32>, ptr %x
%b = load <8 x i32>, ptr %y
%c = or <8 x i32> %a, %b
@@ -3306,44 +2702,14 @@ define void @or_v8i32(ptr %x, ptr %y) {
}
define void @or_v4i64(ptr %x, ptr %y) {
-; LMULMAX2-LABEL: or_v4i64:
-; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; LMULMAX2-NEXT: vle64.v v8, (a0)
-; LMULMAX2-NEXT: vle64.v v10, (a1)
-; LMULMAX2-NEXT: vor.vv v8, v8, v10
-; LMULMAX2-NEXT: vse64.v v8, (a0)
-; LMULMAX2-NEXT: ret
-;
-; LMULMAX1-RV32-LABEL: or_v4i64:
-; LMULMAX1-RV32: # %bb.0:
-; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; LMULMAX1-RV32-NEXT: vle64.v v8, (a0)
-; LMULMAX1-RV32-NEXT: addi a2, a0, 16
-; LMULMAX1-RV32-NEXT: vle64.v v9, (a2)
-; LMULMAX1-RV32-NEXT: addi a3, a1, 16
-; LMULMAX1-RV32-NEXT: vle64.v v10, (a3)
-; LMULMAX1-RV32-NEXT: vle64.v v11, (a1)
-; LMULMAX1-RV32-NEXT: vor.vv v9, v9, v10
-; LMULMAX1-RV32-NEXT: vor.vv v8, v8, v11
-; LMULMAX1-RV32-NEXT: vse64.v v8, (a0)
-; LMULMAX1-RV32-NEXT: vse64.v v9, (a2)
-; LMULMAX1-RV32-NEXT: ret
-;
-; LMULMAX1-RV64-LABEL: or_v4i64:
-; LMULMAX1-RV64: # %bb.0:
-; LMULMAX1-RV64-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; LMULMAX1-RV64-NEXT: vle64.v v8, (a0)
-; LMULMAX1-RV64-NEXT: addi a2, a1, 16
-; LMULMAX1-RV64-NEXT: vle64.v v9, (a2)
-; LMULMAX1-RV64-NEXT: addi a2, a0, 16
-; LMULMAX1-RV64-NEXT: vle64.v v10, (a2)
-; LMULMAX1-RV64-NEXT: vle64.v v11, (a1)
-; LMULMAX1-RV64-NEXT: vor.vv v9, v10, v9
-; LMULMAX1-RV64-NEXT: vor.vv v8, v8, v11
-; LMULMAX1-RV64-NEXT: vse64.v v8, (a0)
-; LMULMAX1-RV64-NEXT: vse64.v v9, (a2)
-; LMULMAX1-RV64-NEXT: ret
+; CHECK-LABEL: or_v4i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; CHECK-NEXT: vle64.v v8, (a0)
+; CHECK-NEXT: vle64.v v10, (a1)
+; CHECK-NEXT: vor.vv v8, v8, v10
+; CHECK-NEXT: vse64.v v8, (a0)
+; CHECK-NEXT: ret
%a = load <4 x i64>, ptr %x
%b = load <4 x i64>, ptr %y
%c = or <4 x i64> %a, %b
@@ -3352,45 +2718,15 @@ define void @or_v4i64(ptr %x, ptr %y) {
}
define void @xor_v32i8(ptr %x, ptr %y) {
-; LMULMAX2-LABEL: xor_v32i8:
-; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: li a2, 32
-; LMULMAX2-NEXT: vsetvli zero, a2, e8, m2, ta, ma
-; LMULMAX2-NEXT: vle8.v v8, (a0)
-; LMULMAX2-NEXT: vle8.v v10, (a1)
-; LMULMAX2-NEXT: vxor.vv v8, v8, v10
-; LMULMAX2-NEXT: vse8.v v8, (a0)
-; LMULMAX2-NEXT: ret
-;
-; LMULMAX1-RV32-LABEL: xor_v32i8:
-; LMULMAX1-RV32: # %bb.0:
-; LMULMAX1-RV32-NEXT: vsetivli zero, 16, e8, m1, ta, ma
-; LMULMAX1-RV32-NEXT: vle8.v v8, (a0)
-; LMULMAX1-RV32-NEXT: addi a2, a0, 16
-; LMULMAX1-RV32-NEXT: vle8.v v9, (a2)
-; LMULMAX1-RV32-NEXT: addi a3, a1, 16
-; LMULMAX1-RV32-NEXT: vle8.v v10, (a3)
-; LMULMAX1-RV32-NEXT: vle8.v v11, (a1)
-; LMULMAX1-RV32-NEXT: vxor.vv v9, v9, v10
-; LMULMAX1-RV32-NEXT: vxor.vv v8, v8, v11
-; LMULMAX1-RV32-NEXT: vse8.v v8, (a0)
-; LMULMAX1-RV32-NEXT: vse8.v v9, (a2)
-; LMULMAX1-RV32-NEXT: ret
-;
-; LMULMAX1-RV64-LABEL: xor_v32i8:
-; LMULMAX1-RV64: # %bb.0:
-; LMULMAX1-RV64-NEXT: vsetivli zero, 16, e8, m1, ta, ma
-; LMULMAX1-RV64-NEXT: vle8.v v8, (a0)
-; LMULMAX1-RV64-NEXT: addi a2, a1, 16
-; LMULMAX1-RV64-NEXT: vle8.v v9, (a2)
-; LMULMAX1-RV64-NEXT: addi a2, a0, 16
-; LMULMAX1-RV64-NEXT: vle8.v v10, (a2)
-; LMULMAX1-RV64-NEXT: vle8.v v11, (a1)
-; LMULMAX1-RV64-NEXT: vxor.vv v9, v10, v9
-; LMULMAX1-RV64-NEXT: vxor.vv v8, v8, v11
-; LMULMAX1-RV64-NEXT: vse8.v v8, (a0)
-; LMULMAX1-RV64-NEXT: vse8.v v9, (a2)
-; LMULMAX1-RV64-NEXT: ret
+; CHECK-LABEL: xor_v32i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a2, 32
+; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, ma
+; CHECK-NEXT: vle8.v v8, (a0)
+; CHECK-NEXT: vle8.v v10, (a1)
+; CHECK-NEXT: vxor.vv v8, v8, v10
+; CHECK-NEXT: vse8.v v8, (a0)
+; CHECK-NEXT: ret
%a = load <32 x i8>, ptr %x
%b = load <32 x i8>, ptr %y
%c = xor <32 x i8> %a, %b
@@ -3399,44 +2735,14 @@ define void @xor_v32i8(ptr %x, ptr %y) {
}
define void @xor_v16i16(ptr %x, ptr %y) {
-; LMULMAX2-LABEL: xor_v16i16:
-; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; LMULMAX2-NEXT: vle16.v v8, (a0)
-; LMULMAX2-NEXT: vle16.v v10, (a1)
-; LMULMAX2-NEXT: vxor.vv v8, v8, v10
-; LMULMAX2-NEXT: vse16.v v8, (a0)
-; LMULMAX2-NEXT: ret
-;
-; LMULMAX1-RV32-LABEL: xor_v16i16:
-; LMULMAX1-RV32: # %bb.0:
-; LMULMAX1-RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; LMULMAX1-RV32-NEXT: vle16.v v8, (a0)
-; LMULMAX1-RV32-NEXT: addi a2, a0, 16
-; LMULMAX1-RV32-NEXT: vle16.v v9, (a2)
-; LMULMAX1-RV32-NEXT: addi a3, a1, 16
-; LMULMAX1-RV32-NEXT: vle16.v v10, (a3)
-; LMULMAX1-RV32-NEXT: vle16.v v11, (a1)
-; LMULMAX1-RV32-NEXT: vxor.vv v9, v9, v10
-; LMULMAX1-RV32-NEXT: vxor.vv v8, v8, v11
-; LMULMAX1-RV32-NEXT: vse16.v v8, (a0)
-; LMULMAX1-RV32-NEXT: vse16.v v9, (a2)
-; LMULMAX1-RV32-NEXT: ret
-;
-; LMULMAX1-RV64-LABEL: xor_v16i16:
-; LMULMAX1-RV64: # %bb.0:
-; LMULMAX1-RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; LMULMAX1-RV64-NEXT: vle16.v v8, (a0)
-; LMULMAX1-RV64-NEXT: addi a2, a1, 16
-; LMULMAX1-RV64-NEXT: vle16.v v9, (a2)
-; LMULMAX1-RV64-NEXT: addi a2, a0, 16
-; LMULMAX1-RV64-NEXT: vle16.v v10, (a2)
-; LMULMAX1-RV64-NEXT: vle16.v v11, (a1)
-; LMULMAX1-RV64-NEXT: vxor.vv v9, v10, v9
-; LMULMAX1-RV64-NEXT: vxor.vv v8, v8, v11
-; LMULMAX1-RV64-NEXT: vse16.v v8, (a0)
-; LMULMAX1-RV64-NEXT: vse16.v v9, (a2)
-; LMULMAX1-RV64-NEXT: ret
+; CHECK-LABEL: xor_v16i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; CHECK-NEXT: vle16.v v8, (a0)
+; CHECK-NEXT: vle16.v v10, (a1)
+; CHECK-NEXT: vxor.vv v8, v8, v10
+; CHECK-NEXT: vse16.v v8, (a0)
+; CHECK-NEXT: ret
%a = load <16 x i16>, ptr %x
%b = load <16 x i16>, ptr %y
%c = xor <16 x i16> %a, %b
@@ -3445,44 +2751,14 @@ define void @xor_v16i16(ptr %x, ptr %y) {
}
define void @xor_v8i32(ptr %x, ptr %y) {
-; LMULMAX2-LABEL: xor_v8i32:
-; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; LMULMAX2-NEXT: vle32.v v8, (a0)
-; LMULMAX2-NEXT: vle32.v v10, (a1)
-; LMULMAX2-NEXT: vxor.vv v8, v8, v10
-; LMULMAX2-NEXT: vse32.v v8, (a0)
-; LMULMAX2-NEXT: ret
-;
-; LMULMAX1-RV32-LABEL: xor_v8i32:
-; LMULMAX1-RV32: # %bb.0:
-; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; LMULMAX1-RV32-NEXT: vle32.v v8, (a0)
-; LMULMAX1-RV32-NEXT: addi a2, a0, 16
-; LMULMAX1-RV32-NEXT: vle32.v v9, (a2)
-; LMULMAX1-RV32-NEXT: addi a3, a1, 16
-; LMULMAX1-RV32-NEXT: vle32.v v10, (a3)
-; LMULMAX1-RV32-NEXT: vle32.v v11, (a1)
-; LMULMAX1-RV32-NEXT: vxor.vv v9, v9, v10
-; LMULMAX1-RV32-NEXT: vxor.vv v8, v8, v11
-; LMULMAX1-RV32-NEXT: vse32.v v8, (a0)
-; LMULMAX1-RV32-NEXT: vse32.v v9, (a2)
-; LMULMAX1-RV32-NEXT: ret
-;
-; LMULMAX1-RV64-LABEL: xor_v8i32:
-; LMULMAX1-RV64: # %bb.0:
-; LMULMAX1-RV64-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; LMULMAX1-RV64-NEXT: vle32.v v8, (a0)
-; LMULMAX1-RV64-NEXT: addi a2, a1, 16
-; LMULMAX1-RV64-NEXT: vle32.v v9, (a2)
-; LMULMAX1-RV64-NEXT: addi a2, a0, 16
-; LMULMAX1-RV64-NEXT: vle32.v v10, (a2)
-; LMULMAX1-RV64-NEXT: vle32.v v11, (a1)
-; LMULMAX1-RV64-NEXT: vxor.vv v9, v10, v9
-; LMULMAX1-RV64-NEXT: vxor.vv v8, v8, v11
-; LMULMAX1-RV64-NEXT: vse32.v v8, (a0)
-; LMULMAX1-RV64-NEXT: vse32.v v9, (a2)
-; LMULMAX1-RV64-NEXT: ret
+; CHECK-LABEL: xor_v8i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; CHECK-NEXT: vle32.v v8, (a0)
+; CHECK-NEXT: vle32.v v10, (a1)
+; CHECK-NEXT: vxor.vv v8, v8, v10
+; CHECK-NEXT: vse32.v v8, (a0)
+; CHECK-NEXT: ret
%a = load <8 x i32>, ptr %x
%b = load <8 x i32>, ptr %y
%c = xor <8 x i32> %a, %b
@@ -3491,44 +2767,14 @@ define void @xor_v8i32(ptr %x, ptr %y) {
}
define void @xor_v4i64(ptr %x, ptr %y) {
-; LMULMAX2-LABEL: xor_v4i64:
-; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; LMULMAX2-NEXT: vle64.v v8, (a0)
-; LMULMAX2-NEXT: vle64.v v10, (a1)
-; LMULMAX2-NEXT: vxor.vv v8, v8, v10
-; LMULMAX2-NEXT: vse64.v v8, (a0)
-; LMULMAX2-NEXT: ret
-;
-; LMULMAX1-RV32-LABEL: xor_v4i64:
-; LMULMAX1-RV32: # %bb.0:
-; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; LMULMAX1-RV32-NEXT: vle64.v v8, (a0)
-; LMULMAX1-RV32-NEXT: addi a2, a0, 16
-; LMULMAX1-RV32-NEXT: vle64.v v9, (a2)
-; LMULMAX1-RV32-NEXT: addi a3, a1, 16
-; LMULMAX1-RV32-NEXT: vle64.v v10, (a3)
-; LMULMAX1-RV32-NEXT: vle64.v v11, (a1)
-; LMULMAX1-RV32-NEXT: vxor.vv v9, v9, v10
-; LMULMAX1-RV32-NEXT: vxor.vv v8, v8, v11
-; LMULMAX1-RV32-NEXT: vse64.v v8, (a0)
-; LMULMAX1-RV32-NEXT: vse64.v v9, (a2)
-; LMULMAX1-RV32-NEXT: ret
-;
-; LMULMAX1-RV64-LABEL: xor_v4i64:
-; LMULMAX1-RV64: # %bb.0:
-; LMULMAX1-RV64-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; LMULMAX1-RV64-NEXT: vle64.v v8, (a0)
-; LMULMAX1-RV64-NEXT: addi a2, a1, 16
-; LMULMAX1-RV64-NEXT: vle64.v v9, (a2)
-; LMULMAX1-RV64-NEXT: addi a2, a0, 16
-; LMULMAX1-RV64-NEXT: vle64.v v10, (a2)
-; LMULMAX1-RV64-NEXT: vle64.v v11, (a1)
-; LMULMAX1-RV64-NEXT: vxor.vv v9, v10, v9
-; LMULMAX1-RV64-NEXT: vxor.vv v8, v8, v11
-; LMULMAX1-RV64-NEXT: vse64.v v8, (a0)
-; LMULMAX1-RV64-NEXT: vse64.v v9, (a2)
-; LMULMAX1-RV64-NEXT: ret
+; CHECK-LABEL: xor_v4i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; CHECK-NEXT: vle64.v v8, (a0)
+; CHECK-NEXT: vle64.v v10, (a1)
+; CHECK-NEXT: vxor.vv v8, v8, v10
+; CHECK-NEXT: vse64.v v8, (a0)
+; CHECK-NEXT: ret
%a = load <4 x i64>, ptr %x
%b = load <4 x i64>, ptr %y
%c = xor <4 x i64> %a, %b
@@ -3537,45 +2783,15 @@ define void @xor_v4i64(ptr %x, ptr %y) {
}
define void @lshr_v32i8(ptr %x, ptr %y) {
-; LMULMAX2-LABEL: lshr_v32i8:
-; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: li a2, 32
-; LMULMAX2-NEXT: vsetvli zero, a2, e8, m2, ta, ma
-; LMULMAX2-NEXT: vle8.v v8, (a0)
-; LMULMAX2-NEXT: vle8.v v10, (a1)
-; LMULMAX2-NEXT: vsrl.vv v8, v8, v10
-; LMULMAX2-NEXT: vse8.v v8, (a0)
-; LMULMAX2-NEXT: ret
-;
-; LMULMAX1-RV32-LABEL: lshr_v32i8:
-; LMULMAX1-RV32: # %bb.0:
-; LMULMAX1-RV32-NEXT: vsetivli zero, 16, e8, m1, ta, ma
-; LMULMAX1-RV32-NEXT: vle8.v v8, (a0)
-; LMULMAX1-RV32-NEXT: addi a2, a0, 16
-; LMULMAX1-RV32-NEXT: vle8.v v9, (a2)
-; LMULMAX1-RV32-NEXT: addi a3, a1, 16
-; LMULMAX1-RV32-NEXT: vle8.v v10, (a3)
-; LMULMAX1-RV32-NEXT: vle8.v v11, (a1)
-; LMULMAX1-RV32-NEXT: vsrl.vv v9, v9, v10
-; LMULMAX1-RV32-NEXT: vsrl.vv v8, v8, v11
-; LMULMAX1-RV32-NEXT: vse8.v v8, (a0)
-; LMULMAX1-RV32-NEXT: vse8.v v9, (a2)
-; LMULMAX1-RV32-NEXT: ret
-;
-; LMULMAX1-RV64-LABEL: lshr_v32i8:
-; LMULMAX1-RV64: # %bb.0:
-; LMULMAX1-RV64-NEXT: vsetivli zero, 16, e8, m1, ta, ma
-; LMULMAX1-RV64-NEXT: vle8.v v8, (a0)
-; LMULMAX1-RV64-NEXT: addi a2, a1, 16
-; LMULMAX1-RV64-NEXT: vle8.v v9, (a2)
-; LMULMAX1-RV64-NEXT: addi a2, a0, 16
-; LMULMAX1-RV64-NEXT: vle8.v v10, (a2)
-; LMULMAX1-RV64-NEXT: vle8.v v11, (a1)
-; LMULMAX1-RV64-NEXT: vsrl.vv v9, v10, v9
-; LMULMAX1-RV64-NEXT: vsrl.vv v8, v8, v11
-; LMULMAX1-RV64-NEXT: vse8.v v8, (a0)
-; LMULMAX1-RV64-NEXT: vse8.v v9, (a2)
-; LMULMAX1-RV64-NEXT: ret
+; CHECK-LABEL: lshr_v32i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a2, 32
+; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, ma
+; CHECK-NEXT: vle8.v v8, (a0)
+; CHECK-NEXT: vle8.v v10, (a1)
+; CHECK-NEXT: vsrl.vv v8, v8, v10
+; CHECK-NEXT: vse8.v v8, (a0)
+; CHECK-NEXT: ret
%a = load <32 x i8>, ptr %x
%b = load <32 x i8>, ptr %y
%c = lshr <32 x i8> %a, %b
@@ -3584,44 +2800,14 @@ define void @lshr_v32i8(ptr %x, ptr %y) {
}
define void @lshr_v16i16(ptr %x, ptr %y) {
-; LMULMAX2-LABEL: lshr_v16i16:
-; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; LMULMAX2-NEXT: vle16.v v8, (a0)
-; LMULMAX2-NEXT: vle16.v v10, (a1)
-; LMULMAX2-NEXT: vsrl.vv v8, v8, v10
-; LMULMAX2-NEXT: vse16.v v8, (a0)
-; LMULMAX2-NEXT: ret
-;
-; LMULMAX1-RV32-LABEL: lshr_v16i16:
-; LMULMAX1-RV32: # %bb.0:
-; LMULMAX1-RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; LMULMAX1-RV32-NEXT: vle16.v v8, (a0)
-; LMULMAX1-RV32-NEXT: addi a2, a0, 16
-; LMULMAX1-RV32-NEXT: vle16.v v9, (a2)
-; LMULMAX1-RV32-NEXT: addi a3, a1, 16
-; LMULMAX1-RV32-NEXT: vle16.v v10, (a3)
-; LMULMAX1-RV32-NEXT: vle16.v v11, (a1)
-; LMULMAX1-RV32-NEXT: vsrl.vv v9, v9, v10
-; LMULMAX1-RV32-NEXT: vsrl.vv v8, v8, v11
-; LMULMAX1-RV32-NEXT: vse16.v v8, (a0)
-; LMULMAX1-RV32-NEXT: vse16.v v9, (a2)
-; LMULMAX1-RV32-NEXT: ret
-;
-; LMULMAX1-RV64-LABEL: lshr_v16i16:
-; LMULMAX1-RV64: # %bb.0:
-; LMULMAX1-RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; LMULMAX1-RV64-NEXT: vle16.v v8, (a0)
-; LMULMAX1-RV64-NEXT: addi a2, a1, 16
-; LMULMAX1-RV64-NEXT: vle16.v v9, (a2)
-; LMULMAX1-RV64-NEXT: addi a2, a0, 16
-; LMULMAX1-RV64-NEXT: vle16.v v10, (a2)
-; LMULMAX1-RV64-NEXT: vle16.v v11, (a1)
-; LMULMAX1-RV64-NEXT: vsrl.vv v9, v10, v9
-; LMULMAX1-RV64-NEXT: vsrl.vv v8, v8, v11
-; LMULMAX1-RV64-NEXT: vse16.v v8, (a0)
-; LMULMAX1-RV64-NEXT: vse16.v v9, (a2)
-; LMULMAX1-RV64-NEXT: ret
+; CHECK-LABEL: lshr_v16i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; CHECK-NEXT: vle16.v v8, (a0)
+; CHECK-NEXT: vle16.v v10, (a1)
+; CHECK-NEXT: vsrl.vv v8, v8, v10
+; CHECK-NEXT: vse16.v v8, (a0)
+; CHECK-NEXT: ret
%a = load <16 x i16>, ptr %x
%b = load <16 x i16>, ptr %y
%c = lshr <16 x i16> %a, %b
@@ -3630,44 +2816,14 @@ define void @lshr_v16i16(ptr %x, ptr %y) {
}
define void @lshr_v8i32(ptr %x, ptr %y) {
-; LMULMAX2-LABEL: lshr_v8i32:
-; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; LMULMAX2-NEXT: vle32.v v8, (a0)
-; LMULMAX2-NEXT: vle32.v v10, (a1)
-; LMULMAX2-NEXT: vsrl.vv v8, v8, v10
-; LMULMAX2-NEXT: vse32.v v8, (a0)
-; LMULMAX2-NEXT: ret
-;
-; LMULMAX1-RV32-LABEL: lshr_v8i32:
-; LMULMAX1-RV32: # %bb.0:
-; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; LMULMAX1-RV32-NEXT: vle32.v v8, (a0)
-; LMULMAX1-RV32-NEXT: addi a2, a0, 16
-; LMULMAX1-RV32-NEXT: vle32.v v9, (a2)
-; LMULMAX1-RV32-NEXT: addi a3, a1, 16
-; LMULMAX1-RV32-NEXT: vle32.v v10, (a3)
-; LMULMAX1-RV32-NEXT: vle32.v v11, (a1)
-; LMULMAX1-RV32-NEXT: vsrl.vv v9, v9, v10
-; LMULMAX1-RV32-NEXT: vsrl.vv v8, v8, v11
-; LMULMAX1-RV32-NEXT: vse32.v v8, (a0)
-; LMULMAX1-RV32-NEXT: vse32.v v9, (a2)
-; LMULMAX1-RV32-NEXT: ret
-;
-; LMULMAX1-RV64-LABEL: lshr_v8i32:
-; LMULMAX1-RV64: # %bb.0:
-; LMULMAX1-RV64-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; LMULMAX1-RV64-NEXT: vle32.v v8, (a0)
-; LMULMAX1-RV64-NEXT: addi a2, a1, 16
-; LMULMAX1-RV64-NEXT: vle32.v v9, (a2)
-; LMULMAX1-RV64-NEXT: addi a2, a0, 16
-; LMULMAX1-RV64-NEXT: vle32.v v10, (a2)
-; LMULMAX1-RV64-NEXT: vle32.v v11, (a1)
-; LMULMAX1-RV64-NEXT: vsrl.vv v9, v10, v9
-; LMULMAX1-RV64-NEXT: vsrl.vv v8, v8, v11
-; LMULMAX1-RV64-NEXT: vse32.v v8, (a0)
-; LMULMAX1-RV64-NEXT: vse32.v v9, (a2)
-; LMULMAX1-RV64-NEXT: ret
+; CHECK-LABEL: lshr_v8i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; CHECK-NEXT: vle32.v v8, (a0)
+; CHECK-NEXT: vle32.v v10, (a1)
+; CHECK-NEXT: vsrl.vv v8, v8, v10
+; CHECK-NEXT: vse32.v v8, (a0)
+; CHECK-NEXT: ret
%a = load <8 x i32>, ptr %x
%b = load <8 x i32>, ptr %y
%c = lshr <8 x i32> %a, %b
@@ -3676,44 +2832,14 @@ define void @lshr_v8i32(ptr %x, ptr %y) {
}
define void @lshr_v4i64(ptr %x, ptr %y) {
-; LMULMAX2-LABEL: lshr_v4i64:
-; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; LMULMAX2-NEXT: vle64.v v8, (a0)
-; LMULMAX2-NEXT: vle64.v v10, (a1)
-; LMULMAX2-NEXT: vsrl.vv v8, v8, v10
-; LMULMAX2-NEXT: vse64.v v8, (a0)
-; LMULMAX2-NEXT: ret
-;
-; LMULMAX1-RV32-LABEL: lshr_v4i64:
-; LMULMAX1-RV32: # %bb.0:
-; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; LMULMAX1-RV32-NEXT: vle64.v v8, (a0)
-; LMULMAX1-RV32-NEXT: addi a2, a0, 16
-; LMULMAX1-RV32-NEXT: vle64.v v9, (a2)
-; LMULMAX1-RV32-NEXT: addi a3, a1, 16
-; LMULMAX1-RV32-NEXT: vle64.v v10, (a3)
-; LMULMAX1-RV32-NEXT: vle64.v v11, (a1)
-; LMULMAX1-RV32-NEXT: vsrl.vv v9, v9, v10
-; LMULMAX1-RV32-NEXT: vsrl.vv v8, v8, v11
-; LMULMAX1-RV32-NEXT: vse64.v v8, (a0)
-; LMULMAX1-RV32-NEXT: vse64.v v9, (a2)
-; LMULMAX1-RV32-NEXT: ret
-;
-; LMULMAX1-RV64-LABEL: lshr_v4i64:
-; LMULMAX1-RV64: # %bb.0:
-; LMULMAX1-RV64-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; LMULMAX1-RV64-NEXT: vle64.v v8, (a0)
-; LMULMAX1-RV64-NEXT: addi a2, a1, 16
-; LMULMAX1-RV64-NEXT: vle64.v v9, (a2)
-; LMULMAX1-RV64-NEXT: addi a2, a0, 16
-; LMULMAX1-RV64-NEXT: vle64.v v10, (a2)
-; LMULMAX1-RV64-NEXT: vle64.v v11, (a1)
-; LMULMAX1-RV64-NEXT: vsrl.vv v9, v10, v9
-; LMULMAX1-RV64-NEXT: vsrl.vv v8, v8, v11
-; LMULMAX1-RV64-NEXT: vse64.v v8, (a0)
-; LMULMAX1-RV64-NEXT: vse64.v v9, (a2)
-; LMULMAX1-RV64-NEXT: ret
+; CHECK-LABEL: lshr_v4i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; CHECK-NEXT: vle64.v v8, (a0)
+; CHECK-NEXT: vle64.v v10, (a1)
+; CHECK-NEXT: vsrl.vv v8, v8, v10
+; CHECK-NEXT: vse64.v v8, (a0)
+; CHECK-NEXT: ret
%a = load <4 x i64>, ptr %x
%b = load <4 x i64>, ptr %y
%c = lshr <4 x i64> %a, %b
@@ -3722,45 +2848,15 @@ define void @lshr_v4i64(ptr %x, ptr %y) {
}
define void @ashr_v32i8(ptr %x, ptr %y) {
-; LMULMAX2-LABEL: ashr_v32i8:
-; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: li a2, 32
-; LMULMAX2-NEXT: vsetvli zero, a2, e8, m2, ta, ma
-; LMULMAX2-NEXT: vle8.v v8, (a0)
-; LMULMAX2-NEXT: vle8.v v10, (a1)
-; LMULMAX2-NEXT: vsra.vv v8, v8, v10
-; LMULMAX2-NEXT: vse8.v v8, (a0)
-; LMULMAX2-NEXT: ret
-;
-; LMULMAX1-RV32-LABEL: ashr_v32i8:
-; LMULMAX1-RV32: # %bb.0:
-; LMULMAX1-RV32-NEXT: vsetivli zero, 16, e8, m1, ta, ma
-; LMULMAX1-RV32-NEXT: vle8.v v8, (a0)
-; LMULMAX1-RV32-NEXT: addi a2, a0, 16
-; LMULMAX1-RV32-NEXT: vle8.v v9, (a2)
-; LMULMAX1-RV32-NEXT: addi a3, a1, 16
-; LMULMAX1-RV32-NEXT: vle8.v v10, (a3)
-; LMULMAX1-RV32-NEXT: vle8.v v11, (a1)
-; LMULMAX1-RV32-NEXT: vsra.vv v9, v9, v10
-; LMULMAX1-RV32-NEXT: vsra.vv v8, v8, v11
-; LMULMAX1-RV32-NEXT: vse8.v v8, (a0)
-; LMULMAX1-RV32-NEXT: vse8.v v9, (a2)
-; LMULMAX1-RV32-NEXT: ret
-;
-; LMULMAX1-RV64-LABEL: ashr_v32i8:
-; LMULMAX1-RV64: # %bb.0:
-; LMULMAX1-RV64-NEXT: vsetivli zero, 16, e8, m1, ta, ma
-; LMULMAX1-RV64-NEXT: vle8.v v8, (a0)
-; LMULMAX1-RV64-NEXT: addi a2, a1, 16
-; LMULMAX1-RV64-NEXT: vle8.v v9, (a2)
-; LMULMAX1-RV64-NEXT: addi a2, a0, 16
-; LMULMAX1-RV64-NEXT: vle8.v v10, (a2)
-; LMULMAX1-RV64-NEXT: vle8.v v11, (a1)
-; LMULMAX1-RV64-NEXT: vsra.vv v9, v10, v9
-; LMULMAX1-RV64-NEXT: vsra.vv v8, v8, v11
-; LMULMAX1-RV64-NEXT: vse8.v v8, (a0)
-; LMULMAX1-RV64-NEXT: vse8.v v9, (a2)
-; LMULMAX1-RV64-NEXT: ret
+; CHECK-LABEL: ashr_v32i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a2, 32
+; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, ma
+; CHECK-NEXT: vle8.v v8, (a0)
+; CHECK-NEXT: vle8.v v10, (a1)
+; CHECK-NEXT: vsra.vv v8, v8, v10
+; CHECK-NEXT: vse8.v v8, (a0)
+; CHECK-NEXT: ret
%a = load <32 x i8>, ptr %x
%b = load <32 x i8>, ptr %y
%c = ashr <32 x i8> %a, %b
@@ -3769,44 +2865,14 @@ define void @ashr_v32i8(ptr %x, ptr %y) {
}
define void @ashr_v16i16(ptr %x, ptr %y) {
-; LMULMAX2-LABEL: ashr_v16i16:
-; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; LMULMAX2-NEXT: vle16.v v8, (a0)
-; LMULMAX2-NEXT: vle16.v v10, (a1)
-; LMULMAX2-NEXT: vsra.vv v8, v8, v10
-; LMULMAX2-NEXT: vse16.v v8, (a0)
-; LMULMAX2-NEXT: ret
-;
-; LMULMAX1-RV32-LABEL: ashr_v16i16:
-; LMULMAX1-RV32: # %bb.0:
-; LMULMAX1-RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; LMULMAX1-RV32-NEXT: vle16.v v8, (a0)
-; LMULMAX1-RV32-NEXT: addi a2, a0, 16
-; LMULMAX1-RV32-NEXT: vle16.v v9, (a2)
-; LMULMAX1-RV32-NEXT: addi a3, a1, 16
-; LMULMAX1-RV32-NEXT: vle16.v v10, (a3)
-; LMULMAX1-RV32-NEXT: vle16.v v11, (a1)
-; LMULMAX1-RV32-NEXT: vsra.vv v9, v9, v10
-; LMULMAX1-RV32-NEXT: vsra.vv v8, v8, v11
-; LMULMAX1-RV32-NEXT: vse16.v v8, (a0)
-; LMULMAX1-RV32-NEXT: vse16.v v9, (a2)
-; LMULMAX1-RV32-NEXT: ret
-;
-; LMULMAX1-RV64-LABEL: ashr_v16i16:
-; LMULMAX1-RV64: # %bb.0:
-; LMULMAX1-RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; LMULMAX1-RV64-NEXT: vle16.v v8, (a0)
-; LMULMAX1-RV64-NEXT: addi a2, a1, 16
-; LMULMAX1-RV64-NEXT: vle16.v v9, (a2)
-; LMULMAX1-RV64-NEXT: addi a2, a0, 16
-; LMULMAX1-RV64-NEXT: vle16.v v10, (a2)
-; LMULMAX1-RV64-NEXT: vle16.v v11, (a1)
-; LMULMAX1-RV64-NEXT: vsra.vv v9, v10, v9
-; LMULMAX1-RV64-NEXT: vsra.vv v8, v8, v11
-; LMULMAX1-RV64-NEXT: vse16.v v8, (a0)
-; LMULMAX1-RV64-NEXT: vse16.v v9, (a2)
-; LMULMAX1-RV64-NEXT: ret
+; CHECK-LABEL: ashr_v16i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; CHECK-NEXT: vle16.v v8, (a0)
+; CHECK-NEXT: vle16.v v10, (a1)
+; CHECK-NEXT: vsra.vv v8, v8, v10
+; CHECK-NEXT: vse16.v v8, (a0)
+; CHECK-NEXT: ret
%a = load <16 x i16>, ptr %x
%b = load <16 x i16>, ptr %y
%c = ashr <16 x i16> %a, %b
@@ -3815,44 +2881,14 @@ define void @ashr_v16i16(ptr %x, ptr %y) {
}
define void @ashr_v8i32(ptr %x, ptr %y) {
-; LMULMAX2-LABEL: ashr_v8i32:
-; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; LMULMAX2-NEXT: vle32.v v8, (a0)
-; LMULMAX2-NEXT: vle32.v v10, (a1)
-; LMULMAX2-NEXT: vsra.vv v8, v8, v10
-; LMULMAX2-NEXT: vse32.v v8, (a0)
-; LMULMAX2-NEXT: ret
-;
-; LMULMAX1-RV32-LABEL: ashr_v8i32:
-; LMULMAX1-RV32: # %bb.0:
-; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; LMULMAX1-RV32-NEXT: vle32.v v8, (a0)
-; LMULMAX1-RV32-NEXT: addi a2, a0, 16
-; LMULMAX1-RV32-NEXT: vle32.v v9, (a2)
-; LMULMAX1-RV32-NEXT: addi a3, a1, 16
-; LMULMAX1-RV32-NEXT: vle32.v v10, (a3)
-; LMULMAX1-RV32-NEXT: vle32.v v11, (a1)
-; LMULMAX1-RV32-NEXT: vsra.vv v9, v9, v10
-; LMULMAX1-RV32-NEXT: vsra.vv v8, v8, v11
-; LMULMAX1-RV32-NEXT: vse32.v v8, (a0)
-; LMULMAX1-RV32-NEXT: vse32.v v9, (a2)
-; LMULMAX1-RV32-NEXT: ret
-;
-; LMULMAX1-RV64-LABEL: ashr_v8i32:
-; LMULMAX1-RV64: # %bb.0:
-; LMULMAX1-RV64-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; LMULMAX1-RV64-NEXT: vle32.v v8, (a0)
-; LMULMAX1-RV64-NEXT: addi a2, a1, 16
-; LMULMAX1-RV64-NEXT: vle32.v v9, (a2)
-; LMULMAX1-RV64-NEXT: addi a2, a0, 16
-; LMULMAX1-RV64-NEXT: vle32.v v10, (a2)
-; LMULMAX1-RV64-NEXT: vle32.v v11, (a1)
-; LMULMAX1-RV64-NEXT: vsra.vv v9, v10, v9
-; LMULMAX1-RV64-NEXT: vsra.vv v8, v8, v11
-; LMULMAX1-RV64-NEXT: vse32.v v8, (a0)
-; LMULMAX1-RV64-NEXT: vse32.v v9, (a2)
-; LMULMAX1-RV64-NEXT: ret
+; CHECK-LABEL: ashr_v8i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; CHECK-NEXT: vle32.v v8, (a0)
+; CHECK-NEXT: vle32.v v10, (a1)
+; CHECK-NEXT: vsra.vv v8, v8, v10
+; CHECK-NEXT: vse32.v v8, (a0)
+; CHECK-NEXT: ret
%a = load <8 x i32>, ptr %x
%b = load <8 x i32>, ptr %y
%c = ashr <8 x i32> %a, %b
@@ -3861,44 +2897,14 @@ define void @ashr_v8i32(ptr %x, ptr %y) {
}
define void @ashr_v4i64(ptr %x, ptr %y) {
-; LMULMAX2-LABEL: ashr_v4i64:
-; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; LMULMAX2-NEXT: vle64.v v8, (a0)
-; LMULMAX2-NEXT: vle64.v v10, (a1)
-; LMULMAX2-NEXT: vsra.vv v8, v8, v10
-; LMULMAX2-NEXT: vse64.v v8, (a0)
-; LMULMAX2-NEXT: ret
-;
-; LMULMAX1-RV32-LABEL: ashr_v4i64:
-; LMULMAX1-RV32: # %bb.0:
-; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; LMULMAX1-RV32-NEXT: vle64.v v8, (a0)
-; LMULMAX1-RV32-NEXT: addi a2, a0, 16
-; LMULMAX1-RV32-NEXT: vle64.v v9, (a2)
-; LMULMAX1-RV32-NEXT: addi a3, a1, 16
-; LMULMAX1-RV32-NEXT: vle64.v v10, (a3)
-; LMULMAX1-RV32-NEXT: vle64.v v11, (a1)
-; LMULMAX1-RV32-NEXT: vsra.vv v9, v9, v10
-; LMULMAX1-RV32-NEXT: vsra.vv v8, v8, v11
-; LMULMAX1-RV32-NEXT: vse64.v v8, (a0)
-; LMULMAX1-RV32-NEXT: vse64.v v9, (a2)
-; LMULMAX1-RV32-NEXT: ret
-;
-; LMULMAX1-RV64-LABEL: ashr_v4i64:
-; LMULMAX1-RV64: # %bb.0:
-; LMULMAX1-RV64-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; LMULMAX1-RV64-NEXT: vle64.v v8, (a0)
-; LMULMAX1-RV64-NEXT: addi a2, a1, 16
-; LMULMAX1-RV64-NEXT: vle64.v v9, (a2)
-; LMULMAX1-RV64-NEXT: addi a2, a0, 16
-; LMULMAX1-RV64-NEXT: vle64.v v10, (a2)
-; LMULMAX1-RV64-NEXT: vle64.v v11, (a1)
-; LMULMAX1-RV64-NEXT: vsra.vv v9, v10, v9
-; LMULMAX1-RV64-NEXT: vsra.vv v8, v8, v11
-; LMULMAX1-RV64-NEXT: vse64.v v8, (a0)
-; LMULMAX1-RV64-NEXT: vse64.v v9, (a2)
-; LMULMAX1-RV64-NEXT: ret
+; CHECK-LABEL: ashr_v4i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; CHECK-NEXT: vle64.v v8, (a0)
+; CHECK-NEXT: vle64.v v10, (a1)
+; CHECK-NEXT: vsra.vv v8, v8, v10
+; CHECK-NEXT: vse64.v v8, (a0)
+; CHECK-NEXT: ret
%a = load <4 x i64>, ptr %x
%b = load <4 x i64>, ptr %y
%c = ashr <4 x i64> %a, %b
@@ -3907,45 +2913,15 @@ define void @ashr_v4i64(ptr %x, ptr %y) {
}
define void @shl_v32i8(ptr %x, ptr %y) {
-; LMULMAX2-LABEL: shl_v32i8:
-; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: li a2, 32
-; LMULMAX2-NEXT: vsetvli zero, a2, e8, m2, ta, ma
-; LMULMAX2-NEXT: vle8.v v8, (a0)
-; LMULMAX2-NEXT: vle8.v v10, (a1)
-; LMULMAX2-NEXT: vsll.vv v8, v8, v10
-; LMULMAX2-NEXT: vse8.v v8, (a0)
-; LMULMAX2-NEXT: ret
-;
-; LMULMAX1-RV32-LABEL: shl_v32i8:
-; LMULMAX1-RV32: # %bb.0:
-; LMULMAX1-RV32-NEXT: vsetivli zero, 16, e8, m1, ta, ma
-; LMULMAX1-RV32-NEXT: vle8.v v8, (a0)
-; LMULMAX1-RV32-NEXT: addi a2, a0, 16
-; LMULMAX1-RV32-NEXT: vle8.v v9, (a2)
-; LMULMAX1-RV32-NEXT: addi a3, a1, 16
-; LMULMAX1-RV32-NEXT: vle8.v v10, (a3)
-; LMULMAX1-RV32-NEXT: vle8.v v11, (a1)
-; LMULMAX1-RV32-NEXT: vsll.vv v9, v9, v10
-; LMULMAX1-RV32-NEXT: vsll.vv v8, v8, v11
-; LMULMAX1-RV32-NEXT: vse8.v v8, (a0)
-; LMULMAX1-RV32-NEXT: vse8.v v9, (a2)
-; LMULMAX1-RV32-NEXT: ret
-;
-; LMULMAX1-RV64-LABEL: shl_v32i8:
-; LMULMAX1-RV64: # %bb.0:
-; LMULMAX1-RV64-NEXT: vsetivli zero, 16, e8, m1, ta, ma
-; LMULMAX1-RV64-NEXT: vle8.v v8, (a0)
-; LMULMAX1-RV64-NEXT: addi a2, a1, 16
-; LMULMAX1-RV64-NEXT: vle8.v v9, (a2)
-; LMULMAX1-RV64-NEXT: addi a2, a0, 16
-; LMULMAX1-RV64-NEXT: vle8.v v10, (a2)
-; LMULMAX1-RV64-NEXT: vle8.v v11, (a1)
-; LMULMAX1-RV64-NEXT: vsll.vv v9, v10, v9
-; LMULMAX1-RV64-NEXT: vsll.vv v8, v8, v11
-; LMULMAX1-RV64-NEXT: vse8.v v8, (a0)
-; LMULMAX1-RV64-NEXT: vse8.v v9, (a2)
-; LMULMAX1-RV64-NEXT: ret
+; CHECK-LABEL: shl_v32i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a2, 32
+; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, ma
+; CHECK-NEXT: vle8.v v8, (a0)
+; CHECK-NEXT: vle8.v v10, (a1)
+; CHECK-NEXT: vsll.vv v8, v8, v10
+; CHECK-NEXT: vse8.v v8, (a0)
+; CHECK-NEXT: ret
%a = load <32 x i8>, ptr %x
%b = load <32 x i8>, ptr %y
%c = shl <32 x i8> %a, %b
@@ -3954,44 +2930,14 @@ define void @shl_v32i8(ptr %x, ptr %y) {
}
define void @shl_v16i16(ptr %x, ptr %y) {
-; LMULMAX2-LABEL: shl_v16i16:
-; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; LMULMAX2-NEXT: vle16.v v8, (a0)
-; LMULMAX2-NEXT: vle16.v v10, (a1)
-; LMULMAX2-NEXT: vsll.vv v8, v8, v10
-; LMULMAX2-NEXT: vse16.v v8, (a0)
-; LMULMAX2-NEXT: ret
-;
-; LMULMAX1-RV32-LABEL: shl_v16i16:
-; LMULMAX1-RV32: # %bb.0:
-; LMULMAX1-RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; LMULMAX1-RV32-NEXT: vle16.v v8, (a0)
-; LMULMAX1-RV32-NEXT: addi a2, a0, 16
-; LMULMAX1-RV32-NEXT: vle16.v v9, (a2)
-; LMULMAX1-RV32-NEXT: addi a3, a1, 16
-; LMULMAX1-RV32-NEXT: vle16.v v10, (a3)
-; LMULMAX1-RV32-NEXT: vle16.v v11, (a1)
-; LMULMAX1-RV32-NEXT: vsll.vv v9, v9, v10
-; LMULMAX1-RV32-NEXT: vsll.vv v8, v8, v11
-; LMULMAX1-RV32-NEXT: vse16.v v8, (a0)
-; LMULMAX1-RV32-NEXT: vse16.v v9, (a2)
-; LMULMAX1-RV32-NEXT: ret
-;
-; LMULMAX1-RV64-LABEL: shl_v16i16:
-; LMULMAX1-RV64: # %bb.0:
-; LMULMAX1-RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; LMULMAX1-RV64-NEXT: vle16.v v8, (a0)
-; LMULMAX1-RV64-NEXT: addi a2, a1, 16
-; LMULMAX1-RV64-NEXT: vle16.v v9, (a2)
-; LMULMAX1-RV64-NEXT: addi a2, a0, 16
-; LMULMAX1-RV64-NEXT: vle16.v v10, (a2)
-; LMULMAX1-RV64-NEXT: vle16.v v11, (a1)
-; LMULMAX1-RV64-NEXT: vsll.vv v9, v10, v9
-; LMULMAX1-RV64-NEXT: vsll.vv v8, v8, v11
-; LMULMAX1-RV64-NEXT: vse16.v v8, (a0)
-; LMULMAX1-RV64-NEXT: vse16.v v9, (a2)
-; LMULMAX1-RV64-NEXT: ret
+; CHECK-LABEL: shl_v16i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; CHECK-NEXT: vle16.v v8, (a0)
+; CHECK-NEXT: vle16.v v10, (a1)
+; CHECK-NEXT: vsll.vv v8, v8, v10
+; CHECK-NEXT: vse16.v v8, (a0)
+; CHECK-NEXT: ret
%a = load <16 x i16>, ptr %x
%b = load <16 x i16>, ptr %y
%c = shl <16 x i16> %a, %b
@@ -4000,44 +2946,14 @@ define void @shl_v16i16(ptr %x, ptr %y) {
}
define void @shl_v8i32(ptr %x, ptr %y) {
-; LMULMAX2-LABEL: shl_v8i32:
-; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; LMULMAX2-NEXT: vle32.v v8, (a0)
-; LMULMAX2-NEXT: vle32.v v10, (a1)
-; LMULMAX2-NEXT: vsll.vv v8, v8, v10
-; LMULMAX2-NEXT: vse32.v v8, (a0)
-; LMULMAX2-NEXT: ret
-;
-; LMULMAX1-RV32-LABEL: shl_v8i32:
-; LMULMAX1-RV32: # %bb.0:
-; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; LMULMAX1-RV32-NEXT: vle32.v v8, (a0)
-; LMULMAX1-RV32-NEXT: addi a2, a0, 16
-; LMULMAX1-RV32-NEXT: vle32.v v9, (a2)
-; LMULMAX1-RV32-NEXT: addi a3, a1, 16
-; LMULMAX1-RV32-NEXT: vle32.v v10, (a3)
-; LMULMAX1-RV32-NEXT: vle32.v v11, (a1)
-; LMULMAX1-RV32-NEXT: vsll.vv v9, v9, v10
-; LMULMAX1-RV32-NEXT: vsll.vv v8, v8, v11
-; LMULMAX1-RV32-NEXT: vse32.v v8, (a0)
-; LMULMAX1-RV32-NEXT: vse32.v v9, (a2)
-; LMULMAX1-RV32-NEXT: ret
-;
-; LMULMAX1-RV64-LABEL: shl_v8i32:
-; LMULMAX1-RV64: # %bb.0:
-; LMULMAX1-RV64-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; LMULMAX1-RV64-NEXT: vle32.v v8, (a0)
-; LMULMAX1-RV64-NEXT: addi a2, a1, 16
-; LMULMAX1-RV64-NEXT: vle32.v v9, (a2)
-; LMULMAX1-RV64-NEXT: addi a2, a0, 16
-; LMULMAX1-RV64-NEXT: vle32.v v10, (a2)
-; LMULMAX1-RV64-NEXT: vle32.v v11, (a1)
-; LMULMAX1-RV64-NEXT: vsll.vv v9, v10, v9
-; LMULMAX1-RV64-NEXT: vsll.vv v8, v8, v11
-; LMULMAX1-RV64-NEXT: vse32.v v8, (a0)
-; LMULMAX1-RV64-NEXT: vse32.v v9, (a2)
-; LMULMAX1-RV64-NEXT: ret
+; CHECK-LABEL: shl_v8i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; CHECK-NEXT: vle32.v v8, (a0)
+; CHECK-NEXT: vle32.v v10, (a1)
+; CHECK-NEXT: vsll.vv v8, v8, v10
+; CHECK-NEXT: vse32.v v8, (a0)
+; CHECK-NEXT: ret
%a = load <8 x i32>, ptr %x
%b = load <8 x i32>, ptr %y
%c = shl <8 x i32> %a, %b
@@ -4046,44 +2962,14 @@ define void @shl_v8i32(ptr %x, ptr %y) {
}
define void @shl_v4i64(ptr %x, ptr %y) {
-; LMULMAX2-LABEL: shl_v4i64:
-; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; LMULMAX2-NEXT: vle64.v v8, (a0)
-; LMULMAX2-NEXT: vle64.v v10, (a1)
-; LMULMAX2-NEXT: vsll.vv v8, v8, v10
-; LMULMAX2-NEXT: vse64.v v8, (a0)
-; LMULMAX2-NEXT: ret
-;
-; LMULMAX1-RV32-LABEL: shl_v4i64:
-; LMULMAX1-RV32: # %bb.0:
-; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; LMULMAX1-RV32-NEXT: vle64.v v8, (a0)
-; LMULMAX1-RV32-NEXT: addi a2, a0, 16
-; LMULMAX1-RV32-NEXT: vle64.v v9, (a2)
-; LMULMAX1-RV32-NEXT: addi a3, a1, 16
-; LMULMAX1-RV32-NEXT: vle64.v v10, (a3)
-; LMULMAX1-RV32-NEXT: vle64.v v11, (a1)
-; LMULMAX1-RV32-NEXT: vsll.vv v9, v9, v10
-; LMULMAX1-RV32-NEXT: vsll.vv v8, v8, v11
-; LMULMAX1-RV32-NEXT: vse64.v v8, (a0)
-; LMULMAX1-RV32-NEXT: vse64.v v9, (a2)
-; LMULMAX1-RV32-NEXT: ret
-;
-; LMULMAX1-RV64-LABEL: shl_v4i64:
-; LMULMAX1-RV64: # %bb.0:
-; LMULMAX1-RV64-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; LMULMAX1-RV64-NEXT: vle64.v v8, (a0)
-; LMULMAX1-RV64-NEXT: addi a2, a1, 16
-; LMULMAX1-RV64-NEXT: vle64.v v9, (a2)
-; LMULMAX1-RV64-NEXT: addi a2, a0, 16
-; LMULMAX1-RV64-NEXT: vle64.v v10, (a2)
-; LMULMAX1-RV64-NEXT: vle64.v v11, (a1)
-; LMULMAX1-RV64-NEXT: vsll.vv v9, v10, v9
-; LMULMAX1-RV64-NEXT: vsll.vv v8, v8, v11
-; LMULMAX1-RV64-NEXT: vse64.v v8, (a0)
-; LMULMAX1-RV64-NEXT: vse64.v v9, (a2)
-; LMULMAX1-RV64-NEXT: ret
+; CHECK-LABEL: shl_v4i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; CHECK-NEXT: vle64.v v8, (a0)
+; CHECK-NEXT: vle64.v v10, (a1)
+; CHECK-NEXT: vsll.vv v8, v8, v10
+; CHECK-NEXT: vse64.v v8, (a0)
+; CHECK-NEXT: ret
%a = load <4 x i64>, ptr %x
%b = load <4 x i64>, ptr %y
%c = shl <4 x i64> %a, %b
@@ -4092,45 +2978,15 @@ define void @shl_v4i64(ptr %x, ptr %y) {
}
define void @sdiv_v32i8(ptr %x, ptr %y) {
-; LMULMAX2-LABEL: sdiv_v32i8:
-; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: li a2, 32
-; LMULMAX2-NEXT: vsetvli zero, a2, e8, m2, ta, ma
-; LMULMAX2-NEXT: vle8.v v8, (a0)
-; LMULMAX2-NEXT: vle8.v v10, (a1)
-; LMULMAX2-NEXT: vdiv.vv v8, v8, v10
-; LMULMAX2-NEXT: vse8.v v8, (a0)
-; LMULMAX2-NEXT: ret
-;
-; LMULMAX1-RV32-LABEL: sdiv_v32i8:
-; LMULMAX1-RV32: # %bb.0:
-; LMULMAX1-RV32-NEXT: vsetivli zero, 16, e8, m1, ta, ma
-; LMULMAX1-RV32-NEXT: vle8.v v8, (a0)
-; LMULMAX1-RV32-NEXT: addi a2, a0, 16
-; LMULMAX1-RV32-NEXT: vle8.v v9, (a2)
-; LMULMAX1-RV32-NEXT: addi a3, a1, 16
-; LMULMAX1-RV32-NEXT: vle8.v v10, (a3)
-; LMULMAX1-RV32-NEXT: vle8.v v11, (a1)
-; LMULMAX1-RV32-NEXT: vdiv.vv v9, v9, v10
-; LMULMAX1-RV32-NEXT: vdiv.vv v8, v8, v11
-; LMULMAX1-RV32-NEXT: vse8.v v8, (a0)
-; LMULMAX1-RV32-NEXT: vse8.v v9, (a2)
-; LMULMAX1-RV32-NEXT: ret
-;
-; LMULMAX1-RV64-LABEL: sdiv_v32i8:
-; LMULMAX1-RV64: # %bb.0:
-; LMULMAX1-RV64-NEXT: vsetivli zero, 16, e8, m1, ta, ma
-; LMULMAX1-RV64-NEXT: vle8.v v8, (a0)
-; LMULMAX1-RV64-NEXT: addi a2, a1, 16
-; LMULMAX1-RV64-NEXT: vle8.v v9, (a2)
-; LMULMAX1-RV64-NEXT: addi a2, a0, 16
-; LMULMAX1-RV64-NEXT: vle8.v v10, (a2)
-; LMULMAX1-RV64-NEXT: vle8.v v11, (a1)
-; LMULMAX1-RV64-NEXT: vdiv.vv v9, v10, v9
-; LMULMAX1-RV64-NEXT: vdiv.vv v8, v8, v11
-; LMULMAX1-RV64-NEXT: vse8.v v8, (a0)
-; LMULMAX1-RV64-NEXT: vse8.v v9, (a2)
-; LMULMAX1-RV64-NEXT: ret
+; CHECK-LABEL: sdiv_v32i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a2, 32
+; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, ma
+; CHECK-NEXT: vle8.v v8, (a0)
+; CHECK-NEXT: vle8.v v10, (a1)
+; CHECK-NEXT: vdiv.vv v8, v8, v10
+; CHECK-NEXT: vse8.v v8, (a0)
+; CHECK-NEXT: ret
%a = load <32 x i8>, ptr %x
%b = load <32 x i8>, ptr %y
%c = sdiv <32 x i8> %a, %b
@@ -4139,44 +2995,14 @@ define void @sdiv_v32i8(ptr %x, ptr %y) {
}
define void @sdiv_v16i16(ptr %x, ptr %y) {
-; LMULMAX2-LABEL: sdiv_v16i16:
-; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; LMULMAX2-NEXT: vle16.v v8, (a0)
-; LMULMAX2-NEXT: vle16.v v10, (a1)
-; LMULMAX2-NEXT: vdiv.vv v8, v8, v10
-; LMULMAX2-NEXT: vse16.v v8, (a0)
-; LMULMAX2-NEXT: ret
-;
-; LMULMAX1-RV32-LABEL: sdiv_v16i16:
-; LMULMAX1-RV32: # %bb.0:
-; LMULMAX1-RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; LMULMAX1-RV32-NEXT: vle16.v v8, (a0)
-; LMULMAX1-RV32-NEXT: addi a2, a0, 16
-; LMULMAX1-RV32-NEXT: vle16.v v9, (a2)
-; LMULMAX1-RV32-NEXT: addi a3, a1, 16
-; LMULMAX1-RV32-NEXT: vle16.v v10, (a3)
-; LMULMAX1-RV32-NEXT: vle16.v v11, (a1)
-; LMULMAX1-RV32-NEXT: vdiv.vv v9, v9, v10
-; LMULMAX1-RV32-NEXT: vdiv.vv v8, v8, v11
-; LMULMAX1-RV32-NEXT: vse16.v v8, (a0)
-; LMULMAX1-RV32-NEXT: vse16.v v9, (a2)
-; LMULMAX1-RV32-NEXT: ret
-;
-; LMULMAX1-RV64-LABEL: sdiv_v16i16:
-; LMULMAX1-RV64: # %bb.0:
-; LMULMAX1-RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; LMULMAX1-RV64-NEXT: vle16.v v8, (a0)
-; LMULMAX1-RV64-NEXT: addi a2, a1, 16
-; LMULMAX1-RV64-NEXT: vle16.v v9, (a2)
-; LMULMAX1-RV64-NEXT: addi a2, a0, 16
-; LMULMAX1-RV64-NEXT: vle16.v v10, (a2)
-; LMULMAX1-RV64-NEXT: vle16.v v11, (a1)
-; LMULMAX1-RV64-NEXT: vdiv.vv v9, v10, v9
-; LMULMAX1-RV64-NEXT: vdiv.vv v8, v8, v11
-; LMULMAX1-RV64-NEXT: vse16.v v8, (a0)
-; LMULMAX1-RV64-NEXT: vse16.v v9, (a2)
-; LMULMAX1-RV64-NEXT: ret
+; CHECK-LABEL: sdiv_v16i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; CHECK-NEXT: vle16.v v8, (a0)
+; CHECK-NEXT: vle16.v v10, (a1)
+; CHECK-NEXT: vdiv.vv v8, v8, v10
+; CHECK-NEXT: vse16.v v8, (a0)
+; CHECK-NEXT: ret
%a = load <16 x i16>, ptr %x
%b = load <16 x i16>, ptr %y
%c = sdiv <16 x i16> %a, %b
@@ -4185,44 +3011,14 @@ define void @sdiv_v16i16(ptr %x, ptr %y) {
}
define void @sdiv_v8i32(ptr %x, ptr %y) {
-; LMULMAX2-LABEL: sdiv_v8i32:
-; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; LMULMAX2-NEXT: vle32.v v8, (a0)
-; LMULMAX2-NEXT: vle32.v v10, (a1)
-; LMULMAX2-NEXT: vdiv.vv v8, v8, v10
-; LMULMAX2-NEXT: vse32.v v8, (a0)
-; LMULMAX2-NEXT: ret
-;
-; LMULMAX1-RV32-LABEL: sdiv_v8i32:
-; LMULMAX1-RV32: # %bb.0:
-; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; LMULMAX1-RV32-NEXT: vle32.v v8, (a0)
-; LMULMAX1-RV32-NEXT: addi a2, a0, 16
-; LMULMAX1-RV32-NEXT: vle32.v v9, (a2)
-; LMULMAX1-RV32-NEXT: addi a3, a1, 16
-; LMULMAX1-RV32-NEXT: vle32.v v10, (a3)
-; LMULMAX1-RV32-NEXT: vle32.v v11, (a1)
-; LMULMAX1-RV32-NEXT: vdiv.vv v9, v9, v10
-; LMULMAX1-RV32-NEXT: vdiv.vv v8, v8, v11
-; LMULMAX1-RV32-NEXT: vse32.v v8, (a0)
-; LMULMAX1-RV32-NEXT: vse32.v v9, (a2)
-; LMULMAX1-RV32-NEXT: ret
-;
-; LMULMAX1-RV64-LABEL: sdiv_v8i32:
-; LMULMAX1-RV64: # %bb.0:
-; LMULMAX1-RV64-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; LMULMAX1-RV64-NEXT: vle32.v v8, (a0)
-; LMULMAX1-RV64-NEXT: addi a2, a1, 16
-; LMULMAX1-RV64-NEXT: vle32.v v9, (a2)
-; LMULMAX1-RV64-NEXT: addi a2, a0, 16
-; LMULMAX1-RV64-NEXT: vle32.v v10, (a2)
-; LMULMAX1-RV64-NEXT: vle32.v v11, (a1)
-; LMULMAX1-RV64-NEXT: vdiv.vv v9, v10, v9
-; LMULMAX1-RV64-NEXT: vdiv.vv v8, v8, v11
-; LMULMAX1-RV64-NEXT: vse32.v v8, (a0)
-; LMULMAX1-RV64-NEXT: vse32.v v9, (a2)
-; LMULMAX1-RV64-NEXT: ret
+; CHECK-LABEL: sdiv_v8i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; CHECK-NEXT: vle32.v v8, (a0)
+; CHECK-NEXT: vle32.v v10, (a1)
+; CHECK-NEXT: vdiv.vv v8, v8, v10
+; CHECK-NEXT: vse32.v v8, (a0)
+; CHECK-NEXT: ret
%a = load <8 x i32>, ptr %x
%b = load <8 x i32>, ptr %y
%c = sdiv <8 x i32> %a, %b
@@ -4231,44 +3027,14 @@ define void @sdiv_v8i32(ptr %x, ptr %y) {
}
define void @sdiv_v4i64(ptr %x, ptr %y) {
-; LMULMAX2-LABEL: sdiv_v4i64:
-; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; LMULMAX2-NEXT: vle64.v v8, (a0)
-; LMULMAX2-NEXT: vle64.v v10, (a1)
-; LMULMAX2-NEXT: vdiv.vv v8, v8, v10
-; LMULMAX2-NEXT: vse64.v v8, (a0)
-; LMULMAX2-NEXT: ret
-;
-; LMULMAX1-RV32-LABEL: sdiv_v4i64:
-; LMULMAX1-RV32: # %bb.0:
-; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; LMULMAX1-RV32-NEXT: vle64.v v8, (a0)
-; LMULMAX1-RV32-NEXT: addi a2, a0, 16
-; LMULMAX1-RV32-NEXT: vle64.v v9, (a2)
-; LMULMAX1-RV32-NEXT: addi a3, a1, 16
-; LMULMAX1-RV32-NEXT: vle64.v v10, (a3)
-; LMULMAX1-RV32-NEXT: vle64.v v11, (a1)
-; LMULMAX1-RV32-NEXT: vdiv.vv v9, v9, v10
-; LMULMAX1-RV32-NEXT: vdiv.vv v8, v8, v11
-; LMULMAX1-RV32-NEXT: vse64.v v8, (a0)
-; LMULMAX1-RV32-NEXT: vse64.v v9, (a2)
-; LMULMAX1-RV32-NEXT: ret
-;
-; LMULMAX1-RV64-LABEL: sdiv_v4i64:
-; LMULMAX1-RV64: # %bb.0:
-; LMULMAX1-RV64-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; LMULMAX1-RV64-NEXT: vle64.v v8, (a0)
-; LMULMAX1-RV64-NEXT: addi a2, a1, 16
-; LMULMAX1-RV64-NEXT: vle64.v v9, (a2)
-; LMULMAX1-RV64-NEXT: addi a2, a0, 16
-; LMULMAX1-RV64-NEXT: vle64.v v10, (a2)
-; LMULMAX1-RV64-NEXT: vle64.v v11, (a1)
-; LMULMAX1-RV64-NEXT: vdiv.vv v9, v10, v9
-; LMULMAX1-RV64-NEXT: vdiv.vv v8, v8, v11
-; LMULMAX1-RV64-NEXT: vse64.v v8, (a0)
-; LMULMAX1-RV64-NEXT: vse64.v v9, (a2)
-; LMULMAX1-RV64-NEXT: ret
+; CHECK-LABEL: sdiv_v4i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; CHECK-NEXT: vle64.v v8, (a0)
+; CHECK-NEXT: vle64.v v10, (a1)
+; CHECK-NEXT: vdiv.vv v8, v8, v10
+; CHECK-NEXT: vse64.v v8, (a0)
+; CHECK-NEXT: ret
%a = load <4 x i64>, ptr %x
%b = load <4 x i64>, ptr %y
%c = sdiv <4 x i64> %a, %b
@@ -4277,45 +3043,15 @@ define void @sdiv_v4i64(ptr %x, ptr %y) {
}
define void @srem_v32i8(ptr %x, ptr %y) {
-; LMULMAX2-LABEL: srem_v32i8:
-; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: li a2, 32
-; LMULMAX2-NEXT: vsetvli zero, a2, e8, m2, ta, ma
-; LMULMAX2-NEXT: vle8.v v8, (a0)
-; LMULMAX2-NEXT: vle8.v v10, (a1)
-; LMULMAX2-NEXT: vrem.vv v8, v8, v10
-; LMULMAX2-NEXT: vse8.v v8, (a0)
-; LMULMAX2-NEXT: ret
-;
-; LMULMAX1-RV32-LABEL: srem_v32i8:
-; LMULMAX1-RV32: # %bb.0:
-; LMULMAX1-RV32-NEXT: vsetivli zero, 16, e8, m1, ta, ma
-; LMULMAX1-RV32-NEXT: vle8.v v8, (a0)
-; LMULMAX1-RV32-NEXT: addi a2, a0, 16
-; LMULMAX1-RV32-NEXT: vle8.v v9, (a2)
-; LMULMAX1-RV32-NEXT: addi a3, a1, 16
-; LMULMAX1-RV32-NEXT: vle8.v v10, (a3)
-; LMULMAX1-RV32-NEXT: vle8.v v11, (a1)
-; LMULMAX1-RV32-NEXT: vrem.vv v9, v9, v10
-; LMULMAX1-RV32-NEXT: vrem.vv v8, v8, v11
-; LMULMAX1-RV32-NEXT: vse8.v v8, (a0)
-; LMULMAX1-RV32-NEXT: vse8.v v9, (a2)
-; LMULMAX1-RV32-NEXT: ret
-;
-; LMULMAX1-RV64-LABEL: srem_v32i8:
-; LMULMAX1-RV64: # %bb.0:
-; LMULMAX1-RV64-NEXT: vsetivli zero, 16, e8, m1, ta, ma
-; LMULMAX1-RV64-NEXT: vle8.v v8, (a0)
-; LMULMAX1-RV64-NEXT: addi a2, a1, 16
-; LMULMAX1-RV64-NEXT: vle8.v v9, (a2)
-; LMULMAX1-RV64-NEXT: addi a2, a0, 16
-; LMULMAX1-RV64-NEXT: vle8.v v10, (a2)
-; LMULMAX1-RV64-NEXT: vle8.v v11, (a1)
-; LMULMAX1-RV64-NEXT: vrem.vv v9, v10, v9
-; LMULMAX1-RV64-NEXT: vrem.vv v8, v8, v11
-; LMULMAX1-RV64-NEXT: vse8.v v8, (a0)
-; LMULMAX1-RV64-NEXT: vse8.v v9, (a2)
-; LMULMAX1-RV64-NEXT: ret
+; CHECK-LABEL: srem_v32i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a2, 32
+; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, ma
+; CHECK-NEXT: vle8.v v8, (a0)
+; CHECK-NEXT: vle8.v v10, (a1)
+; CHECK-NEXT: vrem.vv v8, v8, v10
+; CHECK-NEXT: vse8.v v8, (a0)
+; CHECK-NEXT: ret
%a = load <32 x i8>, ptr %x
%b = load <32 x i8>, ptr %y
%c = srem <32 x i8> %a, %b
@@ -4324,44 +3060,14 @@ define void @srem_v32i8(ptr %x, ptr %y) {
}
define void @srem_v16i16(ptr %x, ptr %y) {
-; LMULMAX2-LABEL: srem_v16i16:
-; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; LMULMAX2-NEXT: vle16.v v8, (a0)
-; LMULMAX2-NEXT: vle16.v v10, (a1)
-; LMULMAX2-NEXT: vrem.vv v8, v8, v10
-; LMULMAX2-NEXT: vse16.v v8, (a0)
-; LMULMAX2-NEXT: ret
-;
-; LMULMAX1-RV32-LABEL: srem_v16i16:
-; LMULMAX1-RV32: # %bb.0:
-; LMULMAX1-RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; LMULMAX1-RV32-NEXT: vle16.v v8, (a0)
-; LMULMAX1-RV32-NEXT: addi a2, a0, 16
-; LMULMAX1-RV32-NEXT: vle16.v v9, (a2)
-; LMULMAX1-RV32-NEXT: addi a3, a1, 16
-; LMULMAX1-RV32-NEXT: vle16.v v10, (a3)
-; LMULMAX1-RV32-NEXT: vle16.v v11, (a1)
-; LMULMAX1-RV32-NEXT: vrem.vv v9, v9, v10
-; LMULMAX1-RV32-NEXT: vrem.vv v8, v8, v11
-; LMULMAX1-RV32-NEXT: vse16.v v8, (a0)
-; LMULMAX1-RV32-NEXT: vse16.v v9, (a2)
-; LMULMAX1-RV32-NEXT: ret
-;
-; LMULMAX1-RV64-LABEL: srem_v16i16:
-; LMULMAX1-RV64: # %bb.0:
-; LMULMAX1-RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; LMULMAX1-RV64-NEXT: vle16.v v8, (a0)
-; LMULMAX1-RV64-NEXT: addi a2, a1, 16
-; LMULMAX1-RV64-NEXT: vle16.v v9, (a2)
-; LMULMAX1-RV64-NEXT: addi a2, a0, 16
-; LMULMAX1-RV64-NEXT: vle16.v v10, (a2)
-; LMULMAX1-RV64-NEXT: vle16.v v11, (a1)
-; LMULMAX1-RV64-NEXT: vrem.vv v9, v10, v9
-; LMULMAX1-RV64-NEXT: vrem.vv v8, v8, v11
-; LMULMAX1-RV64-NEXT: vse16.v v8, (a0)
-; LMULMAX1-RV64-NEXT: vse16.v v9, (a2)
-; LMULMAX1-RV64-NEXT: ret
+; CHECK-LABEL: srem_v16i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; CHECK-NEXT: vle16.v v8, (a0)
+; CHECK-NEXT: vle16.v v10, (a1)
+; CHECK-NEXT: vrem.vv v8, v8, v10
+; CHECK-NEXT: vse16.v v8, (a0)
+; CHECK-NEXT: ret
%a = load <16 x i16>, ptr %x
%b = load <16 x i16>, ptr %y
%c = srem <16 x i16> %a, %b
@@ -4370,44 +3076,14 @@ define void @srem_v16i16(ptr %x, ptr %y) {
}
define void @srem_v8i32(ptr %x, ptr %y) {
-; LMULMAX2-LABEL: srem_v8i32:
-; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; LMULMAX2-NEXT: vle32.v v8, (a0)
-; LMULMAX2-NEXT: vle32.v v10, (a1)
-; LMULMAX2-NEXT: vrem.vv v8, v8, v10
-; LMULMAX2-NEXT: vse32.v v8, (a0)
-; LMULMAX2-NEXT: ret
-;
-; LMULMAX1-RV32-LABEL: srem_v8i32:
-; LMULMAX1-RV32: # %bb.0:
-; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; LMULMAX1-RV32-NEXT: vle32.v v8, (a0)
-; LMULMAX1-RV32-NEXT: addi a2, a0, 16
-; LMULMAX1-RV32-NEXT: vle32.v v9, (a2)
-; LMULMAX1-RV32-NEXT: addi a3, a1, 16
-; LMULMAX1-RV32-NEXT: vle32.v v10, (a3)
-; LMULMAX1-RV32-NEXT: vle32.v v11, (a1)
-; LMULMAX1-RV32-NEXT: vrem.vv v9, v9, v10
-; LMULMAX1-RV32-NEXT: vrem.vv v8, v8, v11
-; LMULMAX1-RV32-NEXT: vse32.v v8, (a0)
-; LMULMAX1-RV32-NEXT: vse32.v v9, (a2)
-; LMULMAX1-RV32-NEXT: ret
-;
-; LMULMAX1-RV64-LABEL: srem_v8i32:
-; LMULMAX1-RV64: # %bb.0:
-; LMULMAX1-RV64-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; LMULMAX1-RV64-NEXT: vle32.v v8, (a0)
-; LMULMAX1-RV64-NEXT: addi a2, a1, 16
-; LMULMAX1-RV64-NEXT: vle32.v v9, (a2)
-; LMULMAX1-RV64-NEXT: addi a2, a0, 16
-; LMULMAX1-RV64-NEXT: vle32.v v10, (a2)
-; LMULMAX1-RV64-NEXT: vle32.v v11, (a1)
-; LMULMAX1-RV64-NEXT: vrem.vv v9, v10, v9
-; LMULMAX1-RV64-NEXT: vrem.vv v8, v8, v11
-; LMULMAX1-RV64-NEXT: vse32.v v8, (a0)
-; LMULMAX1-RV64-NEXT: vse32.v v9, (a2)
-; LMULMAX1-RV64-NEXT: ret
+; CHECK-LABEL: srem_v8i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; CHECK-NEXT: vle32.v v8, (a0)
+; CHECK-NEXT: vle32.v v10, (a1)
+; CHECK-NEXT: vrem.vv v8, v8, v10
+; CHECK-NEXT: vse32.v v8, (a0)
+; CHECK-NEXT: ret
%a = load <8 x i32>, ptr %x
%b = load <8 x i32>, ptr %y
%c = srem <8 x i32> %a, %b
@@ -4416,44 +3092,14 @@ define void @srem_v8i32(ptr %x, ptr %y) {
}
define void @srem_v4i64(ptr %x, ptr %y) {
-; LMULMAX2-LABEL: srem_v4i64:
-; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; LMULMAX2-NEXT: vle64.v v8, (a0)
-; LMULMAX2-NEXT: vle64.v v10, (a1)
-; LMULMAX2-NEXT: vrem.vv v8, v8, v10
-; LMULMAX2-NEXT: vse64.v v8, (a0)
-; LMULMAX2-NEXT: ret
-;
-; LMULMAX1-RV32-LABEL: srem_v4i64:
-; LMULMAX1-RV32: # %bb.0:
-; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; LMULMAX1-RV32-NEXT: vle64.v v8, (a0)
-; LMULMAX1-RV32-NEXT: addi a2, a0, 16
-; LMULMAX1-RV32-NEXT: vle64.v v9, (a2)
-; LMULMAX1-RV32-NEXT: addi a3, a1, 16
-; LMULMAX1-RV32-NEXT: vle64.v v10, (a3)
-; LMULMAX1-RV32-NEXT: vle64.v v11, (a1)
-; LMULMAX1-RV32-NEXT: vrem.vv v9, v9, v10
-; LMULMAX1-RV32-NEXT: vrem.vv v8, v8, v11
-; LMULMAX1-RV32-NEXT: vse64.v v8, (a0)
-; LMULMAX1-RV32-NEXT: vse64.v v9, (a2)
-; LMULMAX1-RV32-NEXT: ret
-;
-; LMULMAX1-RV64-LABEL: srem_v4i64:
-; LMULMAX1-RV64: # %bb.0:
-; LMULMAX1-RV64-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; LMULMAX1-RV64-NEXT: vle64.v v8, (a0)
-; LMULMAX1-RV64-NEXT: addi a2, a1, 16
-; LMULMAX1-RV64-NEXT: vle64.v v9, (a2)
-; LMULMAX1-RV64-NEXT: addi a2, a0, 16
-; LMULMAX1-RV64-NEXT: vle64.v v10, (a2)
-; LMULMAX1-RV64-NEXT: vle64.v v11, (a1)
-; LMULMAX1-RV64-NEXT: vrem.vv v9, v10, v9
-; LMULMAX1-RV64-NEXT: vrem.vv v8, v8, v11
-; LMULMAX1-RV64-NEXT: vse64.v v8, (a0)
-; LMULMAX1-RV64-NEXT: vse64.v v9, (a2)
-; LMULMAX1-RV64-NEXT: ret
+; CHECK-LABEL: srem_v4i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; CHECK-NEXT: vle64.v v8, (a0)
+; CHECK-NEXT: vle64.v v10, (a1)
+; CHECK-NEXT: vrem.vv v8, v8, v10
+; CHECK-NEXT: vse64.v v8, (a0)
+; CHECK-NEXT: ret
%a = load <4 x i64>, ptr %x
%b = load <4 x i64>, ptr %y
%c = srem <4 x i64> %a, %b
@@ -4462,45 +3108,15 @@ define void @srem_v4i64(ptr %x, ptr %y) {
}
define void @udiv_v32i8(ptr %x, ptr %y) {
-; LMULMAX2-LABEL: udiv_v32i8:
-; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: li a2, 32
-; LMULMAX2-NEXT: vsetvli zero, a2, e8, m2, ta, ma
-; LMULMAX2-NEXT: vle8.v v8, (a0)
-; LMULMAX2-NEXT: vle8.v v10, (a1)
-; LMULMAX2-NEXT: vdivu.vv v8, v8, v10
-; LMULMAX2-NEXT: vse8.v v8, (a0)
-; LMULMAX2-NEXT: ret
-;
-; LMULMAX1-RV32-LABEL: udiv_v32i8:
-; LMULMAX1-RV32: # %bb.0:
-; LMULMAX1-RV32-NEXT: vsetivli zero, 16, e8, m1, ta, ma
-; LMULMAX1-RV32-NEXT: vle8.v v8, (a0)
-; LMULMAX1-RV32-NEXT: addi a2, a0, 16
-; LMULMAX1-RV32-NEXT: vle8.v v9, (a2)
-; LMULMAX1-RV32-NEXT: addi a3, a1, 16
-; LMULMAX1-RV32-NEXT: vle8.v v10, (a3)
-; LMULMAX1-RV32-NEXT: vle8.v v11, (a1)
-; LMULMAX1-RV32-NEXT: vdivu.vv v9, v9, v10
-; LMULMAX1-RV32-NEXT: vdivu.vv v8, v8, v11
-; LMULMAX1-RV32-NEXT: vse8.v v8, (a0)
-; LMULMAX1-RV32-NEXT: vse8.v v9, (a2)
-; LMULMAX1-RV32-NEXT: ret
-;
-; LMULMAX1-RV64-LABEL: udiv_v32i8:
-; LMULMAX1-RV64: # %bb.0:
-; LMULMAX1-RV64-NEXT: vsetivli zero, 16, e8, m1, ta, ma
-; LMULMAX1-RV64-NEXT: vle8.v v8, (a0)
-; LMULMAX1-RV64-NEXT: addi a2, a1, 16
-; LMULMAX1-RV64-NEXT: vle8.v v9, (a2)
-; LMULMAX1-RV64-NEXT: addi a2, a0, 16
-; LMULMAX1-RV64-NEXT: vle8.v v10, (a2)
-; LMULMAX1-RV64-NEXT: vle8.v v11, (a1)
-; LMULMAX1-RV64-NEXT: vdivu.vv v9, v10, v9
-; LMULMAX1-RV64-NEXT: vdivu.vv v8, v8, v11
-; LMULMAX1-RV64-NEXT: vse8.v v8, (a0)
-; LMULMAX1-RV64-NEXT: vse8.v v9, (a2)
-; LMULMAX1-RV64-NEXT: ret
+; CHECK-LABEL: udiv_v32i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a2, 32
+; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, ma
+; CHECK-NEXT: vle8.v v8, (a0)
+; CHECK-NEXT: vle8.v v10, (a1)
+; CHECK-NEXT: vdivu.vv v8, v8, v10
+; CHECK-NEXT: vse8.v v8, (a0)
+; CHECK-NEXT: ret
%a = load <32 x i8>, ptr %x
%b = load <32 x i8>, ptr %y
%c = udiv <32 x i8> %a, %b
@@ -4509,44 +3125,14 @@ define void @udiv_v32i8(ptr %x, ptr %y) {
}
define void @udiv_v16i16(ptr %x, ptr %y) {
-; LMULMAX2-LABEL: udiv_v16i16:
-; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; LMULMAX2-NEXT: vle16.v v8, (a0)
-; LMULMAX2-NEXT: vle16.v v10, (a1)
-; LMULMAX2-NEXT: vdivu.vv v8, v8, v10
-; LMULMAX2-NEXT: vse16.v v8, (a0)
-; LMULMAX2-NEXT: ret
-;
-; LMULMAX1-RV32-LABEL: udiv_v16i16:
-; LMULMAX1-RV32: # %bb.0:
-; LMULMAX1-RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; LMULMAX1-RV32-NEXT: vle16.v v8, (a0)
-; LMULMAX1-RV32-NEXT: addi a2, a0, 16
-; LMULMAX1-RV32-NEXT: vle16.v v9, (a2)
-; LMULMAX1-RV32-NEXT: addi a3, a1, 16
-; LMULMAX1-RV32-NEXT: vle16.v v10, (a3)
-; LMULMAX1-RV32-NEXT: vle16.v v11, (a1)
-; LMULMAX1-RV32-NEXT: vdivu.vv v9, v9, v10
-; LMULMAX1-RV32-NEXT: vdivu.vv v8, v8, v11
-; LMULMAX1-RV32-NEXT: vse16.v v8, (a0)
-; LMULMAX1-RV32-NEXT: vse16.v v9, (a2)
-; LMULMAX1-RV32-NEXT: ret
-;
-; LMULMAX1-RV64-LABEL: udiv_v16i16:
-; LMULMAX1-RV64: # %bb.0:
-; LMULMAX1-RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; LMULMAX1-RV64-NEXT: vle16.v v8, (a0)
-; LMULMAX1-RV64-NEXT: addi a2, a1, 16
-; LMULMAX1-RV64-NEXT: vle16.v v9, (a2)
-; LMULMAX1-RV64-NEXT: addi a2, a0, 16
-; LMULMAX1-RV64-NEXT: vle16.v v10, (a2)
-; LMULMAX1-RV64-NEXT: vle16.v v11, (a1)
-; LMULMAX1-RV64-NEXT: vdivu.vv v9, v10, v9
-; LMULMAX1-RV64-NEXT: vdivu.vv v8, v8, v11
-; LMULMAX1-RV64-NEXT: vse16.v v8, (a0)
-; LMULMAX1-RV64-NEXT: vse16.v v9, (a2)
-; LMULMAX1-RV64-NEXT: ret
+; CHECK-LABEL: udiv_v16i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; CHECK-NEXT: vle16.v v8, (a0)
+; CHECK-NEXT: vle16.v v10, (a1)
+; CHECK-NEXT: vdivu.vv v8, v8, v10
+; CHECK-NEXT: vse16.v v8, (a0)
+; CHECK-NEXT: ret
%a = load <16 x i16>, ptr %x
%b = load <16 x i16>, ptr %y
%c = udiv <16 x i16> %a, %b
@@ -4555,44 +3141,14 @@ define void @udiv_v16i16(ptr %x, ptr %y) {
}
define void @udiv_v8i32(ptr %x, ptr %y) {
-; LMULMAX2-LABEL: udiv_v8i32:
-; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; LMULMAX2-NEXT: vle32.v v8, (a0)
-; LMULMAX2-NEXT: vle32.v v10, (a1)
-; LMULMAX2-NEXT: vdivu.vv v8, v8, v10
-; LMULMAX2-NEXT: vse32.v v8, (a0)
-; LMULMAX2-NEXT: ret
-;
-; LMULMAX1-RV32-LABEL: udiv_v8i32:
-; LMULMAX1-RV32: # %bb.0:
-; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; LMULMAX1-RV32-NEXT: vle32.v v8, (a0)
-; LMULMAX1-RV32-NEXT: addi a2, a0, 16
-; LMULMAX1-RV32-NEXT: vle32.v v9, (a2)
-; LMULMAX1-RV32-NEXT: addi a3, a1, 16
-; LMULMAX1-RV32-NEXT: vle32.v v10, (a3)
-; LMULMAX1-RV32-NEXT: vle32.v v11, (a1)
-; LMULMAX1-RV32-NEXT: vdivu.vv v9, v9, v10
-; LMULMAX1-RV32-NEXT: vdivu.vv v8, v8, v11
-; LMULMAX1-RV32-NEXT: vse32.v v8, (a0)
-; LMULMAX1-RV32-NEXT: vse32.v v9, (a2)
-; LMULMAX1-RV32-NEXT: ret
-;
-; LMULMAX1-RV64-LABEL: udiv_v8i32:
-; LMULMAX1-RV64: # %bb.0:
-; LMULMAX1-RV64-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; LMULMAX1-RV64-NEXT: vle32.v v8, (a0)
-; LMULMAX1-RV64-NEXT: addi a2, a1, 16
-; LMULMAX1-RV64-NEXT: vle32.v v9, (a2)
-; LMULMAX1-RV64-NEXT: addi a2, a0, 16
-; LMULMAX1-RV64-NEXT: vle32.v v10, (a2)
-; LMULMAX1-RV64-NEXT: vle32.v v11, (a1)
-; LMULMAX1-RV64-NEXT: vdivu.vv v9, v10, v9
-; LMULMAX1-RV64-NEXT: vdivu.vv v8, v8, v11
-; LMULMAX1-RV64-NEXT: vse32.v v8, (a0)
-; LMULMAX1-RV64-NEXT: vse32.v v9, (a2)
-; LMULMAX1-RV64-NEXT: ret
+; CHECK-LABEL: udiv_v8i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; CHECK-NEXT: vle32.v v8, (a0)
+; CHECK-NEXT: vle32.v v10, (a1)
+; CHECK-NEXT: vdivu.vv v8, v8, v10
+; CHECK-NEXT: vse32.v v8, (a0)
+; CHECK-NEXT: ret
%a = load <8 x i32>, ptr %x
%b = load <8 x i32>, ptr %y
%c = udiv <8 x i32> %a, %b
@@ -4601,44 +3157,14 @@ define void @udiv_v8i32(ptr %x, ptr %y) {
}
define void @udiv_v4i64(ptr %x, ptr %y) {
-; LMULMAX2-LABEL: udiv_v4i64:
-; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; LMULMAX2-NEXT: vle64.v v8, (a0)
-; LMULMAX2-NEXT: vle64.v v10, (a1)
-; LMULMAX2-NEXT: vdivu.vv v8, v8, v10
-; LMULMAX2-NEXT: vse64.v v8, (a0)
-; LMULMAX2-NEXT: ret
-;
-; LMULMAX1-RV32-LABEL: udiv_v4i64:
-; LMULMAX1-RV32: # %bb.0:
-; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; LMULMAX1-RV32-NEXT: vle64.v v8, (a0)
-; LMULMAX1-RV32-NEXT: addi a2, a0, 16
-; LMULMAX1-RV32-NEXT: vle64.v v9, (a2)
-; LMULMAX1-RV32-NEXT: addi a3, a1, 16
-; LMULMAX1-RV32-NEXT: vle64.v v10, (a3)
-; LMULMAX1-RV32-NEXT: vle64.v v11, (a1)
-; LMULMAX1-RV32-NEXT: vdivu.vv v9, v9, v10
-; LMULMAX1-RV32-NEXT: vdivu.vv v8, v8, v11
-; LMULMAX1-RV32-NEXT: vse64.v v8, (a0)
-; LMULMAX1-RV32-NEXT: vse64.v v9, (a2)
-; LMULMAX1-RV32-NEXT: ret
-;
-; LMULMAX1-RV64-LABEL: udiv_v4i64:
-; LMULMAX1-RV64: # %bb.0:
-; LMULMAX1-RV64-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; LMULMAX1-RV64-NEXT: vle64.v v8, (a0)
-; LMULMAX1-RV64-NEXT: addi a2, a1, 16
-; LMULMAX1-RV64-NEXT: vle64.v v9, (a2)
-; LMULMAX1-RV64-NEXT: addi a2, a0, 16
-; LMULMAX1-RV64-NEXT: vle64.v v10, (a2)
-; LMULMAX1-RV64-NEXT: vle64.v v11, (a1)
-; LMULMAX1-RV64-NEXT: vdivu.vv v9, v10, v9
-; LMULMAX1-RV64-NEXT: vdivu.vv v8, v8, v11
-; LMULMAX1-RV64-NEXT: vse64.v v8, (a0)
-; LMULMAX1-RV64-NEXT: vse64.v v9, (a2)
-; LMULMAX1-RV64-NEXT: ret
+; CHECK-LABEL: udiv_v4i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; CHECK-NEXT: vle64.v v8, (a0)
+; CHECK-NEXT: vle64.v v10, (a1)
+; CHECK-NEXT: vdivu.vv v8, v8, v10
+; CHECK-NEXT: vse64.v v8, (a0)
+; CHECK-NEXT: ret
%a = load <4 x i64>, ptr %x
%b = load <4 x i64>, ptr %y
%c = udiv <4 x i64> %a, %b
@@ -4647,45 +3173,15 @@ define void @udiv_v4i64(ptr %x, ptr %y) {
}
define void @urem_v32i8(ptr %x, ptr %y) {
-; LMULMAX2-LABEL: urem_v32i8:
-; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: li a2, 32
-; LMULMAX2-NEXT: vsetvli zero, a2, e8, m2, ta, ma
-; LMULMAX2-NEXT: vle8.v v8, (a0)
-; LMULMAX2-NEXT: vle8.v v10, (a1)
-; LMULMAX2-NEXT: vremu.vv v8, v8, v10
-; LMULMAX2-NEXT: vse8.v v8, (a0)
-; LMULMAX2-NEXT: ret
-;
-; LMULMAX1-RV32-LABEL: urem_v32i8:
-; LMULMAX1-RV32: # %bb.0:
-; LMULMAX1-RV32-NEXT: vsetivli zero, 16, e8, m1, ta, ma
-; LMULMAX1-RV32-NEXT: vle8.v v8, (a0)
-; LMULMAX1-RV32-NEXT: addi a2, a0, 16
-; LMULMAX1-RV32-NEXT: vle8.v v9, (a2)
-; LMULMAX1-RV32-NEXT: addi a3, a1, 16
-; LMULMAX1-RV32-NEXT: vle8.v v10, (a3)
-; LMULMAX1-RV32-NEXT: vle8.v v11, (a1)
-; LMULMAX1-RV32-NEXT: vremu.vv v9, v9, v10
-; LMULMAX1-RV32-NEXT: vremu.vv v8, v8, v11
-; LMULMAX1-RV32-NEXT: vse8.v v8, (a0)
-; LMULMAX1-RV32-NEXT: vse8.v v9, (a2)
-; LMULMAX1-RV32-NEXT: ret
-;
-; LMULMAX1-RV64-LABEL: urem_v32i8:
-; LMULMAX1-RV64: # %bb.0:
-; LMULMAX1-RV64-NEXT: vsetivli zero, 16, e8, m1, ta, ma
-; LMULMAX1-RV64-NEXT: vle8.v v8, (a0)
-; LMULMAX1-RV64-NEXT: addi a2, a1, 16
-; LMULMAX1-RV64-NEXT: vle8.v v9, (a2)
-; LMULMAX1-RV64-NEXT: addi a2, a0, 16
-; LMULMAX1-RV64-NEXT: vle8.v v10, (a2)
-; LMULMAX1-RV64-NEXT: vle8.v v11, (a1)
-; LMULMAX1-RV64-NEXT: vremu.vv v9, v10, v9
-; LMULMAX1-RV64-NEXT: vremu.vv v8, v8, v11
-; LMULMAX1-RV64-NEXT: vse8.v v8, (a0)
-; LMULMAX1-RV64-NEXT: vse8.v v9, (a2)
-; LMULMAX1-RV64-NEXT: ret
+; CHECK-LABEL: urem_v32i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a2, 32
+; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, ma
+; CHECK-NEXT: vle8.v v8, (a0)
+; CHECK-NEXT: vle8.v v10, (a1)
+; CHECK-NEXT: vremu.vv v8, v8, v10
+; CHECK-NEXT: vse8.v v8, (a0)
+; CHECK-NEXT: ret
%a = load <32 x i8>, ptr %x
%b = load <32 x i8>, ptr %y
%c = urem <32 x i8> %a, %b
@@ -4694,44 +3190,14 @@ define void @urem_v32i8(ptr %x, ptr %y) {
}
define void @urem_v16i16(ptr %x, ptr %y) {
-; LMULMAX2-LABEL: urem_v16i16:
-; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; LMULMAX2-NEXT: vle16.v v8, (a0)
-; LMULMAX2-NEXT: vle16.v v10, (a1)
-; LMULMAX2-NEXT: vremu.vv v8, v8, v10
-; LMULMAX2-NEXT: vse16.v v8, (a0)
-; LMULMAX2-NEXT: ret
-;
-; LMULMAX1-RV32-LABEL: urem_v16i16:
-; LMULMAX1-RV32: # %bb.0:
-; LMULMAX1-RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; LMULMAX1-RV32-NEXT: vle16.v v8, (a0)
-; LMULMAX1-RV32-NEXT: addi a2, a0, 16
-; LMULMAX1-RV32-NEXT: vle16.v v9, (a2)
-; LMULMAX1-RV32-NEXT: addi a3, a1, 16
-; LMULMAX1-RV32-NEXT: vle16.v v10, (a3)
-; LMULMAX1-RV32-NEXT: vle16.v v11, (a1)
-; LMULMAX1-RV32-NEXT: vremu.vv v9, v9, v10
-; LMULMAX1-RV32-NEXT: vremu.vv v8, v8, v11
-; LMULMAX1-RV32-NEXT: vse16.v v8, (a0)
-; LMULMAX1-RV32-NEXT: vse16.v v9, (a2)
-; LMULMAX1-RV32-NEXT: ret
-;
-; LMULMAX1-RV64-LABEL: urem_v16i16:
-; LMULMAX1-RV64: # %bb.0:
-; LMULMAX1-RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; LMULMAX1-RV64-NEXT: vle16.v v8, (a0)
-; LMULMAX1-RV64-NEXT: addi a2, a1, 16
-; LMULMAX1-RV64-NEXT: vle16.v v9, (a2)
-; LMULMAX1-RV64-NEXT: addi a2, a0, 16
-; LMULMAX1-RV64-NEXT: vle16.v v10, (a2)
-; LMULMAX1-RV64-NEXT: vle16.v v11, (a1)
-; LMULMAX1-RV64-NEXT: vremu.vv v9, v10, v9
-; LMULMAX1-RV64-NEXT: vremu.vv v8, v8, v11
-; LMULMAX1-RV64-NEXT: vse16.v v8, (a0)
-; LMULMAX1-RV64-NEXT: vse16.v v9, (a2)
-; LMULMAX1-RV64-NEXT: ret
+; CHECK-LABEL: urem_v16i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; CHECK-NEXT: vle16.v v8, (a0)
+; CHECK-NEXT: vle16.v v10, (a1)
+; CHECK-NEXT: vremu.vv v8, v8, v10
+; CHECK-NEXT: vse16.v v8, (a0)
+; CHECK-NEXT: ret
%a = load <16 x i16>, ptr %x
%b = load <16 x i16>, ptr %y
%c = urem <16 x i16> %a, %b
@@ -4740,44 +3206,14 @@ define void @urem_v16i16(ptr %x, ptr %y) {
}
define void @urem_v8i32(ptr %x, ptr %y) {
-; LMULMAX2-LABEL: urem_v8i32:
-; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; LMULMAX2-NEXT: vle32.v v8, (a0)
-; LMULMAX2-NEXT: vle32.v v10, (a1)
-; LMULMAX2-NEXT: vremu.vv v8, v8, v10
-; LMULMAX2-NEXT: vse32.v v8, (a0)
-; LMULMAX2-NEXT: ret
-;
-; LMULMAX1-RV32-LABEL: urem_v8i32:
-; LMULMAX1-RV32: # %bb.0:
-; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; LMULMAX1-RV32-NEXT: vle32.v v8, (a0)
-; LMULMAX1-RV32-NEXT: addi a2, a0, 16
-; LMULMAX1-RV32-NEXT: vle32.v v9, (a2)
-; LMULMAX1-RV32-NEXT: addi a3, a1, 16
-; LMULMAX1-RV32-NEXT: vle32.v v10, (a3)
-; LMULMAX1-RV32-NEXT: vle32.v v11, (a1)
-; LMULMAX1-RV32-NEXT: vremu.vv v9, v9, v10
-; LMULMAX1-RV32-NEXT: vremu.vv v8, v8, v11
-; LMULMAX1-RV32-NEXT: vse32.v v8, (a0)
-; LMULMAX1-RV32-NEXT: vse32.v v9, (a2)
-; LMULMAX1-RV32-NEXT: ret
-;
-; LMULMAX1-RV64-LABEL: urem_v8i32:
-; LMULMAX1-RV64: # %bb.0:
-; LMULMAX1-RV64-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; LMULMAX1-RV64-NEXT: vle32.v v8, (a0)
-; LMULMAX1-RV64-NEXT: addi a2, a1, 16
-; LMULMAX1-RV64-NEXT: vle32.v v9, (a2)
-; LMULMAX1-RV64-NEXT: addi a2, a0, 16
-; LMULMAX1-RV64-NEXT: vle32.v v10, (a2)
-; LMULMAX1-RV64-NEXT: vle32.v v11, (a1)
-; LMULMAX1-RV64-NEXT: vremu.vv v9, v10, v9
-; LMULMAX1-RV64-NEXT: vremu.vv v8, v8, v11
-; LMULMAX1-RV64-NEXT: vse32.v v8, (a0)
-; LMULMAX1-RV64-NEXT: vse32.v v9, (a2)
-; LMULMAX1-RV64-NEXT: ret
+; CHECK-LABEL: urem_v8i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; CHECK-NEXT: vle32.v v8, (a0)
+; CHECK-NEXT: vle32.v v10, (a1)
+; CHECK-NEXT: vremu.vv v8, v8, v10
+; CHECK-NEXT: vse32.v v8, (a0)
+; CHECK-NEXT: ret
%a = load <8 x i32>, ptr %x
%b = load <8 x i32>, ptr %y
%c = urem <8 x i32> %a, %b
@@ -4786,44 +3222,14 @@ define void @urem_v8i32(ptr %x, ptr %y) {
}
define void @urem_v4i64(ptr %x, ptr %y) {
-; LMULMAX2-LABEL: urem_v4i64:
-; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; LMULMAX2-NEXT: vle64.v v8, (a0)
-; LMULMAX2-NEXT: vle64.v v10, (a1)
-; LMULMAX2-NEXT: vremu.vv v8, v8, v10
-; LMULMAX2-NEXT: vse64.v v8, (a0)
-; LMULMAX2-NEXT: ret
-;
-; LMULMAX1-RV32-LABEL: urem_v4i64:
-; LMULMAX1-RV32: # %bb.0:
-; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; LMULMAX1-RV32-NEXT: vle64.v v8, (a0)
-; LMULMAX1-RV32-NEXT: addi a2, a0, 16
-; LMULMAX1-RV32-NEXT: vle64.v v9, (a2)
-; LMULMAX1-RV32-NEXT: addi a3, a1, 16
-; LMULMAX1-RV32-NEXT: vle64.v v10, (a3)
-; LMULMAX1-RV32-NEXT: vle64.v v11, (a1)
-; LMULMAX1-RV32-NEXT: vremu.vv v9, v9, v10
-; LMULMAX1-RV32-NEXT: vremu.vv v8, v8, v11
-; LMULMAX1-RV32-NEXT: vse64.v v8, (a0)
-; LMULMAX1-RV32-NEXT: vse64.v v9, (a2)
-; LMULMAX1-RV32-NEXT: ret
-;
-; LMULMAX1-RV64-LABEL: urem_v4i64:
-; LMULMAX1-RV64: # %bb.0:
-; LMULMAX1-RV64-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; LMULMAX1-RV64-NEXT: vle64.v v8, (a0)
-; LMULMAX1-RV64-NEXT: addi a2, a1, 16
-; LMULMAX1-RV64-NEXT: vle64.v v9, (a2)
-; LMULMAX1-RV64-NEXT: addi a2, a0, 16
-; LMULMAX1-RV64-NEXT: vle64.v v10, (a2)
-; LMULMAX1-RV64-NEXT: vle64.v v11, (a1)
-; LMULMAX1-RV64-NEXT: vremu.vv v9, v10, v9
-; LMULMAX1-RV64-NEXT: vremu.vv v8, v8, v11
-; LMULMAX1-RV64-NEXT: vse64.v v8, (a0)
-; LMULMAX1-RV64-NEXT: vse64.v v9, (a2)
-; LMULMAX1-RV64-NEXT: ret
+; CHECK-LABEL: urem_v4i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; CHECK-NEXT: vle64.v v8, (a0)
+; CHECK-NEXT: vle64.v v10, (a1)
+; CHECK-NEXT: vremu.vv v8, v8, v10
+; CHECK-NEXT: vse64.v v8, (a0)
+; CHECK-NEXT: ret
%a = load <4 x i64>, ptr %x
%b = load <4 x i64>, ptr %y
%c = urem <4 x i64> %a, %b
@@ -4832,29 +3238,14 @@ define void @urem_v4i64(ptr %x, ptr %y) {
}
define void @extract_v4i64(ptr %x, ptr %y) {
-; LMULMAX2-LABEL: extract_v4i64:
-; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; LMULMAX2-NEXT: vle64.v v8, (a0)
-; LMULMAX2-NEXT: vle64.v v10, (a1)
-; LMULMAX2-NEXT: vadd.vv v8, v8, v10
-; LMULMAX2-NEXT: vse64.v v8, (a0)
-; LMULMAX2-NEXT: ret
-;
-; LMULMAX1-LABEL: extract_v4i64:
-; LMULMAX1: # %bb.0:
-; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; LMULMAX1-NEXT: vle64.v v8, (a0)
-; LMULMAX1-NEXT: addi a2, a0, 16
-; LMULMAX1-NEXT: vle64.v v9, (a2)
-; LMULMAX1-NEXT: vle64.v v10, (a1)
-; LMULMAX1-NEXT: addi a1, a1, 16
-; LMULMAX1-NEXT: vle64.v v11, (a1)
-; LMULMAX1-NEXT: vadd.vv v9, v9, v11
-; LMULMAX1-NEXT: vadd.vv v8, v8, v10
-; LMULMAX1-NEXT: vse64.v v8, (a0)
-; LMULMAX1-NEXT: vse64.v v9, (a2)
-; LMULMAX1-NEXT: ret
+; CHECK-LABEL: extract_v4i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; CHECK-NEXT: vle64.v v8, (a0)
+; CHECK-NEXT: vle64.v v10, (a1)
+; CHECK-NEXT: vadd.vv v8, v8, v10
+; CHECK-NEXT: vse64.v v8, (a0)
+; CHECK-NEXT: ret
%a = load <4 x i64>, ptr %x
%b = load <4 x i64>, ptr %y
br label %"compute"
@@ -4865,70 +3256,55 @@ define void @extract_v4i64(ptr %x, ptr %y) {
}
define void @mulhu_v32i8(ptr %x) {
-; LMULMAX2-LABEL: mulhu_v32i8:
-; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: li a1, 32
-; LMULMAX2-NEXT: vsetvli zero, a1, e8, m2, ta, ma
-; LMULMAX2-NEXT: vle8.v v8, (a0)
-; LMULMAX2-NEXT: vmv.v.i v10, 0
-; LMULMAX2-NEXT: lui a1, 163907
-; LMULMAX2-NEXT: addi a1, a1, -2044
-; LMULMAX2-NEXT: vsetvli zero, zero, e32, m8, ta, ma
-; LMULMAX2-NEXT: vmv.s.x v0, a1
-; LMULMAX2-NEXT: li a1, -128
-; LMULMAX2-NEXT: vsetvli zero, zero, e8, m2, ta, ma
-; LMULMAX2-NEXT: vmerge.vxm v12, v10, a1, v0
-; LMULMAX2-NEXT: lui a1, 66049
-; LMULMAX2-NEXT: addi a1, a1, 32
-; LMULMAX2-NEXT: vsetvli zero, zero, e32, m8, ta, ma
-; LMULMAX2-NEXT: vmv.s.x v0, a1
-; LMULMAX2-NEXT: vsetvli zero, zero, e8, m2, ta, ma
-; LMULMAX2-NEXT: lui a1, %hi(.LCPI181_0)
-; LMULMAX2-NEXT: addi a1, a1, %lo(.LCPI181_0)
-; LMULMAX2-NEXT: vle8.v v14, (a1)
-; LMULMAX2-NEXT: vmerge.vim v10, v10, 1, v0
-; LMULMAX2-NEXT: vsrl.vv v10, v8, v10
-; LMULMAX2-NEXT: vmulhu.vv v10, v10, v14
-; LMULMAX2-NEXT: vsub.vv v8, v8, v10
-; LMULMAX2-NEXT: vmulhu.vv v8, v8, v12
-; LMULMAX2-NEXT: vadd.vv v8, v8, v10
-; LMULMAX2-NEXT: vmv.v.i v10, 4
-; LMULMAX2-NEXT: lui a1, 8208
-; LMULMAX2-NEXT: addi a1, a1, 513
-; LMULMAX2-NEXT: vsetvli zero, zero, e32, m8, ta, ma
-; LMULMAX2-NEXT: vmv.s.x v0, a1
-; LMULMAX2-NEXT: vsetvli zero, zero, e8, m2, ta, ma
-; LMULMAX2-NEXT: vmerge.vim v10, v10, 1, v0
-; LMULMAX2-NEXT: lui a1, 66785
-; LMULMAX2-NEXT: addi a1, a1, 78
-; LMULMAX2-NEXT: vsetvli zero, zero, e32, m8, ta, ma
-; LMULMAX2-NEXT: vmv.s.x v0, a1
-; LMULMAX2-NEXT: vsetvli zero, zero, e8, m2, ta, ma
-; LMULMAX2-NEXT: vmerge.vim v10, v10, 3, v0
-; LMULMAX2-NEXT: lui a1, 529160
-; LMULMAX2-NEXT: addi a1, a1, 304
-; LMULMAX2-NEXT: vsetvli zero, zero, e32, m8, ta, ma
-; LMULMAX2-NEXT: vmv.s.x v0, a1
-; LMULMAX2-NEXT: vsetvli zero, zero, e8, m2, ta, ma
-; LMULMAX2-NEXT: vmerge.vim v10, v10, 2, v0
-; LMULMAX2-NEXT: vsrl.vv v8, v8, v10
-; LMULMAX2-NEXT: vse8.v v8, (a0)
-; LMULMAX2-NEXT: ret
-;
-; LMULMAX1-LABEL: mulhu_v32i8:
-; LMULMAX1: # %bb.0:
-; LMULMAX1-NEXT: vsetivli zero, 16, e8, m1, ta, ma
-; LMULMAX1-NEXT: addi a1, a0, 16
-; LMULMAX1-NEXT: vle8.v v8, (a1)
-; LMULMAX1-NEXT: lui a2, %hi(.LCPI181_0)
-; LMULMAX1-NEXT: addi a2, a2, %lo(.LCPI181_0)
-; LMULMAX1-NEXT: vle8.v v9, (a2)
-; LMULMAX1-NEXT: vle8.v v10, (a0)
-; LMULMAX1-NEXT: vdivu.vv v8, v8, v9
-; LMULMAX1-NEXT: vdivu.vv v9, v10, v9
-; LMULMAX1-NEXT: vse8.v v9, (a0)
-; LMULMAX1-NEXT: vse8.v v8, (a1)
-; LMULMAX1-NEXT: ret
+; CHECK-LABEL: mulhu_v32i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a1, 32
+; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
+; CHECK-NEXT: vle8.v v8, (a0)
+; CHECK-NEXT: vmv.v.i v10, 0
+; CHECK-NEXT: lui a1, 163907
+; CHECK-NEXT: addi a1, a1, -2044
+; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; CHECK-NEXT: vmv.s.x v0, a1
+; CHECK-NEXT: li a1, -128
+; CHECK-NEXT: vsetvli zero, zero, e8, m2, ta, ma
+; CHECK-NEXT: vmerge.vxm v12, v10, a1, v0
+; CHECK-NEXT: lui a1, 66049
+; CHECK-NEXT: addi a1, a1, 32
+; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; CHECK-NEXT: vmv.s.x v0, a1
+; CHECK-NEXT: vsetvli zero, zero, e8, m2, ta, ma
+; CHECK-NEXT: lui a1, %hi(.LCPI181_0)
+; CHECK-NEXT: addi a1, a1, %lo(.LCPI181_0)
+; CHECK-NEXT: vle8.v v14, (a1)
+; CHECK-NEXT: vmerge.vim v10, v10, 1, v0
+; CHECK-NEXT: vsrl.vv v10, v8, v10
+; CHECK-NEXT: vmulhu.vv v10, v10, v14
+; CHECK-NEXT: vsub.vv v8, v8, v10
+; CHECK-NEXT: vmulhu.vv v8, v8, v12
+; CHECK-NEXT: vadd.vv v8, v8, v10
+; CHECK-NEXT: vmv.v.i v10, 4
+; CHECK-NEXT: lui a1, 8208
+; CHECK-NEXT: addi a1, a1, 513
+; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; CHECK-NEXT: vmv.s.x v0, a1
+; CHECK-NEXT: vsetvli zero, zero, e8, m2, ta, ma
+; CHECK-NEXT: vmerge.vim v10, v10, 1, v0
+; CHECK-NEXT: lui a1, 66785
+; CHECK-NEXT: addi a1, a1, 78
+; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; CHECK-NEXT: vmv.s.x v0, a1
+; CHECK-NEXT: vsetvli zero, zero, e8, m2, ta, ma
+; CHECK-NEXT: vmerge.vim v10, v10, 3, v0
+; CHECK-NEXT: lui a1, 529160
+; CHECK-NEXT: addi a1, a1, 304
+; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; CHECK-NEXT: vmv.s.x v0, a1
+; CHECK-NEXT: vsetvli zero, zero, e8, m2, ta, ma
+; CHECK-NEXT: vmerge.vim v10, v10, 2, v0
+; CHECK-NEXT: vsrl.vv v8, v8, v10
+; CHECK-NEXT: vse8.v v8, (a0)
+; CHECK-NEXT: ret
%a = load <32 x i8>, ptr %x
%b = udiv <32 x i8> %a, <i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25>
store <32 x i8> %b, ptr %x
@@ -4936,93 +3312,78 @@ define void @mulhu_v32i8(ptr %x) {
}
define void @mulhu_v16i16(ptr %x) {
-; LMULMAX2-RV32-LABEL: mulhu_v16i16:
-; LMULMAX2-RV32: # %bb.0:
-; LMULMAX2-RV32-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; LMULMAX2-RV32-NEXT: vle16.v v10, (a0)
-; LMULMAX2-RV32-NEXT: li a1, 257
-; LMULMAX2-RV32-NEXT: vmv.s.x v0, a1
-; LMULMAX2-RV32-NEXT: vmv.v.i v8, 0
-; LMULMAX2-RV32-NEXT: lui a1, 1048568
-; LMULMAX2-RV32-NEXT: vmerge.vxm v12, v8, a1, v0
-; LMULMAX2-RV32-NEXT: lui a1, 4
-; LMULMAX2-RV32-NEXT: addi a1, a1, 64
-; LMULMAX2-RV32-NEXT: vmv.s.x v8, a1
-; LMULMAX2-RV32-NEXT: vsetvli zero, zero, e8, m1, ta, ma
-; LMULMAX2-RV32-NEXT: vmv.v.i v9, 0
-; LMULMAX2-RV32-NEXT: vmv1r.v v0, v8
-; LMULMAX2-RV32-NEXT: vmerge.vim v9, v9, 1, v0
-; LMULMAX2-RV32-NEXT: vsetvli zero, zero, e16, m2, ta, ma
-; LMULMAX2-RV32-NEXT: lui a1, %hi(.LCPI182_0)
-; LMULMAX2-RV32-NEXT: addi a1, a1, %lo(.LCPI182_0)
-; LMULMAX2-RV32-NEXT: vle16.v v14, (a1)
-; LMULMAX2-RV32-NEXT: vsext.vf2 v16, v9
-; LMULMAX2-RV32-NEXT: vsrl.vv v16, v10, v16
-; LMULMAX2-RV32-NEXT: vmulhu.vv v14, v16, v14
-; LMULMAX2-RV32-NEXT: vsub.vv v10, v10, v14
-; LMULMAX2-RV32-NEXT: vmulhu.vv v10, v10, v12
-; LMULMAX2-RV32-NEXT: vadd.vv v10, v10, v14
-; LMULMAX2-RV32-NEXT: lui a1, 2
-; LMULMAX2-RV32-NEXT: addi a1, a1, 289
-; LMULMAX2-RV32-NEXT: vmv.s.x v0, a1
-; LMULMAX2-RV32-NEXT: vsetvli zero, zero, e8, m1, ta, ma
-; LMULMAX2-RV32-NEXT: vmv.v.i v9, 3
-; LMULMAX2-RV32-NEXT: vmerge.vim v9, v9, 2, v0
-; LMULMAX2-RV32-NEXT: vmv1r.v v0, v8
-; LMULMAX2-RV32-NEXT: vmerge.vim v8, v9, 1, v0
-; LMULMAX2-RV32-NEXT: vsetvli zero, zero, e16, m2, ta, ma
-; LMULMAX2-RV32-NEXT: vsext.vf2 v12, v8
-; LMULMAX2-RV32-NEXT: vsrl.vv v8, v10, v12
-; LMULMAX2-RV32-NEXT: vse16.v v8, (a0)
-; LMULMAX2-RV32-NEXT: ret
-;
-; LMULMAX2-RV64-LABEL: mulhu_v16i16:
-; LMULMAX2-RV64: # %bb.0:
-; LMULMAX2-RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; LMULMAX2-RV64-NEXT: vle16.v v8, (a0)
-; LMULMAX2-RV64-NEXT: li a1, 257
-; LMULMAX2-RV64-NEXT: vmv.s.x v0, a1
-; LMULMAX2-RV64-NEXT: vmv.v.i v10, 0
-; LMULMAX2-RV64-NEXT: lui a1, 1048568
-; LMULMAX2-RV64-NEXT: vmerge.vxm v10, v10, a1, v0
-; LMULMAX2-RV64-NEXT: li a1, 1
-; LMULMAX2-RV64-NEXT: slli a1, a1, 48
-; LMULMAX2-RV64-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; LMULMAX2-RV64-NEXT: vmv.v.x v12, a1
-; LMULMAX2-RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; LMULMAX2-RV64-NEXT: lui a1, %hi(.LCPI182_0)
-; LMULMAX2-RV64-NEXT: addi a1, a1, %lo(.LCPI182_0)
-; LMULMAX2-RV64-NEXT: vle16.v v14, (a1)
-; LMULMAX2-RV64-NEXT: vsext.vf2 v16, v12
-; LMULMAX2-RV64-NEXT: vsrl.vv v12, v8, v16
-; LMULMAX2-RV64-NEXT: vmulhu.vv v12, v12, v14
-; LMULMAX2-RV64-NEXT: vsub.vv v8, v8, v12
-; LMULMAX2-RV64-NEXT: vmulhu.vv v8, v8, v10
-; LMULMAX2-RV64-NEXT: vadd.vv v8, v8, v12
-; LMULMAX2-RV64-NEXT: lui a1, %hi(.LCPI182_1)
-; LMULMAX2-RV64-NEXT: addi a1, a1, %lo(.LCPI182_1)
-; LMULMAX2-RV64-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; LMULMAX2-RV64-NEXT: vlse64.v v10, (a1), zero
-; LMULMAX2-RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; LMULMAX2-RV64-NEXT: vsext.vf2 v12, v10
-; LMULMAX2-RV64-NEXT: vsrl.vv v8, v8, v12
-; LMULMAX2-RV64-NEXT: vse16.v v8, (a0)
-; LMULMAX2-RV64-NEXT: ret
+; RV32-LABEL: mulhu_v16i16:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RV32-NEXT: vle16.v v10, (a0)
+; RV32-NEXT: li a1, 257
+; RV32-NEXT: vmv.s.x v0, a1
+; RV32-NEXT: vmv.v.i v8, 0
+; RV32-NEXT: lui a1, 1048568
+; RV32-NEXT: vmerge.vxm v12, v8, a1, v0
+; RV32-NEXT: lui a1, 4
+; RV32-NEXT: addi a1, a1, 64
+; RV32-NEXT: vmv.s.x v8, a1
+; RV32-NEXT: vsetvli zero, zero, e8, m1, ta, ma
+; RV32-NEXT: vmv.v.i v9, 0
+; RV32-NEXT: vmv1r.v v0, v8
+; RV32-NEXT: vmerge.vim v9, v9, 1, v0
+; RV32-NEXT: vsetvli zero, zero, e16, m2, ta, ma
+; RV32-NEXT: lui a1, %hi(.LCPI182_0)
+; RV32-NEXT: addi a1, a1, %lo(.LCPI182_0)
+; RV32-NEXT: vle16.v v14, (a1)
+; RV32-NEXT: vsext.vf2 v16, v9
+; RV32-NEXT: vsrl.vv v16, v10, v16
+; RV32-NEXT: vmulhu.vv v14, v16, v14
+; RV32-NEXT: vsub.vv v10, v10, v14
+; RV32-NEXT: vmulhu.vv v10, v10, v12
+; RV32-NEXT: vadd.vv v10, v10, v14
+; RV32-NEXT: lui a1, 2
+; RV32-NEXT: addi a1, a1, 289
+; RV32-NEXT: vmv.s.x v0, a1
+; RV32-NEXT: vsetvli zero, zero, e8, m1, ta, ma
+; RV32-NEXT: vmv.v.i v9, 3
+; RV32-NEXT: vmerge.vim v9, v9, 2, v0
+; RV32-NEXT: vmv1r.v v0, v8
+; RV32-NEXT: vmerge.vim v8, v9, 1, v0
+; RV32-NEXT: vsetvli zero, zero, e16, m2, ta, ma
+; RV32-NEXT: vsext.vf2 v12, v8
+; RV32-NEXT: vsrl.vv v8, v10, v12
+; RV32-NEXT: vse16.v v8, (a0)
+; RV32-NEXT: ret
;
-; LMULMAX1-LABEL: mulhu_v16i16:
-; LMULMAX1: # %bb.0:
-; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; LMULMAX1-NEXT: addi a1, a0, 16
-; LMULMAX1-NEXT: vle16.v v8, (a1)
-; LMULMAX1-NEXT: lui a2, %hi(.LCPI182_0)
-; LMULMAX1-NEXT: addi a2, a2, %lo(.LCPI182_0)
-; LMULMAX1-NEXT: vle16.v v9, (a2)
-; LMULMAX1-NEXT: vle16.v v10, (a0)
-; LMULMAX1-NEXT: vdivu.vv v8, v8, v9
-; LMULMAX1-NEXT: vdivu.vv v9, v10, v9
-; LMULMAX1-NEXT: vse16.v v9, (a0)
-; LMULMAX1-NEXT: vse16.v v8, (a1)
-; LMULMAX1-NEXT: ret
+; RV64-LABEL: mulhu_v16i16:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RV64-NEXT: vle16.v v8, (a0)
+; RV64-NEXT: li a1, 257
+; RV64-NEXT: vmv.s.x v0, a1
+; RV64-NEXT: vmv.v.i v10, 0
+; RV64-NEXT: lui a1, 1048568
+; RV64-NEXT: vmerge.vxm v10, v10, a1, v0
+; RV64-NEXT: li a1, 1
+; RV64-NEXT: slli a1, a1, 48
+; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; RV64-NEXT: vmv.v.x v12, a1
+; RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RV64-NEXT: lui a1, %hi(.LCPI182_0)
+; RV64-NEXT: addi a1, a1, %lo(.LCPI182_0)
+; RV64-NEXT: vle16.v v14, (a1)
+; RV64-NEXT: vsext.vf2 v16, v12
+; RV64-NEXT: vsrl.vv v12, v8, v16
+; RV64-NEXT: vmulhu.vv v12, v12, v14
+; RV64-NEXT: vsub.vv v8, v8, v12
+; RV64-NEXT: vmulhu.vv v8, v8, v10
+; RV64-NEXT: vadd.vv v8, v8, v12
+; RV64-NEXT: lui a1, %hi(.LCPI182_1)
+; RV64-NEXT: addi a1, a1, %lo(.LCPI182_1)
+; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; RV64-NEXT: vlse64.v v10, (a1), zero
+; RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RV64-NEXT: vsext.vf2 v12, v10
+; RV64-NEXT: vsrl.vv v8, v8, v12
+; RV64-NEXT: vse16.v v8, (a0)
+; RV64-NEXT: ret
%a = load <16 x i16>, ptr %x
%b = udiv <16 x i16> %a, <i16 7, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15, i16 7, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>
store <16 x i16> %b, ptr %x
@@ -5030,80 +3391,31 @@ define void @mulhu_v16i16(ptr %x) {
}
define void @mulhu_v8i32(ptr %x) {
-; LMULMAX2-LABEL: mulhu_v8i32:
-; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; LMULMAX2-NEXT: vle32.v v8, (a0)
-; LMULMAX2-NEXT: li a1, 68
-; LMULMAX2-NEXT: vmv.s.x v0, a1
-; LMULMAX2-NEXT: lui a1, %hi(.LCPI183_0)
-; LMULMAX2-NEXT: addi a1, a1, %lo(.LCPI183_0)
-; LMULMAX2-NEXT: vle32.v v10, (a1)
-; LMULMAX2-NEXT: vmv.v.i v12, 0
-; LMULMAX2-NEXT: lui a1, 524288
-; LMULMAX2-NEXT: vmerge.vxm v12, v12, a1, v0
-; LMULMAX2-NEXT: vmulhu.vv v10, v8, v10
-; LMULMAX2-NEXT: vsub.vv v8, v8, v10
-; LMULMAX2-NEXT: vmulhu.vv v8, v8, v12
-; LMULMAX2-NEXT: vadd.vv v8, v8, v10
-; LMULMAX2-NEXT: lui a1, 4128
-; LMULMAX2-NEXT: addi a1, a1, 514
-; LMULMAX2-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
-; LMULMAX2-NEXT: vmv.v.x v10, a1
-; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; LMULMAX2-NEXT: vsext.vf4 v12, v10
-; LMULMAX2-NEXT: vsrl.vv v8, v8, v12
-; LMULMAX2-NEXT: vse32.v v8, (a0)
-; LMULMAX2-NEXT: ret
-;
-; LMULMAX1-RV32-LABEL: mulhu_v8i32:
-; LMULMAX1-RV32: # %bb.0:
-; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; LMULMAX1-RV32-NEXT: vle32.v v8, (a0)
-; LMULMAX1-RV32-NEXT: addi a1, a0, 16
-; LMULMAX1-RV32-NEXT: vle32.v v9, (a1)
-; LMULMAX1-RV32-NEXT: lui a2, 524288
-; LMULMAX1-RV32-NEXT: vmv.s.x v10, a2
-; LMULMAX1-RV32-NEXT: vmv.v.i v11, 0
-; LMULMAX1-RV32-NEXT: vsetivli zero, 3, e32, m1, tu, ma
-; LMULMAX1-RV32-NEXT: vslideup.vi v11, v10, 2
-; LMULMAX1-RV32-NEXT: lui a2, %hi(.LCPI183_0)
-; LMULMAX1-RV32-NEXT: addi a2, a2, %lo(.LCPI183_0)
-; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; LMULMAX1-RV32-NEXT: vle32.v v10, (a2)
-; LMULMAX1-RV32-NEXT: vmulhu.vv v12, v9, v10
-; LMULMAX1-RV32-NEXT: vsub.vv v9, v9, v12
-; LMULMAX1-RV32-NEXT: vmulhu.vv v9, v9, v11
-; LMULMAX1-RV32-NEXT: vadd.vv v9, v9, v12
-; LMULMAX1-RV32-NEXT: lui a2, 4128
-; LMULMAX1-RV32-NEXT: addi a2, a2, 514
-; LMULMAX1-RV32-NEXT: vmv.s.x v12, a2
-; LMULMAX1-RV32-NEXT: vsext.vf4 v13, v12
-; LMULMAX1-RV32-NEXT: vsrl.vv v9, v9, v13
-; LMULMAX1-RV32-NEXT: vmulhu.vv v10, v8, v10
-; LMULMAX1-RV32-NEXT: vsub.vv v8, v8, v10
-; LMULMAX1-RV32-NEXT: vmulhu.vv v8, v8, v11
-; LMULMAX1-RV32-NEXT: vadd.vv v8, v8, v10
-; LMULMAX1-RV32-NEXT: vsrl.vv v8, v8, v13
-; LMULMAX1-RV32-NEXT: vse32.v v8, (a0)
-; LMULMAX1-RV32-NEXT: vse32.v v9, (a1)
-; LMULMAX1-RV32-NEXT: ret
-;
-; LMULMAX1-RV64-LABEL: mulhu_v8i32:
-; LMULMAX1-RV64: # %bb.0:
-; LMULMAX1-RV64-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; LMULMAX1-RV64-NEXT: vle32.v v8, (a0)
-; LMULMAX1-RV64-NEXT: addi a1, a0, 16
-; LMULMAX1-RV64-NEXT: vle32.v v9, (a1)
-; LMULMAX1-RV64-NEXT: lui a2, 36976
-; LMULMAX1-RV64-NEXT: addi a2, a2, 1541
-; LMULMAX1-RV64-NEXT: vmv.s.x v10, a2
-; LMULMAX1-RV64-NEXT: vsext.vf4 v11, v10
-; LMULMAX1-RV64-NEXT: vdivu.vv v9, v9, v11
-; LMULMAX1-RV64-NEXT: vdivu.vv v8, v8, v11
-; LMULMAX1-RV64-NEXT: vse32.v v8, (a0)
-; LMULMAX1-RV64-NEXT: vse32.v v9, (a1)
-; LMULMAX1-RV64-NEXT: ret
+; CHECK-LABEL: mulhu_v8i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; CHECK-NEXT: vle32.v v8, (a0)
+; CHECK-NEXT: li a1, 68
+; CHECK-NEXT: vmv.s.x v0, a1
+; CHECK-NEXT: lui a1, %hi(.LCPI183_0)
+; CHECK-NEXT: addi a1, a1, %lo(.LCPI183_0)
+; CHECK-NEXT: vle32.v v10, (a1)
+; CHECK-NEXT: vmv.v.i v12, 0
+; CHECK-NEXT: lui a1, 524288
+; CHECK-NEXT: vmerge.vxm v12, v12, a1, v0
+; CHECK-NEXT: vmulhu.vv v10, v8, v10
+; CHECK-NEXT: vsub.vv v8, v8, v10
+; CHECK-NEXT: vmulhu.vv v8, v8, v12
+; CHECK-NEXT: vadd.vv v8, v8, v10
+; CHECK-NEXT: lui a1, 4128
+; CHECK-NEXT: addi a1, a1, 514
+; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
+; CHECK-NEXT: vmv.v.x v10, a1
+; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; CHECK-NEXT: vsext.vf4 v12, v10
+; CHECK-NEXT: vsrl.vv v8, v8, v12
+; CHECK-NEXT: vse32.v v8, (a0)
+; CHECK-NEXT: ret
%a = load <8 x i32>, ptr %x
%b = udiv <8 x i32> %a, <i32 5, i32 6, i32 7, i32 9, i32 5, i32 6, i32 7, i32 9>
store <8 x i32> %b, ptr %x
@@ -5111,131 +3423,61 @@ define void @mulhu_v8i32(ptr %x) {
}
define void @mulhu_v4i64(ptr %x) {
-; LMULMAX2-RV32-LABEL: mulhu_v4i64:
-; LMULMAX2-RV32: # %bb.0:
-; LMULMAX2-RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; LMULMAX2-RV32-NEXT: vle64.v v8, (a0)
-; LMULMAX2-RV32-NEXT: lui a1, %hi(.LCPI184_0)
-; LMULMAX2-RV32-NEXT: addi a1, a1, %lo(.LCPI184_0)
-; LMULMAX2-RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; LMULMAX2-RV32-NEXT: vle32.v v10, (a1)
-; LMULMAX2-RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; LMULMAX2-RV32-NEXT: vmulhu.vv v10, v8, v10
-; LMULMAX2-RV32-NEXT: vsub.vv v8, v8, v10
-; LMULMAX2-RV32-NEXT: lui a1, 524288
-; LMULMAX2-RV32-NEXT: vmv.s.x v12, a1
-; LMULMAX2-RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; LMULMAX2-RV32-NEXT: vmv.v.i v14, 0
-; LMULMAX2-RV32-NEXT: vsetivli zero, 6, e32, m2, tu, ma
-; LMULMAX2-RV32-NEXT: vslideup.vi v14, v12, 5
-; LMULMAX2-RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; LMULMAX2-RV32-NEXT: vmulhu.vv v8, v8, v14
-; LMULMAX2-RV32-NEXT: vadd.vv v8, v8, v10
-; LMULMAX2-RV32-NEXT: lui a1, %hi(.LCPI184_1)
-; LMULMAX2-RV32-NEXT: addi a1, a1, %lo(.LCPI184_1)
-; LMULMAX2-RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; LMULMAX2-RV32-NEXT: vle8.v v10, (a1)
-; LMULMAX2-RV32-NEXT: vsext.vf4 v12, v10
-; LMULMAX2-RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; LMULMAX2-RV32-NEXT: vsrl.vv v8, v8, v12
-; LMULMAX2-RV32-NEXT: vse64.v v8, (a0)
-; LMULMAX2-RV32-NEXT: ret
-;
-; LMULMAX2-RV64-LABEL: mulhu_v4i64:
-; LMULMAX2-RV64: # %bb.0:
-; LMULMAX2-RV64-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; LMULMAX2-RV64-NEXT: vle64.v v8, (a0)
-; LMULMAX2-RV64-NEXT: li a1, -1
-; LMULMAX2-RV64-NEXT: slli a1, a1, 63
-; LMULMAX2-RV64-NEXT: vmv.s.x v10, a1
-; LMULMAX2-RV64-NEXT: vmv.v.i v12, 0
-; LMULMAX2-RV64-NEXT: vsetivli zero, 3, e64, m2, tu, ma
-; LMULMAX2-RV64-NEXT: vslideup.vi v12, v10, 2
-; LMULMAX2-RV64-NEXT: lui a1, %hi(.LCPI184_0)
-; LMULMAX2-RV64-NEXT: addi a1, a1, %lo(.LCPI184_0)
-; LMULMAX2-RV64-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; LMULMAX2-RV64-NEXT: vle64.v v10, (a1)
-; LMULMAX2-RV64-NEXT: vmulhu.vv v10, v8, v10
-; LMULMAX2-RV64-NEXT: vsub.vv v8, v8, v10
-; LMULMAX2-RV64-NEXT: vmulhu.vv v8, v8, v12
-; LMULMAX2-RV64-NEXT: vadd.vv v8, v8, v10
-; LMULMAX2-RV64-NEXT: lui a1, 12320
-; LMULMAX2-RV64-NEXT: addi a1, a1, 513
-; LMULMAX2-RV64-NEXT: vmv.s.x v10, a1
-; LMULMAX2-RV64-NEXT: vsext.vf8 v12, v10
-; LMULMAX2-RV64-NEXT: vsrl.vv v8, v8, v12
-; LMULMAX2-RV64-NEXT: vse64.v v8, (a0)
-; LMULMAX2-RV64-NEXT: ret
-;
-; LMULMAX1-RV32-LABEL: mulhu_v4i64:
-; LMULMAX1-RV32: # %bb.0:
-; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; LMULMAX1-RV32-NEXT: vle64.v v8, (a0)
-; LMULMAX1-RV32-NEXT: addi a1, a0, 16
-; LMULMAX1-RV32-NEXT: vle64.v v9, (a1)
-; LMULMAX1-RV32-NEXT: lui a2, 144
-; LMULMAX1-RV32-NEXT: addi a2, a2, 7
-; LMULMAX1-RV32-NEXT: vmv.s.x v10, a2
-; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; LMULMAX1-RV32-NEXT: vsext.vf4 v11, v10
-; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; LMULMAX1-RV32-NEXT: vdivu.vv v9, v9, v11
-; LMULMAX1-RV32-NEXT: lui a2, 80
-; LMULMAX1-RV32-NEXT: addi a2, a2, 3
-; LMULMAX1-RV32-NEXT: vmv.s.x v10, a2
-; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; LMULMAX1-RV32-NEXT: vsext.vf4 v11, v10
-; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; LMULMAX1-RV32-NEXT: vdivu.vv v8, v8, v11
-; LMULMAX1-RV32-NEXT: vse64.v v8, (a0)
-; LMULMAX1-RV32-NEXT: vse64.v v9, (a1)
-; LMULMAX1-RV32-NEXT: ret
+; RV32-LABEL: mulhu_v4i64:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; RV32-NEXT: vle64.v v8, (a0)
+; RV32-NEXT: lui a1, %hi(.LCPI184_0)
+; RV32-NEXT: addi a1, a1, %lo(.LCPI184_0)
+; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; RV32-NEXT: vle32.v v10, (a1)
+; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; RV32-NEXT: vmulhu.vv v10, v8, v10
+; RV32-NEXT: vsub.vv v8, v8, v10
+; RV32-NEXT: lui a1, 524288
+; RV32-NEXT: vmv.s.x v12, a1
+; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; RV32-NEXT: vmv.v.i v14, 0
+; RV32-NEXT: vsetivli zero, 6, e32, m2, tu, ma
+; RV32-NEXT: vslideup.vi v14, v12, 5
+; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; RV32-NEXT: vmulhu.vv v8, v8, v14
+; RV32-NEXT: vadd.vv v8, v8, v10
+; RV32-NEXT: lui a1, %hi(.LCPI184_1)
+; RV32-NEXT: addi a1, a1, %lo(.LCPI184_1)
+; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; RV32-NEXT: vle8.v v10, (a1)
+; RV32-NEXT: vsext.vf4 v12, v10
+; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; RV32-NEXT: vsrl.vv v8, v8, v12
+; RV32-NEXT: vse64.v v8, (a0)
+; RV32-NEXT: ret
;
-; LMULMAX1-RV64-LABEL: mulhu_v4i64:
-; LMULMAX1-RV64: # %bb.0:
-; LMULMAX1-RV64-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; LMULMAX1-RV64-NEXT: vle64.v v8, (a0)
-; LMULMAX1-RV64-NEXT: addi a1, a0, 16
-; LMULMAX1-RV64-NEXT: vle64.v v9, (a1)
-; LMULMAX1-RV64-NEXT: vmv.v.i v10, 0
-; LMULMAX1-RV64-NEXT: li a2, -1
-; LMULMAX1-RV64-NEXT: slli a2, a2, 63
-; LMULMAX1-RV64-NEXT: vsetvli zero, zero, e64, m1, tu, ma
-; LMULMAX1-RV64-NEXT: vmv.s.x v10, a2
-; LMULMAX1-RV64-NEXT: lui a2, %hi(.LCPI184_0)
-; LMULMAX1-RV64-NEXT: addi a2, a2, %lo(.LCPI184_0)
-; LMULMAX1-RV64-NEXT: vsetvli zero, zero, e64, m1, ta, ma
-; LMULMAX1-RV64-NEXT: vlse64.v v11, (a2), zero
-; LMULMAX1-RV64-NEXT: lui a2, %hi(.LCPI184_1)
-; LMULMAX1-RV64-NEXT: ld a2, %lo(.LCPI184_1)(a2)
-; LMULMAX1-RV64-NEXT: vsetvli zero, zero, e64, m1, tu, ma
-; LMULMAX1-RV64-NEXT: vmv.s.x v11, a2
-; LMULMAX1-RV64-NEXT: vsetvli zero, zero, e64, m1, ta, ma
-; LMULMAX1-RV64-NEXT: vmulhu.vv v11, v9, v11
-; LMULMAX1-RV64-NEXT: vsub.vv v9, v9, v11
-; LMULMAX1-RV64-NEXT: vmulhu.vv v9, v9, v10
-; LMULMAX1-RV64-NEXT: vadd.vv v9, v9, v11
-; LMULMAX1-RV64-NEXT: vid.v v10
-; LMULMAX1-RV64-NEXT: vadd.vi v11, v10, 2
-; LMULMAX1-RV64-NEXT: vsrl.vv v9, v9, v11
-; LMULMAX1-RV64-NEXT: lui a2, 838861
-; LMULMAX1-RV64-NEXT: addiw a2, a2, -819
-; LMULMAX1-RV64-NEXT: slli a3, a2, 32
-; LMULMAX1-RV64-NEXT: add a2, a2, a3
-; LMULMAX1-RV64-NEXT: vmv.v.x v11, a2
-; LMULMAX1-RV64-NEXT: lui a2, 699051
-; LMULMAX1-RV64-NEXT: addiw a2, a2, -1365
-; LMULMAX1-RV64-NEXT: slli a3, a2, 32
-; LMULMAX1-RV64-NEXT: add a2, a2, a3
-; LMULMAX1-RV64-NEXT: vsetvli zero, zero, e64, m1, tu, ma
-; LMULMAX1-RV64-NEXT: vmv.s.x v11, a2
-; LMULMAX1-RV64-NEXT: vsetvli zero, zero, e64, m1, ta, ma
-; LMULMAX1-RV64-NEXT: vmulhu.vv v8, v8, v11
-; LMULMAX1-RV64-NEXT: vadd.vi v10, v10, 1
-; LMULMAX1-RV64-NEXT: vsrl.vv v8, v8, v10
-; LMULMAX1-RV64-NEXT: vse64.v v8, (a0)
-; LMULMAX1-RV64-NEXT: vse64.v v9, (a1)
-; LMULMAX1-RV64-NEXT: ret
+; RV64-LABEL: mulhu_v4i64:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; RV64-NEXT: vle64.v v8, (a0)
+; RV64-NEXT: li a1, -1
+; RV64-NEXT: slli a1, a1, 63
+; RV64-NEXT: vmv.s.x v10, a1
+; RV64-NEXT: vmv.v.i v12, 0
+; RV64-NEXT: vsetivli zero, 3, e64, m2, tu, ma
+; RV64-NEXT: vslideup.vi v12, v10, 2
+; RV64-NEXT: lui a1, %hi(.LCPI184_0)
+; RV64-NEXT: addi a1, a1, %lo(.LCPI184_0)
+; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; RV64-NEXT: vle64.v v10, (a1)
+; RV64-NEXT: vmulhu.vv v10, v8, v10
+; RV64-NEXT: vsub.vv v8, v8, v10
+; RV64-NEXT: vmulhu.vv v8, v8, v12
+; RV64-NEXT: vadd.vv v8, v8, v10
+; RV64-NEXT: lui a1, 12320
+; RV64-NEXT: addi a1, a1, 513
+; RV64-NEXT: vmv.s.x v10, a1
+; RV64-NEXT: vsext.vf8 v12, v10
+; RV64-NEXT: vsrl.vv v8, v8, v12
+; RV64-NEXT: vse64.v v8, (a0)
+; RV64-NEXT: ret
%a = load <4 x i64>, ptr %x
%b = udiv <4 x i64> %a, <i64 3, i64 5, i64 7, i64 9>
store <4 x i64> %b, ptr %x
@@ -5243,44 +3485,26 @@ define void @mulhu_v4i64(ptr %x) {
}
define void @mulhs_v32i8(ptr %x) {
-; LMULMAX2-LABEL: mulhs_v32i8:
-; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: li a1, 32
-; LMULMAX2-NEXT: vsetvli zero, a1, e8, m2, ta, ma
-; LMULMAX2-NEXT: vle8.v v8, (a0)
-; LMULMAX2-NEXT: vmv.v.i v10, 7
-; LMULMAX2-NEXT: lui a1, 304453
-; LMULMAX2-NEXT: addi a1, a1, -1452
-; LMULMAX2-NEXT: vsetvli zero, zero, e32, m8, ta, ma
-; LMULMAX2-NEXT: vmv.s.x v0, a1
-; LMULMAX2-NEXT: vsetvli zero, zero, e8, m2, ta, ma
-; LMULMAX2-NEXT: vmerge.vim v10, v10, 1, v0
-; LMULMAX2-NEXT: li a1, -123
-; LMULMAX2-NEXT: vmv.v.x v12, a1
-; LMULMAX2-NEXT: li a1, 57
-; LMULMAX2-NEXT: vmerge.vxm v12, v12, a1, v0
-; LMULMAX2-NEXT: vmulhu.vv v8, v8, v12
-; LMULMAX2-NEXT: vsrl.vv v8, v8, v10
-; LMULMAX2-NEXT: vse8.v v8, (a0)
-; LMULMAX2-NEXT: ret
-;
-; LMULMAX1-LABEL: mulhs_v32i8:
-; LMULMAX1: # %bb.0:
-; LMULMAX1-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; LMULMAX1-NEXT: vle8.v v8, (a0)
-; LMULMAX1-NEXT: addi a1, a0, 16
-; LMULMAX1-NEXT: vle8.v v9, (a1)
-; LMULMAX1-NEXT: lui a2, 5
-; LMULMAX1-NEXT: addi a2, a2, -1452
-; LMULMAX1-NEXT: vmv.s.x v0, a2
-; LMULMAX1-NEXT: vsetvli zero, zero, e8, m1, ta, ma
-; LMULMAX1-NEXT: vmv.v.i v10, -9
-; LMULMAX1-NEXT: vmerge.vim v10, v10, 9, v0
-; LMULMAX1-NEXT: vdivu.vv v9, v9, v10
-; LMULMAX1-NEXT: vdivu.vv v8, v8, v10
-; LMULMAX1-NEXT: vse8.v v8, (a0)
-; LMULMAX1-NEXT: vse8.v v9, (a1)
-; LMULMAX1-NEXT: ret
+; CHECK-LABEL: mulhs_v32i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a1, 32
+; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
+; CHECK-NEXT: vle8.v v8, (a0)
+; CHECK-NEXT: vmv.v.i v10, 7
+; CHECK-NEXT: lui a1, 304453
+; CHECK-NEXT: addi a1, a1, -1452
+; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; CHECK-NEXT: vmv.s.x v0, a1
+; CHECK-NEXT: vsetvli zero, zero, e8, m2, ta, ma
+; CHECK-NEXT: vmerge.vim v10, v10, 1, v0
+; CHECK-NEXT: li a1, -123
+; CHECK-NEXT: vmv.v.x v12, a1
+; CHECK-NEXT: li a1, 57
+; CHECK-NEXT: vmerge.vxm v12, v12, a1, v0
+; CHECK-NEXT: vmulhu.vv v8, v8, v12
+; CHECK-NEXT: vsrl.vv v8, v8, v10
+; CHECK-NEXT: vse8.v v8, (a0)
+; CHECK-NEXT: ret
%a = load <32 x i8>, ptr %x
%b = udiv <32 x i8> %a, <i8 -9, i8 -9, i8 9, i8 -9, i8 9, i8 -9, i8 9, i8 -9, i8 -9, i8 9, i8 -9, i8 9, i8 -9, i8 -9, i8 9, i8 -9, i8 -9, i8 -9, i8 9, i8 -9, i8 9, i8 -9, i8 9, i8 -9, i8 -9, i8 9, i8 -9, i8 9, i8 -9, i8 -9, i8 9, i8 -9>
store <32 x i8> %b, ptr %x
@@ -5288,41 +3512,25 @@ define void @mulhs_v32i8(ptr %x) {
}
define void @mulhs_v16i16(ptr %x) {
-; LMULMAX2-LABEL: mulhs_v16i16:
-; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; LMULMAX2-NEXT: vle16.v v8, (a0)
-; LMULMAX2-NEXT: lui a1, 5
-; LMULMAX2-NEXT: addi a1, a1, -1755
-; LMULMAX2-NEXT: vmv.v.x v10, a1
-; LMULMAX2-NEXT: lui a1, 7
-; LMULMAX2-NEXT: addi a1, a1, -1687
-; LMULMAX2-NEXT: vmv.s.x v0, a1
-; LMULMAX2-NEXT: lui a1, 1048571
-; LMULMAX2-NEXT: addi a1, a1, 1755
-; LMULMAX2-NEXT: vmerge.vxm v10, v10, a1, v0
-; LMULMAX2-NEXT: vmulh.vv v8, v8, v10
-; LMULMAX2-NEXT: vsra.vi v8, v8, 1
-; LMULMAX2-NEXT: vsrl.vi v10, v8, 15
-; LMULMAX2-NEXT: vadd.vv v8, v8, v10
-; LMULMAX2-NEXT: vse16.v v8, (a0)
-; LMULMAX2-NEXT: ret
-;
-; LMULMAX1-LABEL: mulhs_v16i16:
-; LMULMAX1: # %bb.0:
-; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; LMULMAX1-NEXT: vle16.v v8, (a0)
-; LMULMAX1-NEXT: addi a1, a0, 16
-; LMULMAX1-NEXT: vle16.v v9, (a1)
-; LMULMAX1-NEXT: li a2, 105
-; LMULMAX1-NEXT: vmv.s.x v0, a2
-; LMULMAX1-NEXT: vmv.v.i v10, 7
-; LMULMAX1-NEXT: vmerge.vim v10, v10, -7, v0
-; LMULMAX1-NEXT: vdiv.vv v9, v9, v10
-; LMULMAX1-NEXT: vdiv.vv v8, v8, v10
-; LMULMAX1-NEXT: vse16.v v8, (a0)
-; LMULMAX1-NEXT: vse16.v v9, (a1)
-; LMULMAX1-NEXT: ret
+; CHECK-LABEL: mulhs_v16i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; CHECK-NEXT: vle16.v v8, (a0)
+; CHECK-NEXT: lui a1, 5
+; CHECK-NEXT: addi a1, a1, -1755
+; CHECK-NEXT: vmv.v.x v10, a1
+; CHECK-NEXT: lui a1, 7
+; CHECK-NEXT: addi a1, a1, -1687
+; CHECK-NEXT: vmv.s.x v0, a1
+; CHECK-NEXT: lui a1, 1048571
+; CHECK-NEXT: addi a1, a1, 1755
+; CHECK-NEXT: vmerge.vxm v10, v10, a1, v0
+; CHECK-NEXT: vmulh.vv v8, v8, v10
+; CHECK-NEXT: vsra.vi v8, v8, 1
+; CHECK-NEXT: vsrl.vi v10, v8, 15
+; CHECK-NEXT: vadd.vv v8, v8, v10
+; CHECK-NEXT: vse16.v v8, (a0)
+; CHECK-NEXT: ret
%a = load <16 x i16>, ptr %x
%b = sdiv <16 x i16> %a, <i16 -7, i16 7, i16 7, i16 -7, i16 7, i16 -7, i16 -7, i16 7, i16 -7, i16 7, i16 7, i16 -7, i16 7, i16 -7, i16 -7, i16 7>
store <16 x i16> %b, ptr %x
@@ -5330,83 +3538,40 @@ define void @mulhs_v16i16(ptr %x) {
}
define void @mulhs_v8i32(ptr %x) {
-; LMULMAX2-RV32-LABEL: mulhs_v8i32:
-; LMULMAX2-RV32: # %bb.0:
-; LMULMAX2-RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; LMULMAX2-RV32-NEXT: vle32.v v8, (a0)
-; LMULMAX2-RV32-NEXT: lui a1, 419430
-; LMULMAX2-RV32-NEXT: addi a1, a1, 1639
-; LMULMAX2-RV32-NEXT: vmv.v.x v10, a1
-; LMULMAX2-RV32-NEXT: li a1, 85
-; LMULMAX2-RV32-NEXT: vmv.s.x v0, a1
-; LMULMAX2-RV32-NEXT: lui a1, 629146
-; LMULMAX2-RV32-NEXT: addi a1, a1, -1639
-; LMULMAX2-RV32-NEXT: vmerge.vxm v10, v10, a1, v0
-; LMULMAX2-RV32-NEXT: vmulh.vv v8, v8, v10
-; LMULMAX2-RV32-NEXT: vsrl.vi v10, v8, 31
-; LMULMAX2-RV32-NEXT: vsra.vi v8, v8, 1
-; LMULMAX2-RV32-NEXT: vadd.vv v8, v8, v10
-; LMULMAX2-RV32-NEXT: vse32.v v8, (a0)
-; LMULMAX2-RV32-NEXT: ret
-;
-; LMULMAX2-RV64-LABEL: mulhs_v8i32:
-; LMULMAX2-RV64: # %bb.0:
-; LMULMAX2-RV64-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; LMULMAX2-RV64-NEXT: vle32.v v8, (a0)
-; LMULMAX2-RV64-NEXT: lui a1, %hi(.LCPI187_0)
-; LMULMAX2-RV64-NEXT: addi a1, a1, %lo(.LCPI187_0)
-; LMULMAX2-RV64-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; LMULMAX2-RV64-NEXT: vlse64.v v10, (a1), zero
-; LMULMAX2-RV64-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; LMULMAX2-RV64-NEXT: vmulh.vv v8, v8, v10
-; LMULMAX2-RV64-NEXT: vsra.vi v8, v8, 1
-; LMULMAX2-RV64-NEXT: vsrl.vi v10, v8, 31
-; LMULMAX2-RV64-NEXT: vadd.vv v8, v8, v10
-; LMULMAX2-RV64-NEXT: vse32.v v8, (a0)
-; LMULMAX2-RV64-NEXT: ret
-;
-; LMULMAX1-RV32-LABEL: mulhs_v8i32:
-; LMULMAX1-RV32: # %bb.0:
-; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; LMULMAX1-RV32-NEXT: vle32.v v8, (a0)
-; LMULMAX1-RV32-NEXT: addi a1, a0, 16
-; LMULMAX1-RV32-NEXT: vle32.v v9, (a1)
-; LMULMAX1-RV32-NEXT: lui a2, 419430
-; LMULMAX1-RV32-NEXT: addi a2, a2, 1639
-; LMULMAX1-RV32-NEXT: vmv.v.x v10, a2
-; LMULMAX1-RV32-NEXT: vmv.v.i v0, 5
-; LMULMAX1-RV32-NEXT: lui a2, 629146
-; LMULMAX1-RV32-NEXT: addi a2, a2, -1639
-; LMULMAX1-RV32-NEXT: vmerge.vxm v10, v10, a2, v0
-; LMULMAX1-RV32-NEXT: vmulh.vv v9, v9, v10
-; LMULMAX1-RV32-NEXT: vsrl.vi v11, v9, 31
-; LMULMAX1-RV32-NEXT: vsra.vi v9, v9, 1
-; LMULMAX1-RV32-NEXT: vadd.vv v9, v9, v11
-; LMULMAX1-RV32-NEXT: vmulh.vv v8, v8, v10
-; LMULMAX1-RV32-NEXT: vsrl.vi v10, v8, 31
-; LMULMAX1-RV32-NEXT: vsra.vi v8, v8, 1
-; LMULMAX1-RV32-NEXT: vadd.vv v8, v8, v10
-; LMULMAX1-RV32-NEXT: vse32.v v8, (a0)
-; LMULMAX1-RV32-NEXT: vse32.v v9, (a1)
-; LMULMAX1-RV32-NEXT: ret
+; RV32-LABEL: mulhs_v8i32:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; RV32-NEXT: vle32.v v8, (a0)
+; RV32-NEXT: lui a1, 419430
+; RV32-NEXT: addi a1, a1, 1639
+; RV32-NEXT: vmv.v.x v10, a1
+; RV32-NEXT: li a1, 85
+; RV32-NEXT: vmv.s.x v0, a1
+; RV32-NEXT: lui a1, 629146
+; RV32-NEXT: addi a1, a1, -1639
+; RV32-NEXT: vmerge.vxm v10, v10, a1, v0
+; RV32-NEXT: vmulh.vv v8, v8, v10
+; RV32-NEXT: vsrl.vi v10, v8, 31
+; RV32-NEXT: vsra.vi v8, v8, 1
+; RV32-NEXT: vadd.vv v8, v8, v10
+; RV32-NEXT: vse32.v v8, (a0)
+; RV32-NEXT: ret
;
-; LMULMAX1-RV64-LABEL: mulhs_v8i32:
-; LMULMAX1-RV64: # %bb.0:
-; LMULMAX1-RV64-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; LMULMAX1-RV64-NEXT: vle32.v v8, (a0)
-; LMULMAX1-RV64-NEXT: addi a1, a0, 16
-; LMULMAX1-RV64-NEXT: vle32.v v9, (a1)
-; LMULMAX1-RV64-NEXT: li a2, 3
-; LMULMAX1-RV64-NEXT: slli a2, a2, 33
-; LMULMAX1-RV64-NEXT: addi a2, a2, -5
-; LMULMAX1-RV64-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; LMULMAX1-RV64-NEXT: vmv.v.x v10, a2
-; LMULMAX1-RV64-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; LMULMAX1-RV64-NEXT: vdiv.vv v9, v9, v10
-; LMULMAX1-RV64-NEXT: vdiv.vv v8, v8, v10
-; LMULMAX1-RV64-NEXT: vse32.v v8, (a0)
-; LMULMAX1-RV64-NEXT: vse32.v v9, (a1)
-; LMULMAX1-RV64-NEXT: ret
+; RV64-LABEL: mulhs_v8i32:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; RV64-NEXT: vle32.v v8, (a0)
+; RV64-NEXT: lui a1, %hi(.LCPI187_0)
+; RV64-NEXT: addi a1, a1, %lo(.LCPI187_0)
+; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; RV64-NEXT: vlse64.v v10, (a1), zero
+; RV64-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; RV64-NEXT: vmulh.vv v8, v8, v10
+; RV64-NEXT: vsra.vi v8, v8, 1
+; RV64-NEXT: vsrl.vi v10, v8, 31
+; RV64-NEXT: vadd.vv v8, v8, v10
+; RV64-NEXT: vse32.v v8, (a0)
+; RV64-NEXT: ret
%a = load <8 x i32>, ptr %x
%b = sdiv <8 x i32> %a, <i32 -5, i32 5, i32 -5, i32 5, i32 -5, i32 5, i32 -5, i32 5>
store <8 x i32> %b, ptr %x
@@ -5414,122 +3579,71 @@ define void @mulhs_v8i32(ptr %x) {
}
define void @mulhs_v4i64(ptr %x) {
-; LMULMAX2-RV32-LABEL: mulhs_v4i64:
-; LMULMAX2-RV32: # %bb.0:
-; LMULMAX2-RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; LMULMAX2-RV32-NEXT: vle64.v v8, (a0)
-; LMULMAX2-RV32-NEXT: lui a1, 349525
-; LMULMAX2-RV32-NEXT: addi a2, a1, 1365
-; LMULMAX2-RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; LMULMAX2-RV32-NEXT: vmv.v.x v10, a2
-; LMULMAX2-RV32-NEXT: li a2, 17
-; LMULMAX2-RV32-NEXT: vmv.s.x v0, a2
-; LMULMAX2-RV32-NEXT: addi a1, a1, 1366
-; LMULMAX2-RV32-NEXT: vmerge.vxm v10, v10, a1, v0
-; LMULMAX2-RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; LMULMAX2-RV32-NEXT: vmulh.vv v10, v8, v10
-; LMULMAX2-RV32-NEXT: lui a1, 1048560
-; LMULMAX2-RV32-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
-; LMULMAX2-RV32-NEXT: vmv.v.x v12, a1
-; LMULMAX2-RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; LMULMAX2-RV32-NEXT: vsext.vf4 v14, v12
-; LMULMAX2-RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; LMULMAX2-RV32-NEXT: vmadd.vv v14, v8, v10
-; LMULMAX2-RV32-NEXT: li a1, 63
-; LMULMAX2-RV32-NEXT: vsrl.vx v8, v14, a1
-; LMULMAX2-RV32-NEXT: lui a1, 16
-; LMULMAX2-RV32-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
-; LMULMAX2-RV32-NEXT: vmv.v.x v10, a1
-; LMULMAX2-RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; LMULMAX2-RV32-NEXT: vsext.vf4 v12, v10
-; LMULMAX2-RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; LMULMAX2-RV32-NEXT: vsra.vv v10, v14, v12
-; LMULMAX2-RV32-NEXT: vadd.vv v8, v10, v8
-; LMULMAX2-RV32-NEXT: vse64.v v8, (a0)
-; LMULMAX2-RV32-NEXT: ret
-;
-; LMULMAX2-RV64-LABEL: mulhs_v4i64:
-; LMULMAX2-RV64: # %bb.0:
-; LMULMAX2-RV64-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; LMULMAX2-RV64-NEXT: vle64.v v8, (a0)
-; LMULMAX2-RV64-NEXT: lui a1, 349525
-; LMULMAX2-RV64-NEXT: addiw a1, a1, 1365
-; LMULMAX2-RV64-NEXT: slli a2, a1, 32
-; LMULMAX2-RV64-NEXT: add a1, a1, a2
-; LMULMAX2-RV64-NEXT: vmv.v.x v10, a1
-; LMULMAX2-RV64-NEXT: lui a1, %hi(.LCPI188_0)
-; LMULMAX2-RV64-NEXT: ld a1, %lo(.LCPI188_0)(a1)
-; LMULMAX2-RV64-NEXT: vsetivli zero, 1, e8, mf8, ta, ma
-; LMULMAX2-RV64-NEXT: vmv.v.i v0, 5
-; LMULMAX2-RV64-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; LMULMAX2-RV64-NEXT: vmerge.vxm v10, v10, a1, v0
-; LMULMAX2-RV64-NEXT: vmulh.vv v10, v8, v10
-; LMULMAX2-RV64-NEXT: lui a1, 1044496
-; LMULMAX2-RV64-NEXT: addi a1, a1, -256
-; LMULMAX2-RV64-NEXT: vmv.s.x v12, a1
-; LMULMAX2-RV64-NEXT: vsext.vf8 v14, v12
-; LMULMAX2-RV64-NEXT: vmadd.vv v14, v8, v10
-; LMULMAX2-RV64-NEXT: li a1, 63
-; LMULMAX2-RV64-NEXT: vsrl.vx v8, v14, a1
-; LMULMAX2-RV64-NEXT: lui a1, 4096
-; LMULMAX2-RV64-NEXT: addi a1, a1, 256
-; LMULMAX2-RV64-NEXT: vmv.s.x v10, a1
-; LMULMAX2-RV64-NEXT: vsext.vf8 v12, v10
-; LMULMAX2-RV64-NEXT: vsra.vv v10, v14, v12
-; LMULMAX2-RV64-NEXT: vadd.vv v8, v10, v8
-; LMULMAX2-RV64-NEXT: vse64.v v8, (a0)
-; LMULMAX2-RV64-NEXT: ret
-;
-; LMULMAX1-RV32-LABEL: mulhs_v4i64:
-; LMULMAX1-RV32: # %bb.0:
-; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; LMULMAX1-RV32-NEXT: vle64.v v8, (a0)
-; LMULMAX1-RV32-NEXT: addi a1, a0, 16
-; LMULMAX1-RV32-NEXT: vle64.v v9, (a1)
-; LMULMAX1-RV32-NEXT: lui a2, 1048528
-; LMULMAX1-RV32-NEXT: addi a2, a2, 3
-; LMULMAX1-RV32-NEXT: vmv.s.x v10, a2
-; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; LMULMAX1-RV32-NEXT: vsext.vf4 v11, v10
-; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; LMULMAX1-RV32-NEXT: vdiv.vv v9, v9, v11
-; LMULMAX1-RV32-NEXT: vdiv.vv v8, v8, v11
-; LMULMAX1-RV32-NEXT: vse64.v v8, (a0)
-; LMULMAX1-RV32-NEXT: vse64.v v9, (a1)
-; LMULMAX1-RV32-NEXT: ret
+; RV32-LABEL: mulhs_v4i64:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; RV32-NEXT: vle64.v v8, (a0)
+; RV32-NEXT: lui a1, 349525
+; RV32-NEXT: addi a2, a1, 1365
+; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; RV32-NEXT: vmv.v.x v10, a2
+; RV32-NEXT: li a2, 17
+; RV32-NEXT: vmv.s.x v0, a2
+; RV32-NEXT: addi a1, a1, 1366
+; RV32-NEXT: vmerge.vxm v10, v10, a1, v0
+; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; RV32-NEXT: vmulh.vv v10, v8, v10
+; RV32-NEXT: lui a1, 1048560
+; RV32-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
+; RV32-NEXT: vmv.v.x v12, a1
+; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; RV32-NEXT: vsext.vf4 v14, v12
+; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; RV32-NEXT: vmadd.vv v14, v8, v10
+; RV32-NEXT: li a1, 63
+; RV32-NEXT: vsrl.vx v8, v14, a1
+; RV32-NEXT: lui a1, 16
+; RV32-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
+; RV32-NEXT: vmv.v.x v10, a1
+; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; RV32-NEXT: vsext.vf4 v12, v10
+; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; RV32-NEXT: vsra.vv v10, v14, v12
+; RV32-NEXT: vadd.vv v8, v10, v8
+; RV32-NEXT: vse64.v v8, (a0)
+; RV32-NEXT: ret
;
-; LMULMAX1-RV64-LABEL: mulhs_v4i64:
-; LMULMAX1-RV64: # %bb.0:
-; LMULMAX1-RV64-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; LMULMAX1-RV64-NEXT: vle64.v v8, (a0)
-; LMULMAX1-RV64-NEXT: addi a1, a0, 16
-; LMULMAX1-RV64-NEXT: lui a2, 349525
-; LMULMAX1-RV64-NEXT: addiw a2, a2, 1365
-; LMULMAX1-RV64-NEXT: slli a3, a2, 32
-; LMULMAX1-RV64-NEXT: add a2, a2, a3
-; LMULMAX1-RV64-NEXT: lui a3, %hi(.LCPI188_0)
-; LMULMAX1-RV64-NEXT: ld a3, %lo(.LCPI188_0)(a3)
-; LMULMAX1-RV64-NEXT: vle64.v v9, (a1)
-; LMULMAX1-RV64-NEXT: vmv.v.x v10, a2
-; LMULMAX1-RV64-NEXT: vsetvli zero, zero, e64, m1, tu, ma
-; LMULMAX1-RV64-NEXT: vmv.s.x v10, a3
-; LMULMAX1-RV64-NEXT: vsetvli zero, zero, e64, m1, ta, ma
-; LMULMAX1-RV64-NEXT: vmulh.vv v11, v9, v10
-; LMULMAX1-RV64-NEXT: vid.v v12
-; LMULMAX1-RV64-NEXT: vrsub.vi v13, v12, 0
-; LMULMAX1-RV64-NEXT: vmacc.vv v11, v13, v9
-; LMULMAX1-RV64-NEXT: li a2, 63
-; LMULMAX1-RV64-NEXT: vsrl.vx v9, v11, a2
-; LMULMAX1-RV64-NEXT: vsra.vv v11, v11, v12
-; LMULMAX1-RV64-NEXT: vadd.vv v9, v11, v9
-; LMULMAX1-RV64-NEXT: vmulh.vv v10, v8, v10
-; LMULMAX1-RV64-NEXT: vmacc.vv v10, v8, v13
-; LMULMAX1-RV64-NEXT: vsrl.vx v8, v10, a2
-; LMULMAX1-RV64-NEXT: vsra.vv v10, v10, v12
-; LMULMAX1-RV64-NEXT: vadd.vv v8, v10, v8
-; LMULMAX1-RV64-NEXT: vse64.v v8, (a0)
-; LMULMAX1-RV64-NEXT: vse64.v v9, (a1)
-; LMULMAX1-RV64-NEXT: ret
+; RV64-LABEL: mulhs_v4i64:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; RV64-NEXT: vle64.v v8, (a0)
+; RV64-NEXT: lui a1, 349525
+; RV64-NEXT: addiw a1, a1, 1365
+; RV64-NEXT: slli a2, a1, 32
+; RV64-NEXT: add a1, a1, a2
+; RV64-NEXT: vmv.v.x v10, a1
+; RV64-NEXT: lui a1, %hi(.LCPI188_0)
+; RV64-NEXT: ld a1, %lo(.LCPI188_0)(a1)
+; RV64-NEXT: vsetivli zero, 1, e8, mf8, ta, ma
+; RV64-NEXT: vmv.v.i v0, 5
+; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; RV64-NEXT: vmerge.vxm v10, v10, a1, v0
+; RV64-NEXT: vmulh.vv v10, v8, v10
+; RV64-NEXT: lui a1, 1044496
+; RV64-NEXT: addi a1, a1, -256
+; RV64-NEXT: vmv.s.x v12, a1
+; RV64-NEXT: vsext.vf8 v14, v12
+; RV64-NEXT: vmadd.vv v14, v8, v10
+; RV64-NEXT: li a1, 63
+; RV64-NEXT: vsrl.vx v8, v14, a1
+; RV64-NEXT: lui a1, 4096
+; RV64-NEXT: addi a1, a1, 256
+; RV64-NEXT: vmv.s.x v10, a1
+; RV64-NEXT: vsext.vf8 v12, v10
+; RV64-NEXT: vsra.vv v10, v14, v12
+; RV64-NEXT: vadd.vv v8, v10, v8
+; RV64-NEXT: vse64.v v8, (a0)
+; RV64-NEXT: ret
%a = load <4 x i64>, ptr %x
%b = sdiv <4 x i64> %a, <i64 3, i64 -3, i64 3, i64 -3>
store <4 x i64> %b, ptr %x
@@ -5537,45 +3651,15 @@ define void @mulhs_v4i64(ptr %x) {
}
define void @smin_v32i8(ptr %x, ptr %y) {
-; LMULMAX2-LABEL: smin_v32i8:
-; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: li a2, 32
-; LMULMAX2-NEXT: vsetvli zero, a2, e8, m2, ta, ma
-; LMULMAX2-NEXT: vle8.v v8, (a0)
-; LMULMAX2-NEXT: vle8.v v10, (a1)
-; LMULMAX2-NEXT: vmin.vv v8, v8, v10
-; LMULMAX2-NEXT: vse8.v v8, (a0)
-; LMULMAX2-NEXT: ret
-;
-; LMULMAX1-RV32-LABEL: smin_v32i8:
-; LMULMAX1-RV32: # %bb.0:
-; LMULMAX1-RV32-NEXT: vsetivli zero, 16, e8, m1, ta, ma
-; LMULMAX1-RV32-NEXT: vle8.v v8, (a0)
-; LMULMAX1-RV32-NEXT: addi a2, a0, 16
-; LMULMAX1-RV32-NEXT: vle8.v v9, (a2)
-; LMULMAX1-RV32-NEXT: addi a3, a1, 16
-; LMULMAX1-RV32-NEXT: vle8.v v10, (a3)
-; LMULMAX1-RV32-NEXT: vle8.v v11, (a1)
-; LMULMAX1-RV32-NEXT: vmin.vv v9, v9, v10
-; LMULMAX1-RV32-NEXT: vmin.vv v8, v8, v11
-; LMULMAX1-RV32-NEXT: vse8.v v8, (a0)
-; LMULMAX1-RV32-NEXT: vse8.v v9, (a2)
-; LMULMAX1-RV32-NEXT: ret
-;
-; LMULMAX1-RV64-LABEL: smin_v32i8:
-; LMULMAX1-RV64: # %bb.0:
-; LMULMAX1-RV64-NEXT: vsetivli zero, 16, e8, m1, ta, ma
-; LMULMAX1-RV64-NEXT: vle8.v v8, (a0)
-; LMULMAX1-RV64-NEXT: addi a2, a1, 16
-; LMULMAX1-RV64-NEXT: vle8.v v9, (a2)
-; LMULMAX1-RV64-NEXT: addi a2, a0, 16
-; LMULMAX1-RV64-NEXT: vle8.v v10, (a2)
-; LMULMAX1-RV64-NEXT: vle8.v v11, (a1)
-; LMULMAX1-RV64-NEXT: vmin.vv v9, v10, v9
-; LMULMAX1-RV64-NEXT: vmin.vv v8, v8, v11
-; LMULMAX1-RV64-NEXT: vse8.v v8, (a0)
-; LMULMAX1-RV64-NEXT: vse8.v v9, (a2)
-; LMULMAX1-RV64-NEXT: ret
+; CHECK-LABEL: smin_v32i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a2, 32
+; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, ma
+; CHECK-NEXT: vle8.v v8, (a0)
+; CHECK-NEXT: vle8.v v10, (a1)
+; CHECK-NEXT: vmin.vv v8, v8, v10
+; CHECK-NEXT: vse8.v v8, (a0)
+; CHECK-NEXT: ret
%a = load <32 x i8>, ptr %x
%b = load <32 x i8>, ptr %y
%cc = icmp slt <32 x i8> %a, %b
@@ -5585,44 +3669,14 @@ define void @smin_v32i8(ptr %x, ptr %y) {
}
define void @smin_v16i16(ptr %x, ptr %y) {
-; LMULMAX2-LABEL: smin_v16i16:
-; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; LMULMAX2-NEXT: vle16.v v8, (a0)
-; LMULMAX2-NEXT: vle16.v v10, (a1)
-; LMULMAX2-NEXT: vmin.vv v8, v8, v10
-; LMULMAX2-NEXT: vse16.v v8, (a0)
-; LMULMAX2-NEXT: ret
-;
-; LMULMAX1-RV32-LABEL: smin_v16i16:
-; LMULMAX1-RV32: # %bb.0:
-; LMULMAX1-RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; LMULMAX1-RV32-NEXT: vle16.v v8, (a0)
-; LMULMAX1-RV32-NEXT: addi a2, a0, 16
-; LMULMAX1-RV32-NEXT: vle16.v v9, (a2)
-; LMULMAX1-RV32-NEXT: addi a3, a1, 16
-; LMULMAX1-RV32-NEXT: vle16.v v10, (a3)
-; LMULMAX1-RV32-NEXT: vle16.v v11, (a1)
-; LMULMAX1-RV32-NEXT: vmin.vv v9, v9, v10
-; LMULMAX1-RV32-NEXT: vmin.vv v8, v8, v11
-; LMULMAX1-RV32-NEXT: vse16.v v8, (a0)
-; LMULMAX1-RV32-NEXT: vse16.v v9, (a2)
-; LMULMAX1-RV32-NEXT: ret
-;
-; LMULMAX1-RV64-LABEL: smin_v16i16:
-; LMULMAX1-RV64: # %bb.0:
-; LMULMAX1-RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; LMULMAX1-RV64-NEXT: vle16.v v8, (a0)
-; LMULMAX1-RV64-NEXT: addi a2, a1, 16
-; LMULMAX1-RV64-NEXT: vle16.v v9, (a2)
-; LMULMAX1-RV64-NEXT: addi a2, a0, 16
-; LMULMAX1-RV64-NEXT: vle16.v v10, (a2)
-; LMULMAX1-RV64-NEXT: vle16.v v11, (a1)
-; LMULMAX1-RV64-NEXT: vmin.vv v9, v10, v9
-; LMULMAX1-RV64-NEXT: vmin.vv v8, v8, v11
-; LMULMAX1-RV64-NEXT: vse16.v v8, (a0)
-; LMULMAX1-RV64-NEXT: vse16.v v9, (a2)
-; LMULMAX1-RV64-NEXT: ret
+; CHECK-LABEL: smin_v16i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; CHECK-NEXT: vle16.v v8, (a0)
+; CHECK-NEXT: vle16.v v10, (a1)
+; CHECK-NEXT: vmin.vv v8, v8, v10
+; CHECK-NEXT: vse16.v v8, (a0)
+; CHECK-NEXT: ret
%a = load <16 x i16>, ptr %x
%b = load <16 x i16>, ptr %y
%cc = icmp slt <16 x i16> %a, %b
@@ -5632,44 +3686,14 @@ define void @smin_v16i16(ptr %x, ptr %y) {
}
define void @smin_v8i32(ptr %x, ptr %y) {
-; LMULMAX2-LABEL: smin_v8i32:
-; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; LMULMAX2-NEXT: vle32.v v8, (a0)
-; LMULMAX2-NEXT: vle32.v v10, (a1)
-; LMULMAX2-NEXT: vmin.vv v8, v8, v10
-; LMULMAX2-NEXT: vse32.v v8, (a0)
-; LMULMAX2-NEXT: ret
-;
-; LMULMAX1-RV32-LABEL: smin_v8i32:
-; LMULMAX1-RV32: # %bb.0:
-; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; LMULMAX1-RV32-NEXT: vle32.v v8, (a0)
-; LMULMAX1-RV32-NEXT: addi a2, a0, 16
-; LMULMAX1-RV32-NEXT: vle32.v v9, (a2)
-; LMULMAX1-RV32-NEXT: addi a3, a1, 16
-; LMULMAX1-RV32-NEXT: vle32.v v10, (a3)
-; LMULMAX1-RV32-NEXT: vle32.v v11, (a1)
-; LMULMAX1-RV32-NEXT: vmin.vv v9, v9, v10
-; LMULMAX1-RV32-NEXT: vmin.vv v8, v8, v11
-; LMULMAX1-RV32-NEXT: vse32.v v8, (a0)
-; LMULMAX1-RV32-NEXT: vse32.v v9, (a2)
-; LMULMAX1-RV32-NEXT: ret
-;
-; LMULMAX1-RV64-LABEL: smin_v8i32:
-; LMULMAX1-RV64: # %bb.0:
-; LMULMAX1-RV64-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; LMULMAX1-RV64-NEXT: vle32.v v8, (a0)
-; LMULMAX1-RV64-NEXT: addi a2, a1, 16
-; LMULMAX1-RV64-NEXT: vle32.v v9, (a2)
-; LMULMAX1-RV64-NEXT: addi a2, a0, 16
-; LMULMAX1-RV64-NEXT: vle32.v v10, (a2)
-; LMULMAX1-RV64-NEXT: vle32.v v11, (a1)
-; LMULMAX1-RV64-NEXT: vmin.vv v9, v10, v9
-; LMULMAX1-RV64-NEXT: vmin.vv v8, v8, v11
-; LMULMAX1-RV64-NEXT: vse32.v v8, (a0)
-; LMULMAX1-RV64-NEXT: vse32.v v9, (a2)
-; LMULMAX1-RV64-NEXT: ret
+; CHECK-LABEL: smin_v8i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; CHECK-NEXT: vle32.v v8, (a0)
+; CHECK-NEXT: vle32.v v10, (a1)
+; CHECK-NEXT: vmin.vv v8, v8, v10
+; CHECK-NEXT: vse32.v v8, (a0)
+; CHECK-NEXT: ret
%a = load <8 x i32>, ptr %x
%b = load <8 x i32>, ptr %y
%cc = icmp slt <8 x i32> %a, %b
@@ -5679,44 +3703,14 @@ define void @smin_v8i32(ptr %x, ptr %y) {
}
define void @smin_v4i64(ptr %x, ptr %y) {
-; LMULMAX2-LABEL: smin_v4i64:
-; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; LMULMAX2-NEXT: vle64.v v8, (a0)
-; LMULMAX2-NEXT: vle64.v v10, (a1)
-; LMULMAX2-NEXT: vmin.vv v8, v8, v10
-; LMULMAX2-NEXT: vse64.v v8, (a0)
-; LMULMAX2-NEXT: ret
-;
-; LMULMAX1-RV32-LABEL: smin_v4i64:
-; LMULMAX1-RV32: # %bb.0:
-; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; LMULMAX1-RV32-NEXT: vle64.v v8, (a0)
-; LMULMAX1-RV32-NEXT: addi a2, a0, 16
-; LMULMAX1-RV32-NEXT: vle64.v v9, (a2)
-; LMULMAX1-RV32-NEXT: addi a3, a1, 16
-; LMULMAX1-RV32-NEXT: vle64.v v10, (a3)
-; LMULMAX1-RV32-NEXT: vle64.v v11, (a1)
-; LMULMAX1-RV32-NEXT: vmin.vv v9, v9, v10
-; LMULMAX1-RV32-NEXT: vmin.vv v8, v8, v11
-; LMULMAX1-RV32-NEXT: vse64.v v8, (a0)
-; LMULMAX1-RV32-NEXT: vse64.v v9, (a2)
-; LMULMAX1-RV32-NEXT: ret
-;
-; LMULMAX1-RV64-LABEL: smin_v4i64:
-; LMULMAX1-RV64: # %bb.0:
-; LMULMAX1-RV64-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; LMULMAX1-RV64-NEXT: vle64.v v8, (a0)
-; LMULMAX1-RV64-NEXT: addi a2, a1, 16
-; LMULMAX1-RV64-NEXT: vle64.v v9, (a2)
-; LMULMAX1-RV64-NEXT: addi a2, a0, 16
-; LMULMAX1-RV64-NEXT: vle64.v v10, (a2)
-; LMULMAX1-RV64-NEXT: vle64.v v11, (a1)
-; LMULMAX1-RV64-NEXT: vmin.vv v9, v10, v9
-; LMULMAX1-RV64-NEXT: vmin.vv v8, v8, v11
-; LMULMAX1-RV64-NEXT: vse64.v v8, (a0)
-; LMULMAX1-RV64-NEXT: vse64.v v9, (a2)
-; LMULMAX1-RV64-NEXT: ret
+; CHECK-LABEL: smin_v4i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; CHECK-NEXT: vle64.v v8, (a0)
+; CHECK-NEXT: vle64.v v10, (a1)
+; CHECK-NEXT: vmin.vv v8, v8, v10
+; CHECK-NEXT: vse64.v v8, (a0)
+; CHECK-NEXT: ret
%a = load <4 x i64>, ptr %x
%b = load <4 x i64>, ptr %y
%cc = icmp slt <4 x i64> %a, %b
@@ -5726,45 +3720,15 @@ define void @smin_v4i64(ptr %x, ptr %y) {
}
define void @smax_v32i8(ptr %x, ptr %y) {
-; LMULMAX2-LABEL: smax_v32i8:
-; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: li a2, 32
-; LMULMAX2-NEXT: vsetvli zero, a2, e8, m2, ta, ma
-; LMULMAX2-NEXT: vle8.v v8, (a0)
-; LMULMAX2-NEXT: vle8.v v10, (a1)
-; LMULMAX2-NEXT: vmax.vv v8, v8, v10
-; LMULMAX2-NEXT: vse8.v v8, (a0)
-; LMULMAX2-NEXT: ret
-;
-; LMULMAX1-RV32-LABEL: smax_v32i8:
-; LMULMAX1-RV32: # %bb.0:
-; LMULMAX1-RV32-NEXT: vsetivli zero, 16, e8, m1, ta, ma
-; LMULMAX1-RV32-NEXT: vle8.v v8, (a0)
-; LMULMAX1-RV32-NEXT: addi a2, a0, 16
-; LMULMAX1-RV32-NEXT: vle8.v v9, (a2)
-; LMULMAX1-RV32-NEXT: addi a3, a1, 16
-; LMULMAX1-RV32-NEXT: vle8.v v10, (a3)
-; LMULMAX1-RV32-NEXT: vle8.v v11, (a1)
-; LMULMAX1-RV32-NEXT: vmax.vv v9, v9, v10
-; LMULMAX1-RV32-NEXT: vmax.vv v8, v8, v11
-; LMULMAX1-RV32-NEXT: vse8.v v8, (a0)
-; LMULMAX1-RV32-NEXT: vse8.v v9, (a2)
-; LMULMAX1-RV32-NEXT: ret
-;
-; LMULMAX1-RV64-LABEL: smax_v32i8:
-; LMULMAX1-RV64: # %bb.0:
-; LMULMAX1-RV64-NEXT: vsetivli zero, 16, e8, m1, ta, ma
-; LMULMAX1-RV64-NEXT: vle8.v v8, (a0)
-; LMULMAX1-RV64-NEXT: addi a2, a1, 16
-; LMULMAX1-RV64-NEXT: vle8.v v9, (a2)
-; LMULMAX1-RV64-NEXT: addi a2, a0, 16
-; LMULMAX1-RV64-NEXT: vle8.v v10, (a2)
-; LMULMAX1-RV64-NEXT: vle8.v v11, (a1)
-; LMULMAX1-RV64-NEXT: vmax.vv v9, v10, v9
-; LMULMAX1-RV64-NEXT: vmax.vv v8, v8, v11
-; LMULMAX1-RV64-NEXT: vse8.v v8, (a0)
-; LMULMAX1-RV64-NEXT: vse8.v v9, (a2)
-; LMULMAX1-RV64-NEXT: ret
+; CHECK-LABEL: smax_v32i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a2, 32
+; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, ma
+; CHECK-NEXT: vle8.v v8, (a0)
+; CHECK-NEXT: vle8.v v10, (a1)
+; CHECK-NEXT: vmax.vv v8, v8, v10
+; CHECK-NEXT: vse8.v v8, (a0)
+; CHECK-NEXT: ret
%a = load <32 x i8>, ptr %x
%b = load <32 x i8>, ptr %y
%cc = icmp sgt <32 x i8> %a, %b
@@ -5774,44 +3738,14 @@ define void @smax_v32i8(ptr %x, ptr %y) {
}
define void @smax_v16i16(ptr %x, ptr %y) {
-; LMULMAX2-LABEL: smax_v16i16:
-; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; LMULMAX2-NEXT: vle16.v v8, (a0)
-; LMULMAX2-NEXT: vle16.v v10, (a1)
-; LMULMAX2-NEXT: vmax.vv v8, v8, v10
-; LMULMAX2-NEXT: vse16.v v8, (a0)
-; LMULMAX2-NEXT: ret
-;
-; LMULMAX1-RV32-LABEL: smax_v16i16:
-; LMULMAX1-RV32: # %bb.0:
-; LMULMAX1-RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; LMULMAX1-RV32-NEXT: vle16.v v8, (a0)
-; LMULMAX1-RV32-NEXT: addi a2, a0, 16
-; LMULMAX1-RV32-NEXT: vle16.v v9, (a2)
-; LMULMAX1-RV32-NEXT: addi a3, a1, 16
-; LMULMAX1-RV32-NEXT: vle16.v v10, (a3)
-; LMULMAX1-RV32-NEXT: vle16.v v11, (a1)
-; LMULMAX1-RV32-NEXT: vmax.vv v9, v9, v10
-; LMULMAX1-RV32-NEXT: vmax.vv v8, v8, v11
-; LMULMAX1-RV32-NEXT: vse16.v v8, (a0)
-; LMULMAX1-RV32-NEXT: vse16.v v9, (a2)
-; LMULMAX1-RV32-NEXT: ret
-;
-; LMULMAX1-RV64-LABEL: smax_v16i16:
-; LMULMAX1-RV64: # %bb.0:
-; LMULMAX1-RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; LMULMAX1-RV64-NEXT: vle16.v v8, (a0)
-; LMULMAX1-RV64-NEXT: addi a2, a1, 16
-; LMULMAX1-RV64-NEXT: vle16.v v9, (a2)
-; LMULMAX1-RV64-NEXT: addi a2, a0, 16
-; LMULMAX1-RV64-NEXT: vle16.v v10, (a2)
-; LMULMAX1-RV64-NEXT: vle16.v v11, (a1)
-; LMULMAX1-RV64-NEXT: vmax.vv v9, v10, v9
-; LMULMAX1-RV64-NEXT: vmax.vv v8, v8, v11
-; LMULMAX1-RV64-NEXT: vse16.v v8, (a0)
-; LMULMAX1-RV64-NEXT: vse16.v v9, (a2)
-; LMULMAX1-RV64-NEXT: ret
+; CHECK-LABEL: smax_v16i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; CHECK-NEXT: vle16.v v8, (a0)
+; CHECK-NEXT: vle16.v v10, (a1)
+; CHECK-NEXT: vmax.vv v8, v8, v10
+; CHECK-NEXT: vse16.v v8, (a0)
+; CHECK-NEXT: ret
%a = load <16 x i16>, ptr %x
%b = load <16 x i16>, ptr %y
%cc = icmp sgt <16 x i16> %a, %b
@@ -5821,44 +3755,14 @@ define void @smax_v16i16(ptr %x, ptr %y) {
}
define void @smax_v8i32(ptr %x, ptr %y) {
-; LMULMAX2-LABEL: smax_v8i32:
-; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; LMULMAX2-NEXT: vle32.v v8, (a0)
-; LMULMAX2-NEXT: vle32.v v10, (a1)
-; LMULMAX2-NEXT: vmax.vv v8, v8, v10
-; LMULMAX2-NEXT: vse32.v v8, (a0)
-; LMULMAX2-NEXT: ret
-;
-; LMULMAX1-RV32-LABEL: smax_v8i32:
-; LMULMAX1-RV32: # %bb.0:
-; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; LMULMAX1-RV32-NEXT: vle32.v v8, (a0)
-; LMULMAX1-RV32-NEXT: addi a2, a0, 16
-; LMULMAX1-RV32-NEXT: vle32.v v9, (a2)
-; LMULMAX1-RV32-NEXT: addi a3, a1, 16
-; LMULMAX1-RV32-NEXT: vle32.v v10, (a3)
-; LMULMAX1-RV32-NEXT: vle32.v v11, (a1)
-; LMULMAX1-RV32-NEXT: vmax.vv v9, v9, v10
-; LMULMAX1-RV32-NEXT: vmax.vv v8, v8, v11
-; LMULMAX1-RV32-NEXT: vse32.v v8, (a0)
-; LMULMAX1-RV32-NEXT: vse32.v v9, (a2)
-; LMULMAX1-RV32-NEXT: ret
-;
-; LMULMAX1-RV64-LABEL: smax_v8i32:
-; LMULMAX1-RV64: # %bb.0:
-; LMULMAX1-RV64-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; LMULMAX1-RV64-NEXT: vle32.v v8, (a0)
-; LMULMAX1-RV64-NEXT: addi a2, a1, 16
-; LMULMAX1-RV64-NEXT: vle32.v v9, (a2)
-; LMULMAX1-RV64-NEXT: addi a2, a0, 16
-; LMULMAX1-RV64-NEXT: vle32.v v10, (a2)
-; LMULMAX1-RV64-NEXT: vle32.v v11, (a1)
-; LMULMAX1-RV64-NEXT: vmax.vv v9, v10, v9
-; LMULMAX1-RV64-NEXT: vmax.vv v8, v8, v11
-; LMULMAX1-RV64-NEXT: vse32.v v8, (a0)
-; LMULMAX1-RV64-NEXT: vse32.v v9, (a2)
-; LMULMAX1-RV64-NEXT: ret
+; CHECK-LABEL: smax_v8i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; CHECK-NEXT: vle32.v v8, (a0)
+; CHECK-NEXT: vle32.v v10, (a1)
+; CHECK-NEXT: vmax.vv v8, v8, v10
+; CHECK-NEXT: vse32.v v8, (a0)
+; CHECK-NEXT: ret
%a = load <8 x i32>, ptr %x
%b = load <8 x i32>, ptr %y
%cc = icmp sgt <8 x i32> %a, %b
@@ -5868,44 +3772,14 @@ define void @smax_v8i32(ptr %x, ptr %y) {
}
define void @smax_v4i64(ptr %x, ptr %y) {
-; LMULMAX2-LABEL: smax_v4i64:
-; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; LMULMAX2-NEXT: vle64.v v8, (a0)
-; LMULMAX2-NEXT: vle64.v v10, (a1)
-; LMULMAX2-NEXT: vmax.vv v8, v8, v10
-; LMULMAX2-NEXT: vse64.v v8, (a0)
-; LMULMAX2-NEXT: ret
-;
-; LMULMAX1-RV32-LABEL: smax_v4i64:
-; LMULMAX1-RV32: # %bb.0:
-; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; LMULMAX1-RV32-NEXT: vle64.v v8, (a0)
-; LMULMAX1-RV32-NEXT: addi a2, a0, 16
-; LMULMAX1-RV32-NEXT: vle64.v v9, (a2)
-; LMULMAX1-RV32-NEXT: addi a3, a1, 16
-; LMULMAX1-RV32-NEXT: vle64.v v10, (a3)
-; LMULMAX1-RV32-NEXT: vle64.v v11, (a1)
-; LMULMAX1-RV32-NEXT: vmax.vv v9, v9, v10
-; LMULMAX1-RV32-NEXT: vmax.vv v8, v8, v11
-; LMULMAX1-RV32-NEXT: vse64.v v8, (a0)
-; LMULMAX1-RV32-NEXT: vse64.v v9, (a2)
-; LMULMAX1-RV32-NEXT: ret
-;
-; LMULMAX1-RV64-LABEL: smax_v4i64:
-; LMULMAX1-RV64: # %bb.0:
-; LMULMAX1-RV64-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; LMULMAX1-RV64-NEXT: vle64.v v8, (a0)
-; LMULMAX1-RV64-NEXT: addi a2, a1, 16
-; LMULMAX1-RV64-NEXT: vle64.v v9, (a2)
-; LMULMAX1-RV64-NEXT: addi a2, a0, 16
-; LMULMAX1-RV64-NEXT: vle64.v v10, (a2)
-; LMULMAX1-RV64-NEXT: vle64.v v11, (a1)
-; LMULMAX1-RV64-NEXT: vmax.vv v9, v10, v9
-; LMULMAX1-RV64-NEXT: vmax.vv v8, v8, v11
-; LMULMAX1-RV64-NEXT: vse64.v v8, (a0)
-; LMULMAX1-RV64-NEXT: vse64.v v9, (a2)
-; LMULMAX1-RV64-NEXT: ret
+; CHECK-LABEL: smax_v4i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; CHECK-NEXT: vle64.v v8, (a0)
+; CHECK-NEXT: vle64.v v10, (a1)
+; CHECK-NEXT: vmax.vv v8, v8, v10
+; CHECK-NEXT: vse64.v v8, (a0)
+; CHECK-NEXT: ret
%a = load <4 x i64>, ptr %x
%b = load <4 x i64>, ptr %y
%cc = icmp sgt <4 x i64> %a, %b
@@ -5915,45 +3789,15 @@ define void @smax_v4i64(ptr %x, ptr %y) {
}
define void @umin_v32i8(ptr %x, ptr %y) {
-; LMULMAX2-LABEL: umin_v32i8:
-; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: li a2, 32
-; LMULMAX2-NEXT: vsetvli zero, a2, e8, m2, ta, ma
-; LMULMAX2-NEXT: vle8.v v8, (a0)
-; LMULMAX2-NEXT: vle8.v v10, (a1)
-; LMULMAX2-NEXT: vminu.vv v8, v8, v10
-; LMULMAX2-NEXT: vse8.v v8, (a0)
-; LMULMAX2-NEXT: ret
-;
-; LMULMAX1-RV32-LABEL: umin_v32i8:
-; LMULMAX1-RV32: # %bb.0:
-; LMULMAX1-RV32-NEXT: vsetivli zero, 16, e8, m1, ta, ma
-; LMULMAX1-RV32-NEXT: vle8.v v8, (a0)
-; LMULMAX1-RV32-NEXT: addi a2, a0, 16
-; LMULMAX1-RV32-NEXT: vle8.v v9, (a2)
-; LMULMAX1-RV32-NEXT: addi a3, a1, 16
-; LMULMAX1-RV32-NEXT: vle8.v v10, (a3)
-; LMULMAX1-RV32-NEXT: vle8.v v11, (a1)
-; LMULMAX1-RV32-NEXT: vminu.vv v9, v9, v10
-; LMULMAX1-RV32-NEXT: vminu.vv v8, v8, v11
-; LMULMAX1-RV32-NEXT: vse8.v v8, (a0)
-; LMULMAX1-RV32-NEXT: vse8.v v9, (a2)
-; LMULMAX1-RV32-NEXT: ret
-;
-; LMULMAX1-RV64-LABEL: umin_v32i8:
-; LMULMAX1-RV64: # %bb.0:
-; LMULMAX1-RV64-NEXT: vsetivli zero, 16, e8, m1, ta, ma
-; LMULMAX1-RV64-NEXT: vle8.v v8, (a0)
-; LMULMAX1-RV64-NEXT: addi a2, a1, 16
-; LMULMAX1-RV64-NEXT: vle8.v v9, (a2)
-; LMULMAX1-RV64-NEXT: addi a2, a0, 16
-; LMULMAX1-RV64-NEXT: vle8.v v10, (a2)
-; LMULMAX1-RV64-NEXT: vle8.v v11, (a1)
-; LMULMAX1-RV64-NEXT: vminu.vv v9, v10, v9
-; LMULMAX1-RV64-NEXT: vminu.vv v8, v8, v11
-; LMULMAX1-RV64-NEXT: vse8.v v8, (a0)
-; LMULMAX1-RV64-NEXT: vse8.v v9, (a2)
-; LMULMAX1-RV64-NEXT: ret
+; CHECK-LABEL: umin_v32i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a2, 32
+; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, ma
+; CHECK-NEXT: vle8.v v8, (a0)
+; CHECK-NEXT: vle8.v v10, (a1)
+; CHECK-NEXT: vminu.vv v8, v8, v10
+; CHECK-NEXT: vse8.v v8, (a0)
+; CHECK-NEXT: ret
%a = load <32 x i8>, ptr %x
%b = load <32 x i8>, ptr %y
%cc = icmp ult <32 x i8> %a, %b
@@ -5963,44 +3807,14 @@ define void @umin_v32i8(ptr %x, ptr %y) {
}
define void @umin_v16i16(ptr %x, ptr %y) {
-; LMULMAX2-LABEL: umin_v16i16:
-; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; LMULMAX2-NEXT: vle16.v v8, (a0)
-; LMULMAX2-NEXT: vle16.v v10, (a1)
-; LMULMAX2-NEXT: vminu.vv v8, v8, v10
-; LMULMAX2-NEXT: vse16.v v8, (a0)
-; LMULMAX2-NEXT: ret
-;
-; LMULMAX1-RV32-LABEL: umin_v16i16:
-; LMULMAX1-RV32: # %bb.0:
-; LMULMAX1-RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; LMULMAX1-RV32-NEXT: vle16.v v8, (a0)
-; LMULMAX1-RV32-NEXT: addi a2, a0, 16
-; LMULMAX1-RV32-NEXT: vle16.v v9, (a2)
-; LMULMAX1-RV32-NEXT: addi a3, a1, 16
-; LMULMAX1-RV32-NEXT: vle16.v v10, (a3)
-; LMULMAX1-RV32-NEXT: vle16.v v11, (a1)
-; LMULMAX1-RV32-NEXT: vminu.vv v9, v9, v10
-; LMULMAX1-RV32-NEXT: vminu.vv v8, v8, v11
-; LMULMAX1-RV32-NEXT: vse16.v v8, (a0)
-; LMULMAX1-RV32-NEXT: vse16.v v9, (a2)
-; LMULMAX1-RV32-NEXT: ret
-;
-; LMULMAX1-RV64-LABEL: umin_v16i16:
-; LMULMAX1-RV64: # %bb.0:
-; LMULMAX1-RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; LMULMAX1-RV64-NEXT: vle16.v v8, (a0)
-; LMULMAX1-RV64-NEXT: addi a2, a1, 16
-; LMULMAX1-RV64-NEXT: vle16.v v9, (a2)
-; LMULMAX1-RV64-NEXT: addi a2, a0, 16
-; LMULMAX1-RV64-NEXT: vle16.v v10, (a2)
-; LMULMAX1-RV64-NEXT: vle16.v v11, (a1)
-; LMULMAX1-RV64-NEXT: vminu.vv v9, v10, v9
-; LMULMAX1-RV64-NEXT: vminu.vv v8, v8, v11
-; LMULMAX1-RV64-NEXT: vse16.v v8, (a0)
-; LMULMAX1-RV64-NEXT: vse16.v v9, (a2)
-; LMULMAX1-RV64-NEXT: ret
+; CHECK-LABEL: umin_v16i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; CHECK-NEXT: vle16.v v8, (a0)
+; CHECK-NEXT: vle16.v v10, (a1)
+; CHECK-NEXT: vminu.vv v8, v8, v10
+; CHECK-NEXT: vse16.v v8, (a0)
+; CHECK-NEXT: ret
%a = load <16 x i16>, ptr %x
%b = load <16 x i16>, ptr %y
%cc = icmp ult <16 x i16> %a, %b
@@ -6010,44 +3824,14 @@ define void @umin_v16i16(ptr %x, ptr %y) {
}
define void @umin_v8i32(ptr %x, ptr %y) {
-; LMULMAX2-LABEL: umin_v8i32:
-; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; LMULMAX2-NEXT: vle32.v v8, (a0)
-; LMULMAX2-NEXT: vle32.v v10, (a1)
-; LMULMAX2-NEXT: vminu.vv v8, v8, v10
-; LMULMAX2-NEXT: vse32.v v8, (a0)
-; LMULMAX2-NEXT: ret
-;
-; LMULMAX1-RV32-LABEL: umin_v8i32:
-; LMULMAX1-RV32: # %bb.0:
-; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; LMULMAX1-RV32-NEXT: vle32.v v8, (a0)
-; LMULMAX1-RV32-NEXT: addi a2, a0, 16
-; LMULMAX1-RV32-NEXT: vle32.v v9, (a2)
-; LMULMAX1-RV32-NEXT: addi a3, a1, 16
-; LMULMAX1-RV32-NEXT: vle32.v v10, (a3)
-; LMULMAX1-RV32-NEXT: vle32.v v11, (a1)
-; LMULMAX1-RV32-NEXT: vminu.vv v9, v9, v10
-; LMULMAX1-RV32-NEXT: vminu.vv v8, v8, v11
-; LMULMAX1-RV32-NEXT: vse32.v v8, (a0)
-; LMULMAX1-RV32-NEXT: vse32.v v9, (a2)
-; LMULMAX1-RV32-NEXT: ret
-;
-; LMULMAX1-RV64-LABEL: umin_v8i32:
-; LMULMAX1-RV64: # %bb.0:
-; LMULMAX1-RV64-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; LMULMAX1-RV64-NEXT: vle32.v v8, (a0)
-; LMULMAX1-RV64-NEXT: addi a2, a1, 16
-; LMULMAX1-RV64-NEXT: vle32.v v9, (a2)
-; LMULMAX1-RV64-NEXT: addi a2, a0, 16
-; LMULMAX1-RV64-NEXT: vle32.v v10, (a2)
-; LMULMAX1-RV64-NEXT: vle32.v v11, (a1)
-; LMULMAX1-RV64-NEXT: vminu.vv v9, v10, v9
-; LMULMAX1-RV64-NEXT: vminu.vv v8, v8, v11
-; LMULMAX1-RV64-NEXT: vse32.v v8, (a0)
-; LMULMAX1-RV64-NEXT: vse32.v v9, (a2)
-; LMULMAX1-RV64-NEXT: ret
+; CHECK-LABEL: umin_v8i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; CHECK-NEXT: vle32.v v8, (a0)
+; CHECK-NEXT: vle32.v v10, (a1)
+; CHECK-NEXT: vminu.vv v8, v8, v10
+; CHECK-NEXT: vse32.v v8, (a0)
+; CHECK-NEXT: ret
%a = load <8 x i32>, ptr %x
%b = load <8 x i32>, ptr %y
%cc = icmp ult <8 x i32> %a, %b
@@ -6057,44 +3841,14 @@ define void @umin_v8i32(ptr %x, ptr %y) {
}
define void @umin_v4i64(ptr %x, ptr %y) {
-; LMULMAX2-LABEL: umin_v4i64:
-; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; LMULMAX2-NEXT: vle64.v v8, (a0)
-; LMULMAX2-NEXT: vle64.v v10, (a1)
-; LMULMAX2-NEXT: vminu.vv v8, v8, v10
-; LMULMAX2-NEXT: vse64.v v8, (a0)
-; LMULMAX2-NEXT: ret
-;
-; LMULMAX1-RV32-LABEL: umin_v4i64:
-; LMULMAX1-RV32: # %bb.0:
-; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; LMULMAX1-RV32-NEXT: vle64.v v8, (a0)
-; LMULMAX1-RV32-NEXT: addi a2, a0, 16
-; LMULMAX1-RV32-NEXT: vle64.v v9, (a2)
-; LMULMAX1-RV32-NEXT: addi a3, a1, 16
-; LMULMAX1-RV32-NEXT: vle64.v v10, (a3)
-; LMULMAX1-RV32-NEXT: vle64.v v11, (a1)
-; LMULMAX1-RV32-NEXT: vminu.vv v9, v9, v10
-; LMULMAX1-RV32-NEXT: vminu.vv v8, v8, v11
-; LMULMAX1-RV32-NEXT: vse64.v v8, (a0)
-; LMULMAX1-RV32-NEXT: vse64.v v9, (a2)
-; LMULMAX1-RV32-NEXT: ret
-;
-; LMULMAX1-RV64-LABEL: umin_v4i64:
-; LMULMAX1-RV64: # %bb.0:
-; LMULMAX1-RV64-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; LMULMAX1-RV64-NEXT: vle64.v v8, (a0)
-; LMULMAX1-RV64-NEXT: addi a2, a1, 16
-; LMULMAX1-RV64-NEXT: vle64.v v9, (a2)
-; LMULMAX1-RV64-NEXT: addi a2, a0, 16
-; LMULMAX1-RV64-NEXT: vle64.v v10, (a2)
-; LMULMAX1-RV64-NEXT: vle64.v v11, (a1)
-; LMULMAX1-RV64-NEXT: vminu.vv v9, v10, v9
-; LMULMAX1-RV64-NEXT: vminu.vv v8, v8, v11
-; LMULMAX1-RV64-NEXT: vse64.v v8, (a0)
-; LMULMAX1-RV64-NEXT: vse64.v v9, (a2)
-; LMULMAX1-RV64-NEXT: ret
+; CHECK-LABEL: umin_v4i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; CHECK-NEXT: vle64.v v8, (a0)
+; CHECK-NEXT: vle64.v v10, (a1)
+; CHECK-NEXT: vminu.vv v8, v8, v10
+; CHECK-NEXT: vse64.v v8, (a0)
+; CHECK-NEXT: ret
%a = load <4 x i64>, ptr %x
%b = load <4 x i64>, ptr %y
%cc = icmp ult <4 x i64> %a, %b
@@ -6104,45 +3858,15 @@ define void @umin_v4i64(ptr %x, ptr %y) {
}
define void @umax_v32i8(ptr %x, ptr %y) {
-; LMULMAX2-LABEL: umax_v32i8:
-; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: li a2, 32
-; LMULMAX2-NEXT: vsetvli zero, a2, e8, m2, ta, ma
-; LMULMAX2-NEXT: vle8.v v8, (a0)
-; LMULMAX2-NEXT: vle8.v v10, (a1)
-; LMULMAX2-NEXT: vmaxu.vv v8, v8, v10
-; LMULMAX2-NEXT: vse8.v v8, (a0)
-; LMULMAX2-NEXT: ret
-;
-; LMULMAX1-RV32-LABEL: umax_v32i8:
-; LMULMAX1-RV32: # %bb.0:
-; LMULMAX1-RV32-NEXT: vsetivli zero, 16, e8, m1, ta, ma
-; LMULMAX1-RV32-NEXT: vle8.v v8, (a0)
-; LMULMAX1-RV32-NEXT: addi a2, a0, 16
-; LMULMAX1-RV32-NEXT: vle8.v v9, (a2)
-; LMULMAX1-RV32-NEXT: addi a3, a1, 16
-; LMULMAX1-RV32-NEXT: vle8.v v10, (a3)
-; LMULMAX1-RV32-NEXT: vle8.v v11, (a1)
-; LMULMAX1-RV32-NEXT: vmaxu.vv v9, v9, v10
-; LMULMAX1-RV32-NEXT: vmaxu.vv v8, v8, v11
-; LMULMAX1-RV32-NEXT: vse8.v v8, (a0)
-; LMULMAX1-RV32-NEXT: vse8.v v9, (a2)
-; LMULMAX1-RV32-NEXT: ret
-;
-; LMULMAX1-RV64-LABEL: umax_v32i8:
-; LMULMAX1-RV64: # %bb.0:
-; LMULMAX1-RV64-NEXT: vsetivli zero, 16, e8, m1, ta, ma
-; LMULMAX1-RV64-NEXT: vle8.v v8, (a0)
-; LMULMAX1-RV64-NEXT: addi a2, a1, 16
-; LMULMAX1-RV64-NEXT: vle8.v v9, (a2)
-; LMULMAX1-RV64-NEXT: addi a2, a0, 16
-; LMULMAX1-RV64-NEXT: vle8.v v10, (a2)
-; LMULMAX1-RV64-NEXT: vle8.v v11, (a1)
-; LMULMAX1-RV64-NEXT: vmaxu.vv v9, v10, v9
-; LMULMAX1-RV64-NEXT: vmaxu.vv v8, v8, v11
-; LMULMAX1-RV64-NEXT: vse8.v v8, (a0)
-; LMULMAX1-RV64-NEXT: vse8.v v9, (a2)
-; LMULMAX1-RV64-NEXT: ret
+; CHECK-LABEL: umax_v32i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a2, 32
+; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, ma
+; CHECK-NEXT: vle8.v v8, (a0)
+; CHECK-NEXT: vle8.v v10, (a1)
+; CHECK-NEXT: vmaxu.vv v8, v8, v10
+; CHECK-NEXT: vse8.v v8, (a0)
+; CHECK-NEXT: ret
%a = load <32 x i8>, ptr %x
%b = load <32 x i8>, ptr %y
%cc = icmp ugt <32 x i8> %a, %b
@@ -6152,44 +3876,14 @@ define void @umax_v32i8(ptr %x, ptr %y) {
}
define void @umax_v16i16(ptr %x, ptr %y) {
-; LMULMAX2-LABEL: umax_v16i16:
-; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; LMULMAX2-NEXT: vle16.v v8, (a0)
-; LMULMAX2-NEXT: vle16.v v10, (a1)
-; LMULMAX2-NEXT: vmaxu.vv v8, v8, v10
-; LMULMAX2-NEXT: vse16.v v8, (a0)
-; LMULMAX2-NEXT: ret
-;
-; LMULMAX1-RV32-LABEL: umax_v16i16:
-; LMULMAX1-RV32: # %bb.0:
-; LMULMAX1-RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; LMULMAX1-RV32-NEXT: vle16.v v8, (a0)
-; LMULMAX1-RV32-NEXT: addi a2, a0, 16
-; LMULMAX1-RV32-NEXT: vle16.v v9, (a2)
-; LMULMAX1-RV32-NEXT: addi a3, a1, 16
-; LMULMAX1-RV32-NEXT: vle16.v v10, (a3)
-; LMULMAX1-RV32-NEXT: vle16.v v11, (a1)
-; LMULMAX1-RV32-NEXT: vmaxu.vv v9, v9, v10
-; LMULMAX1-RV32-NEXT: vmaxu.vv v8, v8, v11
-; LMULMAX1-RV32-NEXT: vse16.v v8, (a0)
-; LMULMAX1-RV32-NEXT: vse16.v v9, (a2)
-; LMULMAX1-RV32-NEXT: ret
-;
-; LMULMAX1-RV64-LABEL: umax_v16i16:
-; LMULMAX1-RV64: # %bb.0:
-; LMULMAX1-RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; LMULMAX1-RV64-NEXT: vle16.v v8, (a0)
-; LMULMAX1-RV64-NEXT: addi a2, a1, 16
-; LMULMAX1-RV64-NEXT: vle16.v v9, (a2)
-; LMULMAX1-RV64-NEXT: addi a2, a0, 16
-; LMULMAX1-RV64-NEXT: vle16.v v10, (a2)
-; LMULMAX1-RV64-NEXT: vle16.v v11, (a1)
-; LMULMAX1-RV64-NEXT: vmaxu.vv v9, v10, v9
-; LMULMAX1-RV64-NEXT: vmaxu.vv v8, v8, v11
-; LMULMAX1-RV64-NEXT: vse16.v v8, (a0)
-; LMULMAX1-RV64-NEXT: vse16.v v9, (a2)
-; LMULMAX1-RV64-NEXT: ret
+; CHECK-LABEL: umax_v16i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; CHECK-NEXT: vle16.v v8, (a0)
+; CHECK-NEXT: vle16.v v10, (a1)
+; CHECK-NEXT: vmaxu.vv v8, v8, v10
+; CHECK-NEXT: vse16.v v8, (a0)
+; CHECK-NEXT: ret
%a = load <16 x i16>, ptr %x
%b = load <16 x i16>, ptr %y
%cc = icmp ugt <16 x i16> %a, %b
@@ -6199,44 +3893,14 @@ define void @umax_v16i16(ptr %x, ptr %y) {
}
define void @umax_v8i32(ptr %x, ptr %y) {
-; LMULMAX2-LABEL: umax_v8i32:
-; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; LMULMAX2-NEXT: vle32.v v8, (a0)
-; LMULMAX2-NEXT: vle32.v v10, (a1)
-; LMULMAX2-NEXT: vmaxu.vv v8, v8, v10
-; LMULMAX2-NEXT: vse32.v v8, (a0)
-; LMULMAX2-NEXT: ret
-;
-; LMULMAX1-RV32-LABEL: umax_v8i32:
-; LMULMAX1-RV32: # %bb.0:
-; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; LMULMAX1-RV32-NEXT: vle32.v v8, (a0)
-; LMULMAX1-RV32-NEXT: addi a2, a0, 16
-; LMULMAX1-RV32-NEXT: vle32.v v9, (a2)
-; LMULMAX1-RV32-NEXT: addi a3, a1, 16
-; LMULMAX1-RV32-NEXT: vle32.v v10, (a3)
-; LMULMAX1-RV32-NEXT: vle32.v v11, (a1)
-; LMULMAX1-RV32-NEXT: vmaxu.vv v9, v9, v10
-; LMULMAX1-RV32-NEXT: vmaxu.vv v8, v8, v11
-; LMULMAX1-RV32-NEXT: vse32.v v8, (a0)
-; LMULMAX1-RV32-NEXT: vse32.v v9, (a2)
-; LMULMAX1-RV32-NEXT: ret
-;
-; LMULMAX1-RV64-LABEL: umax_v8i32:
-; LMULMAX1-RV64: # %bb.0:
-; LMULMAX1-RV64-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; LMULMAX1-RV64-NEXT: vle32.v v8, (a0)
-; LMULMAX1-RV64-NEXT: addi a2, a1, 16
-; LMULMAX1-RV64-NEXT: vle32.v v9, (a2)
-; LMULMAX1-RV64-NEXT: addi a2, a0, 16
-; LMULMAX1-RV64-NEXT: vle32.v v10, (a2)
-; LMULMAX1-RV64-NEXT: vle32.v v11, (a1)
-; LMULMAX1-RV64-NEXT: vmaxu.vv v9, v10, v9
-; LMULMAX1-RV64-NEXT: vmaxu.vv v8, v8, v11
-; LMULMAX1-RV64-NEXT: vse32.v v8, (a0)
-; LMULMAX1-RV64-NEXT: vse32.v v9, (a2)
-; LMULMAX1-RV64-NEXT: ret
+; CHECK-LABEL: umax_v8i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; CHECK-NEXT: vle32.v v8, (a0)
+; CHECK-NEXT: vle32.v v10, (a1)
+; CHECK-NEXT: vmaxu.vv v8, v8, v10
+; CHECK-NEXT: vse32.v v8, (a0)
+; CHECK-NEXT: ret
%a = load <8 x i32>, ptr %x
%b = load <8 x i32>, ptr %y
%cc = icmp ugt <8 x i32> %a, %b
@@ -6246,44 +3910,14 @@ define void @umax_v8i32(ptr %x, ptr %y) {
}
define void @umax_v4i64(ptr %x, ptr %y) {
-; LMULMAX2-LABEL: umax_v4i64:
-; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; LMULMAX2-NEXT: vle64.v v8, (a0)
-; LMULMAX2-NEXT: vle64.v v10, (a1)
-; LMULMAX2-NEXT: vmaxu.vv v8, v8, v10
-; LMULMAX2-NEXT: vse64.v v8, (a0)
-; LMULMAX2-NEXT: ret
-;
-; LMULMAX1-RV32-LABEL: umax_v4i64:
-; LMULMAX1-RV32: # %bb.0:
-; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; LMULMAX1-RV32-NEXT: vle64.v v8, (a0)
-; LMULMAX1-RV32-NEXT: addi a2, a0, 16
-; LMULMAX1-RV32-NEXT: vle64.v v9, (a2)
-; LMULMAX1-RV32-NEXT: addi a3, a1, 16
-; LMULMAX1-RV32-NEXT: vle64.v v10, (a3)
-; LMULMAX1-RV32-NEXT: vle64.v v11, (a1)
-; LMULMAX1-RV32-NEXT: vmaxu.vv v9, v9, v10
-; LMULMAX1-RV32-NEXT: vmaxu.vv v8, v8, v11
-; LMULMAX1-RV32-NEXT: vse64.v v8, (a0)
-; LMULMAX1-RV32-NEXT: vse64.v v9, (a2)
-; LMULMAX1-RV32-NEXT: ret
-;
-; LMULMAX1-RV64-LABEL: umax_v4i64:
-; LMULMAX1-RV64: # %bb.0:
-; LMULMAX1-RV64-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; LMULMAX1-RV64-NEXT: vle64.v v8, (a0)
-; LMULMAX1-RV64-NEXT: addi a2, a1, 16
-; LMULMAX1-RV64-NEXT: vle64.v v9, (a2)
-; LMULMAX1-RV64-NEXT: addi a2, a0, 16
-; LMULMAX1-RV64-NEXT: vle64.v v10, (a2)
-; LMULMAX1-RV64-NEXT: vle64.v v11, (a1)
-; LMULMAX1-RV64-NEXT: vmaxu.vv v9, v10, v9
-; LMULMAX1-RV64-NEXT: vmaxu.vv v8, v8, v11
-; LMULMAX1-RV64-NEXT: vse64.v v8, (a0)
-; LMULMAX1-RV64-NEXT: vse64.v v9, (a2)
-; LMULMAX1-RV64-NEXT: ret
+; CHECK-LABEL: umax_v4i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; CHECK-NEXT: vle64.v v8, (a0)
+; CHECK-NEXT: vle64.v v10, (a1)
+; CHECK-NEXT: vmaxu.vv v8, v8, v10
+; CHECK-NEXT: vse64.v v8, (a0)
+; CHECK-NEXT: ret
%a = load <4 x i64>, ptr %x
%b = load <4 x i64>, ptr %y
%cc = icmp ugt <4 x i64> %a, %b
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-mask-buildvec.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-mask-buildvec.ll
index f86286a..c295fed 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-mask-buildvec.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-mask-buildvec.ll
@@ -1,12 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v -riscv-v-fixed-length-vector-lmul-max=1 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,CHECK-RV32,RV32-LMULMAX1
-; RUN: llc -mtriple=riscv64 -mattr=+v -riscv-v-fixed-length-vector-lmul-max=1 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,CHECK-RV64,RV64-LMULMAX1
-; RUN: llc -mtriple=riscv32 -mattr=+v -riscv-v-fixed-length-vector-lmul-max=2 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,CHECK-RV32,RV32-LMULMAX2
-; RUN: llc -mtriple=riscv64 -mattr=+v -riscv-v-fixed-length-vector-lmul-max=2 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,CHECK-RV64,RV64-LMULMAX2
-; RUN: llc -mtriple=riscv32 -mattr=+v -riscv-v-fixed-length-vector-lmul-max=4 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,CHECK-RV32,RV32-LMULMAX4
-; RUN: llc -mtriple=riscv64 -mattr=+v -riscv-v-fixed-length-vector-lmul-max=4 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,CHECK-RV64,RV64-LMULMAX4
-; RUN: llc -mtriple=riscv32 -mattr=+v -riscv-v-fixed-length-vector-lmul-max=8 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,CHECK-RV32,RV32-LMULMAX8
-; RUN: llc -mtriple=riscv64 -mattr=+v -riscv-v-fixed-length-vector-lmul-max=8 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,CHECK-RV64,RV64-LMULMAX8
+; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32
+; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64
; Test with ELEN limited
; RUN: llc -mtriple=riscv32 -mattr=+f,+zve32f,+zvl128b -verify-machineinstrs < %s | FileCheck %s --check-prefixes=ZVE32F
; RUN: llc -mtriple=riscv64 -mattr=+f,+zve32f,+zvl128b -verify-machineinstrs < %s | FileCheck %s --check-prefixes=ZVE32F
@@ -284,32 +278,34 @@ define <8 x i1> @buildvec_mask_nonconst_v8i1(i1 %x, i1 %y) {
define <8 x i1> @buildvec_mask_nonconst_v8i1_2(i1 %x, i1 %y, i1 %z, i1 %w) {
; CHECK-LABEL: buildvec_mask_nonconst_v8i1_2:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
+; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; CHECK-NEXT: vmv.v.x v8, a0
-; CHECK-NEXT: vslide1down.vx v8, v8, a0
-; CHECK-NEXT: li a4, 1
-; CHECK-NEXT: vslide1down.vx v8, v8, a4
-; CHECK-NEXT: vslide1down.vx v8, v8, a1
-; CHECK-NEXT: vslide1down.vx v8, v8, a0
+; CHECK-NEXT: vslide1down.vx v9, v8, a0
+; CHECK-NEXT: li a0, 1
+; CHECK-NEXT: vslide1down.vx v9, v9, a0
+; CHECK-NEXT: vslide1down.vx v9, v9, a1
; CHECK-NEXT: vslide1down.vx v8, v8, a3
; CHECK-NEXT: vslide1down.vx v8, v8, zero
+; CHECK-NEXT: vmv.v.i v0, 15
; CHECK-NEXT: vslide1down.vx v8, v8, a2
+; CHECK-NEXT: vslidedown.vi v8, v9, 4, v0.t
; CHECK-NEXT: vand.vi v8, v8, 1
; CHECK-NEXT: vmsne.vi v0, v8, 0
; CHECK-NEXT: ret
;
; ZVE32F-LABEL: buildvec_mask_nonconst_v8i1_2:
; ZVE32F: # %bb.0:
-; ZVE32F-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
+; ZVE32F-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; ZVE32F-NEXT: vmv.v.x v8, a0
-; ZVE32F-NEXT: vslide1down.vx v8, v8, a0
-; ZVE32F-NEXT: li a4, 1
-; ZVE32F-NEXT: vslide1down.vx v8, v8, a4
-; ZVE32F-NEXT: vslide1down.vx v8, v8, a1
-; ZVE32F-NEXT: vslide1down.vx v8, v8, a0
+; ZVE32F-NEXT: vslide1down.vx v9, v8, a0
+; ZVE32F-NEXT: li a0, 1
+; ZVE32F-NEXT: vslide1down.vx v9, v9, a0
+; ZVE32F-NEXT: vslide1down.vx v9, v9, a1
; ZVE32F-NEXT: vslide1down.vx v8, v8, a3
; ZVE32F-NEXT: vslide1down.vx v8, v8, zero
+; ZVE32F-NEXT: vmv.v.i v0, 15
; ZVE32F-NEXT: vslide1down.vx v8, v8, a2
+; ZVE32F-NEXT: vslidedown.vi v8, v9, 4, v0.t
; ZVE32F-NEXT: vand.vi v8, v8, 1
; ZVE32F-NEXT: vmsne.vi v0, v8, 0
; ZVE32F-NEXT: ret
@@ -327,32 +323,34 @@ define <8 x i1> @buildvec_mask_nonconst_v8i1_2(i1 %x, i1 %y, i1 %z, i1 %w) {
define <8 x i1> @buildvec_mask_optsize_nonconst_v8i1_2(i1 %x, i1 %y, i1 %z, i1 %w) optsize {
; CHECK-LABEL: buildvec_mask_optsize_nonconst_v8i1_2:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
+; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; CHECK-NEXT: vmv.v.x v8, a0
-; CHECK-NEXT: vslide1down.vx v8, v8, a0
-; CHECK-NEXT: li a4, 1
-; CHECK-NEXT: vslide1down.vx v8, v8, a4
-; CHECK-NEXT: vslide1down.vx v8, v8, a1
-; CHECK-NEXT: vslide1down.vx v8, v8, a0
+; CHECK-NEXT: vslide1down.vx v9, v8, a0
+; CHECK-NEXT: li a0, 1
+; CHECK-NEXT: vslide1down.vx v9, v9, a0
+; CHECK-NEXT: vslide1down.vx v9, v9, a1
; CHECK-NEXT: vslide1down.vx v8, v8, a3
; CHECK-NEXT: vslide1down.vx v8, v8, zero
+; CHECK-NEXT: vmv.v.i v0, 15
; CHECK-NEXT: vslide1down.vx v8, v8, a2
+; CHECK-NEXT: vslidedown.vi v8, v9, 4, v0.t
; CHECK-NEXT: vand.vi v8, v8, 1
; CHECK-NEXT: vmsne.vi v0, v8, 0
; CHECK-NEXT: ret
;
; ZVE32F-LABEL: buildvec_mask_optsize_nonconst_v8i1_2:
; ZVE32F: # %bb.0:
-; ZVE32F-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
+; ZVE32F-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; ZVE32F-NEXT: vmv.v.x v8, a0
-; ZVE32F-NEXT: vslide1down.vx v8, v8, a0
-; ZVE32F-NEXT: li a4, 1
-; ZVE32F-NEXT: vslide1down.vx v8, v8, a4
-; ZVE32F-NEXT: vslide1down.vx v8, v8, a1
-; ZVE32F-NEXT: vslide1down.vx v8, v8, a0
+; ZVE32F-NEXT: vslide1down.vx v9, v8, a0
+; ZVE32F-NEXT: li a0, 1
+; ZVE32F-NEXT: vslide1down.vx v9, v9, a0
+; ZVE32F-NEXT: vslide1down.vx v9, v9, a1
; ZVE32F-NEXT: vslide1down.vx v8, v8, a3
; ZVE32F-NEXT: vslide1down.vx v8, v8, zero
+; ZVE32F-NEXT: vmv.v.i v0, 15
; ZVE32F-NEXT: vslide1down.vx v8, v8, a2
+; ZVE32F-NEXT: vslidedown.vi v8, v9, 4, v0.t
; ZVE32F-NEXT: vand.vi v8, v8, 1
; ZVE32F-NEXT: vmsne.vi v0, v8, 0
; ZVE32F-NEXT: ret
@@ -370,30 +368,32 @@ define <8 x i1> @buildvec_mask_optsize_nonconst_v8i1_2(i1 %x, i1 %y, i1 %z, i1 %
define <8 x i1> @buildvec_mask_optsize_nonconst_v8i1(i1 %x, i1 %y) optsize {
; CHECK-LABEL: buildvec_mask_optsize_nonconst_v8i1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
+; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; CHECK-NEXT: vmv.v.x v8, a0
-; CHECK-NEXT: vslide1down.vx v8, v8, a0
-; CHECK-NEXT: vslide1down.vx v8, v8, a1
-; CHECK-NEXT: vslide1down.vx v8, v8, a1
-; CHECK-NEXT: vslide1down.vx v8, v8, a0
+; CHECK-NEXT: vslide1down.vx v9, v8, a0
+; CHECK-NEXT: vslide1down.vx v9, v9, a1
+; CHECK-NEXT: vslide1down.vx v9, v9, a1
; CHECK-NEXT: vslide1down.vx v8, v8, a1
; CHECK-NEXT: vslide1down.vx v8, v8, a1
+; CHECK-NEXT: vmv.v.i v0, 15
; CHECK-NEXT: vslide1down.vx v8, v8, a1
+; CHECK-NEXT: vslidedown.vi v8, v9, 4, v0.t
; CHECK-NEXT: vand.vi v8, v8, 1
; CHECK-NEXT: vmsne.vi v0, v8, 0
; CHECK-NEXT: ret
;
; ZVE32F-LABEL: buildvec_mask_optsize_nonconst_v8i1:
; ZVE32F: # %bb.0:
-; ZVE32F-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
+; ZVE32F-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; ZVE32F-NEXT: vmv.v.x v8, a0
-; ZVE32F-NEXT: vslide1down.vx v8, v8, a0
-; ZVE32F-NEXT: vslide1down.vx v8, v8, a1
-; ZVE32F-NEXT: vslide1down.vx v8, v8, a1
-; ZVE32F-NEXT: vslide1down.vx v8, v8, a0
+; ZVE32F-NEXT: vslide1down.vx v9, v8, a0
+; ZVE32F-NEXT: vslide1down.vx v9, v9, a1
+; ZVE32F-NEXT: vslide1down.vx v9, v9, a1
; ZVE32F-NEXT: vslide1down.vx v8, v8, a1
; ZVE32F-NEXT: vslide1down.vx v8, v8, a1
+; ZVE32F-NEXT: vmv.v.i v0, 15
; ZVE32F-NEXT: vslide1down.vx v8, v8, a1
+; ZVE32F-NEXT: vslidedown.vi v8, v9, 4, v0.t
; ZVE32F-NEXT: vand.vi v8, v8, 1
; ZVE32F-NEXT: vmsne.vi v0, v8, 0
; ZVE32F-NEXT: ret
@@ -462,73 +462,13 @@ define <16 x i1> @buildvec_mask_v16i1_undefs() {
}
define <32 x i1> @buildvec_mask_v32i1() {
-; RV32-LMULMAX1-LABEL: buildvec_mask_v32i1:
-; RV32-LMULMAX1: # %bb.0:
-; RV32-LMULMAX1-NEXT: li a0, 1776
-; RV32-LMULMAX1-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV32-LMULMAX1-NEXT: vmv.s.x v0, a0
-; RV32-LMULMAX1-NEXT: lui a0, 11
-; RV32-LMULMAX1-NEXT: addi a0, a0, 1718
-; RV32-LMULMAX1-NEXT: vmv.s.x v8, a0
-; RV32-LMULMAX1-NEXT: ret
-;
-; RV64-LMULMAX1-LABEL: buildvec_mask_v32i1:
-; RV64-LMULMAX1: # %bb.0:
-; RV64-LMULMAX1-NEXT: li a0, 1776
-; RV64-LMULMAX1-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV64-LMULMAX1-NEXT: vmv.s.x v0, a0
-; RV64-LMULMAX1-NEXT: lui a0, 11
-; RV64-LMULMAX1-NEXT: addi a0, a0, 1718
-; RV64-LMULMAX1-NEXT: vmv.s.x v8, a0
-; RV64-LMULMAX1-NEXT: ret
-;
-; RV32-LMULMAX2-LABEL: buildvec_mask_v32i1:
-; RV32-LMULMAX2: # %bb.0:
-; RV32-LMULMAX2-NEXT: lui a0, 748384
-; RV32-LMULMAX2-NEXT: addi a0, a0, 1776
-; RV32-LMULMAX2-NEXT: vsetivli zero, 1, e32, m1, ta, ma
-; RV32-LMULMAX2-NEXT: vmv.s.x v0, a0
-; RV32-LMULMAX2-NEXT: ret
-;
-; RV64-LMULMAX2-LABEL: buildvec_mask_v32i1:
-; RV64-LMULMAX2: # %bb.0:
-; RV64-LMULMAX2-NEXT: lui a0, 748384
-; RV64-LMULMAX2-NEXT: addi a0, a0, 1776
-; RV64-LMULMAX2-NEXT: vsetivli zero, 1, e32, m1, ta, ma
-; RV64-LMULMAX2-NEXT: vmv.s.x v0, a0
-; RV64-LMULMAX2-NEXT: ret
-;
-; RV32-LMULMAX4-LABEL: buildvec_mask_v32i1:
-; RV32-LMULMAX4: # %bb.0:
-; RV32-LMULMAX4-NEXT: lui a0, 748384
-; RV32-LMULMAX4-NEXT: addi a0, a0, 1776
-; RV32-LMULMAX4-NEXT: vsetivli zero, 1, e32, m1, ta, ma
-; RV32-LMULMAX4-NEXT: vmv.s.x v0, a0
-; RV32-LMULMAX4-NEXT: ret
-;
-; RV64-LMULMAX4-LABEL: buildvec_mask_v32i1:
-; RV64-LMULMAX4: # %bb.0:
-; RV64-LMULMAX4-NEXT: lui a0, 748384
-; RV64-LMULMAX4-NEXT: addi a0, a0, 1776
-; RV64-LMULMAX4-NEXT: vsetivli zero, 1, e32, m1, ta, ma
-; RV64-LMULMAX4-NEXT: vmv.s.x v0, a0
-; RV64-LMULMAX4-NEXT: ret
-;
-; RV32-LMULMAX8-LABEL: buildvec_mask_v32i1:
-; RV32-LMULMAX8: # %bb.0:
-; RV32-LMULMAX8-NEXT: lui a0, 748384
-; RV32-LMULMAX8-NEXT: addi a0, a0, 1776
-; RV32-LMULMAX8-NEXT: vsetivli zero, 1, e32, m1, ta, ma
-; RV32-LMULMAX8-NEXT: vmv.s.x v0, a0
-; RV32-LMULMAX8-NEXT: ret
-;
-; RV64-LMULMAX8-LABEL: buildvec_mask_v32i1:
-; RV64-LMULMAX8: # %bb.0:
-; RV64-LMULMAX8-NEXT: lui a0, 748384
-; RV64-LMULMAX8-NEXT: addi a0, a0, 1776
-; RV64-LMULMAX8-NEXT: vsetivli zero, 1, e32, m1, ta, ma
-; RV64-LMULMAX8-NEXT: vmv.s.x v0, a0
-; RV64-LMULMAX8-NEXT: ret
+; CHECK-LABEL: buildvec_mask_v32i1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: lui a0, 748384
+; CHECK-NEXT: addi a0, a0, 1776
+; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma
+; CHECK-NEXT: vmv.s.x v0, a0
+; CHECK-NEXT: ret
;
; ZVE32F-LABEL: buildvec_mask_v32i1:
; ZVE32F: # %bb.0:
@@ -541,95 +481,25 @@ define <32 x i1> @buildvec_mask_v32i1() {
}
define <64 x i1> @buildvec_mask_v64i1() {
-; RV32-LMULMAX1-LABEL: buildvec_mask_v64i1:
-; RV32-LMULMAX1: # %bb.0:
-; RV32-LMULMAX1-NEXT: li a0, 1776
-; RV32-LMULMAX1-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV32-LMULMAX1-NEXT: vmv.s.x v0, a0
-; RV32-LMULMAX1-NEXT: lui a0, 4
-; RV32-LMULMAX1-NEXT: addi a0, a0, -1793
-; RV32-LMULMAX1-NEXT: vmv.s.x v9, a0
-; RV32-LMULMAX1-NEXT: lui a0, 11
-; RV32-LMULMAX1-NEXT: addi a0, a0, 1718
-; RV32-LMULMAX1-NEXT: vmv.s.x v8, a0
-; RV32-LMULMAX1-NEXT: vmv.v.v v10, v8
-; RV32-LMULMAX1-NEXT: ret
-;
-; RV64-LMULMAX1-LABEL: buildvec_mask_v64i1:
-; RV64-LMULMAX1: # %bb.0:
-; RV64-LMULMAX1-NEXT: li a0, 1776
-; RV64-LMULMAX1-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV64-LMULMAX1-NEXT: vmv.s.x v0, a0
-; RV64-LMULMAX1-NEXT: lui a0, 4
-; RV64-LMULMAX1-NEXT: addi a0, a0, -1793
-; RV64-LMULMAX1-NEXT: vmv.s.x v9, a0
-; RV64-LMULMAX1-NEXT: lui a0, 11
-; RV64-LMULMAX1-NEXT: addi a0, a0, 1718
-; RV64-LMULMAX1-NEXT: vmv.s.x v8, a0
-; RV64-LMULMAX1-NEXT: vmv.v.v v10, v8
-; RV64-LMULMAX1-NEXT: ret
-;
-; RV32-LMULMAX2-LABEL: buildvec_mask_v64i1:
-; RV32-LMULMAX2: # %bb.0:
-; RV32-LMULMAX2-NEXT: lui a0, 748384
-; RV32-LMULMAX2-NEXT: addi a0, a0, 1776
-; RV32-LMULMAX2-NEXT: vsetivli zero, 1, e32, m1, ta, ma
-; RV32-LMULMAX2-NEXT: vmv.s.x v0, a0
-; RV32-LMULMAX2-NEXT: lui a0, 748388
-; RV32-LMULMAX2-NEXT: addi a0, a0, -1793
-; RV32-LMULMAX2-NEXT: vmv.s.x v8, a0
-; RV32-LMULMAX2-NEXT: ret
-;
-; RV64-LMULMAX2-LABEL: buildvec_mask_v64i1:
-; RV64-LMULMAX2: # %bb.0:
-; RV64-LMULMAX2-NEXT: lui a0, 748384
-; RV64-LMULMAX2-NEXT: addi a0, a0, 1776
-; RV64-LMULMAX2-NEXT: vsetivli zero, 1, e32, m1, ta, ma
-; RV64-LMULMAX2-NEXT: vmv.s.x v0, a0
-; RV64-LMULMAX2-NEXT: lui a0, 748388
-; RV64-LMULMAX2-NEXT: addi a0, a0, -1793
-; RV64-LMULMAX2-NEXT: vmv.s.x v8, a0
-; RV64-LMULMAX2-NEXT: ret
-;
-; RV32-LMULMAX4-LABEL: buildvec_mask_v64i1:
-; RV32-LMULMAX4: # %bb.0:
-; RV32-LMULMAX4-NEXT: lui a0, 748388
-; RV32-LMULMAX4-NEXT: addi a0, a0, -1793
-; RV32-LMULMAX4-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
-; RV32-LMULMAX4-NEXT: vmv.v.x v0, a0
-; RV32-LMULMAX4-NEXT: lui a0, 748384
-; RV32-LMULMAX4-NEXT: addi a0, a0, 1776
-; RV32-LMULMAX4-NEXT: vsetvli zero, zero, e32, mf2, tu, ma
-; RV32-LMULMAX4-NEXT: vmv.s.x v0, a0
-; RV32-LMULMAX4-NEXT: ret
-;
-; RV64-LMULMAX4-LABEL: buildvec_mask_v64i1:
-; RV64-LMULMAX4: # %bb.0:
-; RV64-LMULMAX4-NEXT: lui a0, %hi(.LCPI19_0)
-; RV64-LMULMAX4-NEXT: addi a0, a0, %lo(.LCPI19_0)
-; RV64-LMULMAX4-NEXT: vsetivli zero, 1, e64, m1, ta, ma
-; RV64-LMULMAX4-NEXT: vle64.v v0, (a0)
-; RV64-LMULMAX4-NEXT: ret
-;
-; RV32-LMULMAX8-LABEL: buildvec_mask_v64i1:
-; RV32-LMULMAX8: # %bb.0:
-; RV32-LMULMAX8-NEXT: lui a0, 748388
-; RV32-LMULMAX8-NEXT: addi a0, a0, -1793
-; RV32-LMULMAX8-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
-; RV32-LMULMAX8-NEXT: vmv.v.x v0, a0
-; RV32-LMULMAX8-NEXT: lui a0, 748384
-; RV32-LMULMAX8-NEXT: addi a0, a0, 1776
-; RV32-LMULMAX8-NEXT: vsetvli zero, zero, e32, mf2, tu, ma
-; RV32-LMULMAX8-NEXT: vmv.s.x v0, a0
-; RV32-LMULMAX8-NEXT: ret
-;
-; RV64-LMULMAX8-LABEL: buildvec_mask_v64i1:
-; RV64-LMULMAX8: # %bb.0:
-; RV64-LMULMAX8-NEXT: lui a0, %hi(.LCPI19_0)
-; RV64-LMULMAX8-NEXT: addi a0, a0, %lo(.LCPI19_0)
-; RV64-LMULMAX8-NEXT: vsetivli zero, 1, e64, m1, ta, ma
-; RV64-LMULMAX8-NEXT: vle64.v v0, (a0)
-; RV64-LMULMAX8-NEXT: ret
+; RV32-LABEL: buildvec_mask_v64i1:
+; RV32: # %bb.0:
+; RV32-NEXT: lui a0, 748388
+; RV32-NEXT: addi a0, a0, -1793
+; RV32-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
+; RV32-NEXT: vmv.v.x v0, a0
+; RV32-NEXT: lui a0, 748384
+; RV32-NEXT: addi a0, a0, 1776
+; RV32-NEXT: vsetvli zero, zero, e32, mf2, tu, ma
+; RV32-NEXT: vmv.s.x v0, a0
+; RV32-NEXT: ret
+;
+; RV64-LABEL: buildvec_mask_v64i1:
+; RV64: # %bb.0:
+; RV64-NEXT: lui a0, %hi(.LCPI19_0)
+; RV64-NEXT: addi a0, a0, %lo(.LCPI19_0)
+; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma
+; RV64-NEXT: vle64.v v0, (a0)
+; RV64-NEXT: ret
;
; ZVE32F-LABEL: buildvec_mask_v64i1:
; ZVE32F: # %bb.0:
@@ -646,134 +516,25 @@ define <64 x i1> @buildvec_mask_v64i1() {
}
define <128 x i1> @buildvec_mask_v128i1() {
-; RV32-LMULMAX1-LABEL: buildvec_mask_v128i1:
-; RV32-LMULMAX1: # %bb.0:
-; RV32-LMULMAX1-NEXT: li a0, 1776
-; RV32-LMULMAX1-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV32-LMULMAX1-NEXT: vmv.s.x v0, a0
-; RV32-LMULMAX1-NEXT: lui a0, 11
-; RV32-LMULMAX1-NEXT: addi a0, a0, 1718
-; RV32-LMULMAX1-NEXT: vmv.s.x v8, a0
-; RV32-LMULMAX1-NEXT: lui a0, 8
-; RV32-LMULMAX1-NEXT: addi a0, a0, 1718
-; RV32-LMULMAX1-NEXT: vmv.s.x v12, a0
-; RV32-LMULMAX1-NEXT: lui a0, 4
-; RV32-LMULMAX1-NEXT: addi a0, a0, -1793
-; RV32-LMULMAX1-NEXT: vmv.s.x v9, a0
-; RV32-LMULMAX1-NEXT: lui a0, 14
-; RV32-LMULMAX1-NEXT: addi a0, a0, 1722
-; RV32-LMULMAX1-NEXT: vmv.s.x v14, a0
-; RV32-LMULMAX1-NEXT: vmv.v.v v10, v8
-; RV32-LMULMAX1-NEXT: vmv.v.v v11, v0
-; RV32-LMULMAX1-NEXT: vmv.v.v v13, v9
-; RV32-LMULMAX1-NEXT: ret
-;
-; RV64-LMULMAX1-LABEL: buildvec_mask_v128i1:
-; RV64-LMULMAX1: # %bb.0:
-; RV64-LMULMAX1-NEXT: li a0, 1776
-; RV64-LMULMAX1-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV64-LMULMAX1-NEXT: vmv.s.x v0, a0
-; RV64-LMULMAX1-NEXT: lui a0, 11
-; RV64-LMULMAX1-NEXT: addi a0, a0, 1718
-; RV64-LMULMAX1-NEXT: vmv.s.x v8, a0
-; RV64-LMULMAX1-NEXT: lui a0, 8
-; RV64-LMULMAX1-NEXT: addi a0, a0, 1718
-; RV64-LMULMAX1-NEXT: vmv.s.x v12, a0
-; RV64-LMULMAX1-NEXT: lui a0, 4
-; RV64-LMULMAX1-NEXT: addi a0, a0, -1793
-; RV64-LMULMAX1-NEXT: vmv.s.x v9, a0
-; RV64-LMULMAX1-NEXT: lui a0, 14
-; RV64-LMULMAX1-NEXT: addi a0, a0, 1722
-; RV64-LMULMAX1-NEXT: vmv.s.x v14, a0
-; RV64-LMULMAX1-NEXT: vmv.v.v v10, v8
-; RV64-LMULMAX1-NEXT: vmv.v.v v11, v0
-; RV64-LMULMAX1-NEXT: vmv.v.v v13, v9
-; RV64-LMULMAX1-NEXT: ret
-;
-; RV32-LMULMAX2-LABEL: buildvec_mask_v128i1:
-; RV32-LMULMAX2: # %bb.0:
-; RV32-LMULMAX2-NEXT: lui a0, 748384
-; RV32-LMULMAX2-NEXT: addi a0, a0, 1776
-; RV32-LMULMAX2-NEXT: vsetivli zero, 1, e32, m1, ta, ma
-; RV32-LMULMAX2-NEXT: vmv.s.x v0, a0
-; RV32-LMULMAX2-NEXT: lui a0, 748388
-; RV32-LMULMAX2-NEXT: addi a0, a0, -1793
-; RV32-LMULMAX2-NEXT: vmv.s.x v8, a0
-; RV32-LMULMAX2-NEXT: lui a0, 551776
-; RV32-LMULMAX2-NEXT: addi a0, a0, 1776
-; RV32-LMULMAX2-NEXT: vmv.s.x v9, a0
-; RV32-LMULMAX2-NEXT: lui a0, 945060
-; RV32-LMULMAX2-NEXT: addi a0, a0, -1793
-; RV32-LMULMAX2-NEXT: vmv.s.x v10, a0
-; RV32-LMULMAX2-NEXT: ret
-;
-; RV64-LMULMAX2-LABEL: buildvec_mask_v128i1:
-; RV64-LMULMAX2: # %bb.0:
-; RV64-LMULMAX2-NEXT: lui a0, 748384
-; RV64-LMULMAX2-NEXT: addi a0, a0, 1776
-; RV64-LMULMAX2-NEXT: vsetivli zero, 1, e32, m1, ta, ma
-; RV64-LMULMAX2-NEXT: vmv.s.x v0, a0
-; RV64-LMULMAX2-NEXT: lui a0, 748388
-; RV64-LMULMAX2-NEXT: addi a0, a0, -1793
-; RV64-LMULMAX2-NEXT: vmv.s.x v8, a0
-; RV64-LMULMAX2-NEXT: lui a0, 551776
-; RV64-LMULMAX2-NEXT: addi a0, a0, 1776
-; RV64-LMULMAX2-NEXT: vmv.s.x v9, a0
-; RV64-LMULMAX2-NEXT: lui a0, 945060
-; RV64-LMULMAX2-NEXT: addi a0, a0, -1793
-; RV64-LMULMAX2-NEXT: vmv.s.x v10, a0
-; RV64-LMULMAX2-NEXT: ret
-;
-; RV32-LMULMAX4-LABEL: buildvec_mask_v128i1:
-; RV32-LMULMAX4: # %bb.0:
-; RV32-LMULMAX4-NEXT: lui a0, 748388
-; RV32-LMULMAX4-NEXT: addi a0, a0, -1793
-; RV32-LMULMAX4-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
-; RV32-LMULMAX4-NEXT: vmv.v.x v0, a0
-; RV32-LMULMAX4-NEXT: lui a0, 748384
-; RV32-LMULMAX4-NEXT: addi a0, a0, 1776
-; RV32-LMULMAX4-NEXT: vsetvli zero, zero, e32, mf2, tu, ma
-; RV32-LMULMAX4-NEXT: vmv.s.x v0, a0
-; RV32-LMULMAX4-NEXT: lui a0, 945060
-; RV32-LMULMAX4-NEXT: addi a0, a0, -1793
-; RV32-LMULMAX4-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
-; RV32-LMULMAX4-NEXT: vmv.v.x v8, a0
-; RV32-LMULMAX4-NEXT: lui a0, 551776
-; RV32-LMULMAX4-NEXT: addi a0, a0, 1776
-; RV32-LMULMAX4-NEXT: vsetvli zero, zero, e32, mf2, tu, ma
-; RV32-LMULMAX4-NEXT: vmv.s.x v8, a0
-; RV32-LMULMAX4-NEXT: ret
-;
-; RV64-LMULMAX4-LABEL: buildvec_mask_v128i1:
-; RV64-LMULMAX4: # %bb.0:
-; RV64-LMULMAX4-NEXT: lui a0, %hi(.LCPI20_0)
-; RV64-LMULMAX4-NEXT: addi a0, a0, %lo(.LCPI20_0)
-; RV64-LMULMAX4-NEXT: vsetivli zero, 1, e64, m1, ta, ma
-; RV64-LMULMAX4-NEXT: vle64.v v0, (a0)
-; RV64-LMULMAX4-NEXT: lui a0, %hi(.LCPI20_1)
-; RV64-LMULMAX4-NEXT: addi a0, a0, %lo(.LCPI20_1)
-; RV64-LMULMAX4-NEXT: vle64.v v8, (a0)
-; RV64-LMULMAX4-NEXT: ret
-;
-; RV32-LMULMAX8-LABEL: buildvec_mask_v128i1:
-; RV32-LMULMAX8: # %bb.0:
-; RV32-LMULMAX8-NEXT: lui a0, %hi(.LCPI20_0)
-; RV32-LMULMAX8-NEXT: addi a0, a0, %lo(.LCPI20_0)
-; RV32-LMULMAX8-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; RV32-LMULMAX8-NEXT: vle32.v v0, (a0)
-; RV32-LMULMAX8-NEXT: ret
-;
-; RV64-LMULMAX8-LABEL: buildvec_mask_v128i1:
-; RV64-LMULMAX8: # %bb.0:
-; RV64-LMULMAX8-NEXT: lui a0, %hi(.LCPI20_0)
-; RV64-LMULMAX8-NEXT: addi a0, a0, %lo(.LCPI20_0)
-; RV64-LMULMAX8-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; RV64-LMULMAX8-NEXT: vlse64.v v0, (a0), zero
-; RV64-LMULMAX8-NEXT: lui a0, %hi(.LCPI20_1)
-; RV64-LMULMAX8-NEXT: ld a0, %lo(.LCPI20_1)(a0)
-; RV64-LMULMAX8-NEXT: vsetvli zero, zero, e64, m1, tu, ma
-; RV64-LMULMAX8-NEXT: vmv.s.x v0, a0
-; RV64-LMULMAX8-NEXT: ret
+; RV32-LABEL: buildvec_mask_v128i1:
+; RV32: # %bb.0:
+; RV32-NEXT: lui a0, %hi(.LCPI20_0)
+; RV32-NEXT: addi a0, a0, %lo(.LCPI20_0)
+; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; RV32-NEXT: vle32.v v0, (a0)
+; RV32-NEXT: ret
+;
+; RV64-LABEL: buildvec_mask_v128i1:
+; RV64: # %bb.0:
+; RV64-NEXT: lui a0, %hi(.LCPI20_0)
+; RV64-NEXT: addi a0, a0, %lo(.LCPI20_0)
+; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; RV64-NEXT: vlse64.v v0, (a0), zero
+; RV64-NEXT: lui a0, %hi(.LCPI20_1)
+; RV64-NEXT: ld a0, %lo(.LCPI20_1)(a0)
+; RV64-NEXT: vsetvli zero, zero, e64, m1, tu, ma
+; RV64-NEXT: vmv.s.x v0, a0
+; RV64-NEXT: ret
;
; ZVE32F-LABEL: buildvec_mask_v128i1:
; ZVE32F: # %bb.0:
@@ -786,124 +547,14 @@ define <128 x i1> @buildvec_mask_v128i1() {
}
define <128 x i1> @buildvec_mask_optsize_v128i1() optsize {
-; RV32-LMULMAX1-LABEL: buildvec_mask_optsize_v128i1:
-; RV32-LMULMAX1: # %bb.0:
-; RV32-LMULMAX1-NEXT: li a0, 1776
-; RV32-LMULMAX1-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV32-LMULMAX1-NEXT: vmv.s.x v0, a0
-; RV32-LMULMAX1-NEXT: lui a0, 11
-; RV32-LMULMAX1-NEXT: addi a0, a0, 1718
-; RV32-LMULMAX1-NEXT: vmv.s.x v8, a0
-; RV32-LMULMAX1-NEXT: lui a0, 8
-; RV32-LMULMAX1-NEXT: addi a0, a0, 1718
-; RV32-LMULMAX1-NEXT: vmv.s.x v12, a0
-; RV32-LMULMAX1-NEXT: lui a0, 4
-; RV32-LMULMAX1-NEXT: addi a0, a0, -1793
-; RV32-LMULMAX1-NEXT: vmv.s.x v9, a0
-; RV32-LMULMAX1-NEXT: lui a0, 14
-; RV32-LMULMAX1-NEXT: addi a0, a0, 1722
-; RV32-LMULMAX1-NEXT: vmv.s.x v14, a0
-; RV32-LMULMAX1-NEXT: vmv.v.v v10, v8
-; RV32-LMULMAX1-NEXT: vmv.v.v v11, v0
-; RV32-LMULMAX1-NEXT: vmv.v.v v13, v9
-; RV32-LMULMAX1-NEXT: ret
-;
-; RV64-LMULMAX1-LABEL: buildvec_mask_optsize_v128i1:
-; RV64-LMULMAX1: # %bb.0:
-; RV64-LMULMAX1-NEXT: li a0, 1776
-; RV64-LMULMAX1-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV64-LMULMAX1-NEXT: vmv.s.x v0, a0
-; RV64-LMULMAX1-NEXT: lui a0, 11
-; RV64-LMULMAX1-NEXT: addi a0, a0, 1718
-; RV64-LMULMAX1-NEXT: vmv.s.x v8, a0
-; RV64-LMULMAX1-NEXT: lui a0, 8
-; RV64-LMULMAX1-NEXT: addi a0, a0, 1718
-; RV64-LMULMAX1-NEXT: vmv.s.x v12, a0
-; RV64-LMULMAX1-NEXT: lui a0, 4
-; RV64-LMULMAX1-NEXT: addi a0, a0, -1793
-; RV64-LMULMAX1-NEXT: vmv.s.x v9, a0
-; RV64-LMULMAX1-NEXT: lui a0, 14
-; RV64-LMULMAX1-NEXT: addi a0, a0, 1722
-; RV64-LMULMAX1-NEXT: vmv.s.x v14, a0
-; RV64-LMULMAX1-NEXT: vmv.v.v v10, v8
-; RV64-LMULMAX1-NEXT: vmv.v.v v11, v0
-; RV64-LMULMAX1-NEXT: vmv.v.v v13, v9
-; RV64-LMULMAX1-NEXT: ret
-;
-; RV32-LMULMAX2-LABEL: buildvec_mask_optsize_v128i1:
-; RV32-LMULMAX2: # %bb.0:
-; RV32-LMULMAX2-NEXT: lui a0, 748384
-; RV32-LMULMAX2-NEXT: addi a0, a0, 1776
-; RV32-LMULMAX2-NEXT: vsetivli zero, 1, e32, m1, ta, ma
-; RV32-LMULMAX2-NEXT: vmv.s.x v0, a0
-; RV32-LMULMAX2-NEXT: lui a0, 748388
-; RV32-LMULMAX2-NEXT: addi a0, a0, -1793
-; RV32-LMULMAX2-NEXT: vmv.s.x v8, a0
-; RV32-LMULMAX2-NEXT: lui a0, 551776
-; RV32-LMULMAX2-NEXT: addi a0, a0, 1776
-; RV32-LMULMAX2-NEXT: vmv.s.x v9, a0
-; RV32-LMULMAX2-NEXT: lui a0, 945060
-; RV32-LMULMAX2-NEXT: addi a0, a0, -1793
-; RV32-LMULMAX2-NEXT: vmv.s.x v10, a0
-; RV32-LMULMAX2-NEXT: ret
-;
-; RV64-LMULMAX2-LABEL: buildvec_mask_optsize_v128i1:
-; RV64-LMULMAX2: # %bb.0:
-; RV64-LMULMAX2-NEXT: lui a0, 748384
-; RV64-LMULMAX2-NEXT: addi a0, a0, 1776
-; RV64-LMULMAX2-NEXT: vsetivli zero, 1, e32, m1, ta, ma
-; RV64-LMULMAX2-NEXT: vmv.s.x v0, a0
-; RV64-LMULMAX2-NEXT: lui a0, 748388
-; RV64-LMULMAX2-NEXT: addi a0, a0, -1793
-; RV64-LMULMAX2-NEXT: vmv.s.x v8, a0
-; RV64-LMULMAX2-NEXT: lui a0, 551776
-; RV64-LMULMAX2-NEXT: addi a0, a0, 1776
-; RV64-LMULMAX2-NEXT: vmv.s.x v9, a0
-; RV64-LMULMAX2-NEXT: lui a0, 945060
-; RV64-LMULMAX2-NEXT: addi a0, a0, -1793
-; RV64-LMULMAX2-NEXT: vmv.s.x v10, a0
-; RV64-LMULMAX2-NEXT: ret
-;
-; RV32-LMULMAX4-LABEL: buildvec_mask_optsize_v128i1:
-; RV32-LMULMAX4: # %bb.0:
-; RV32-LMULMAX4-NEXT: lui a0, %hi(.LCPI21_0)
-; RV32-LMULMAX4-NEXT: addi a0, a0, %lo(.LCPI21_0)
-; RV32-LMULMAX4-NEXT: li a1, 64
-; RV32-LMULMAX4-NEXT: vsetvli zero, a1, e8, m4, ta, ma
-; RV32-LMULMAX4-NEXT: vlm.v v0, (a0)
-; RV32-LMULMAX4-NEXT: lui a0, %hi(.LCPI21_1)
-; RV32-LMULMAX4-NEXT: addi a0, a0, %lo(.LCPI21_1)
-; RV32-LMULMAX4-NEXT: vlm.v v8, (a0)
-; RV32-LMULMAX4-NEXT: ret
-;
-; RV64-LMULMAX4-LABEL: buildvec_mask_optsize_v128i1:
-; RV64-LMULMAX4: # %bb.0:
-; RV64-LMULMAX4-NEXT: lui a0, %hi(.LCPI21_0)
-; RV64-LMULMAX4-NEXT: addi a0, a0, %lo(.LCPI21_0)
-; RV64-LMULMAX4-NEXT: vsetivli zero, 1, e64, m1, ta, ma
-; RV64-LMULMAX4-NEXT: vle64.v v0, (a0)
-; RV64-LMULMAX4-NEXT: lui a0, %hi(.LCPI21_1)
-; RV64-LMULMAX4-NEXT: addi a0, a0, %lo(.LCPI21_1)
-; RV64-LMULMAX4-NEXT: vle64.v v8, (a0)
-; RV64-LMULMAX4-NEXT: ret
-;
-; RV32-LMULMAX8-LABEL: buildvec_mask_optsize_v128i1:
-; RV32-LMULMAX8: # %bb.0:
-; RV32-LMULMAX8-NEXT: lui a0, %hi(.LCPI21_0)
-; RV32-LMULMAX8-NEXT: addi a0, a0, %lo(.LCPI21_0)
-; RV32-LMULMAX8-NEXT: li a1, 128
-; RV32-LMULMAX8-NEXT: vsetvli zero, a1, e8, m8, ta, ma
-; RV32-LMULMAX8-NEXT: vlm.v v0, (a0)
-; RV32-LMULMAX8-NEXT: ret
-;
-; RV64-LMULMAX8-LABEL: buildvec_mask_optsize_v128i1:
-; RV64-LMULMAX8: # %bb.0:
-; RV64-LMULMAX8-NEXT: lui a0, %hi(.LCPI21_0)
-; RV64-LMULMAX8-NEXT: addi a0, a0, %lo(.LCPI21_0)
-; RV64-LMULMAX8-NEXT: li a1, 128
-; RV64-LMULMAX8-NEXT: vsetvli zero, a1, e8, m8, ta, ma
-; RV64-LMULMAX8-NEXT: vlm.v v0, (a0)
-; RV64-LMULMAX8-NEXT: ret
+; CHECK-LABEL: buildvec_mask_optsize_v128i1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: lui a0, %hi(.LCPI21_0)
+; CHECK-NEXT: addi a0, a0, %lo(.LCPI21_0)
+; CHECK-NEXT: li a1, 128
+; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
+; CHECK-NEXT: vlm.v v0, (a0)
+; CHECK-NEXT: ret
;
; ZVE32F-LABEL: buildvec_mask_optsize_v128i1:
; ZVE32F: # %bb.0:
@@ -915,6 +566,3 @@ define <128 x i1> @buildvec_mask_optsize_v128i1() optsize {
; ZVE32F-NEXT: ret
ret <128 x i1> <i1 0, i1 0, i1 0, i1 0, i1 1, i1 1, i1 1, i1 1, i1 0, i1 1, i1 1, i1 0, i1 0, i1 0, i1 0, i1 0, i1 0, i1 1, i1 1, i1 0, i1 1, i1 1, i1 0, i1 1, i1 0, i1 1, i1 1, i1 0, i1 1, i1 1, i1 0, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 0, i1 0, i1 0, i1 1, i1 1, i1 1, i1 0, i1 0, i1 0, i1 1, i1 1, i1 0, i1 1, i1 1, i1 0, i1 1, i1 0, i1 1, i1 1, i1 0, i1 1, i1 1, i1 0, i1 1, i1 0, i1 0, i1 0, i1 0, i1 1, i1 1, i1 1, i1 1, i1 0, i1 1, i1 1, i1 0, i1 0, i1 0, i1 0, i1 0, i1 0, i1 1, i1 1, i1 0, i1 1, i1 1, i1 0, i1 1, i1 0, i1 1, i1 1, i1 0, i1 0, i1 0, i1 0, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 0, i1 0, i1 0, i1 1, i1 1, i1 1, i1 0, i1 0, i1 0, i1 1, i1 0, i1 1, i1 1, i1 1, i1 0, i1 1, i1 0, i1 1, i1 1, i1 0, i1 0, i1 1, i1 1, i1 1>
}
-;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
-; CHECK-RV32: {{.*}}
-; CHECK-RV64: {{.*}}
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-mask-load-store.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-mask-load-store.ll
index 0d0d21d..b73408d 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-mask-load-store.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-mask-load-store.ll
@@ -1,8 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v -riscv-v-fixed-length-vector-lmul-max=2 -verify-machineinstrs < %s | FileCheck %s
-; RUN: llc -mtriple=riscv64 -mattr=+v -riscv-v-fixed-length-vector-lmul-max=2 -verify-machineinstrs < %s | FileCheck %s
-; RUN: llc -mtriple=riscv32 -mattr=+v -riscv-v-fixed-length-vector-lmul-max=1 -verify-machineinstrs < %s | FileCheck %s
-; RUN: llc -mtriple=riscv64 -mattr=+v -riscv-v-fixed-length-vector-lmul-max=1 -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s | FileCheck %s
define void @load_store_v1i1(ptr %x, ptr %y) {
; CHECK-LABEL: load_store_v1i1:
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-mask-splat.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-mask-splat.ll
index cb501c1..4f7b885 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-mask-splat.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-mask-splat.ll
@@ -1,8 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v -riscv-v-fixed-length-vector-lmul-max=2 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,LMULMAX2
-; RUN: llc -mtriple=riscv64 -mattr=+v -riscv-v-fixed-length-vector-lmul-max=2 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,LMULMAX2
-; RUN: llc -mtriple=riscv32 -mattr=+v -riscv-v-fixed-length-vector-lmul-max=1 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,LMULMAX1-RV32
-; RUN: llc -mtriple=riscv64 -mattr=+v -riscv-v-fixed-length-vector-lmul-max=1 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,LMULMAX1-RV64
+; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32
+; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64
define void @splat_ones_v1i1(ptr %x) {
; CHECK-LABEL: splat_ones_v1i1:
@@ -163,37 +161,15 @@ define void @splat_zeros_v32i1(ptr %x) {
}
define void @splat_v32i1(ptr %x, i1 %y) {
-; LMULMAX2-LABEL: splat_v32i1:
-; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: andi a1, a1, 1
-; LMULMAX2-NEXT: li a2, 32
-; LMULMAX2-NEXT: vsetvli zero, a2, e8, m2, ta, ma
-; LMULMAX2-NEXT: vmv.v.x v8, a1
-; LMULMAX2-NEXT: vmsne.vi v10, v8, 0
-; LMULMAX2-NEXT: vsm.v v10, (a0)
-; LMULMAX2-NEXT: ret
-;
-; LMULMAX1-RV32-LABEL: splat_v32i1:
-; LMULMAX1-RV32: # %bb.0:
-; LMULMAX1-RV32-NEXT: andi a1, a1, 1
-; LMULMAX1-RV32-NEXT: vsetivli zero, 16, e8, m1, ta, ma
-; LMULMAX1-RV32-NEXT: vmv.v.x v8, a1
-; LMULMAX1-RV32-NEXT: vmsne.vi v8, v8, 0
-; LMULMAX1-RV32-NEXT: addi a1, a0, 2
-; LMULMAX1-RV32-NEXT: vsm.v v8, (a1)
-; LMULMAX1-RV32-NEXT: vsm.v v8, (a0)
-; LMULMAX1-RV32-NEXT: ret
-;
-; LMULMAX1-RV64-LABEL: splat_v32i1:
-; LMULMAX1-RV64: # %bb.0:
-; LMULMAX1-RV64-NEXT: andi a1, a1, 1
-; LMULMAX1-RV64-NEXT: vsetivli zero, 16, e8, m1, ta, ma
-; LMULMAX1-RV64-NEXT: vmv.v.x v8, a1
-; LMULMAX1-RV64-NEXT: vmsne.vi v8, v8, 0
-; LMULMAX1-RV64-NEXT: addi a1, a0, 2
-; LMULMAX1-RV64-NEXT: vsm.v v8, (a1)
-; LMULMAX1-RV64-NEXT: vsm.v v8, (a0)
-; LMULMAX1-RV64-NEXT: ret
+; CHECK-LABEL: splat_v32i1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: andi a1, a1, 1
+; CHECK-NEXT: li a2, 32
+; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, ma
+; CHECK-NEXT: vmv.v.x v8, a1
+; CHECK-NEXT: vmsne.vi v10, v8, 0
+; CHECK-NEXT: vsm.v v10, (a0)
+; CHECK-NEXT: ret
%a = insertelement <32 x i1> poison, i1 %y, i32 0
%b = shufflevector <32 x i1> %a, <32 x i1> poison, <32 x i32> zeroinitializer
store <32 x i1> %b, ptr %x
@@ -201,70 +177,33 @@ define void @splat_v32i1(ptr %x, i1 %y) {
}
define void @splat_ones_v64i1(ptr %x) {
-; LMULMAX1-RV32-LABEL: splat_ones_v64i1:
-; LMULMAX1-RV32: # %bb.0:
-; LMULMAX1-RV32-NEXT: vsetivli zero, 16, e8, m1, ta, ma
-; LMULMAX1-RV32-NEXT: vmset.m v8
-; LMULMAX1-RV32-NEXT: vsm.v v8, (a0)
-; LMULMAX1-RV32-NEXT: addi a1, a0, 6
-; LMULMAX1-RV32-NEXT: vsm.v v8, (a1)
-; LMULMAX1-RV32-NEXT: addi a1, a0, 4
-; LMULMAX1-RV32-NEXT: vsm.v v8, (a1)
-; LMULMAX1-RV32-NEXT: addi a0, a0, 2
-; LMULMAX1-RV32-NEXT: vsm.v v8, (a0)
-; LMULMAX1-RV32-NEXT: ret
+; RV32-LABEL: splat_ones_v64i1:
+; RV32: # %bb.0:
+; RV32-NEXT: li a1, 64
+; RV32-NEXT: vsetvli zero, a1, e8, m4, ta, ma
+; RV32-NEXT: vmset.m v8
+; RV32-NEXT: vsm.v v8, (a0)
+; RV32-NEXT: ret
;
-; LMULMAX1-RV64-LABEL: splat_ones_v64i1:
-; LMULMAX1-RV64: # %bb.0:
-; LMULMAX1-RV64-NEXT: li a1, -1
-; LMULMAX1-RV64-NEXT: sd a1, 0(a0)
-; LMULMAX1-RV64-NEXT: ret
+; RV64-LABEL: splat_ones_v64i1:
+; RV64: # %bb.0:
+; RV64-NEXT: li a1, -1
+; RV64-NEXT: sd a1, 0(a0)
+; RV64-NEXT: ret
store <64 x i1> <i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1>, ptr %x
ret void
}
define void @splat_v64i1(ptr %x, i1 %y) {
-; LMULMAX2-LABEL: splat_v64i1:
-; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: andi a1, a1, 1
-; LMULMAX2-NEXT: li a2, 32
-; LMULMAX2-NEXT: vsetvli zero, a2, e8, m2, ta, ma
-; LMULMAX2-NEXT: vmv.v.x v8, a1
-; LMULMAX2-NEXT: vmsne.vi v10, v8, 0
-; LMULMAX2-NEXT: addi a1, a0, 4
-; LMULMAX2-NEXT: vsm.v v10, (a1)
-; LMULMAX2-NEXT: vsm.v v10, (a0)
-; LMULMAX2-NEXT: ret
-;
-; LMULMAX1-RV32-LABEL: splat_v64i1:
-; LMULMAX1-RV32: # %bb.0:
-; LMULMAX1-RV32-NEXT: andi a1, a1, 1
-; LMULMAX1-RV32-NEXT: vsetivli zero, 16, e8, m1, ta, ma
-; LMULMAX1-RV32-NEXT: vmv.v.x v8, a1
-; LMULMAX1-RV32-NEXT: vmsne.vi v8, v8, 0
-; LMULMAX1-RV32-NEXT: addi a1, a0, 6
-; LMULMAX1-RV32-NEXT: vsm.v v8, (a1)
-; LMULMAX1-RV32-NEXT: addi a1, a0, 4
-; LMULMAX1-RV32-NEXT: vsm.v v8, (a1)
-; LMULMAX1-RV32-NEXT: addi a1, a0, 2
-; LMULMAX1-RV32-NEXT: vsm.v v8, (a1)
-; LMULMAX1-RV32-NEXT: vsm.v v8, (a0)
-; LMULMAX1-RV32-NEXT: ret
-;
-; LMULMAX1-RV64-LABEL: splat_v64i1:
-; LMULMAX1-RV64: # %bb.0:
-; LMULMAX1-RV64-NEXT: andi a1, a1, 1
-; LMULMAX1-RV64-NEXT: vsetivli zero, 16, e8, m1, ta, ma
-; LMULMAX1-RV64-NEXT: vmv.v.x v8, a1
-; LMULMAX1-RV64-NEXT: vmsne.vi v8, v8, 0
-; LMULMAX1-RV64-NEXT: addi a1, a0, 6
-; LMULMAX1-RV64-NEXT: vsm.v v8, (a1)
-; LMULMAX1-RV64-NEXT: addi a1, a0, 4
-; LMULMAX1-RV64-NEXT: vsm.v v8, (a1)
-; LMULMAX1-RV64-NEXT: addi a1, a0, 2
-; LMULMAX1-RV64-NEXT: vsm.v v8, (a1)
-; LMULMAX1-RV64-NEXT: vsm.v v8, (a0)
-; LMULMAX1-RV64-NEXT: ret
+; CHECK-LABEL: splat_v64i1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: andi a1, a1, 1
+; CHECK-NEXT: li a2, 64
+; CHECK-NEXT: vsetvli zero, a2, e8, m4, ta, ma
+; CHECK-NEXT: vmv.v.x v8, a1
+; CHECK-NEXT: vmsne.vi v12, v8, 0
+; CHECK-NEXT: vsm.v v12, (a0)
+; CHECK-NEXT: ret
%a = insertelement <64 x i1> poison, i1 %y, i32 0
%b = shufflevector <64 x i1> %a, <64 x i1> poison, <64 x i32> zeroinitializer
store <64 x i1> %b, ptr %x
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-stepvector.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-stepvector.ll
index 5574d12..0161ac4 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-stepvector.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-stepvector.ll
@@ -1,8 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs -riscv-v-fixed-length-vector-lmul-max=1 < %s | FileCheck %s --check-prefixes=CHECK,LMULMAX1,RV32LMULMAX1
-; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs -riscv-v-fixed-length-vector-lmul-max=1 < %s | FileCheck %s --check-prefixes=CHECK,LMULMAX1,RV64LMULMAX1
-; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs -riscv-v-fixed-length-vector-lmul-max=2 < %s | FileCheck %s --check-prefixes=CHECK,LMULMAX2,RV32LMULMAX2
-; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs -riscv-v-fixed-length-vector-lmul-max=2 < %s | FileCheck %s --check-prefixes=CHECK,LMULMAX2,RV64LMULMAX2
+; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32
+; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64
declare <2 x i8> @llvm.experimental.stepvector.v2i8()
@@ -103,18 +101,11 @@ define <8 x i16> @stepvector_v8i16() {
declare <16 x i16> @llvm.experimental.stepvector.v16i16()
define <16 x i16> @stepvector_v16i16() {
-; LMULMAX1-LABEL: stepvector_v16i16:
-; LMULMAX1: # %bb.0:
-; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; LMULMAX1-NEXT: vid.v v8
-; LMULMAX1-NEXT: vadd.vi v9, v8, 8
-; LMULMAX1-NEXT: ret
-;
-; LMULMAX2-LABEL: stepvector_v16i16:
-; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; LMULMAX2-NEXT: vid.v v8
-; LMULMAX2-NEXT: ret
+; CHECK-LABEL: stepvector_v16i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; CHECK-NEXT: vid.v v8
+; CHECK-NEXT: ret
%v = call <16 x i16> @llvm.experimental.stepvector.v16i16()
ret <16 x i16> %v
}
@@ -146,18 +137,11 @@ define <4 x i32> @stepvector_v4i32() {
declare <8 x i32> @llvm.experimental.stepvector.v8i32()
define <8 x i32> @stepvector_v8i32() {
-; LMULMAX1-LABEL: stepvector_v8i32:
-; LMULMAX1: # %bb.0:
-; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; LMULMAX1-NEXT: vid.v v8
-; LMULMAX1-NEXT: vadd.vi v9, v8, 4
-; LMULMAX1-NEXT: ret
-;
-; LMULMAX2-LABEL: stepvector_v8i32:
-; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; LMULMAX2-NEXT: vid.v v8
-; LMULMAX2-NEXT: ret
+; CHECK-LABEL: stepvector_v8i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; CHECK-NEXT: vid.v v8
+; CHECK-NEXT: ret
%v = call <8 x i32> @llvm.experimental.stepvector.v8i32()
ret <8 x i32> %v
}
@@ -165,21 +149,11 @@ define <8 x i32> @stepvector_v8i32() {
declare <16 x i32> @llvm.experimental.stepvector.v16i32()
define <16 x i32> @stepvector_v16i32() {
-; LMULMAX1-LABEL: stepvector_v16i32:
-; LMULMAX1: # %bb.0:
-; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; LMULMAX1-NEXT: vid.v v8
-; LMULMAX1-NEXT: vadd.vi v9, v8, 4
-; LMULMAX1-NEXT: vadd.vi v10, v8, 8
-; LMULMAX1-NEXT: vadd.vi v11, v8, 12
-; LMULMAX1-NEXT: ret
-;
-; LMULMAX2-LABEL: stepvector_v16i32:
-; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; LMULMAX2-NEXT: vid.v v8
-; LMULMAX2-NEXT: vadd.vi v10, v8, 8
-; LMULMAX2-NEXT: ret
+; CHECK-LABEL: stepvector_v16i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma
+; CHECK-NEXT: vid.v v8
+; CHECK-NEXT: ret
%v = call <16 x i32> @llvm.experimental.stepvector.v16i32()
ret <16 x i32> %v
}
@@ -187,33 +161,19 @@ define <16 x i32> @stepvector_v16i32() {
declare <2 x i64> @llvm.experimental.stepvector.v2i64()
define <2 x i64> @stepvector_v2i64() {
-; RV32LMULMAX1-LABEL: stepvector_v2i64:
-; RV32LMULMAX1: # %bb.0:
-; RV32LMULMAX1-NEXT: lui a0, 16
-; RV32LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; RV32LMULMAX1-NEXT: vmv.s.x v9, a0
-; RV32LMULMAX1-NEXT: vsext.vf4 v8, v9
-; RV32LMULMAX1-NEXT: ret
-;
-; RV64LMULMAX1-LABEL: stepvector_v2i64:
-; RV64LMULMAX1: # %bb.0:
-; RV64LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; RV64LMULMAX1-NEXT: vid.v v8
-; RV64LMULMAX1-NEXT: ret
+; RV32-LABEL: stepvector_v2i64:
+; RV32: # %bb.0:
+; RV32-NEXT: lui a0, 16
+; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; RV32-NEXT: vmv.s.x v9, a0
+; RV32-NEXT: vsext.vf4 v8, v9
+; RV32-NEXT: ret
;
-; RV32LMULMAX2-LABEL: stepvector_v2i64:
-; RV32LMULMAX2: # %bb.0:
-; RV32LMULMAX2-NEXT: lui a0, 16
-; RV32LMULMAX2-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; RV32LMULMAX2-NEXT: vmv.s.x v9, a0
-; RV32LMULMAX2-NEXT: vsext.vf4 v8, v9
-; RV32LMULMAX2-NEXT: ret
-;
-; RV64LMULMAX2-LABEL: stepvector_v2i64:
-; RV64LMULMAX2: # %bb.0:
-; RV64LMULMAX2-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; RV64LMULMAX2-NEXT: vid.v v8
-; RV64LMULMAX2-NEXT: ret
+; RV64-LABEL: stepvector_v2i64:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; RV64-NEXT: vid.v v8
+; RV64-NEXT: ret
%v = call <2 x i64> @llvm.experimental.stepvector.v2i64()
ret <2 x i64> %v
}
@@ -221,39 +181,20 @@ define <2 x i64> @stepvector_v2i64() {
declare <4 x i64> @llvm.experimental.stepvector.v4i64()
define <4 x i64> @stepvector_v4i64() {
-; RV32LMULMAX1-LABEL: stepvector_v4i64:
-; RV32LMULMAX1: # %bb.0:
-; RV32LMULMAX1-NEXT: lui a0, 16
-; RV32LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; RV32LMULMAX1-NEXT: vmv.s.x v9, a0
-; RV32LMULMAX1-NEXT: vsext.vf4 v8, v9
-; RV32LMULMAX1-NEXT: lui a0, 48
-; RV32LMULMAX1-NEXT: addi a0, a0, 2
-; RV32LMULMAX1-NEXT: vmv.s.x v10, a0
-; RV32LMULMAX1-NEXT: vsext.vf4 v9, v10
-; RV32LMULMAX1-NEXT: ret
-;
-; RV64LMULMAX1-LABEL: stepvector_v4i64:
-; RV64LMULMAX1: # %bb.0:
-; RV64LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; RV64LMULMAX1-NEXT: vid.v v8
-; RV64LMULMAX1-NEXT: vadd.vi v9, v8, 2
-; RV64LMULMAX1-NEXT: ret
+; RV32-LABEL: stepvector_v4i64:
+; RV32: # %bb.0:
+; RV32-NEXT: lui a0, %hi(.LCPI14_0)
+; RV32-NEXT: addi a0, a0, %lo(.LCPI14_0)
+; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; RV32-NEXT: vle8.v v10, (a0)
+; RV32-NEXT: vsext.vf4 v8, v10
+; RV32-NEXT: ret
;
-; RV32LMULMAX2-LABEL: stepvector_v4i64:
-; RV32LMULMAX2: # %bb.0:
-; RV32LMULMAX2-NEXT: lui a0, %hi(.LCPI14_0)
-; RV32LMULMAX2-NEXT: addi a0, a0, %lo(.LCPI14_0)
-; RV32LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; RV32LMULMAX2-NEXT: vle8.v v10, (a0)
-; RV32LMULMAX2-NEXT: vsext.vf4 v8, v10
-; RV32LMULMAX2-NEXT: ret
-;
-; RV64LMULMAX2-LABEL: stepvector_v4i64:
-; RV64LMULMAX2: # %bb.0:
-; RV64LMULMAX2-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; RV64LMULMAX2-NEXT: vid.v v8
-; RV64LMULMAX2-NEXT: ret
+; RV64-LABEL: stepvector_v4i64:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; RV64-NEXT: vid.v v8
+; RV64-NEXT: ret
%v = call <4 x i64> @llvm.experimental.stepvector.v4i64()
ret <4 x i64> %v
}
@@ -261,54 +202,20 @@ define <4 x i64> @stepvector_v4i64() {
declare <8 x i64> @llvm.experimental.stepvector.v8i64()
define <8 x i64> @stepvector_v8i64() {
-; RV32LMULMAX1-LABEL: stepvector_v8i64:
-; RV32LMULMAX1: # %bb.0:
-; RV32LMULMAX1-NEXT: lui a0, 16
-; RV32LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; RV32LMULMAX1-NEXT: vmv.s.x v9, a0
-; RV32LMULMAX1-NEXT: vsext.vf4 v8, v9
-; RV32LMULMAX1-NEXT: lui a0, 48
-; RV32LMULMAX1-NEXT: addi a0, a0, 2
-; RV32LMULMAX1-NEXT: vmv.s.x v10, a0
-; RV32LMULMAX1-NEXT: vsext.vf4 v9, v10
-; RV32LMULMAX1-NEXT: lui a0, 80
-; RV32LMULMAX1-NEXT: addi a0, a0, 4
-; RV32LMULMAX1-NEXT: vmv.s.x v11, a0
-; RV32LMULMAX1-NEXT: vsext.vf4 v10, v11
-; RV32LMULMAX1-NEXT: lui a0, 112
-; RV32LMULMAX1-NEXT: addi a0, a0, 6
-; RV32LMULMAX1-NEXT: vmv.s.x v12, a0
-; RV32LMULMAX1-NEXT: vsext.vf4 v11, v12
-; RV32LMULMAX1-NEXT: ret
-;
-; RV64LMULMAX1-LABEL: stepvector_v8i64:
-; RV64LMULMAX1: # %bb.0:
-; RV64LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; RV64LMULMAX1-NEXT: vid.v v8
-; RV64LMULMAX1-NEXT: vadd.vi v9, v8, 2
-; RV64LMULMAX1-NEXT: vadd.vi v10, v8, 4
-; RV64LMULMAX1-NEXT: vadd.vi v11, v8, 6
-; RV64LMULMAX1-NEXT: ret
-;
-; RV32LMULMAX2-LABEL: stepvector_v8i64:
-; RV32LMULMAX2: # %bb.0:
-; RV32LMULMAX2-NEXT: lui a0, %hi(.LCPI15_0)
-; RV32LMULMAX2-NEXT: addi a0, a0, %lo(.LCPI15_0)
-; RV32LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; RV32LMULMAX2-NEXT: vle8.v v10, (a0)
-; RV32LMULMAX2-NEXT: lui a0, %hi(.LCPI15_1)
-; RV32LMULMAX2-NEXT: addi a0, a0, %lo(.LCPI15_1)
-; RV32LMULMAX2-NEXT: vle8.v v12, (a0)
-; RV32LMULMAX2-NEXT: vsext.vf4 v8, v10
-; RV32LMULMAX2-NEXT: vsext.vf4 v10, v12
-; RV32LMULMAX2-NEXT: ret
+; RV32-LABEL: stepvector_v8i64:
+; RV32: # %bb.0:
+; RV32-NEXT: lui a0, %hi(.LCPI15_0)
+; RV32-NEXT: addi a0, a0, %lo(.LCPI15_0)
+; RV32-NEXT: vsetivli zero, 16, e32, m4, ta, ma
+; RV32-NEXT: vle8.v v12, (a0)
+; RV32-NEXT: vsext.vf4 v8, v12
+; RV32-NEXT: ret
;
-; RV64LMULMAX2-LABEL: stepvector_v8i64:
-; RV64LMULMAX2: # %bb.0:
-; RV64LMULMAX2-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; RV64LMULMAX2-NEXT: vid.v v8
-; RV64LMULMAX2-NEXT: vadd.vi v10, v8, 4
-; RV64LMULMAX2-NEXT: ret
+; RV64-LABEL: stepvector_v8i64:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma
+; RV64-NEXT: vid.v v8
+; RV64-NEXT: ret
%v = call <8 x i64> @llvm.experimental.stepvector.v8i64()
ret <8 x i64> %v
}
@@ -316,84 +223,21 @@ define <8 x i64> @stepvector_v8i64() {
declare <16 x i64> @llvm.experimental.stepvector.v16i64()
define <16 x i64> @stepvector_v16i64() {
-; RV32LMULMAX1-LABEL: stepvector_v16i64:
-; RV32LMULMAX1: # %bb.0:
-; RV32LMULMAX1-NEXT: lui a0, 16
-; RV32LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; RV32LMULMAX1-NEXT: vmv.s.x v9, a0
-; RV32LMULMAX1-NEXT: vsext.vf4 v8, v9
-; RV32LMULMAX1-NEXT: lui a0, 48
-; RV32LMULMAX1-NEXT: addi a0, a0, 2
-; RV32LMULMAX1-NEXT: vmv.s.x v10, a0
-; RV32LMULMAX1-NEXT: vsext.vf4 v9, v10
-; RV32LMULMAX1-NEXT: lui a0, 80
-; RV32LMULMAX1-NEXT: addi a0, a0, 4
-; RV32LMULMAX1-NEXT: vmv.s.x v11, a0
-; RV32LMULMAX1-NEXT: vsext.vf4 v10, v11
-; RV32LMULMAX1-NEXT: lui a0, 112
-; RV32LMULMAX1-NEXT: addi a0, a0, 6
-; RV32LMULMAX1-NEXT: vmv.s.x v12, a0
-; RV32LMULMAX1-NEXT: vsext.vf4 v11, v12
-; RV32LMULMAX1-NEXT: lui a0, 144
-; RV32LMULMAX1-NEXT: addi a0, a0, 8
-; RV32LMULMAX1-NEXT: vmv.s.x v13, a0
-; RV32LMULMAX1-NEXT: vsext.vf4 v12, v13
-; RV32LMULMAX1-NEXT: lui a0, 176
-; RV32LMULMAX1-NEXT: addi a0, a0, 10
-; RV32LMULMAX1-NEXT: vmv.s.x v14, a0
-; RV32LMULMAX1-NEXT: vsext.vf4 v13, v14
-; RV32LMULMAX1-NEXT: lui a0, 208
-; RV32LMULMAX1-NEXT: addi a0, a0, 12
-; RV32LMULMAX1-NEXT: vmv.s.x v15, a0
-; RV32LMULMAX1-NEXT: vsext.vf4 v14, v15
-; RV32LMULMAX1-NEXT: lui a0, 240
-; RV32LMULMAX1-NEXT: addi a0, a0, 14
-; RV32LMULMAX1-NEXT: vmv.s.x v16, a0
-; RV32LMULMAX1-NEXT: vsext.vf4 v15, v16
-; RV32LMULMAX1-NEXT: ret
-;
-; RV64LMULMAX1-LABEL: stepvector_v16i64:
-; RV64LMULMAX1: # %bb.0:
-; RV64LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; RV64LMULMAX1-NEXT: vid.v v8
-; RV64LMULMAX1-NEXT: vadd.vi v9, v8, 2
-; RV64LMULMAX1-NEXT: vadd.vi v10, v8, 4
-; RV64LMULMAX1-NEXT: vadd.vi v11, v8, 6
-; RV64LMULMAX1-NEXT: vadd.vi v12, v8, 8
-; RV64LMULMAX1-NEXT: vadd.vi v13, v8, 10
-; RV64LMULMAX1-NEXT: vadd.vi v14, v8, 12
-; RV64LMULMAX1-NEXT: vadd.vi v15, v8, 14
-; RV64LMULMAX1-NEXT: ret
-;
-; RV32LMULMAX2-LABEL: stepvector_v16i64:
-; RV32LMULMAX2: # %bb.0:
-; RV32LMULMAX2-NEXT: lui a0, %hi(.LCPI16_0)
-; RV32LMULMAX2-NEXT: addi a0, a0, %lo(.LCPI16_0)
-; RV32LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; RV32LMULMAX2-NEXT: vle8.v v10, (a0)
-; RV32LMULMAX2-NEXT: lui a0, %hi(.LCPI16_1)
-; RV32LMULMAX2-NEXT: addi a0, a0, %lo(.LCPI16_1)
-; RV32LMULMAX2-NEXT: vle8.v v12, (a0)
-; RV32LMULMAX2-NEXT: lui a0, %hi(.LCPI16_2)
-; RV32LMULMAX2-NEXT: addi a0, a0, %lo(.LCPI16_2)
-; RV32LMULMAX2-NEXT: vle8.v v14, (a0)
-; RV32LMULMAX2-NEXT: lui a0, %hi(.LCPI16_3)
-; RV32LMULMAX2-NEXT: addi a0, a0, %lo(.LCPI16_3)
-; RV32LMULMAX2-NEXT: vle8.v v16, (a0)
-; RV32LMULMAX2-NEXT: vsext.vf4 v8, v10
-; RV32LMULMAX2-NEXT: vsext.vf4 v10, v12
-; RV32LMULMAX2-NEXT: vsext.vf4 v12, v14
-; RV32LMULMAX2-NEXT: vsext.vf4 v14, v16
-; RV32LMULMAX2-NEXT: ret
+; RV32-LABEL: stepvector_v16i64:
+; RV32: # %bb.0:
+; RV32-NEXT: lui a0, %hi(.LCPI16_0)
+; RV32-NEXT: addi a0, a0, %lo(.LCPI16_0)
+; RV32-NEXT: li a1, 32
+; RV32-NEXT: vsetvli zero, a1, e32, m8, ta, ma
+; RV32-NEXT: vle8.v v16, (a0)
+; RV32-NEXT: vsext.vf4 v8, v16
+; RV32-NEXT: ret
;
-; RV64LMULMAX2-LABEL: stepvector_v16i64:
-; RV64LMULMAX2: # %bb.0:
-; RV64LMULMAX2-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; RV64LMULMAX2-NEXT: vid.v v8
-; RV64LMULMAX2-NEXT: vadd.vi v10, v8, 4
-; RV64LMULMAX2-NEXT: vadd.vi v12, v8, 8
-; RV64LMULMAX2-NEXT: vadd.vi v14, v8, 12
-; RV64LMULMAX2-NEXT: ret
+; RV64-LABEL: stepvector_v16i64:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, ma
+; RV64-NEXT: vid.v v8
+; RV64-NEXT: ret
%v = call <16 x i64> @llvm.experimental.stepvector.v16i64()
ret <16 x i64> %v
}
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vreductions-mask.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vreductions-mask.ll
index b18e235..44d4a8a 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vreductions-mask.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vreductions-mask.ll
@@ -1,8 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+m,+v -verify-machineinstrs -riscv-v-fixed-length-vector-lmul-max=1 < %s | FileCheck %s --check-prefixes=CHECK,LMULMAX1
-; RUN: llc -mtriple=riscv64 -mattr=+m,+v -verify-machineinstrs -riscv-v-fixed-length-vector-lmul-max=1 < %s | FileCheck %s --check-prefixes=CHECK,LMULMAX1
-; RUN: llc -mtriple=riscv32 -mattr=+m,+v -verify-machineinstrs -riscv-v-fixed-length-vector-lmul-max=8 < %s | FileCheck %s --check-prefixes=CHECK,LMULMAX8
-; RUN: llc -mtriple=riscv64 -mattr=+m,+v -verify-machineinstrs -riscv-v-fixed-length-vector-lmul-max=8 < %s | FileCheck %s --check-prefixes=CHECK,LMULMAX8
+; RUN: llc -mtriple=riscv32 -mattr=+m,+v -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -mtriple=riscv64 -mattr=+m,+v -verify-machineinstrs < %s | FileCheck %s
declare i1 @llvm.vector.reduce.or.v1i1(<1 x i1>)
@@ -474,21 +472,13 @@ define zeroext i1 @vreduce_smin_v16i1(<16 x i1> %v) {
declare i1 @llvm.vector.reduce.or.v32i1(<32 x i1>)
define zeroext i1 @vreduce_or_v32i1(<32 x i1> %v) {
-; LMULMAX1-LABEL: vreduce_or_v32i1:
-; LMULMAX1: # %bb.0:
-; LMULMAX1-NEXT: vsetivli zero, 16, e8, m1, ta, ma
-; LMULMAX1-NEXT: vmor.mm v8, v0, v8
-; LMULMAX1-NEXT: vcpop.m a0, v8
-; LMULMAX1-NEXT: snez a0, a0
-; LMULMAX1-NEXT: ret
-;
-; LMULMAX8-LABEL: vreduce_or_v32i1:
-; LMULMAX8: # %bb.0:
-; LMULMAX8-NEXT: li a0, 32
-; LMULMAX8-NEXT: vsetvli zero, a0, e8, m2, ta, ma
-; LMULMAX8-NEXT: vcpop.m a0, v0
-; LMULMAX8-NEXT: snez a0, a0
-; LMULMAX8-NEXT: ret
+; CHECK-LABEL: vreduce_or_v32i1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a0, 32
+; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
+; CHECK-NEXT: vcpop.m a0, v0
+; CHECK-NEXT: snez a0, a0
+; CHECK-NEXT: ret
%red = call i1 @llvm.vector.reduce.or.v32i1(<32 x i1> %v)
ret i1 %red
}
@@ -496,21 +486,13 @@ define zeroext i1 @vreduce_or_v32i1(<32 x i1> %v) {
declare i1 @llvm.vector.reduce.xor.v32i1(<32 x i1>)
define zeroext i1 @vreduce_xor_v32i1(<32 x i1> %v) {
-; LMULMAX1-LABEL: vreduce_xor_v32i1:
-; LMULMAX1: # %bb.0:
-; LMULMAX1-NEXT: vsetivli zero, 16, e8, m1, ta, ma
-; LMULMAX1-NEXT: vmxor.mm v8, v0, v8
-; LMULMAX1-NEXT: vcpop.m a0, v8
-; LMULMAX1-NEXT: andi a0, a0, 1
-; LMULMAX1-NEXT: ret
-;
-; LMULMAX8-LABEL: vreduce_xor_v32i1:
-; LMULMAX8: # %bb.0:
-; LMULMAX8-NEXT: li a0, 32
-; LMULMAX8-NEXT: vsetvli zero, a0, e8, m2, ta, ma
-; LMULMAX8-NEXT: vcpop.m a0, v0
-; LMULMAX8-NEXT: andi a0, a0, 1
-; LMULMAX8-NEXT: ret
+; CHECK-LABEL: vreduce_xor_v32i1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a0, 32
+; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
+; CHECK-NEXT: vcpop.m a0, v0
+; CHECK-NEXT: andi a0, a0, 1
+; CHECK-NEXT: ret
%red = call i1 @llvm.vector.reduce.xor.v32i1(<32 x i1> %v)
ret i1 %red
}
@@ -518,22 +500,14 @@ define zeroext i1 @vreduce_xor_v32i1(<32 x i1> %v) {
declare i1 @llvm.vector.reduce.and.v32i1(<32 x i1>)
define zeroext i1 @vreduce_and_v32i1(<32 x i1> %v) {
-; LMULMAX1-LABEL: vreduce_and_v32i1:
-; LMULMAX1: # %bb.0:
-; LMULMAX1-NEXT: vsetivli zero, 16, e8, m1, ta, ma
-; LMULMAX1-NEXT: vmnand.mm v8, v0, v8
-; LMULMAX1-NEXT: vcpop.m a0, v8
-; LMULMAX1-NEXT: seqz a0, a0
-; LMULMAX1-NEXT: ret
-;
-; LMULMAX8-LABEL: vreduce_and_v32i1:
-; LMULMAX8: # %bb.0:
-; LMULMAX8-NEXT: li a0, 32
-; LMULMAX8-NEXT: vsetvli zero, a0, e8, m2, ta, ma
-; LMULMAX8-NEXT: vmnot.m v8, v0
-; LMULMAX8-NEXT: vcpop.m a0, v8
-; LMULMAX8-NEXT: seqz a0, a0
-; LMULMAX8-NEXT: ret
+; CHECK-LABEL: vreduce_and_v32i1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a0, 32
+; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
+; CHECK-NEXT: vmnot.m v8, v0
+; CHECK-NEXT: vcpop.m a0, v8
+; CHECK-NEXT: seqz a0, a0
+; CHECK-NEXT: ret
%red = call i1 @llvm.vector.reduce.and.v32i1(<32 x i1> %v)
ret i1 %red
}
@@ -541,21 +515,13 @@ define zeroext i1 @vreduce_and_v32i1(<32 x i1> %v) {
declare i1 @llvm.vector.reduce.umax.v32i1(<32 x i1>)
define zeroext i1 @vreduce_umax_v32i1(<32 x i1> %v) {
-; LMULMAX1-LABEL: vreduce_umax_v32i1:
-; LMULMAX1: # %bb.0:
-; LMULMAX1-NEXT: vsetivli zero, 16, e8, m1, ta, ma
-; LMULMAX1-NEXT: vmor.mm v8, v0, v8
-; LMULMAX1-NEXT: vcpop.m a0, v8
-; LMULMAX1-NEXT: snez a0, a0
-; LMULMAX1-NEXT: ret
-;
-; LMULMAX8-LABEL: vreduce_umax_v32i1:
-; LMULMAX8: # %bb.0:
-; LMULMAX8-NEXT: li a0, 32
-; LMULMAX8-NEXT: vsetvli zero, a0, e8, m2, ta, ma
-; LMULMAX8-NEXT: vcpop.m a0, v0
-; LMULMAX8-NEXT: snez a0, a0
-; LMULMAX8-NEXT: ret
+; CHECK-LABEL: vreduce_umax_v32i1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a0, 32
+; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
+; CHECK-NEXT: vcpop.m a0, v0
+; CHECK-NEXT: snez a0, a0
+; CHECK-NEXT: ret
%red = call i1 @llvm.vector.reduce.umax.v32i1(<32 x i1> %v)
ret i1 %red
}
@@ -563,22 +529,14 @@ define zeroext i1 @vreduce_umax_v32i1(<32 x i1> %v) {
declare i1 @llvm.vector.reduce.smax.v32i1(<32 x i1>)
define zeroext i1 @vreduce_smax_v32i1(<32 x i1> %v) {
-; LMULMAX1-LABEL: vreduce_smax_v32i1:
-; LMULMAX1: # %bb.0:
-; LMULMAX1-NEXT: vsetivli zero, 16, e8, m1, ta, ma
-; LMULMAX1-NEXT: vmnand.mm v8, v0, v8
-; LMULMAX1-NEXT: vcpop.m a0, v8
-; LMULMAX1-NEXT: seqz a0, a0
-; LMULMAX1-NEXT: ret
-;
-; LMULMAX8-LABEL: vreduce_smax_v32i1:
-; LMULMAX8: # %bb.0:
-; LMULMAX8-NEXT: li a0, 32
-; LMULMAX8-NEXT: vsetvli zero, a0, e8, m2, ta, ma
-; LMULMAX8-NEXT: vmnot.m v8, v0
-; LMULMAX8-NEXT: vcpop.m a0, v8
-; LMULMAX8-NEXT: seqz a0, a0
-; LMULMAX8-NEXT: ret
+; CHECK-LABEL: vreduce_smax_v32i1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a0, 32
+; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
+; CHECK-NEXT: vmnot.m v8, v0
+; CHECK-NEXT: vcpop.m a0, v8
+; CHECK-NEXT: seqz a0, a0
+; CHECK-NEXT: ret
%red = call i1 @llvm.vector.reduce.smax.v32i1(<32 x i1> %v)
ret i1 %red
}
@@ -586,22 +544,14 @@ define zeroext i1 @vreduce_smax_v32i1(<32 x i1> %v) {
declare i1 @llvm.vector.reduce.umin.v32i1(<32 x i1>)
define zeroext i1 @vreduce_umin_v32i1(<32 x i1> %v) {
-; LMULMAX1-LABEL: vreduce_umin_v32i1:
-; LMULMAX1: # %bb.0:
-; LMULMAX1-NEXT: vsetivli zero, 16, e8, m1, ta, ma
-; LMULMAX1-NEXT: vmnand.mm v8, v0, v8
-; LMULMAX1-NEXT: vcpop.m a0, v8
-; LMULMAX1-NEXT: seqz a0, a0
-; LMULMAX1-NEXT: ret
-;
-; LMULMAX8-LABEL: vreduce_umin_v32i1:
-; LMULMAX8: # %bb.0:
-; LMULMAX8-NEXT: li a0, 32
-; LMULMAX8-NEXT: vsetvli zero, a0, e8, m2, ta, ma
-; LMULMAX8-NEXT: vmnot.m v8, v0
-; LMULMAX8-NEXT: vcpop.m a0, v8
-; LMULMAX8-NEXT: seqz a0, a0
-; LMULMAX8-NEXT: ret
+; CHECK-LABEL: vreduce_umin_v32i1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a0, 32
+; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
+; CHECK-NEXT: vmnot.m v8, v0
+; CHECK-NEXT: vcpop.m a0, v8
+; CHECK-NEXT: seqz a0, a0
+; CHECK-NEXT: ret
%red = call i1 @llvm.vector.reduce.umin.v32i1(<32 x i1> %v)
ret i1 %red
}
@@ -609,21 +559,13 @@ define zeroext i1 @vreduce_umin_v32i1(<32 x i1> %v) {
declare i1 @llvm.vector.reduce.smin.v32i1(<32 x i1>)
define zeroext i1 @vreduce_smin_v32i1(<32 x i1> %v) {
-; LMULMAX1-LABEL: vreduce_smin_v32i1:
-; LMULMAX1: # %bb.0:
-; LMULMAX1-NEXT: vsetivli zero, 16, e8, m1, ta, ma
-; LMULMAX1-NEXT: vmor.mm v8, v0, v8
-; LMULMAX1-NEXT: vcpop.m a0, v8
-; LMULMAX1-NEXT: snez a0, a0
-; LMULMAX1-NEXT: ret
-;
-; LMULMAX8-LABEL: vreduce_smin_v32i1:
-; LMULMAX8: # %bb.0:
-; LMULMAX8-NEXT: li a0, 32
-; LMULMAX8-NEXT: vsetvli zero, a0, e8, m2, ta, ma
-; LMULMAX8-NEXT: vcpop.m a0, v0
-; LMULMAX8-NEXT: snez a0, a0
-; LMULMAX8-NEXT: ret
+; CHECK-LABEL: vreduce_smin_v32i1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a0, 32
+; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
+; CHECK-NEXT: vcpop.m a0, v0
+; CHECK-NEXT: snez a0, a0
+; CHECK-NEXT: ret
%red = call i1 @llvm.vector.reduce.smin.v32i1(<32 x i1> %v)
ret i1 %red
}
@@ -631,23 +573,13 @@ define zeroext i1 @vreduce_smin_v32i1(<32 x i1> %v) {
declare i1 @llvm.vector.reduce.or.v64i1(<64 x i1>)
define zeroext i1 @vreduce_or_v64i1(<64 x i1> %v) {
-; LMULMAX1-LABEL: vreduce_or_v64i1:
-; LMULMAX1: # %bb.0:
-; LMULMAX1-NEXT: vsetivli zero, 16, e8, m1, ta, ma
-; LMULMAX1-NEXT: vmor.mm v8, v8, v10
-; LMULMAX1-NEXT: vmor.mm v9, v0, v9
-; LMULMAX1-NEXT: vmor.mm v8, v9, v8
-; LMULMAX1-NEXT: vcpop.m a0, v8
-; LMULMAX1-NEXT: snez a0, a0
-; LMULMAX1-NEXT: ret
-;
-; LMULMAX8-LABEL: vreduce_or_v64i1:
-; LMULMAX8: # %bb.0:
-; LMULMAX8-NEXT: li a0, 64
-; LMULMAX8-NEXT: vsetvli zero, a0, e8, m4, ta, ma
-; LMULMAX8-NEXT: vcpop.m a0, v0
-; LMULMAX8-NEXT: snez a0, a0
-; LMULMAX8-NEXT: ret
+; CHECK-LABEL: vreduce_or_v64i1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a0, 64
+; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
+; CHECK-NEXT: vcpop.m a0, v0
+; CHECK-NEXT: snez a0, a0
+; CHECK-NEXT: ret
%red = call i1 @llvm.vector.reduce.or.v64i1(<64 x i1> %v)
ret i1 %red
}
@@ -655,23 +587,13 @@ define zeroext i1 @vreduce_or_v64i1(<64 x i1> %v) {
declare i1 @llvm.vector.reduce.xor.v64i1(<64 x i1>)
define zeroext i1 @vreduce_xor_v64i1(<64 x i1> %v) {
-; LMULMAX1-LABEL: vreduce_xor_v64i1:
-; LMULMAX1: # %bb.0:
-; LMULMAX1-NEXT: vsetivli zero, 16, e8, m1, ta, ma
-; LMULMAX1-NEXT: vmxor.mm v8, v8, v10
-; LMULMAX1-NEXT: vmxor.mm v9, v0, v9
-; LMULMAX1-NEXT: vmxor.mm v8, v9, v8
-; LMULMAX1-NEXT: vcpop.m a0, v8
-; LMULMAX1-NEXT: andi a0, a0, 1
-; LMULMAX1-NEXT: ret
-;
-; LMULMAX8-LABEL: vreduce_xor_v64i1:
-; LMULMAX8: # %bb.0:
-; LMULMAX8-NEXT: li a0, 64
-; LMULMAX8-NEXT: vsetvli zero, a0, e8, m4, ta, ma
-; LMULMAX8-NEXT: vcpop.m a0, v0
-; LMULMAX8-NEXT: andi a0, a0, 1
-; LMULMAX8-NEXT: ret
+; CHECK-LABEL: vreduce_xor_v64i1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a0, 64
+; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
+; CHECK-NEXT: vcpop.m a0, v0
+; CHECK-NEXT: andi a0, a0, 1
+; CHECK-NEXT: ret
%red = call i1 @llvm.vector.reduce.xor.v64i1(<64 x i1> %v)
ret i1 %red
}
@@ -679,24 +601,14 @@ define zeroext i1 @vreduce_xor_v64i1(<64 x i1> %v) {
declare i1 @llvm.vector.reduce.and.v64i1(<64 x i1>)
define zeroext i1 @vreduce_and_v64i1(<64 x i1> %v) {
-; LMULMAX1-LABEL: vreduce_and_v64i1:
-; LMULMAX1: # %bb.0:
-; LMULMAX1-NEXT: vsetivli zero, 16, e8, m1, ta, ma
-; LMULMAX1-NEXT: vmand.mm v8, v8, v10
-; LMULMAX1-NEXT: vmand.mm v9, v0, v9
-; LMULMAX1-NEXT: vmnand.mm v8, v9, v8
-; LMULMAX1-NEXT: vcpop.m a0, v8
-; LMULMAX1-NEXT: seqz a0, a0
-; LMULMAX1-NEXT: ret
-;
-; LMULMAX8-LABEL: vreduce_and_v64i1:
-; LMULMAX8: # %bb.0:
-; LMULMAX8-NEXT: li a0, 64
-; LMULMAX8-NEXT: vsetvli zero, a0, e8, m4, ta, ma
-; LMULMAX8-NEXT: vmnot.m v8, v0
-; LMULMAX8-NEXT: vcpop.m a0, v8
-; LMULMAX8-NEXT: seqz a0, a0
-; LMULMAX8-NEXT: ret
+; CHECK-LABEL: vreduce_and_v64i1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a0, 64
+; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
+; CHECK-NEXT: vmnot.m v8, v0
+; CHECK-NEXT: vcpop.m a0, v8
+; CHECK-NEXT: seqz a0, a0
+; CHECK-NEXT: ret
%red = call i1 @llvm.vector.reduce.and.v64i1(<64 x i1> %v)
ret i1 %red
}
@@ -704,23 +616,13 @@ define zeroext i1 @vreduce_and_v64i1(<64 x i1> %v) {
declare i1 @llvm.vector.reduce.umax.v64i1(<64 x i1>)
define zeroext i1 @vreduce_umax_v64i1(<64 x i1> %v) {
-; LMULMAX1-LABEL: vreduce_umax_v64i1:
-; LMULMAX1: # %bb.0:
-; LMULMAX1-NEXT: vsetivli zero, 16, e8, m1, ta, ma
-; LMULMAX1-NEXT: vmor.mm v8, v8, v10
-; LMULMAX1-NEXT: vmor.mm v9, v0, v9
-; LMULMAX1-NEXT: vmor.mm v8, v9, v8
-; LMULMAX1-NEXT: vcpop.m a0, v8
-; LMULMAX1-NEXT: snez a0, a0
-; LMULMAX1-NEXT: ret
-;
-; LMULMAX8-LABEL: vreduce_umax_v64i1:
-; LMULMAX8: # %bb.0:
-; LMULMAX8-NEXT: li a0, 64
-; LMULMAX8-NEXT: vsetvli zero, a0, e8, m4, ta, ma
-; LMULMAX8-NEXT: vcpop.m a0, v0
-; LMULMAX8-NEXT: snez a0, a0
-; LMULMAX8-NEXT: ret
+; CHECK-LABEL: vreduce_umax_v64i1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a0, 64
+; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
+; CHECK-NEXT: vcpop.m a0, v0
+; CHECK-NEXT: snez a0, a0
+; CHECK-NEXT: ret
%red = call i1 @llvm.vector.reduce.umax.v64i1(<64 x i1> %v)
ret i1 %red
}
@@ -728,24 +630,14 @@ define zeroext i1 @vreduce_umax_v64i1(<64 x i1> %v) {
declare i1 @llvm.vector.reduce.smax.v64i1(<64 x i1>)
define zeroext i1 @vreduce_smax_v64i1(<64 x i1> %v) {
-; LMULMAX1-LABEL: vreduce_smax_v64i1:
-; LMULMAX1: # %bb.0:
-; LMULMAX1-NEXT: vsetivli zero, 16, e8, m1, ta, ma
-; LMULMAX1-NEXT: vmand.mm v8, v8, v10
-; LMULMAX1-NEXT: vmand.mm v9, v0, v9
-; LMULMAX1-NEXT: vmnand.mm v8, v9, v8
-; LMULMAX1-NEXT: vcpop.m a0, v8
-; LMULMAX1-NEXT: seqz a0, a0
-; LMULMAX1-NEXT: ret
-;
-; LMULMAX8-LABEL: vreduce_smax_v64i1:
-; LMULMAX8: # %bb.0:
-; LMULMAX8-NEXT: li a0, 64
-; LMULMAX8-NEXT: vsetvli zero, a0, e8, m4, ta, ma
-; LMULMAX8-NEXT: vmnot.m v8, v0
-; LMULMAX8-NEXT: vcpop.m a0, v8
-; LMULMAX8-NEXT: seqz a0, a0
-; LMULMAX8-NEXT: ret
+; CHECK-LABEL: vreduce_smax_v64i1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a0, 64
+; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
+; CHECK-NEXT: vmnot.m v8, v0
+; CHECK-NEXT: vcpop.m a0, v8
+; CHECK-NEXT: seqz a0, a0
+; CHECK-NEXT: ret
%red = call i1 @llvm.vector.reduce.smax.v64i1(<64 x i1> %v)
ret i1 %red
}
@@ -753,24 +645,14 @@ define zeroext i1 @vreduce_smax_v64i1(<64 x i1> %v) {
declare i1 @llvm.vector.reduce.umin.v64i1(<64 x i1>)
define zeroext i1 @vreduce_umin_v64i1(<64 x i1> %v) {
-; LMULMAX1-LABEL: vreduce_umin_v64i1:
-; LMULMAX1: # %bb.0:
-; LMULMAX1-NEXT: vsetivli zero, 16, e8, m1, ta, ma
-; LMULMAX1-NEXT: vmand.mm v8, v8, v10
-; LMULMAX1-NEXT: vmand.mm v9, v0, v9
-; LMULMAX1-NEXT: vmnand.mm v8, v9, v8
-; LMULMAX1-NEXT: vcpop.m a0, v8
-; LMULMAX1-NEXT: seqz a0, a0
-; LMULMAX1-NEXT: ret
-;
-; LMULMAX8-LABEL: vreduce_umin_v64i1:
-; LMULMAX8: # %bb.0:
-; LMULMAX8-NEXT: li a0, 64
-; LMULMAX8-NEXT: vsetvli zero, a0, e8, m4, ta, ma
-; LMULMAX8-NEXT: vmnot.m v8, v0
-; LMULMAX8-NEXT: vcpop.m a0, v8
-; LMULMAX8-NEXT: seqz a0, a0
-; LMULMAX8-NEXT: ret
+; CHECK-LABEL: vreduce_umin_v64i1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a0, 64
+; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
+; CHECK-NEXT: vmnot.m v8, v0
+; CHECK-NEXT: vcpop.m a0, v8
+; CHECK-NEXT: seqz a0, a0
+; CHECK-NEXT: ret
%red = call i1 @llvm.vector.reduce.umin.v64i1(<64 x i1> %v)
ret i1 %red
}
@@ -778,23 +660,13 @@ define zeroext i1 @vreduce_umin_v64i1(<64 x i1> %v) {
declare i1 @llvm.vector.reduce.smin.v64i1(<64 x i1>)
define zeroext i1 @vreduce_smin_v64i1(<64 x i1> %v) {
-; LMULMAX1-LABEL: vreduce_smin_v64i1:
-; LMULMAX1: # %bb.0:
-; LMULMAX1-NEXT: vsetivli zero, 16, e8, m1, ta, ma
-; LMULMAX1-NEXT: vmor.mm v8, v8, v10
-; LMULMAX1-NEXT: vmor.mm v9, v0, v9
-; LMULMAX1-NEXT: vmor.mm v8, v9, v8
-; LMULMAX1-NEXT: vcpop.m a0, v8
-; LMULMAX1-NEXT: snez a0, a0
-; LMULMAX1-NEXT: ret
-;
-; LMULMAX8-LABEL: vreduce_smin_v64i1:
-; LMULMAX8: # %bb.0:
-; LMULMAX8-NEXT: li a0, 64
-; LMULMAX8-NEXT: vsetvli zero, a0, e8, m4, ta, ma
-; LMULMAX8-NEXT: vcpop.m a0, v0
-; LMULMAX8-NEXT: snez a0, a0
-; LMULMAX8-NEXT: ret
+; CHECK-LABEL: vreduce_smin_v64i1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a0, 64
+; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
+; CHECK-NEXT: vcpop.m a0, v0
+; CHECK-NEXT: snez a0, a0
+; CHECK-NEXT: ret
%red = call i1 @llvm.vector.reduce.smin.v64i1(<64 x i1> %v)
ret i1 %red
}
@@ -867,21 +739,13 @@ define zeroext i1 @vreduce_add_v16i1(<16 x i1> %v) {
declare i1 @llvm.vector.reduce.add.v32i1(<32 x i1>)
define zeroext i1 @vreduce_add_v32i1(<32 x i1> %v) {
-; LMULMAX1-LABEL: vreduce_add_v32i1:
-; LMULMAX1: # %bb.0:
-; LMULMAX1-NEXT: vsetivli zero, 16, e8, m1, ta, ma
-; LMULMAX1-NEXT: vmxor.mm v8, v0, v8
-; LMULMAX1-NEXT: vcpop.m a0, v8
-; LMULMAX1-NEXT: andi a0, a0, 1
-; LMULMAX1-NEXT: ret
-;
-; LMULMAX8-LABEL: vreduce_add_v32i1:
-; LMULMAX8: # %bb.0:
-; LMULMAX8-NEXT: li a0, 32
-; LMULMAX8-NEXT: vsetvli zero, a0, e8, m2, ta, ma
-; LMULMAX8-NEXT: vcpop.m a0, v0
-; LMULMAX8-NEXT: andi a0, a0, 1
-; LMULMAX8-NEXT: ret
+; CHECK-LABEL: vreduce_add_v32i1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a0, 32
+; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
+; CHECK-NEXT: vcpop.m a0, v0
+; CHECK-NEXT: andi a0, a0, 1
+; CHECK-NEXT: ret
%red = call i1 @llvm.vector.reduce.add.v32i1(<32 x i1> %v)
ret i1 %red
}
@@ -889,23 +753,13 @@ define zeroext i1 @vreduce_add_v32i1(<32 x i1> %v) {
declare i1 @llvm.vector.reduce.add.v64i1(<64 x i1>)
define zeroext i1 @vreduce_add_v64i1(<64 x i1> %v) {
-; LMULMAX1-LABEL: vreduce_add_v64i1:
-; LMULMAX1: # %bb.0:
-; LMULMAX1-NEXT: vsetivli zero, 16, e8, m1, ta, ma
-; LMULMAX1-NEXT: vmxor.mm v8, v8, v10
-; LMULMAX1-NEXT: vmxor.mm v9, v0, v9
-; LMULMAX1-NEXT: vmxor.mm v8, v9, v8
-; LMULMAX1-NEXT: vcpop.m a0, v8
-; LMULMAX1-NEXT: andi a0, a0, 1
-; LMULMAX1-NEXT: ret
-;
-; LMULMAX8-LABEL: vreduce_add_v64i1:
-; LMULMAX8: # %bb.0:
-; LMULMAX8-NEXT: li a0, 64
-; LMULMAX8-NEXT: vsetvli zero, a0, e8, m4, ta, ma
-; LMULMAX8-NEXT: vcpop.m a0, v0
-; LMULMAX8-NEXT: andi a0, a0, 1
-; LMULMAX8-NEXT: ret
+; CHECK-LABEL: vreduce_add_v64i1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a0, 64
+; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
+; CHECK-NEXT: vcpop.m a0, v0
+; CHECK-NEXT: andi a0, a0, 1
+; CHECK-NEXT: ret
%red = call i1 @llvm.vector.reduce.add.v64i1(<64 x i1> %v)
ret i1 %red
}
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwadd.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwadd.ll
index c8de041..7bffbaa 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwadd.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwadd.ll
@@ -880,3 +880,57 @@ define <2 x i64> @vwadd_vx_v2i64_i64(ptr %x, ptr %y) nounwind {
%g = add <2 x i64> %e, %f
ret <2 x i64> %g
}
+
+define <2 x i32> @vwadd_v2i32_of_v2i8(ptr %x, ptr %y) {
+; CHECK-LABEL: vwadd_v2i32_of_v2i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
+; CHECK-NEXT: vle8.v v8, (a1)
+; CHECK-NEXT: vle8.v v9, (a0)
+; CHECK-NEXT: vsext.vf2 v10, v8
+; CHECK-NEXT: vsext.vf2 v11, v9
+; CHECK-NEXT: vwadd.vv v8, v11, v10
+; CHECK-NEXT: ret
+ %a = load <2 x i8>, ptr %x
+ %b = load <2 x i8>, ptr %y
+ %c = sext <2 x i8> %a to <2 x i32>
+ %d = sext <2 x i8> %b to <2 x i32>
+ %e = add <2 x i32> %c, %d
+ ret <2 x i32> %e
+}
+
+define <2 x i64> @vwadd_v2i64_of_v2i8(ptr %x, ptr %y) {
+; CHECK-LABEL: vwadd_v2i64_of_v2i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
+; CHECK-NEXT: vle8.v v8, (a1)
+; CHECK-NEXT: vle8.v v9, (a0)
+; CHECK-NEXT: vsext.vf4 v10, v8
+; CHECK-NEXT: vsext.vf4 v11, v9
+; CHECK-NEXT: vwadd.vv v8, v11, v10
+; CHECK-NEXT: ret
+ %a = load <2 x i8>, ptr %x
+ %b = load <2 x i8>, ptr %y
+ %c = sext <2 x i8> %a to <2 x i64>
+ %d = sext <2 x i8> %b to <2 x i64>
+ %e = add <2 x i64> %c, %d
+ ret <2 x i64> %e
+}
+
+define <2 x i64> @vwadd_v2i64_of_v2i16(ptr %x, ptr %y) {
+; CHECK-LABEL: vwadd_v2i64_of_v2i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
+; CHECK-NEXT: vle16.v v8, (a1)
+; CHECK-NEXT: vle16.v v9, (a0)
+; CHECK-NEXT: vsext.vf2 v10, v8
+; CHECK-NEXT: vsext.vf2 v11, v9
+; CHECK-NEXT: vwadd.vv v8, v11, v10
+; CHECK-NEXT: ret
+ %a = load <2 x i16>, ptr %x
+ %b = load <2 x i16>, ptr %y
+ %c = sext <2 x i16> %a to <2 x i64>
+ %d = sext <2 x i16> %b to <2 x i64>
+ %e = add <2 x i64> %c, %d
+ ret <2 x i64> %e
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwaddu.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwaddu.ll
index e51ca9f..8779c6d 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwaddu.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwaddu.ll
@@ -908,3 +908,57 @@ define <4 x i64> @crash(<4 x i16> %x, <4 x i16> %y) {
%c = add <4 x i64> %a, %b
ret <4 x i64> %c
}
+
+define <2 x i32> @vwaddu_v2i32_of_v2i8(ptr %x, ptr %y) {
+; CHECK-LABEL: vwaddu_v2i32_of_v2i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
+; CHECK-NEXT: vle8.v v8, (a1)
+; CHECK-NEXT: vle8.v v9, (a0)
+; CHECK-NEXT: vzext.vf2 v10, v8
+; CHECK-NEXT: vzext.vf2 v11, v9
+; CHECK-NEXT: vwaddu.vv v8, v11, v10
+; CHECK-NEXT: ret
+ %a = load <2 x i8>, ptr %x
+ %b = load <2 x i8>, ptr %y
+ %c = zext <2 x i8> %a to <2 x i32>
+ %d = zext <2 x i8> %b to <2 x i32>
+ %e = add <2 x i32> %c, %d
+ ret <2 x i32> %e
+}
+
+define <2 x i64> @vwaddu_v2i64_of_v2i8(ptr %x, ptr %y) {
+; CHECK-LABEL: vwaddu_v2i64_of_v2i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
+; CHECK-NEXT: vle8.v v8, (a1)
+; CHECK-NEXT: vle8.v v9, (a0)
+; CHECK-NEXT: vzext.vf4 v10, v8
+; CHECK-NEXT: vzext.vf4 v11, v9
+; CHECK-NEXT: vwaddu.vv v8, v11, v10
+; CHECK-NEXT: ret
+ %a = load <2 x i8>, ptr %x
+ %b = load <2 x i8>, ptr %y
+ %c = zext <2 x i8> %a to <2 x i64>
+ %d = zext <2 x i8> %b to <2 x i64>
+ %e = add <2 x i64> %c, %d
+ ret <2 x i64> %e
+}
+
+define <2 x i64> @vwaddu_v2i64_of_v2i16(ptr %x, ptr %y) {
+; CHECK-LABEL: vwaddu_v2i64_of_v2i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
+; CHECK-NEXT: vle16.v v8, (a1)
+; CHECK-NEXT: vle16.v v9, (a0)
+; CHECK-NEXT: vzext.vf2 v10, v8
+; CHECK-NEXT: vzext.vf2 v11, v9
+; CHECK-NEXT: vwaddu.vv v8, v11, v10
+; CHECK-NEXT: ret
+ %a = load <2 x i16>, ptr %x
+ %b = load <2 x i16>, ptr %y
+ %c = zext <2 x i16> %a to <2 x i64>
+ %d = zext <2 x i16> %b to <2 x i64>
+ %e = add <2 x i64> %c, %d
+ ret <2 x i64> %e
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwsubu.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwsubu.ll
index cf00fe1..d2d5479 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwsubu.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwsubu.ll
@@ -895,3 +895,57 @@ define <2 x i64> @vwsubu_vx_v2i64_i64(ptr %x, ptr %y) nounwind {
%g = sub <2 x i64> %e, %f
ret <2 x i64> %g
}
+
+define <2 x i32> @vwsubu_v2i32_of_v2i8(ptr %x, ptr %y) {
+; CHECK-LABEL: vwsubu_v2i32_of_v2i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
+; CHECK-NEXT: vle8.v v8, (a1)
+; CHECK-NEXT: vle8.v v9, (a0)
+; CHECK-NEXT: vzext.vf2 v10, v8
+; CHECK-NEXT: vzext.vf2 v11, v9
+; CHECK-NEXT: vwsubu.vv v8, v11, v10
+; CHECK-NEXT: ret
+ %a = load <2 x i8>, ptr %x
+ %b = load <2 x i8>, ptr %y
+ %c = zext <2 x i8> %a to <2 x i32>
+ %d = zext <2 x i8> %b to <2 x i32>
+ %e = sub <2 x i32> %c, %d
+ ret <2 x i32> %e
+}
+
+define <2 x i64> @vwsubu_v2i64_of_v2i8(ptr %x, ptr %y) {
+; CHECK-LABEL: vwsubu_v2i64_of_v2i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
+; CHECK-NEXT: vle8.v v8, (a1)
+; CHECK-NEXT: vle8.v v9, (a0)
+; CHECK-NEXT: vzext.vf4 v10, v8
+; CHECK-NEXT: vzext.vf4 v11, v9
+; CHECK-NEXT: vwsubu.vv v8, v11, v10
+; CHECK-NEXT: ret
+ %a = load <2 x i8>, ptr %x
+ %b = load <2 x i8>, ptr %y
+ %c = zext <2 x i8> %a to <2 x i64>
+ %d = zext <2 x i8> %b to <2 x i64>
+ %e = sub <2 x i64> %c, %d
+ ret <2 x i64> %e
+}
+
+define <2 x i64> @vwsubu_v2i64_of_v2i16(ptr %x, ptr %y) {
+; CHECK-LABEL: vwsubu_v2i64_of_v2i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
+; CHECK-NEXT: vle16.v v8, (a1)
+; CHECK-NEXT: vle16.v v9, (a0)
+; CHECK-NEXT: vzext.vf2 v10, v8
+; CHECK-NEXT: vzext.vf2 v11, v9
+; CHECK-NEXT: vwsubu.vv v8, v11, v10
+; CHECK-NEXT: ret
+ %a = load <2 x i16>, ptr %x
+ %b = load <2 x i16>, ptr %y
+ %c = zext <2 x i16> %a to <2 x i64>
+ %d = zext <2 x i16> %b to <2 x i64>
+ %e = sub <2 x i64> %c, %d
+ ret <2 x i64> %e
+}
diff --git a/llvm/test/CodeGen/RISCV/signbit-test.ll b/llvm/test/CodeGen/RISCV/signbit-test.ll
index 69a9026..4e10fae 100644
--- a/llvm/test/CodeGen/RISCV/signbit-test.ll
+++ b/llvm/test/CodeGen/RISCV/signbit-test.ll
@@ -303,7 +303,10 @@ define i16 @test_clear_mask_i16_i8(i16 %x) nounwind {
; RV32-NEXT: bnez a1, .LBB10_2
; RV32-NEXT: # %bb.1: # %t
; RV32-NEXT: li a0, 42
-; RV32-NEXT: .LBB10_2: # %f
+; RV32-NEXT: ret
+; RV32-NEXT: .LBB10_2:
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: srli a0, a0, 16
; RV32-NEXT: ret
;
; RV64-LABEL: test_clear_mask_i16_i8:
@@ -312,7 +315,10 @@ define i16 @test_clear_mask_i16_i8(i16 %x) nounwind {
; RV64-NEXT: bnez a1, .LBB10_2
; RV64-NEXT: # %bb.1: # %t
; RV64-NEXT: li a0, 42
-; RV64-NEXT: .LBB10_2: # %f
+; RV64-NEXT: ret
+; RV64-NEXT: .LBB10_2:
+; RV64-NEXT: slli a0, a0, 48
+; RV64-NEXT: srli a0, a0, 48
; RV64-NEXT: ret
entry:
%a = and i16 %x, 128
@@ -332,7 +338,10 @@ define i16 @test_set_mask_i16_i8(i16 %x) nounwind {
; RV32-NEXT: beqz a1, .LBB11_2
; RV32-NEXT: # %bb.1: # %t
; RV32-NEXT: li a0, 42
-; RV32-NEXT: .LBB11_2: # %f
+; RV32-NEXT: ret
+; RV32-NEXT: .LBB11_2:
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: srli a0, a0, 16
; RV32-NEXT: ret
;
; RV64-LABEL: test_set_mask_i16_i8:
@@ -341,7 +350,10 @@ define i16 @test_set_mask_i16_i8(i16 %x) nounwind {
; RV64-NEXT: beqz a1, .LBB11_2
; RV64-NEXT: # %bb.1: # %t
; RV64-NEXT: li a0, 42
-; RV64-NEXT: .LBB11_2: # %f
+; RV64-NEXT: ret
+; RV64-NEXT: .LBB11_2:
+; RV64-NEXT: slli a0, a0, 48
+; RV64-NEXT: srli a0, a0, 48
; RV64-NEXT: ret
entry:
%a = and i16 %x, 128
@@ -361,7 +373,10 @@ define i16 @test_set_mask_i16_i7(i16 %x) nounwind {
; RV32-NEXT: beqz a1, .LBB12_2
; RV32-NEXT: # %bb.1: # %t
; RV32-NEXT: li a0, 42
-; RV32-NEXT: .LBB12_2: # %f
+; RV32-NEXT: ret
+; RV32-NEXT: .LBB12_2:
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: srli a0, a0, 16
; RV32-NEXT: ret
;
; RV64-LABEL: test_set_mask_i16_i7:
@@ -370,7 +385,10 @@ define i16 @test_set_mask_i16_i7(i16 %x) nounwind {
; RV64-NEXT: beqz a1, .LBB12_2
; RV64-NEXT: # %bb.1: # %t
; RV64-NEXT: li a0, 42
-; RV64-NEXT: .LBB12_2: # %f
+; RV64-NEXT: ret
+; RV64-NEXT: .LBB12_2:
+; RV64-NEXT: slli a0, a0, 48
+; RV64-NEXT: srli a0, a0, 48
; RV64-NEXT: ret
entry:
%a = and i16 %x, 64
diff --git a/llvm/test/CodeGen/RISCV/signed-truncation-check.ll b/llvm/test/CodeGen/RISCV/signed-truncation-check.ll
index 0860853..de36bcd 100644
--- a/llvm/test/CodeGen/RISCV/signed-truncation-check.ll
+++ b/llvm/test/CodeGen/RISCV/signed-truncation-check.ll
@@ -254,23 +254,43 @@ define i1 @shifts_eqcmp_i64_i8(i64 %x) nounwind {
; ---------------------------------------------------------------------------- ;
define i1 @add_ugecmp_i16_i8(i16 %x) nounwind {
-; RV32-LABEL: add_ugecmp_i16_i8:
-; RV32: # %bb.0:
-; RV32-NEXT: addi a0, a0, -128
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: srli a0, a0, 24
-; RV32-NEXT: sltiu a0, a0, 255
-; RV32-NEXT: xori a0, a0, 1
-; RV32-NEXT: ret
+; RV32I-LABEL: add_ugecmp_i16_i8:
+; RV32I: # %bb.0:
+; RV32I-NEXT: slli a0, a0, 16
+; RV32I-NEXT: srli a0, a0, 16
+; RV32I-NEXT: addi a0, a0, -128
+; RV32I-NEXT: srli a0, a0, 8
+; RV32I-NEXT: sltiu a0, a0, 255
+; RV32I-NEXT: xori a0, a0, 1
+; RV32I-NEXT: ret
;
-; RV64-LABEL: add_ugecmp_i16_i8:
-; RV64: # %bb.0:
-; RV64-NEXT: addi a0, a0, -128
-; RV64-NEXT: slli a0, a0, 48
-; RV64-NEXT: srli a0, a0, 56
-; RV64-NEXT: sltiu a0, a0, 255
-; RV64-NEXT: xori a0, a0, 1
-; RV64-NEXT: ret
+; RV64I-LABEL: add_ugecmp_i16_i8:
+; RV64I: # %bb.0:
+; RV64I-NEXT: slli a0, a0, 48
+; RV64I-NEXT: srli a0, a0, 48
+; RV64I-NEXT: addi a0, a0, -128
+; RV64I-NEXT: srli a0, a0, 8
+; RV64I-NEXT: sltiu a0, a0, 255
+; RV64I-NEXT: xori a0, a0, 1
+; RV64I-NEXT: ret
+;
+; RV32ZBB-LABEL: add_ugecmp_i16_i8:
+; RV32ZBB: # %bb.0:
+; RV32ZBB-NEXT: zext.h a0, a0
+; RV32ZBB-NEXT: addi a0, a0, -128
+; RV32ZBB-NEXT: srli a0, a0, 8
+; RV32ZBB-NEXT: sltiu a0, a0, 255
+; RV32ZBB-NEXT: xori a0, a0, 1
+; RV32ZBB-NEXT: ret
+;
+; RV64ZBB-LABEL: add_ugecmp_i16_i8:
+; RV64ZBB: # %bb.0:
+; RV64ZBB-NEXT: zext.h a0, a0
+; RV64ZBB-NEXT: addi a0, a0, -128
+; RV64ZBB-NEXT: srli a0, a0, 8
+; RV64ZBB-NEXT: sltiu a0, a0, 255
+; RV64ZBB-NEXT: xori a0, a0, 1
+; RV64ZBB-NEXT: ret
%tmp0 = add i16 %x, -128 ; ~0U << (8-1)
%tmp1 = icmp uge i16 %tmp0, -256 ; ~0U << 8
ret i1 %tmp1
@@ -471,23 +491,43 @@ define i1 @add_ugecmp_i64_i8(i64 %x) nounwind {
; Slightly more canonical variant
define i1 @add_ugtcmp_i16_i8(i16 %x) nounwind {
-; RV32-LABEL: add_ugtcmp_i16_i8:
-; RV32: # %bb.0:
-; RV32-NEXT: addi a0, a0, -128
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: srli a0, a0, 24
-; RV32-NEXT: sltiu a0, a0, 255
-; RV32-NEXT: xori a0, a0, 1
-; RV32-NEXT: ret
+; RV32I-LABEL: add_ugtcmp_i16_i8:
+; RV32I: # %bb.0:
+; RV32I-NEXT: slli a0, a0, 16
+; RV32I-NEXT: srli a0, a0, 16
+; RV32I-NEXT: addi a0, a0, -128
+; RV32I-NEXT: srli a0, a0, 8
+; RV32I-NEXT: sltiu a0, a0, 255
+; RV32I-NEXT: xori a0, a0, 1
+; RV32I-NEXT: ret
;
-; RV64-LABEL: add_ugtcmp_i16_i8:
-; RV64: # %bb.0:
-; RV64-NEXT: addi a0, a0, -128
-; RV64-NEXT: slli a0, a0, 48
-; RV64-NEXT: srli a0, a0, 56
-; RV64-NEXT: sltiu a0, a0, 255
-; RV64-NEXT: xori a0, a0, 1
-; RV64-NEXT: ret
+; RV64I-LABEL: add_ugtcmp_i16_i8:
+; RV64I: # %bb.0:
+; RV64I-NEXT: slli a0, a0, 48
+; RV64I-NEXT: srli a0, a0, 48
+; RV64I-NEXT: addi a0, a0, -128
+; RV64I-NEXT: srli a0, a0, 8
+; RV64I-NEXT: sltiu a0, a0, 255
+; RV64I-NEXT: xori a0, a0, 1
+; RV64I-NEXT: ret
+;
+; RV32ZBB-LABEL: add_ugtcmp_i16_i8:
+; RV32ZBB: # %bb.0:
+; RV32ZBB-NEXT: zext.h a0, a0
+; RV32ZBB-NEXT: addi a0, a0, -128
+; RV32ZBB-NEXT: srli a0, a0, 8
+; RV32ZBB-NEXT: sltiu a0, a0, 255
+; RV32ZBB-NEXT: xori a0, a0, 1
+; RV32ZBB-NEXT: ret
+;
+; RV64ZBB-LABEL: add_ugtcmp_i16_i8:
+; RV64ZBB: # %bb.0:
+; RV64ZBB-NEXT: zext.h a0, a0
+; RV64ZBB-NEXT: addi a0, a0, -128
+; RV64ZBB-NEXT: srli a0, a0, 8
+; RV64ZBB-NEXT: sltiu a0, a0, 255
+; RV64ZBB-NEXT: xori a0, a0, 1
+; RV64ZBB-NEXT: ret
%tmp0 = add i16 %x, -128 ; ~0U << (8-1)
%tmp1 = icmp ugt i16 %tmp0, -257 ; ~0U << 8 - 1
ret i1 %tmp1
diff --git a/llvm/test/CodeGen/RISCV/typepromotion-overflow.ll b/llvm/test/CodeGen/RISCV/typepromotion-overflow.ll
new file mode 100644
index 0000000..3740dc6
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/typepromotion-overflow.ll
@@ -0,0 +1,387 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv64 -mattr=+m %s -o - | FileCheck %s
+
+define zeroext i16 @overflow_add(i16 zeroext %a, i16 zeroext %b) {
+; CHECK-LABEL: overflow_add:
+; CHECK: # %bb.0:
+; CHECK-NEXT: add a0, a1, a0
+; CHECK-NEXT: ori a0, a0, 1
+; CHECK-NEXT: slli a0, a0, 48
+; CHECK-NEXT: srli a1, a0, 48
+; CHECK-NEXT: li a2, 1024
+; CHECK-NEXT: li a0, 2
+; CHECK-NEXT: bltu a2, a1, .LBB0_2
+; CHECK-NEXT: # %bb.1:
+; CHECK-NEXT: li a0, 5
+; CHECK-NEXT: .LBB0_2:
+; CHECK-NEXT: ret
+ %add = add i16 %b, %a
+ %or = or i16 %add, 1
+ %cmp = icmp ugt i16 %or, 1024
+ %res = select i1 %cmp, i16 2, i16 5
+ ret i16 %res
+}
+
+define zeroext i16 @overflow_sub(i16 zeroext %a, i16 zeroext %b) {
+; CHECK-LABEL: overflow_sub:
+; CHECK: # %bb.0:
+; CHECK-NEXT: subw a0, a0, a1
+; CHECK-NEXT: ori a0, a0, 1
+; CHECK-NEXT: slli a0, a0, 48
+; CHECK-NEXT: srli a1, a0, 48
+; CHECK-NEXT: li a2, 1024
+; CHECK-NEXT: li a0, 2
+; CHECK-NEXT: bltu a2, a1, .LBB1_2
+; CHECK-NEXT: # %bb.1:
+; CHECK-NEXT: li a0, 5
+; CHECK-NEXT: .LBB1_2:
+; CHECK-NEXT: ret
+ %add = sub i16 %a, %b
+ %or = or i16 %add, 1
+ %cmp = icmp ugt i16 %or, 1024
+ %res = select i1 %cmp, i16 2, i16 5
+ ret i16 %res
+}
+
+define zeroext i16 @overflow_mul(i16 zeroext %a, i16 zeroext %b) {
+; CHECK-LABEL: overflow_mul:
+; CHECK: # %bb.0:
+; CHECK-NEXT: mul a0, a1, a0
+; CHECK-NEXT: ori a0, a0, 1
+; CHECK-NEXT: slli a0, a0, 48
+; CHECK-NEXT: srli a1, a0, 48
+; CHECK-NEXT: li a2, 1024
+; CHECK-NEXT: li a0, 2
+; CHECK-NEXT: bltu a2, a1, .LBB2_2
+; CHECK-NEXT: # %bb.1:
+; CHECK-NEXT: li a0, 5
+; CHECK-NEXT: .LBB2_2:
+; CHECK-NEXT: ret
+ %add = mul i16 %b, %a
+ %or = or i16 %add, 1
+ %cmp = icmp ugt i16 %or, 1024
+ %res = select i1 %cmp, i16 2, i16 5
+ ret i16 %res
+}
+
+define zeroext i16 @overflow_shl(i16 zeroext %a, i16 zeroext %b) {
+; CHECK-LABEL: overflow_shl:
+; CHECK: # %bb.0:
+; CHECK-NEXT: sll a0, a0, a1
+; CHECK-NEXT: ori a0, a0, 1
+; CHECK-NEXT: slli a0, a0, 48
+; CHECK-NEXT: srli a1, a0, 48
+; CHECK-NEXT: li a2, 1024
+; CHECK-NEXT: li a0, 2
+; CHECK-NEXT: bltu a2, a1, .LBB3_2
+; CHECK-NEXT: # %bb.1:
+; CHECK-NEXT: li a0, 5
+; CHECK-NEXT: .LBB3_2:
+; CHECK-NEXT: ret
+ %add = shl i16 %a, %b
+ %or = or i16 %add, 1
+ %cmp = icmp ugt i16 %or, 1024
+ %res = select i1 %cmp, i16 2, i16 5
+ ret i16 %res
+}
+
+define i32 @overflow_add_no_consts(i8 zeroext %a, i8 zeroext %b, i8 zeroext %limit) {
+; CHECK-LABEL: overflow_add_no_consts:
+; CHECK: # %bb.0:
+; CHECK-NEXT: add a0, a1, a0
+; CHECK-NEXT: andi a1, a0, 255
+; CHECK-NEXT: li a0, 8
+; CHECK-NEXT: bltu a2, a1, .LBB4_2
+; CHECK-NEXT: # %bb.1:
+; CHECK-NEXT: li a0, 16
+; CHECK-NEXT: .LBB4_2:
+; CHECK-NEXT: ret
+ %add = add i8 %b, %a
+ %cmp = icmp ugt i8 %add, %limit
+ %res = select i1 %cmp, i32 8, i32 16
+ ret i32 %res
+}
+
+define i32 @overflow_add_const_limit(i8 zeroext %a, i8 zeroext %b) {
+; CHECK-LABEL: overflow_add_const_limit:
+; CHECK: # %bb.0:
+; CHECK-NEXT: add a0, a1, a0
+; CHECK-NEXT: andi a1, a0, 255
+; CHECK-NEXT: li a2, 128
+; CHECK-NEXT: li a0, 8
+; CHECK-NEXT: bltu a2, a1, .LBB5_2
+; CHECK-NEXT: # %bb.1:
+; CHECK-NEXT: li a0, 16
+; CHECK-NEXT: .LBB5_2:
+; CHECK-NEXT: ret
+ %add = add i8 %b, %a
+ %cmp = icmp ugt i8 %add, -128
+ %res = select i1 %cmp, i32 8, i32 16
+ ret i32 %res
+}
+
+define i32 @overflow_add_positive_const_limit(i8 zeroext %a) {
+; CHECK-LABEL: overflow_add_positive_const_limit:
+; CHECK: # %bb.0:
+; CHECK-NEXT: slli a0, a0, 56
+; CHECK-NEXT: srai a1, a0, 56
+; CHECK-NEXT: li a2, -1
+; CHECK-NEXT: li a0, 8
+; CHECK-NEXT: blt a1, a2, .LBB6_2
+; CHECK-NEXT: # %bb.1:
+; CHECK-NEXT: li a0, 16
+; CHECK-NEXT: .LBB6_2:
+; CHECK-NEXT: ret
+ %cmp = icmp slt i8 %a, -1
+ %res = select i1 %cmp, i32 8, i32 16
+ ret i32 %res
+}
+
+define i32 @unsafe_add_underflow(i8 zeroext %a) {
+; CHECK-LABEL: unsafe_add_underflow:
+; CHECK: # %bb.0:
+; CHECK-NEXT: mv a1, a0
+; CHECK-NEXT: li a2, 1
+; CHECK-NEXT: li a0, 8
+; CHECK-NEXT: beq a1, a2, .LBB7_2
+; CHECK-NEXT: # %bb.1:
+; CHECK-NEXT: li a0, 16
+; CHECK-NEXT: .LBB7_2:
+; CHECK-NEXT: ret
+ %cmp = icmp eq i8 %a, 1
+ %res = select i1 %cmp, i32 8, i32 16
+ ret i32 %res
+}
+
+define i32 @safe_add_underflow(i8 zeroext %a) {
+; CHECK-LABEL: safe_add_underflow:
+; CHECK: # %bb.0:
+; CHECK-NEXT: mv a1, a0
+; CHECK-NEXT: li a0, 8
+; CHECK-NEXT: beqz a1, .LBB8_2
+; CHECK-NEXT: # %bb.1:
+; CHECK-NEXT: li a0, 16
+; CHECK-NEXT: .LBB8_2:
+; CHECK-NEXT: ret
+ %cmp = icmp eq i8 %a, 0
+ %res = select i1 %cmp, i32 8, i32 16
+ ret i32 %res
+}
+
+define i32 @safe_add_underflow_neg(i8 zeroext %a) {
+; CHECK-LABEL: safe_add_underflow_neg:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a1, a0, -2
+; CHECK-NEXT: li a2, 251
+; CHECK-NEXT: li a0, 8
+; CHECK-NEXT: bltu a1, a2, .LBB9_2
+; CHECK-NEXT: # %bb.1:
+; CHECK-NEXT: li a0, 16
+; CHECK-NEXT: .LBB9_2:
+; CHECK-NEXT: ret
+ %add = add i8 %a, -2
+ %cmp = icmp ult i8 %add, -5
+ %res = select i1 %cmp, i32 8, i32 16
+ ret i32 %res
+}
+
+define i32 @overflow_sub_negative_const_limit(i8 zeroext %a) {
+; CHECK-LABEL: overflow_sub_negative_const_limit:
+; CHECK: # %bb.0:
+; CHECK-NEXT: slli a0, a0, 56
+; CHECK-NEXT: srai a1, a0, 56
+; CHECK-NEXT: li a2, -1
+; CHECK-NEXT: li a0, 8
+; CHECK-NEXT: blt a1, a2, .LBB10_2
+; CHECK-NEXT: # %bb.1:
+; CHECK-NEXT: li a0, 16
+; CHECK-NEXT: .LBB10_2:
+; CHECK-NEXT: ret
+ %cmp = icmp slt i8 %a, -1
+ %res = select i1 %cmp, i32 8, i32 16
+ ret i32 %res
+}
+
+; This is valid so long as the icmp immediate is sext.
+define i32 @sext_sub_underflow(i8 zeroext %a) {
+; CHECK-LABEL: sext_sub_underflow:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a1, a0, -6
+; CHECK-NEXT: li a2, -6
+; CHECK-NEXT: li a0, 8
+; CHECK-NEXT: bltu a2, a1, .LBB11_2
+; CHECK-NEXT: # %bb.1:
+; CHECK-NEXT: li a0, 16
+; CHECK-NEXT: .LBB11_2:
+; CHECK-NEXT: ret
+ %sub = add i8 %a, -6
+ %cmp = icmp ugt i8 %sub, -6
+ %res = select i1 %cmp, i32 8, i32 16
+ ret i32 %res
+}
+
+define i32 @safe_sub_underflow(i8 zeroext %a) {
+; CHECK-LABEL: safe_sub_underflow:
+; CHECK: # %bb.0:
+; CHECK-NEXT: mv a1, a0
+; CHECK-NEXT: li a0, 16
+; CHECK-NEXT: beqz a1, .LBB12_2
+; CHECK-NEXT: # %bb.1:
+; CHECK-NEXT: li a0, 8
+; CHECK-NEXT: .LBB12_2:
+; CHECK-NEXT: ret
+ %cmp.not = icmp eq i8 %a, 0
+ %res = select i1 %cmp.not, i32 16, i32 8
+ ret i32 %res
+}
+
+define i32 @safe_sub_underflow_neg(i8 zeroext %a) {
+; CHECK-LABEL: safe_sub_underflow_neg:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a1, a0, -4
+; CHECK-NEXT: li a2, 250
+; CHECK-NEXT: li a0, 8
+; CHECK-NEXT: bltu a2, a1, .LBB13_2
+; CHECK-NEXT: # %bb.1:
+; CHECK-NEXT: li a0, 16
+; CHECK-NEXT: .LBB13_2:
+; CHECK-NEXT: ret
+ %sub = add i8 %a, -4
+ %cmp = icmp ugt i8 %sub, -6
+ %res = select i1 %cmp, i32 8, i32 16
+ ret i32 %res
+}
+
+; This is valid so long as the icmp immediate is sext.
+define i32 @sext_sub_underflow_neg(i8 zeroext %a) {
+; CHECK-LABEL: sext_sub_underflow_neg:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a1, a0, -4
+; CHECK-NEXT: li a2, -3
+; CHECK-NEXT: li a0, 8
+; CHECK-NEXT: bltu a1, a2, .LBB14_2
+; CHECK-NEXT: # %bb.1:
+; CHECK-NEXT: li a0, 16
+; CHECK-NEXT: .LBB14_2:
+; CHECK-NEXT: ret
+ %sub = add i8 %a, -4
+ %cmp = icmp ult i8 %sub, -3
+ %res = select i1 %cmp, i32 8, i32 16
+ ret i32 %res
+}
+
+define i32 @safe_sub_imm_var(ptr nocapture readonly %b) local_unnamed_addr #1 {
+; CHECK-LABEL: safe_sub_imm_var:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: li a0, 0
+; CHECK-NEXT: ret
+entry:
+ ret i32 0
+}
+
+define i32 @safe_sub_var_imm(ptr nocapture readonly %b) local_unnamed_addr #1 {
+; CHECK-LABEL: safe_sub_var_imm:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: lbu a0, 0(a0)
+; CHECK-NEXT: addi a0, a0, 8
+; CHECK-NEXT: andi a0, a0, 255
+; CHECK-NEXT: sltiu a0, a0, 253
+; CHECK-NEXT: xori a0, a0, 1
+; CHECK-NEXT: ret
+entry:
+ %0 = load i8, ptr %b, align 1
+ %sub = add nsw i8 %0, 8
+ %cmp = icmp ugt i8 %sub, -4
+ %conv4 = zext i1 %cmp to i32
+ ret i32 %conv4
+}
+
+define i32 @safe_add_imm_var(ptr nocapture readnone %b) {
+; CHECK-LABEL: safe_add_imm_var:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: li a0, 1
+; CHECK-NEXT: ret
+entry:
+ ret i32 1
+}
+
+define i32 @safe_add_var_imm(ptr nocapture readnone %b) {
+; CHECK-LABEL: safe_add_var_imm:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: li a0, 1
+; CHECK-NEXT: ret
+entry:
+ ret i32 1
+}
+
+define i8 @convert_add_order(i8 zeroext %arg) {
+; CHECK-LABEL: convert_add_order:
+; CHECK: # %bb.0:
+; CHECK-NEXT: ori a1, a0, 1
+; CHECK-NEXT: li a2, 50
+; CHECK-NEXT: bltu a1, a2, .LBB19_2
+; CHECK-NEXT: # %bb.1:
+; CHECK-NEXT: li a1, 255
+; CHECK-NEXT: and a0, a1, a0
+; CHECK-NEXT: ret
+; CHECK-NEXT: .LBB19_2:
+; CHECK-NEXT: addi a1, a1, -40
+; CHECK-NEXT: sltiu a1, a1, 20
+; CHECK-NEXT: li a2, 2
+; CHECK-NEXT: sub a1, a2, a1
+; CHECK-NEXT: and a0, a1, a0
+; CHECK-NEXT: ret
+ %shl = or i8 %arg, 1
+ %cmp.0 = icmp ult i8 %shl, 50
+ %sub = add nsw i8 %shl, -40
+ %cmp.1 = icmp ult i8 %sub, 20
+ %mask.sel.v = select i1 %cmp.1, i8 1, i8 2
+ %mask.sel = select i1 %cmp.0, i8 %mask.sel.v, i8 -1
+ %res = and i8 %mask.sel, %arg
+ ret i8 %res
+}
+
+define i8 @underflow_if_sub(i32 %arg, i8 zeroext %arg1) {
+; CHECK-LABEL: underflow_if_sub:
+; CHECK: # %bb.0:
+; CHECK-NEXT: sext.w a2, a0
+; CHECK-NEXT: sgtz a2, a2
+; CHECK-NEXT: and a0, a2, a0
+; CHECK-NEXT: addi a0, a0, 245
+; CHECK-NEXT: bltu a0, a1, .LBB20_2
+; CHECK-NEXT: # %bb.1:
+; CHECK-NEXT: li a0, 100
+; CHECK-NEXT: .LBB20_2:
+; CHECK-NEXT: ret
+ %cmp = icmp sgt i32 %arg, 0
+ %conv = zext i1 %cmp to i32
+ %and = and i32 %conv, %arg
+ %trunc = trunc i32 %and to i8
+ %conv1 = add nuw nsw i8 %trunc, -11
+ %cmp.1 = icmp ult i8 %conv1, %arg1
+ %res = select i1 %cmp.1, i8 %conv1, i8 100
+ ret i8 %res
+}
+
+define i8 @underflow_if_sub_signext(i32 %arg, i8 signext %arg1) {
+; CHECK-LABEL: underflow_if_sub_signext:
+; CHECK: # %bb.0:
+; CHECK-NEXT: sext.w a2, a0
+; CHECK-NEXT: andi a1, a1, 255
+; CHECK-NEXT: sgtz a2, a2
+; CHECK-NEXT: and a0, a2, a0
+; CHECK-NEXT: addi a0, a0, 245
+; CHECK-NEXT: bltu a0, a1, .LBB21_2
+; CHECK-NEXT: # %bb.1:
+; CHECK-NEXT: li a0, 100
+; CHECK-NEXT: .LBB21_2:
+; CHECK-NEXT: ret
+ %cmp = icmp sgt i32 %arg, 0
+ %conv = zext i1 %cmp to i32
+ %and = and i32 %conv, %arg
+ %trunc = trunc i32 %and to i8
+ %conv1 = add nuw nsw i8 %trunc, -11
+ %cmp.1 = icmp ult i8 %conv1, %arg1
+ %res = select i1 %cmp.1, i8 %conv1, i8 100
+ ret i8 %res
+}
diff --git a/llvm/test/CodeGen/SPARC/64atomics.ll b/llvm/test/CodeGen/SPARC/64atomics.ll
index 2c00f95..feb37fd 100644
--- a/llvm/test/CodeGen/SPARC/64atomics.ll
+++ b/llvm/test/CodeGen/SPARC/64atomics.ll
@@ -1,12 +1,14 @@
-; RUN: llc < %s -march=sparcv9 -verify-machineinstrs | FileCheck %s
+; RUN: llc < %s -march=sparc -mcpu=v9 -verify-machineinstrs | FileCheck %s --check-prefixes=SPARC,SPARC32
+; RUN: llc < %s -march=sparcv9 -verify-machineinstrs | FileCheck %s --check-prefixes=SPARC,SPARC64
-; CHECK-LABEL: test_atomic_i64
-; CHECK: ldx [%o0]
-; CHECK: membar
-; CHECK: ldx [%o1]
-; CHECK: membar
-; CHECK: membar
-; CHECK: stx {{.+}}, [%o2]
+; SPARC-LABEL: test_atomic_i64
+; SPARC32: __atomic_load_8
+; SPARC64: ldx [%o0]
+; SPARC64: membar
+; SPARC64: ldx [%o1]
+; SPARC64: membar
+; SPARC64: membar
+; SPARC64: stx {{.+}}, [%o2]
define i64 @test_atomic_i64(ptr %ptr1, ptr %ptr2, ptr %ptr3) {
entry:
%0 = load atomic i64, ptr %ptr1 acquire, align 8
@@ -16,9 +18,10 @@ entry:
ret i64 %2
}
-; CHECK-LABEL: test_cmpxchg_i64
-; CHECK: mov 123, [[R:%[gilo][0-7]]]
-; CHECK: casx [%o1], %o0, [[R]]
+; SPARC-LABEL: test_cmpxchg_i64
+; SPARC32: __atomic_compare_exchange_8
+; SPARC64: mov 123, [[R:%[gilo][0-7]]]
+; SPARC64: casx [%o1], %o0, [[R]]
define i64 @test_cmpxchg_i64(i64 %a, ptr %ptr) {
entry:
@@ -27,8 +30,9 @@ entry:
ret i64 %b
}
-; CHECK-LABEL: test_swap_i64
-; CHECK: casx [%o1],
+; SPARC-LABEL: test_swap_i64
+; SPARC32: __atomic_exchange_8
+; SPARC64: casx [%o1],
define i64 @test_swap_i64(i64 %a, ptr %ptr) {
entry:
@@ -36,23 +40,25 @@ entry:
ret i64 %b
}
-; CHECK-LABEL: test_load_sub_64
-; CHECK: membar
-; CHECK: sub
-; CHECK: casx [%o0]
-; CHECK: membar
+; SPARC-LABEL: test_load_sub_64
+; SPARC32: __atomic_fetch_sub_8
+; SPARC64: membar
+; SPARC64: sub
+; SPARC64: casx [%o0]
+; SPARC64: membar
define zeroext i64 @test_load_sub_64(ptr %p, i64 zeroext %v) {
entry:
%0 = atomicrmw sub ptr %p, i64 %v seq_cst
ret i64 %0
}
-; CHECK-LABEL: test_load_max_64
-; CHECK: membar
-; CHECK: cmp
-; CHECK: movg %xcc
-; CHECK: casx [%o0]
-; CHECK: membar
+; SPARC-LABEL: test_load_max_64
+; SPARC32: __atomic_compare_exchange_8
+; SPARC64: membar
+; SPARC64: cmp
+; SPARC64: movg %xcc
+; SPARC64: casx [%o0]
+; SPARC64: membar
define zeroext i64 @test_load_max_64(ptr %p, i64 zeroext %v) {
entry:
%0 = atomicrmw max ptr %p, i64 %v seq_cst
diff --git a/llvm/test/CodeGen/SPARC/atomicrmw-uinc-udec-wrap.ll b/llvm/test/CodeGen/SPARC/atomicrmw-uinc-udec-wrap.ll
index 9b49035..0f9feeb 100644
--- a/llvm/test/CodeGen/SPARC/atomicrmw-uinc-udec-wrap.ll
+++ b/llvm/test/CodeGen/SPARC/atomicrmw-uinc-udec-wrap.ll
@@ -117,43 +117,41 @@ define i64 @atomicrmw_uinc_wrap_i64(ptr %ptr, i64 %val) {
; CHECK-LABEL: atomicrmw_uinc_wrap_i64:
; CHECK: .cfi_startproc
; CHECK-NEXT: ! %bb.0:
-; CHECK-NEXT: save %sp, -96, %sp
+; CHECK-NEXT: save %sp, -104, %sp
; CHECK-NEXT: .cfi_def_cfa_register %fp
; CHECK-NEXT: .cfi_window_save
; CHECK-NEXT: .cfi_register %o7, %i7
-; CHECK-NEXT: membar #LoadLoad | #StoreLoad | #LoadStore | #StoreStore
-; CHECK-NEXT: ldd [%i0], %i4
+; CHECK-NEXT: ldd [%i0], %g2
+; CHECK-NEXT: add %fp, -8, %i3
+; CHECK-NEXT: mov 5, %i4
; CHECK-NEXT: .LBB3_1: ! %atomicrmw.start
; CHECK-NEXT: ! =>This Inner Loop Header: Depth=1
-; CHECK-NEXT: mov %g0, %i3
-; CHECK-NEXT: mov %g0, %g2
-; CHECK-NEXT: addcc %i5, 1, %o4
-; CHECK-NEXT: addxcc %i4, 0, %o3
-; CHECK-NEXT: cmp %i4, %i1
-; CHECK-NEXT: movcc %icc, 1, %i3
-; CHECK-NEXT: cmp %i5, %i2
-; CHECK-NEXT: movcc %icc, 1, %g2
-; CHECK-NEXT: cmp %i4, %i1
-; CHECK-NEXT: move %icc, %g2, %i3
-; CHECK-NEXT: cmp %i3, 0
+; CHECK-NEXT: mov %g0, %i5
+; CHECK-NEXT: mov %g0, %g4
+; CHECK-NEXT: addcc %g3, 1, %o3
+; CHECK-NEXT: addxcc %g2, 0, %o2
+; CHECK-NEXT: cmp %g2, %i1
+; CHECK-NEXT: movcc %icc, 1, %i5
+; CHECK-NEXT: cmp %g3, %i2
+; CHECK-NEXT: movcc %icc, 1, %g4
+; CHECK-NEXT: cmp %g2, %i1
+; CHECK-NEXT: move %icc, %g4, %i5
+; CHECK-NEXT: cmp %i5, 0
+; CHECK-NEXT: movne %icc, 0, %o2
; CHECK-NEXT: movne %icc, 0, %o3
-; CHECK-NEXT: movne %icc, 0, %o4
+; CHECK-NEXT: std %g2, [%fp+-8]
; CHECK-NEXT: mov %i0, %o0
-; CHECK-NEXT: mov %i4, %o1
-; CHECK-NEXT: call __sync_val_compare_and_swap_8
-; CHECK-NEXT: mov %i5, %o2
-; CHECK-NEXT: xor %o0, %i4, %i3
-; CHECK-NEXT: xor %o1, %i5, %i4
-; CHECK-NEXT: or %i4, %i3, %i3
-; CHECK-NEXT: mov %o1, %i5
-; CHECK-NEXT: cmp %i3, 0
-; CHECK-NEXT: bne %icc, .LBB3_1
-; CHECK-NEXT: mov %o0, %i4
+; CHECK-NEXT: mov %i3, %o1
+; CHECK-NEXT: mov %i4, %o4
+; CHECK-NEXT: call __atomic_compare_exchange_8
+; CHECK-NEXT: mov %i4, %o5
+; CHECK-NEXT: cmp %o0, 0
+; CHECK-NEXT: be %icc, .LBB3_1
+; CHECK-NEXT: ldd [%fp+-8], %g2
; CHECK-NEXT: ! %bb.2: ! %atomicrmw.end
-; CHECK-NEXT: membar #LoadLoad | #StoreLoad | #LoadStore | #StoreStore
-; CHECK-NEXT: mov %i4, %i0
+; CHECK-NEXT: mov %g2, %i0
; CHECK-NEXT: ret
-; CHECK-NEXT: restore %g0, %i5, %o1
+; CHECK-NEXT: restore %g0, %g3, %o1
%result = atomicrmw uinc_wrap ptr %ptr, i64 %val seq_cst
ret i64 %result
}
@@ -280,48 +278,46 @@ define i64 @atomicrmw_udec_wrap_i64(ptr %ptr, i64 %val) {
; CHECK-LABEL: atomicrmw_udec_wrap_i64:
; CHECK: .cfi_startproc
; CHECK-NEXT: ! %bb.0:
-; CHECK-NEXT: save %sp, -96, %sp
+; CHECK-NEXT: save %sp, -104, %sp
; CHECK-NEXT: .cfi_def_cfa_register %fp
; CHECK-NEXT: .cfi_window_save
; CHECK-NEXT: .cfi_register %o7, %i7
-; CHECK-NEXT: membar #LoadLoad | #StoreLoad | #LoadStore | #StoreStore
-; CHECK-NEXT: ldd [%i0], %i4
+; CHECK-NEXT: ldd [%i0], %g2
+; CHECK-NEXT: add %fp, -8, %i3
+; CHECK-NEXT: mov 5, %i4
; CHECK-NEXT: .LBB7_1: ! %atomicrmw.start
; CHECK-NEXT: ! =>This Inner Loop Header: Depth=1
-; CHECK-NEXT: mov %g0, %i3
-; CHECK-NEXT: mov %g0, %g2
-; CHECK-NEXT: mov %g0, %g3
-; CHECK-NEXT: addcc %i5, -1, %o4
-; CHECK-NEXT: addxcc %i4, -1, %o3
-; CHECK-NEXT: or %i5, %i4, %g4
-; CHECK-NEXT: cmp %g4, 0
-; CHECK-NEXT: move %icc, 1, %i3
-; CHECK-NEXT: cmp %i4, %i1
-; CHECK-NEXT: movgu %icc, 1, %g2
-; CHECK-NEXT: cmp %i5, %i2
-; CHECK-NEXT: movgu %icc, 1, %g3
-; CHECK-NEXT: cmp %i4, %i1
-; CHECK-NEXT: move %icc, %g3, %g2
-; CHECK-NEXT: or %i3, %g2, %i3
-; CHECK-NEXT: cmp %i3, 0
-; CHECK-NEXT: movne %icc, %i1, %o3
-; CHECK-NEXT: movne %icc, %i2, %o4
+; CHECK-NEXT: mov %g0, %i5
+; CHECK-NEXT: mov %g0, %g4
+; CHECK-NEXT: mov %g0, %l0
+; CHECK-NEXT: addcc %g3, -1, %o3
+; CHECK-NEXT: addxcc %g2, -1, %o2
+; CHECK-NEXT: or %g3, %g2, %l1
+; CHECK-NEXT: cmp %l1, 0
+; CHECK-NEXT: move %icc, 1, %i5
+; CHECK-NEXT: cmp %g2, %i1
+; CHECK-NEXT: movgu %icc, 1, %g4
+; CHECK-NEXT: cmp %g3, %i2
+; CHECK-NEXT: movgu %icc, 1, %l0
+; CHECK-NEXT: cmp %g2, %i1
+; CHECK-NEXT: move %icc, %l0, %g4
+; CHECK-NEXT: or %i5, %g4, %i5
+; CHECK-NEXT: cmp %i5, 0
+; CHECK-NEXT: movne %icc, %i1, %o2
+; CHECK-NEXT: movne %icc, %i2, %o3
+; CHECK-NEXT: std %g2, [%fp+-8]
; CHECK-NEXT: mov %i0, %o0
-; CHECK-NEXT: mov %i4, %o1
-; CHECK-NEXT: call __sync_val_compare_and_swap_8
-; CHECK-NEXT: mov %i5, %o2
-; CHECK-NEXT: xor %o0, %i4, %i3
-; CHECK-NEXT: xor %o1, %i5, %i4
-; CHECK-NEXT: or %i4, %i3, %i3
-; CHECK-NEXT: mov %o1, %i5
-; CHECK-NEXT: cmp %i3, 0
-; CHECK-NEXT: bne %icc, .LBB7_1
-; CHECK-NEXT: mov %o0, %i4
+; CHECK-NEXT: mov %i3, %o1
+; CHECK-NEXT: mov %i4, %o4
+; CHECK-NEXT: call __atomic_compare_exchange_8
+; CHECK-NEXT: mov %i4, %o5
+; CHECK-NEXT: cmp %o0, 0
+; CHECK-NEXT: be %icc, .LBB7_1
+; CHECK-NEXT: ldd [%fp+-8], %g2
; CHECK-NEXT: ! %bb.2: ! %atomicrmw.end
-; CHECK-NEXT: membar #LoadLoad | #StoreLoad | #LoadStore | #StoreStore
-; CHECK-NEXT: mov %i4, %i0
+; CHECK-NEXT: mov %g2, %i0
; CHECK-NEXT: ret
-; CHECK-NEXT: restore %g0, %i5, %o1
+; CHECK-NEXT: restore %g0, %g3, %o1
%result = atomicrmw udec_wrap ptr %ptr, i64 %val seq_cst
ret i64 %result
}
diff --git a/llvm/test/CodeGen/WebAssembly/wasm-eh-prepare.ll b/llvm/test/CodeGen/WebAssembly/wasm-eh-prepare.ll
index bd577e3..164c138 100644
--- a/llvm/test/CodeGen/WebAssembly/wasm-eh-prepare.ll
+++ b/llvm/test/CodeGen/WebAssembly/wasm-eh-prepare.ll
@@ -2,6 +2,7 @@
; RUN: opt < %s -win-eh-prepare -demote-catchswitch-only -wasm-eh-prepare -S --mattr=+atomics,+bulk-memory | FileCheck %s
; RUN: opt < %s -passes='win-eh-prepare<demote-catchswitch-only>,wasm-eh-prepare' -S | FileCheck %s
; RUN: opt < %s -passes='win-eh-prepare<demote-catchswitch-only>,wasm-eh-prepare' -S --mattr=+atomics,+bulk-memory | FileCheck %s
+; RUN: llc < %s -wasm-enable-eh -exception-model=wasm -mattr=+exception-handling -stop-after=wasm-eh-prepare | FileCheck %s
target datalayout = "e-m:e-p:32:32-i64:64-n32:64-S128"
target triple = "wasm32-unknown-unknown"
@@ -245,7 +246,6 @@ bb.true: ; preds = %entry
bb.true.0: ; preds = %bb.true
br label %merge
-; CHECK: bb.false
bb.false: ; preds = %entry
br label %merge
diff --git a/llvm/test/CodeGen/X86/fold-sext-trunc.ll b/llvm/test/CodeGen/X86/fold-sext-trunc.ll
index 9963bb7..26a4ed0 100644
--- a/llvm/test/CodeGen/X86/fold-sext-trunc.ll
+++ b/llvm/test/CodeGen/X86/fold-sext-trunc.ll
@@ -18,7 +18,7 @@ define void @int322(i32 %foo) !dbg !5 {
entry:
%val = load i64, ptr @g_10, !dbg !16
%0 = load i32, ptr getelementptr inbounds (%struct.S1, ptr @g_10, i32 0, i32 1), align 4, !dbg !17
-; MIR: renamable {{\$r[a-z]+}} = MOVSX64rm32 {{.*}}, @g_10 + 4,{{.*}} debug-location !17 :: (dereferenceable load (s32) from @g_10 + 4)
+; MIR: renamable {{\$r[a-z]+}} = MOVSX64rm32 {{.*}}, @g_10 + 4,{{.*}} debug-location !17 :: (dereferenceable load (s32) from @g_10 + 4, basealign 8)
%1 = sext i32 %0 to i64, !dbg !18
%tmp4.i = lshr i64 %val, 32, !dbg !19
%tmp5.i = trunc i64 %tmp4.i to i32, !dbg !20
diff --git a/llvm/test/CodeGen/X86/fp128-libcalls-strict.ll b/llvm/test/CodeGen/X86/fp128-libcalls-strict.ll
index 47234c3..f1d473f 100644
--- a/llvm/test/CodeGen/X86/fp128-libcalls-strict.ll
+++ b/llvm/test/CodeGen/X86/fp128-libcalls-strict.ll
@@ -1,10 +1,10 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -O2 -mtriple=x86_64-linux-android \
; RUN: -enable-legalize-types-checking \
-; RUN: | FileCheck %s
+; RUN: | FileCheck %s --check-prefix=ANDROID
; RUN: llc < %s -O2 -mtriple=x86_64-linux-gnu \
; RUN: -enable-legalize-types-checking \
-; RUN: | FileCheck %s
+; RUN: | FileCheck %s --check-prefix=GNU
; RUN: llc < %s -O2 -mtriple=i686-linux-gnu -mattr=+sse2 \
; RUN: -enable-legalize-types-checking \
; RUN: | FileCheck %s --check-prefix=X86
@@ -19,6 +19,20 @@ define fp128 @add(fp128 %x, fp128 %y) nounwind strictfp {
; CHECK-NEXT: popq %rax
; CHECK-NEXT: retq
;
+; ANDROID-LABEL: add:
+; ANDROID: # %bb.0: # %entry
+; ANDROID-NEXT: pushq %rax
+; ANDROID-NEXT: callq __addtf3@PLT
+; ANDROID-NEXT: popq %rax
+; ANDROID-NEXT: retq
+;
+; GNU-LABEL: add:
+; GNU: # %bb.0: # %entry
+; GNU-NEXT: pushq %rax
+; GNU-NEXT: callq __addtf3@PLT
+; GNU-NEXT: popq %rax
+; GNU-NEXT: retq
+;
; X86-LABEL: add:
; X86: # %bb.0: # %entry
; X86-NEXT: pushl %esi
@@ -56,6 +70,20 @@ define fp128 @sub(fp128 %x, fp128 %y) nounwind strictfp {
; CHECK-NEXT: popq %rax
; CHECK-NEXT: retq
;
+; ANDROID-LABEL: sub:
+; ANDROID: # %bb.0: # %entry
+; ANDROID-NEXT: pushq %rax
+; ANDROID-NEXT: callq __subtf3@PLT
+; ANDROID-NEXT: popq %rax
+; ANDROID-NEXT: retq
+;
+; GNU-LABEL: sub:
+; GNU: # %bb.0: # %entry
+; GNU-NEXT: pushq %rax
+; GNU-NEXT: callq __subtf3@PLT
+; GNU-NEXT: popq %rax
+; GNU-NEXT: retq
+;
; X86-LABEL: sub:
; X86: # %bb.0: # %entry
; X86-NEXT: pushl %esi
@@ -93,6 +121,20 @@ define fp128 @mul(fp128 %x, fp128 %y) nounwind strictfp {
; CHECK-NEXT: popq %rax
; CHECK-NEXT: retq
;
+; ANDROID-LABEL: mul:
+; ANDROID: # %bb.0: # %entry
+; ANDROID-NEXT: pushq %rax
+; ANDROID-NEXT: callq __multf3@PLT
+; ANDROID-NEXT: popq %rax
+; ANDROID-NEXT: retq
+;
+; GNU-LABEL: mul:
+; GNU: # %bb.0: # %entry
+; GNU-NEXT: pushq %rax
+; GNU-NEXT: callq __multf3@PLT
+; GNU-NEXT: popq %rax
+; GNU-NEXT: retq
+;
; X86-LABEL: mul:
; X86: # %bb.0: # %entry
; X86-NEXT: pushl %esi
@@ -130,6 +172,20 @@ define fp128 @div(fp128 %x, fp128 %y) nounwind strictfp {
; CHECK-NEXT: popq %rax
; CHECK-NEXT: retq
;
+; ANDROID-LABEL: div:
+; ANDROID: # %bb.0: # %entry
+; ANDROID-NEXT: pushq %rax
+; ANDROID-NEXT: callq __divtf3@PLT
+; ANDROID-NEXT: popq %rax
+; ANDROID-NEXT: retq
+;
+; GNU-LABEL: div:
+; GNU: # %bb.0: # %entry
+; GNU-NEXT: pushq %rax
+; GNU-NEXT: callq __divtf3@PLT
+; GNU-NEXT: popq %rax
+; GNU-NEXT: retq
+;
; X86-LABEL: div:
; X86: # %bb.0: # %entry
; X86-NEXT: pushl %esi
@@ -160,12 +216,19 @@ entry:
}
define fp128 @fma(fp128 %x, fp128 %y, fp128 %z) nounwind strictfp {
-; CHECK-LABEL: fma:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: pushq %rax
-; CHECK-NEXT: callq fmaf128
-; CHECK-NEXT: popq %rax
-; CHECK-NEXT: retq
+; ANDROID-LABEL: fma:
+; ANDROID: # %bb.0: # %entry
+; ANDROID-NEXT: pushq %rax
+; ANDROID-NEXT: callq fmal@PLT
+; ANDROID-NEXT: popq %rax
+; ANDROID-NEXT: retq
+;
+; GNU-LABEL: fma:
+; GNU: # %bb.0: # %entry
+; GNU-NEXT: pushq %rax
+; GNU-NEXT: callq fmaf128@PLT
+; GNU-NEXT: popq %rax
+; GNU-NEXT: retq
;
; X86-LABEL: fma:
; X86: # %bb.0: # %entry
@@ -201,12 +264,19 @@ entry:
}
define fp128 @frem(fp128 %x, fp128 %y) nounwind strictfp {
-; CHECK-LABEL: frem:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: pushq %rax
-; CHECK-NEXT: callq fmodf128
-; CHECK-NEXT: popq %rax
-; CHECK-NEXT: retq
+; ANDROID-LABEL: frem:
+; ANDROID: # %bb.0: # %entry
+; ANDROID-NEXT: pushq %rax
+; ANDROID-NEXT: callq fmodl@PLT
+; ANDROID-NEXT: popq %rax
+; ANDROID-NEXT: retq
+;
+; GNU-LABEL: frem:
+; GNU: # %bb.0: # %entry
+; GNU-NEXT: pushq %rax
+; GNU-NEXT: callq fmodf128@PLT
+; GNU-NEXT: popq %rax
+; GNU-NEXT: retq
;
; X86-LABEL: frem:
; X86: # %bb.0: # %entry
@@ -238,12 +308,19 @@ entry:
}
define fp128 @ceil(fp128 %x) nounwind strictfp {
-; CHECK-LABEL: ceil:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: pushq %rax
-; CHECK-NEXT: callq ceilf128
-; CHECK-NEXT: popq %rax
-; CHECK-NEXT: retq
+; ANDROID-LABEL: ceil:
+; ANDROID: # %bb.0: # %entry
+; ANDROID-NEXT: pushq %rax
+; ANDROID-NEXT: callq ceill@PLT
+; ANDROID-NEXT: popq %rax
+; ANDROID-NEXT: retq
+;
+; GNU-LABEL: ceil:
+; GNU: # %bb.0: # %entry
+; GNU-NEXT: pushq %rax
+; GNU-NEXT: callq ceilf128@PLT
+; GNU-NEXT: popq %rax
+; GNU-NEXT: retq
;
; X86-LABEL: ceil:
; X86: # %bb.0: # %entry
@@ -271,12 +348,19 @@ entry:
}
define fp128 @cos(fp128 %x) nounwind strictfp {
-; CHECK-LABEL: cos:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: pushq %rax
-; CHECK-NEXT: callq cosf128
-; CHECK-NEXT: popq %rax
-; CHECK-NEXT: retq
+; ANDROID-LABEL: cos:
+; ANDROID: # %bb.0: # %entry
+; ANDROID-NEXT: pushq %rax
+; ANDROID-NEXT: callq cosl@PLT
+; ANDROID-NEXT: popq %rax
+; ANDROID-NEXT: retq
+;
+; GNU-LABEL: cos:
+; GNU: # %bb.0: # %entry
+; GNU-NEXT: pushq %rax
+; GNU-NEXT: callq cosf128@PLT
+; GNU-NEXT: popq %rax
+; GNU-NEXT: retq
;
; X86-LABEL: cos:
; X86: # %bb.0: # %entry
@@ -304,12 +388,19 @@ entry:
}
define fp128 @exp(fp128 %x) nounwind strictfp {
-; CHECK-LABEL: exp:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: pushq %rax
-; CHECK-NEXT: callq expf128
-; CHECK-NEXT: popq %rax
-; CHECK-NEXT: retq
+; ANDROID-LABEL: exp:
+; ANDROID: # %bb.0: # %entry
+; ANDROID-NEXT: pushq %rax
+; ANDROID-NEXT: callq expl@PLT
+; ANDROID-NEXT: popq %rax
+; ANDROID-NEXT: retq
+;
+; GNU-LABEL: exp:
+; GNU: # %bb.0: # %entry
+; GNU-NEXT: pushq %rax
+; GNU-NEXT: callq expf128@PLT
+; GNU-NEXT: popq %rax
+; GNU-NEXT: retq
;
; X86-LABEL: exp:
; X86: # %bb.0: # %entry
@@ -337,12 +428,19 @@ entry:
}
define fp128 @exp2(fp128 %x) nounwind strictfp {
-; CHECK-LABEL: exp2:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: pushq %rax
-; CHECK-NEXT: callq exp2f128
-; CHECK-NEXT: popq %rax
-; CHECK-NEXT: retq
+; ANDROID-LABEL: exp2:
+; ANDROID: # %bb.0: # %entry
+; ANDROID-NEXT: pushq %rax
+; ANDROID-NEXT: callq exp2l@PLT
+; ANDROID-NEXT: popq %rax
+; ANDROID-NEXT: retq
+;
+; GNU-LABEL: exp2:
+; GNU: # %bb.0: # %entry
+; GNU-NEXT: pushq %rax
+; GNU-NEXT: callq exp2f128@PLT
+; GNU-NEXT: popq %rax
+; GNU-NEXT: retq
;
; X86-LABEL: exp2:
; X86: # %bb.0: # %entry
@@ -370,12 +468,19 @@ entry:
}
define fp128 @floor(fp128 %x) nounwind strictfp {
-; CHECK-LABEL: floor:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: pushq %rax
-; CHECK-NEXT: callq floorf128
-; CHECK-NEXT: popq %rax
-; CHECK-NEXT: retq
+; ANDROID-LABEL: floor:
+; ANDROID: # %bb.0: # %entry
+; ANDROID-NEXT: pushq %rax
+; ANDROID-NEXT: callq floorl@PLT
+; ANDROID-NEXT: popq %rax
+; ANDROID-NEXT: retq
+;
+; GNU-LABEL: floor:
+; GNU: # %bb.0: # %entry
+; GNU-NEXT: pushq %rax
+; GNU-NEXT: callq floorf128@PLT
+; GNU-NEXT: popq %rax
+; GNU-NEXT: retq
;
; X86-LABEL: floor:
; X86: # %bb.0: # %entry
@@ -403,12 +508,19 @@ entry:
}
define fp128 @log(fp128 %x) nounwind strictfp {
-; CHECK-LABEL: log:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: pushq %rax
-; CHECK-NEXT: callq logf128
-; CHECK-NEXT: popq %rax
-; CHECK-NEXT: retq
+; ANDROID-LABEL: log:
+; ANDROID: # %bb.0: # %entry
+; ANDROID-NEXT: pushq %rax
+; ANDROID-NEXT: callq logl@PLT
+; ANDROID-NEXT: popq %rax
+; ANDROID-NEXT: retq
+;
+; GNU-LABEL: log:
+; GNU: # %bb.0: # %entry
+; GNU-NEXT: pushq %rax
+; GNU-NEXT: callq logf128@PLT
+; GNU-NEXT: popq %rax
+; GNU-NEXT: retq
;
; X86-LABEL: log:
; X86: # %bb.0: # %entry
@@ -436,12 +548,19 @@ entry:
}
define fp128 @log10(fp128 %x) nounwind strictfp {
-; CHECK-LABEL: log10:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: pushq %rax
-; CHECK-NEXT: callq log10f128
-; CHECK-NEXT: popq %rax
-; CHECK-NEXT: retq
+; ANDROID-LABEL: log10:
+; ANDROID: # %bb.0: # %entry
+; ANDROID-NEXT: pushq %rax
+; ANDROID-NEXT: callq log10l@PLT
+; ANDROID-NEXT: popq %rax
+; ANDROID-NEXT: retq
+;
+; GNU-LABEL: log10:
+; GNU: # %bb.0: # %entry
+; GNU-NEXT: pushq %rax
+; GNU-NEXT: callq log10f128@PLT
+; GNU-NEXT: popq %rax
+; GNU-NEXT: retq
;
; X86-LABEL: log10:
; X86: # %bb.0: # %entry
@@ -469,12 +588,19 @@ entry:
}
define fp128 @log2(fp128 %x) nounwind strictfp {
-; CHECK-LABEL: log2:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: pushq %rax
-; CHECK-NEXT: callq log2f128
-; CHECK-NEXT: popq %rax
-; CHECK-NEXT: retq
+; ANDROID-LABEL: log2:
+; ANDROID: # %bb.0: # %entry
+; ANDROID-NEXT: pushq %rax
+; ANDROID-NEXT: callq log2l@PLT
+; ANDROID-NEXT: popq %rax
+; ANDROID-NEXT: retq
+;
+; GNU-LABEL: log2:
+; GNU: # %bb.0: # %entry
+; GNU-NEXT: pushq %rax
+; GNU-NEXT: callq log2f128@PLT
+; GNU-NEXT: popq %rax
+; GNU-NEXT: retq
;
; X86-LABEL: log2:
; X86: # %bb.0: # %entry
@@ -502,12 +628,19 @@ entry:
}
define fp128 @maxnum(fp128 %x, fp128 %y) nounwind strictfp {
-; CHECK-LABEL: maxnum:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: pushq %rax
-; CHECK-NEXT: callq fmaxf128
-; CHECK-NEXT: popq %rax
-; CHECK-NEXT: retq
+; ANDROID-LABEL: maxnum:
+; ANDROID: # %bb.0: # %entry
+; ANDROID-NEXT: pushq %rax
+; ANDROID-NEXT: callq fmaxl@PLT
+; ANDROID-NEXT: popq %rax
+; ANDROID-NEXT: retq
+;
+; GNU-LABEL: maxnum:
+; GNU: # %bb.0: # %entry
+; GNU-NEXT: pushq %rax
+; GNU-NEXT: callq fmaxf128@PLT
+; GNU-NEXT: popq %rax
+; GNU-NEXT: retq
;
; X86-LABEL: maxnum:
; X86: # %bb.0: # %entry
@@ -539,12 +672,19 @@ entry:
}
define fp128 @minnum(fp128 %x, fp128 %y) nounwind strictfp {
-; CHECK-LABEL: minnum:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: pushq %rax
-; CHECK-NEXT: callq fminf128
-; CHECK-NEXT: popq %rax
-; CHECK-NEXT: retq
+; ANDROID-LABEL: minnum:
+; ANDROID: # %bb.0: # %entry
+; ANDROID-NEXT: pushq %rax
+; ANDROID-NEXT: callq fminl@PLT
+; ANDROID-NEXT: popq %rax
+; ANDROID-NEXT: retq
+;
+; GNU-LABEL: minnum:
+; GNU: # %bb.0: # %entry
+; GNU-NEXT: pushq %rax
+; GNU-NEXT: callq fminf128@PLT
+; GNU-NEXT: popq %rax
+; GNU-NEXT: retq
;
; X86-LABEL: minnum:
; X86: # %bb.0: # %entry
@@ -576,12 +716,19 @@ entry:
}
define fp128 @nearbyint(fp128 %x) nounwind strictfp {
-; CHECK-LABEL: nearbyint:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: pushq %rax
-; CHECK-NEXT: callq nearbyintf128
-; CHECK-NEXT: popq %rax
-; CHECK-NEXT: retq
+; ANDROID-LABEL: nearbyint:
+; ANDROID: # %bb.0: # %entry
+; ANDROID-NEXT: pushq %rax
+; ANDROID-NEXT: callq nearbyintl@PLT
+; ANDROID-NEXT: popq %rax
+; ANDROID-NEXT: retq
+;
+; GNU-LABEL: nearbyint:
+; GNU: # %bb.0: # %entry
+; GNU-NEXT: pushq %rax
+; GNU-NEXT: callq nearbyintf128@PLT
+; GNU-NEXT: popq %rax
+; GNU-NEXT: retq
;
; X86-LABEL: nearbyint:
; X86: # %bb.0: # %entry
@@ -609,12 +756,19 @@ entry:
}
define fp128 @pow(fp128 %x, fp128 %y) nounwind strictfp {
-; CHECK-LABEL: pow:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: pushq %rax
-; CHECK-NEXT: callq powf128
-; CHECK-NEXT: popq %rax
-; CHECK-NEXT: retq
+; ANDROID-LABEL: pow:
+; ANDROID: # %bb.0: # %entry
+; ANDROID-NEXT: pushq %rax
+; ANDROID-NEXT: callq powl@PLT
+; ANDROID-NEXT: popq %rax
+; ANDROID-NEXT: retq
+;
+; GNU-LABEL: pow:
+; GNU: # %bb.0: # %entry
+; GNU-NEXT: pushq %rax
+; GNU-NEXT: callq powf128@PLT
+; GNU-NEXT: popq %rax
+; GNU-NEXT: retq
;
; X86-LABEL: pow:
; X86: # %bb.0: # %entry
@@ -653,6 +807,20 @@ define fp128 @powi(fp128 %x, i32 %y) nounwind strictfp {
; CHECK-NEXT: popq %rax
; CHECK-NEXT: retq
;
+; ANDROID-LABEL: powi:
+; ANDROID: # %bb.0: # %entry
+; ANDROID-NEXT: pushq %rax
+; ANDROID-NEXT: callq __powitf2@PLT
+; ANDROID-NEXT: popq %rax
+; ANDROID-NEXT: retq
+;
+; GNU-LABEL: powi:
+; GNU: # %bb.0: # %entry
+; GNU-NEXT: pushq %rax
+; GNU-NEXT: callq __powitf2@PLT
+; GNU-NEXT: popq %rax
+; GNU-NEXT: retq
+;
; X86-LABEL: powi:
; X86: # %bb.0: # %entry
; X86-NEXT: pushl %esi
@@ -680,12 +848,19 @@ entry:
}
define fp128 @rint(fp128 %x) nounwind strictfp {
-; CHECK-LABEL: rint:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: pushq %rax
-; CHECK-NEXT: callq rintf128
-; CHECK-NEXT: popq %rax
-; CHECK-NEXT: retq
+; ANDROID-LABEL: rint:
+; ANDROID: # %bb.0: # %entry
+; ANDROID-NEXT: pushq %rax
+; ANDROID-NEXT: callq rintl@PLT
+; ANDROID-NEXT: popq %rax
+; ANDROID-NEXT: retq
+;
+; GNU-LABEL: rint:
+; GNU: # %bb.0: # %entry
+; GNU-NEXT: pushq %rax
+; GNU-NEXT: callq rintf128@PLT
+; GNU-NEXT: popq %rax
+; GNU-NEXT: retq
;
; X86-LABEL: rint:
; X86: # %bb.0: # %entry
@@ -713,12 +888,19 @@ entry:
}
define fp128 @round(fp128 %x) nounwind strictfp {
-; CHECK-LABEL: round:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: pushq %rax
-; CHECK-NEXT: callq roundf128
-; CHECK-NEXT: popq %rax
-; CHECK-NEXT: retq
+; ANDROID-LABEL: round:
+; ANDROID: # %bb.0: # %entry
+; ANDROID-NEXT: pushq %rax
+; ANDROID-NEXT: callq roundl@PLT
+; ANDROID-NEXT: popq %rax
+; ANDROID-NEXT: retq
+;
+; GNU-LABEL: round:
+; GNU: # %bb.0: # %entry
+; GNU-NEXT: pushq %rax
+; GNU-NEXT: callq roundf128@PLT
+; GNU-NEXT: popq %rax
+; GNU-NEXT: retq
;
; X86-LABEL: round:
; X86: # %bb.0: # %entry
@@ -746,12 +928,19 @@ entry:
}
define fp128 @roundeven(fp128 %x) nounwind strictfp {
-; CHECK-LABEL: roundeven:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: pushq %rax
-; CHECK-NEXT: callq roundevenf128
-; CHECK-NEXT: popq %rax
-; CHECK-NEXT: retq
+; ANDROID-LABEL: roundeven:
+; ANDROID: # %bb.0: # %entry
+; ANDROID-NEXT: pushq %rax
+; ANDROID-NEXT: callq roundevenl@PLT
+; ANDROID-NEXT: popq %rax
+; ANDROID-NEXT: retq
+;
+; GNU-LABEL: roundeven:
+; GNU: # %bb.0: # %entry
+; GNU-NEXT: pushq %rax
+; GNU-NEXT: callq roundevenf128@PLT
+; GNU-NEXT: popq %rax
+; GNU-NEXT: retq
;
; X86-LABEL: roundeven:
; X86: # %bb.0: # %entry
@@ -779,12 +968,19 @@ entry:
}
define fp128 @sin(fp128 %x) nounwind strictfp {
-; CHECK-LABEL: sin:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: pushq %rax
-; CHECK-NEXT: callq sinf128
-; CHECK-NEXT: popq %rax
-; CHECK-NEXT: retq
+; ANDROID-LABEL: sin:
+; ANDROID: # %bb.0: # %entry
+; ANDROID-NEXT: pushq %rax
+; ANDROID-NEXT: callq sinl@PLT
+; ANDROID-NEXT: popq %rax
+; ANDROID-NEXT: retq
+;
+; GNU-LABEL: sin:
+; GNU: # %bb.0: # %entry
+; GNU-NEXT: pushq %rax
+; GNU-NEXT: callq sinf128@PLT
+; GNU-NEXT: popq %rax
+; GNU-NEXT: retq
;
; X86-LABEL: sin:
; X86: # %bb.0: # %entry
@@ -812,12 +1008,19 @@ entry:
}
define fp128 @sqrt(fp128 %x) nounwind strictfp {
-; CHECK-LABEL: sqrt:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: pushq %rax
-; CHECK-NEXT: callq sqrtf128
-; CHECK-NEXT: popq %rax
-; CHECK-NEXT: retq
+; ANDROID-LABEL: sqrt:
+; ANDROID: # %bb.0: # %entry
+; ANDROID-NEXT: pushq %rax
+; ANDROID-NEXT: callq sqrtl@PLT
+; ANDROID-NEXT: popq %rax
+; ANDROID-NEXT: retq
+;
+; GNU-LABEL: sqrt:
+; GNU: # %bb.0: # %entry
+; GNU-NEXT: pushq %rax
+; GNU-NEXT: callq sqrtf128@PLT
+; GNU-NEXT: popq %rax
+; GNU-NEXT: retq
;
; X86-LABEL: sqrt:
; X86: # %bb.0: # %entry
@@ -845,12 +1048,19 @@ entry:
}
define fp128 @trunc(fp128 %x) nounwind strictfp {
-; CHECK-LABEL: trunc:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: pushq %rax
-; CHECK-NEXT: callq truncf128
-; CHECK-NEXT: popq %rax
-; CHECK-NEXT: retq
+; ANDROID-LABEL: trunc:
+; ANDROID: # %bb.0: # %entry
+; ANDROID-NEXT: pushq %rax
+; ANDROID-NEXT: callq truncl@PLT
+; ANDROID-NEXT: popq %rax
+; ANDROID-NEXT: retq
+;
+; GNU-LABEL: trunc:
+; GNU: # %bb.0: # %entry
+; GNU-NEXT: pushq %rax
+; GNU-NEXT: callq truncf128@PLT
+; GNU-NEXT: popq %rax
+; GNU-NEXT: retq
;
; X86-LABEL: trunc:
; X86: # %bb.0: # %entry
@@ -878,12 +1088,19 @@ entry:
}
define i32 @lrint(fp128 %x) nounwind strictfp {
-; CHECK-LABEL: lrint:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: pushq %rax
-; CHECK-NEXT: callq lrintf128
-; CHECK-NEXT: popq %rcx
-; CHECK-NEXT: retq
+; ANDROID-LABEL: lrint:
+; ANDROID: # %bb.0: # %entry
+; ANDROID-NEXT: pushq %rax
+; ANDROID-NEXT: callq lrintl@PLT
+; ANDROID-NEXT: popq %rcx
+; ANDROID-NEXT: retq
+;
+; GNU-LABEL: lrint:
+; GNU: # %bb.0: # %entry
+; GNU-NEXT: pushq %rax
+; GNU-NEXT: callq lrintf128@PLT
+; GNU-NEXT: popq %rcx
+; GNU-NEXT: retq
;
; X86-LABEL: lrint:
; X86: # %bb.0: # %entry
@@ -901,12 +1118,19 @@ entry:
}
define i64 @llrint(fp128 %x) nounwind strictfp {
-; CHECK-LABEL: llrint:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: pushq %rax
-; CHECK-NEXT: callq llrintf128
-; CHECK-NEXT: popq %rcx
-; CHECK-NEXT: retq
+; ANDROID-LABEL: llrint:
+; ANDROID: # %bb.0: # %entry
+; ANDROID-NEXT: pushq %rax
+; ANDROID-NEXT: callq llrintl@PLT
+; ANDROID-NEXT: popq %rcx
+; ANDROID-NEXT: retq
+;
+; GNU-LABEL: llrint:
+; GNU: # %bb.0: # %entry
+; GNU-NEXT: pushq %rax
+; GNU-NEXT: callq llrintf128@PLT
+; GNU-NEXT: popq %rcx
+; GNU-NEXT: retq
;
; X86-LABEL: llrint:
; X86: # %bb.0: # %entry
@@ -924,12 +1148,19 @@ entry:
}
define i32 @lround(fp128 %x) nounwind strictfp {
-; CHECK-LABEL: lround:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: pushq %rax
-; CHECK-NEXT: callq lroundf128
-; CHECK-NEXT: popq %rcx
-; CHECK-NEXT: retq
+; ANDROID-LABEL: lround:
+; ANDROID: # %bb.0: # %entry
+; ANDROID-NEXT: pushq %rax
+; ANDROID-NEXT: callq lroundl@PLT
+; ANDROID-NEXT: popq %rcx
+; ANDROID-NEXT: retq
+;
+; GNU-LABEL: lround:
+; GNU: # %bb.0: # %entry
+; GNU-NEXT: pushq %rax
+; GNU-NEXT: callq lroundf128@PLT
+; GNU-NEXT: popq %rcx
+; GNU-NEXT: retq
;
; X86-LABEL: lround:
; X86: # %bb.0: # %entry
@@ -947,12 +1178,19 @@ entry:
}
define i64 @llround(fp128 %x) nounwind strictfp {
-; CHECK-LABEL: llround:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: pushq %rax
-; CHECK-NEXT: callq llroundf128
-; CHECK-NEXT: popq %rcx
-; CHECK-NEXT: retq
+; ANDROID-LABEL: llround:
+; ANDROID: # %bb.0: # %entry
+; ANDROID-NEXT: pushq %rax
+; ANDROID-NEXT: callq llroundl@PLT
+; ANDROID-NEXT: popq %rcx
+; ANDROID-NEXT: retq
+;
+; GNU-LABEL: llround:
+; GNU: # %bb.0: # %entry
+; GNU-NEXT: pushq %rax
+; GNU-NEXT: callq llroundf128@PLT
+; GNU-NEXT: popq %rcx
+; GNU-NEXT: retq
;
; X86-LABEL: llround:
; X86: # %bb.0: # %entry
@@ -986,6 +1224,38 @@ define i64 @cmp(i64 %a, i64 %b, fp128 %x, fp128 %y) #0 {
; CHECK-NEXT: popq %r14
; CHECK-NEXT: retq
;
+; ANDROID-LABEL: cmp:
+; ANDROID: # %bb.0:
+; ANDROID-NEXT: pushq %r14
+; ANDROID-NEXT: pushq %rbx
+; ANDROID-NEXT: pushq %rax
+; ANDROID-NEXT: movq %rsi, %rbx
+; ANDROID-NEXT: movq %rdi, %r14
+; ANDROID-NEXT: callq __eqtf2@PLT
+; ANDROID-NEXT: testl %eax, %eax
+; ANDROID-NEXT: cmovneq %rbx, %r14
+; ANDROID-NEXT: movq %r14, %rax
+; ANDROID-NEXT: addq $8, %rsp
+; ANDROID-NEXT: popq %rbx
+; ANDROID-NEXT: popq %r14
+; ANDROID-NEXT: retq
+;
+; GNU-LABEL: cmp:
+; GNU: # %bb.0:
+; GNU-NEXT: pushq %r14
+; GNU-NEXT: pushq %rbx
+; GNU-NEXT: pushq %rax
+; GNU-NEXT: movq %rsi, %rbx
+; GNU-NEXT: movq %rdi, %r14
+; GNU-NEXT: callq __eqtf2@PLT
+; GNU-NEXT: testl %eax, %eax
+; GNU-NEXT: cmovneq %rbx, %r14
+; GNU-NEXT: movq %r14, %rax
+; GNU-NEXT: addq $8, %rsp
+; GNU-NEXT: popq %rbx
+; GNU-NEXT: popq %r14
+; GNU-NEXT: retq
+;
; X86-LABEL: cmp:
; X86: # %bb.0:
; X86-NEXT: subl $12, %esp
@@ -1032,6 +1302,38 @@ define i64 @cmps(i64 %a, i64 %b, fp128 %x, fp128 %y) #0 {
; CHECK-NEXT: popq %r14
; CHECK-NEXT: retq
;
+; ANDROID-LABEL: cmps:
+; ANDROID: # %bb.0:
+; ANDROID-NEXT: pushq %r14
+; ANDROID-NEXT: pushq %rbx
+; ANDROID-NEXT: pushq %rax
+; ANDROID-NEXT: movq %rsi, %rbx
+; ANDROID-NEXT: movq %rdi, %r14
+; ANDROID-NEXT: callq __eqtf2@PLT
+; ANDROID-NEXT: testl %eax, %eax
+; ANDROID-NEXT: cmovneq %rbx, %r14
+; ANDROID-NEXT: movq %r14, %rax
+; ANDROID-NEXT: addq $8, %rsp
+; ANDROID-NEXT: popq %rbx
+; ANDROID-NEXT: popq %r14
+; ANDROID-NEXT: retq
+;
+; GNU-LABEL: cmps:
+; GNU: # %bb.0:
+; GNU-NEXT: pushq %r14
+; GNU-NEXT: pushq %rbx
+; GNU-NEXT: pushq %rax
+; GNU-NEXT: movq %rsi, %rbx
+; GNU-NEXT: movq %rdi, %r14
+; GNU-NEXT: callq __eqtf2@PLT
+; GNU-NEXT: testl %eax, %eax
+; GNU-NEXT: cmovneq %rbx, %r14
+; GNU-NEXT: movq %r14, %rax
+; GNU-NEXT: addq $8, %rsp
+; GNU-NEXT: popq %rbx
+; GNU-NEXT: popq %r14
+; GNU-NEXT: retq
+;
; X86-LABEL: cmps:
; X86: # %bb.0:
; X86-NEXT: subl $12, %esp
@@ -1089,6 +1391,60 @@ define i64 @cmp_ueq_q(i64 %a, i64 %b, fp128 %x, fp128 %y) #0 {
; CHECK-NEXT: popq %rbp
; CHECK-NEXT: retq
;
+; ANDROID-LABEL: cmp_ueq_q:
+; ANDROID: # %bb.0:
+; ANDROID-NEXT: pushq %rbp
+; ANDROID-NEXT: pushq %r14
+; ANDROID-NEXT: pushq %rbx
+; ANDROID-NEXT: subq $32, %rsp
+; ANDROID-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; ANDROID-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill
+; ANDROID-NEXT: movq %rsi, %rbx
+; ANDROID-NEXT: movq %rdi, %r14
+; ANDROID-NEXT: callq __eqtf2@PLT
+; ANDROID-NEXT: testl %eax, %eax
+; ANDROID-NEXT: sete %bpl
+; ANDROID-NEXT: movaps (%rsp), %xmm0 # 16-byte Reload
+; ANDROID-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; ANDROID-NEXT: callq __unordtf2@PLT
+; ANDROID-NEXT: testl %eax, %eax
+; ANDROID-NEXT: setne %al
+; ANDROID-NEXT: orb %bpl, %al
+; ANDROID-NEXT: cmoveq %rbx, %r14
+; ANDROID-NEXT: movq %r14, %rax
+; ANDROID-NEXT: addq $32, %rsp
+; ANDROID-NEXT: popq %rbx
+; ANDROID-NEXT: popq %r14
+; ANDROID-NEXT: popq %rbp
+; ANDROID-NEXT: retq
+;
+; GNU-LABEL: cmp_ueq_q:
+; GNU: # %bb.0:
+; GNU-NEXT: pushq %rbp
+; GNU-NEXT: pushq %r14
+; GNU-NEXT: pushq %rbx
+; GNU-NEXT: subq $32, %rsp
+; GNU-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; GNU-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill
+; GNU-NEXT: movq %rsi, %rbx
+; GNU-NEXT: movq %rdi, %r14
+; GNU-NEXT: callq __eqtf2@PLT
+; GNU-NEXT: testl %eax, %eax
+; GNU-NEXT: sete %bpl
+; GNU-NEXT: movaps (%rsp), %xmm0 # 16-byte Reload
+; GNU-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; GNU-NEXT: callq __unordtf2@PLT
+; GNU-NEXT: testl %eax, %eax
+; GNU-NEXT: setne %al
+; GNU-NEXT: orb %bpl, %al
+; GNU-NEXT: cmoveq %rbx, %r14
+; GNU-NEXT: movq %r14, %rax
+; GNU-NEXT: addq $32, %rsp
+; GNU-NEXT: popq %rbx
+; GNU-NEXT: popq %r14
+; GNU-NEXT: popq %rbp
+; GNU-NEXT: retq
+;
; X86-LABEL: cmp_ueq_q:
; X86: # %bb.0:
; X86-NEXT: pushl %ebp
@@ -1172,6 +1528,60 @@ define i64 @cmp_one_q(i64 %a, i64 %b, fp128 %x, fp128 %y) #0 {
; CHECK-NEXT: popq %rbp
; CHECK-NEXT: retq
;
+; ANDROID-LABEL: cmp_one_q:
+; ANDROID: # %bb.0:
+; ANDROID-NEXT: pushq %rbp
+; ANDROID-NEXT: pushq %r14
+; ANDROID-NEXT: pushq %rbx
+; ANDROID-NEXT: subq $32, %rsp
+; ANDROID-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; ANDROID-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill
+; ANDROID-NEXT: movq %rsi, %rbx
+; ANDROID-NEXT: movq %rdi, %r14
+; ANDROID-NEXT: callq __eqtf2@PLT
+; ANDROID-NEXT: testl %eax, %eax
+; ANDROID-NEXT: setne %bpl
+; ANDROID-NEXT: movaps (%rsp), %xmm0 # 16-byte Reload
+; ANDROID-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; ANDROID-NEXT: callq __unordtf2@PLT
+; ANDROID-NEXT: testl %eax, %eax
+; ANDROID-NEXT: sete %al
+; ANDROID-NEXT: testb %bpl, %al
+; ANDROID-NEXT: cmoveq %rbx, %r14
+; ANDROID-NEXT: movq %r14, %rax
+; ANDROID-NEXT: addq $32, %rsp
+; ANDROID-NEXT: popq %rbx
+; ANDROID-NEXT: popq %r14
+; ANDROID-NEXT: popq %rbp
+; ANDROID-NEXT: retq
+;
+; GNU-LABEL: cmp_one_q:
+; GNU: # %bb.0:
+; GNU-NEXT: pushq %rbp
+; GNU-NEXT: pushq %r14
+; GNU-NEXT: pushq %rbx
+; GNU-NEXT: subq $32, %rsp
+; GNU-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; GNU-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill
+; GNU-NEXT: movq %rsi, %rbx
+; GNU-NEXT: movq %rdi, %r14
+; GNU-NEXT: callq __eqtf2@PLT
+; GNU-NEXT: testl %eax, %eax
+; GNU-NEXT: setne %bpl
+; GNU-NEXT: movaps (%rsp), %xmm0 # 16-byte Reload
+; GNU-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; GNU-NEXT: callq __unordtf2@PLT
+; GNU-NEXT: testl %eax, %eax
+; GNU-NEXT: sete %al
+; GNU-NEXT: testb %bpl, %al
+; GNU-NEXT: cmoveq %rbx, %r14
+; GNU-NEXT: movq %r14, %rax
+; GNU-NEXT: addq $32, %rsp
+; GNU-NEXT: popq %rbx
+; GNU-NEXT: popq %r14
+; GNU-NEXT: popq %rbp
+; GNU-NEXT: retq
+;
; X86-LABEL: cmp_one_q:
; X86: # %bb.0:
; X86-NEXT: pushl %ebp
diff --git a/llvm/test/CodeGen/X86/fp128-libcalls.ll b/llvm/test/CodeGen/X86/fp128-libcalls.ll
index 6946ca2..bb75ec1 100644
--- a/llvm/test/CodeGen/X86/fp128-libcalls.ll
+++ b/llvm/test/CodeGen/X86/fp128-libcalls.ll
@@ -1,8 +1,8 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -O2 -mtriple=x86_64-linux-android \
-; RUN: -enable-legalize-types-checking | FileCheck %s
+; RUN: -enable-legalize-types-checking | FileCheck %s --check-prefix=ANDROID
; RUN: llc < %s -O2 -mtriple=x86_64-linux-gnu \
-; RUN: -enable-legalize-types-checking | FileCheck %s
+; RUN: -enable-legalize-types-checking | FileCheck %s --check-prefix=GNU
; RUN: llc < %s -O2 -mtriple=i686-linux-gnu -mattr=sse2 \
; RUN: -enable-legalize-types-checking | FileCheck %s --check-prefix=X86
@@ -20,6 +20,22 @@ define dso_local void @Test128Add(fp128 %d1, fp128 %d2) nounwind {
; CHECK-NEXT: popq %rax
; CHECK-NEXT: retq
;
+; ANDROID-LABEL: Test128Add:
+; ANDROID: # %bb.0: # %entry
+; ANDROID-NEXT: pushq %rax
+; ANDROID-NEXT: callq __addtf3@PLT
+; ANDROID-NEXT: movaps %xmm0, vf128(%rip)
+; ANDROID-NEXT: popq %rax
+; ANDROID-NEXT: retq
+;
+; GNU-LABEL: Test128Add:
+; GNU: # %bb.0: # %entry
+; GNU-NEXT: pushq %rax
+; GNU-NEXT: callq __addtf3@PLT
+; GNU-NEXT: movaps %xmm0, vf128(%rip)
+; GNU-NEXT: popq %rax
+; GNU-NEXT: retq
+;
; X86-LABEL: Test128Add:
; X86: # %bb.0: # %entry
; X86-NEXT: subl $40, %esp
@@ -56,6 +72,26 @@ define dso_local void @Test128_1Add(fp128 %d1) nounwind {
; CHECK-NEXT: popq %rax
; CHECK-NEXT: retq
;
+; ANDROID-LABEL: Test128_1Add:
+; ANDROID: # %bb.0: # %entry
+; ANDROID-NEXT: pushq %rax
+; ANDROID-NEXT: movaps %xmm0, %xmm1
+; ANDROID-NEXT: movaps vf128(%rip), %xmm0
+; ANDROID-NEXT: callq __addtf3@PLT
+; ANDROID-NEXT: movaps %xmm0, vf128(%rip)
+; ANDROID-NEXT: popq %rax
+; ANDROID-NEXT: retq
+;
+; GNU-LABEL: Test128_1Add:
+; GNU: # %bb.0: # %entry
+; GNU-NEXT: pushq %rax
+; GNU-NEXT: movaps %xmm0, %xmm1
+; GNU-NEXT: movaps vf128(%rip), %xmm0
+; GNU-NEXT: callq __addtf3@PLT
+; GNU-NEXT: movaps %xmm0, vf128(%rip)
+; GNU-NEXT: popq %rax
+; GNU-NEXT: retq
+;
; X86-LABEL: Test128_1Add:
; X86: # %bb.0: # %entry
; X86-NEXT: subl $40, %esp
@@ -91,6 +127,22 @@ define dso_local void @Test128Sub(fp128 %d1, fp128 %d2) nounwind {
; CHECK-NEXT: popq %rax
; CHECK-NEXT: retq
;
+; ANDROID-LABEL: Test128Sub:
+; ANDROID: # %bb.0: # %entry
+; ANDROID-NEXT: pushq %rax
+; ANDROID-NEXT: callq __subtf3@PLT
+; ANDROID-NEXT: movaps %xmm0, vf128(%rip)
+; ANDROID-NEXT: popq %rax
+; ANDROID-NEXT: retq
+;
+; GNU-LABEL: Test128Sub:
+; GNU: # %bb.0: # %entry
+; GNU-NEXT: pushq %rax
+; GNU-NEXT: callq __subtf3@PLT
+; GNU-NEXT: movaps %xmm0, vf128(%rip)
+; GNU-NEXT: popq %rax
+; GNU-NEXT: retq
+;
; X86-LABEL: Test128Sub:
; X86: # %bb.0: # %entry
; X86-NEXT: subl $40, %esp
@@ -127,6 +179,26 @@ define dso_local void @Test128_1Sub(fp128 %d1) nounwind {
; CHECK-NEXT: popq %rax
; CHECK-NEXT: retq
;
+; ANDROID-LABEL: Test128_1Sub:
+; ANDROID: # %bb.0: # %entry
+; ANDROID-NEXT: pushq %rax
+; ANDROID-NEXT: movaps %xmm0, %xmm1
+; ANDROID-NEXT: movaps vf128(%rip), %xmm0
+; ANDROID-NEXT: callq __subtf3@PLT
+; ANDROID-NEXT: movaps %xmm0, vf128(%rip)
+; ANDROID-NEXT: popq %rax
+; ANDROID-NEXT: retq
+;
+; GNU-LABEL: Test128_1Sub:
+; GNU: # %bb.0: # %entry
+; GNU-NEXT: pushq %rax
+; GNU-NEXT: movaps %xmm0, %xmm1
+; GNU-NEXT: movaps vf128(%rip), %xmm0
+; GNU-NEXT: callq __subtf3@PLT
+; GNU-NEXT: movaps %xmm0, vf128(%rip)
+; GNU-NEXT: popq %rax
+; GNU-NEXT: retq
+;
; X86-LABEL: Test128_1Sub:
; X86: # %bb.0: # %entry
; X86-NEXT: subl $40, %esp
@@ -162,6 +234,22 @@ define dso_local void @Test128Mul(fp128 %d1, fp128 %d2) nounwind {
; CHECK-NEXT: popq %rax
; CHECK-NEXT: retq
;
+; ANDROID-LABEL: Test128Mul:
+; ANDROID: # %bb.0: # %entry
+; ANDROID-NEXT: pushq %rax
+; ANDROID-NEXT: callq __multf3@PLT
+; ANDROID-NEXT: movaps %xmm0, vf128(%rip)
+; ANDROID-NEXT: popq %rax
+; ANDROID-NEXT: retq
+;
+; GNU-LABEL: Test128Mul:
+; GNU: # %bb.0: # %entry
+; GNU-NEXT: pushq %rax
+; GNU-NEXT: callq __multf3@PLT
+; GNU-NEXT: movaps %xmm0, vf128(%rip)
+; GNU-NEXT: popq %rax
+; GNU-NEXT: retq
+;
; X86-LABEL: Test128Mul:
; X86: # %bb.0: # %entry
; X86-NEXT: subl $40, %esp
@@ -198,6 +286,26 @@ define dso_local void @Test128_1Mul(fp128 %d1) nounwind {
; CHECK-NEXT: popq %rax
; CHECK-NEXT: retq
;
+; ANDROID-LABEL: Test128_1Mul:
+; ANDROID: # %bb.0: # %entry
+; ANDROID-NEXT: pushq %rax
+; ANDROID-NEXT: movaps %xmm0, %xmm1
+; ANDROID-NEXT: movaps vf128(%rip), %xmm0
+; ANDROID-NEXT: callq __multf3@PLT
+; ANDROID-NEXT: movaps %xmm0, vf128(%rip)
+; ANDROID-NEXT: popq %rax
+; ANDROID-NEXT: retq
+;
+; GNU-LABEL: Test128_1Mul:
+; GNU: # %bb.0: # %entry
+; GNU-NEXT: pushq %rax
+; GNU-NEXT: movaps %xmm0, %xmm1
+; GNU-NEXT: movaps vf128(%rip), %xmm0
+; GNU-NEXT: callq __multf3@PLT
+; GNU-NEXT: movaps %xmm0, vf128(%rip)
+; GNU-NEXT: popq %rax
+; GNU-NEXT: retq
+;
; X86-LABEL: Test128_1Mul:
; X86: # %bb.0: # %entry
; X86-NEXT: subl $40, %esp
@@ -233,6 +341,22 @@ define dso_local void @Test128Div(fp128 %d1, fp128 %d2) nounwind {
; CHECK-NEXT: popq %rax
; CHECK-NEXT: retq
;
+; ANDROID-LABEL: Test128Div:
+; ANDROID: # %bb.0: # %entry
+; ANDROID-NEXT: pushq %rax
+; ANDROID-NEXT: callq __divtf3@PLT
+; ANDROID-NEXT: movaps %xmm0, vf128(%rip)
+; ANDROID-NEXT: popq %rax
+; ANDROID-NEXT: retq
+;
+; GNU-LABEL: Test128Div:
+; GNU: # %bb.0: # %entry
+; GNU-NEXT: pushq %rax
+; GNU-NEXT: callq __divtf3@PLT
+; GNU-NEXT: movaps %xmm0, vf128(%rip)
+; GNU-NEXT: popq %rax
+; GNU-NEXT: retq
+;
; X86-LABEL: Test128Div:
; X86: # %bb.0: # %entry
; X86-NEXT: subl $40, %esp
@@ -269,6 +393,26 @@ define dso_local void @Test128_1Div(fp128 %d1) nounwind {
; CHECK-NEXT: popq %rax
; CHECK-NEXT: retq
;
+; ANDROID-LABEL: Test128_1Div:
+; ANDROID: # %bb.0: # %entry
+; ANDROID-NEXT: pushq %rax
+; ANDROID-NEXT: movaps %xmm0, %xmm1
+; ANDROID-NEXT: movaps vf128(%rip), %xmm0
+; ANDROID-NEXT: callq __divtf3@PLT
+; ANDROID-NEXT: movaps %xmm0, vf128(%rip)
+; ANDROID-NEXT: popq %rax
+; ANDROID-NEXT: retq
+;
+; GNU-LABEL: Test128_1Div:
+; GNU: # %bb.0: # %entry
+; GNU-NEXT: pushq %rax
+; GNU-NEXT: movaps %xmm0, %xmm1
+; GNU-NEXT: movaps vf128(%rip), %xmm0
+; GNU-NEXT: callq __divtf3@PLT
+; GNU-NEXT: movaps %xmm0, vf128(%rip)
+; GNU-NEXT: popq %rax
+; GNU-NEXT: retq
+;
; X86-LABEL: Test128_1Div:
; X86: # %bb.0: # %entry
; X86-NEXT: subl $40, %esp
@@ -296,13 +440,21 @@ entry:
}
define dso_local void @Test128Rem(fp128 %d1, fp128 %d2) nounwind {
-; CHECK-LABEL: Test128Rem:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: pushq %rax
-; CHECK-NEXT: callq fmodf128
-; CHECK-NEXT: movaps %xmm0, vf128(%rip)
-; CHECK-NEXT: popq %rax
-; CHECK-NEXT: retq
+; ANDROID-LABEL: Test128Rem:
+; ANDROID: # %bb.0: # %entry
+; ANDROID-NEXT: pushq %rax
+; ANDROID-NEXT: callq fmodl@PLT
+; ANDROID-NEXT: movaps %xmm0, vf128(%rip)
+; ANDROID-NEXT: popq %rax
+; ANDROID-NEXT: retq
+;
+; GNU-LABEL: Test128Rem:
+; GNU: # %bb.0: # %entry
+; GNU-NEXT: pushq %rax
+; GNU-NEXT: callq fmodf128@PLT
+; GNU-NEXT: movaps %xmm0, vf128(%rip)
+; GNU-NEXT: popq %rax
+; GNU-NEXT: retq
;
; X86-LABEL: Test128Rem:
; X86: # %bb.0: # %entry
@@ -330,15 +482,25 @@ entry:
}
define dso_local void @Test128_1Rem(fp128 %d1) nounwind {
-; CHECK-LABEL: Test128_1Rem:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: pushq %rax
-; CHECK-NEXT: movaps %xmm0, %xmm1
-; CHECK-NEXT: movaps vf128(%rip), %xmm0
-; CHECK-NEXT: callq fmodf128
-; CHECK-NEXT: movaps %xmm0, vf128(%rip)
-; CHECK-NEXT: popq %rax
-; CHECK-NEXT: retq
+; ANDROID-LABEL: Test128_1Rem:
+; ANDROID: # %bb.0: # %entry
+; ANDROID-NEXT: pushq %rax
+; ANDROID-NEXT: movaps %xmm0, %xmm1
+; ANDROID-NEXT: movaps vf128(%rip), %xmm0
+; ANDROID-NEXT: callq fmodl@PLT
+; ANDROID-NEXT: movaps %xmm0, vf128(%rip)
+; ANDROID-NEXT: popq %rax
+; ANDROID-NEXT: retq
+;
+; GNU-LABEL: Test128_1Rem:
+; GNU: # %bb.0: # %entry
+; GNU-NEXT: pushq %rax
+; GNU-NEXT: movaps %xmm0, %xmm1
+; GNU-NEXT: movaps vf128(%rip), %xmm0
+; GNU-NEXT: callq fmodf128@PLT
+; GNU-NEXT: movaps %xmm0, vf128(%rip)
+; GNU-NEXT: popq %rax
+; GNU-NEXT: retq
;
; X86-LABEL: Test128_1Rem:
; X86: # %bb.0: # %entry
@@ -367,13 +529,21 @@ entry:
}
define dso_local void @Test128Sqrt(fp128 %d1) nounwind {
-; CHECK-LABEL: Test128Sqrt:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: pushq %rax
-; CHECK-NEXT: callq sqrtf128
-; CHECK-NEXT: movaps %xmm0, vf128(%rip)
-; CHECK-NEXT: popq %rax
-; CHECK-NEXT: retq
+; ANDROID-LABEL: Test128Sqrt:
+; ANDROID: # %bb.0: # %entry
+; ANDROID-NEXT: pushq %rax
+; ANDROID-NEXT: callq sqrtl@PLT
+; ANDROID-NEXT: movaps %xmm0, vf128(%rip)
+; ANDROID-NEXT: popq %rax
+; ANDROID-NEXT: retq
+;
+; GNU-LABEL: Test128Sqrt:
+; GNU: # %bb.0: # %entry
+; GNU-NEXT: pushq %rax
+; GNU-NEXT: callq sqrtf128@PLT
+; GNU-NEXT: movaps %xmm0, vf128(%rip)
+; GNU-NEXT: popq %rax
+; GNU-NEXT: retq
;
; X86-LABEL: Test128Sqrt:
; X86: # %bb.0: # %entry
@@ -398,13 +568,21 @@ entry:
declare fp128 @llvm.sqrt.f128(fp128)
define dso_local void @Test128Sin(fp128 %d1) nounwind {
-; CHECK-LABEL: Test128Sin:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: pushq %rax
-; CHECK-NEXT: callq sinf128
-; CHECK-NEXT: movaps %xmm0, vf128(%rip)
-; CHECK-NEXT: popq %rax
-; CHECK-NEXT: retq
+; ANDROID-LABEL: Test128Sin:
+; ANDROID: # %bb.0: # %entry
+; ANDROID-NEXT: pushq %rax
+; ANDROID-NEXT: callq sinl@PLT
+; ANDROID-NEXT: movaps %xmm0, vf128(%rip)
+; ANDROID-NEXT: popq %rax
+; ANDROID-NEXT: retq
+;
+; GNU-LABEL: Test128Sin:
+; GNU: # %bb.0: # %entry
+; GNU-NEXT: pushq %rax
+; GNU-NEXT: callq sinf128@PLT
+; GNU-NEXT: movaps %xmm0, vf128(%rip)
+; GNU-NEXT: popq %rax
+; GNU-NEXT: retq
;
; X86-LABEL: Test128Sin:
; X86: # %bb.0: # %entry
@@ -429,13 +607,21 @@ entry:
declare fp128 @llvm.sin.f128(fp128)
define dso_local void @Test128Cos(fp128 %d1) nounwind {
-; CHECK-LABEL: Test128Cos:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: pushq %rax
-; CHECK-NEXT: callq cosf128
-; CHECK-NEXT: movaps %xmm0, vf128(%rip)
-; CHECK-NEXT: popq %rax
-; CHECK-NEXT: retq
+; ANDROID-LABEL: Test128Cos:
+; ANDROID: # %bb.0: # %entry
+; ANDROID-NEXT: pushq %rax
+; ANDROID-NEXT: callq cosl@PLT
+; ANDROID-NEXT: movaps %xmm0, vf128(%rip)
+; ANDROID-NEXT: popq %rax
+; ANDROID-NEXT: retq
+;
+; GNU-LABEL: Test128Cos:
+; GNU: # %bb.0: # %entry
+; GNU-NEXT: pushq %rax
+; GNU-NEXT: callq cosf128@PLT
+; GNU-NEXT: movaps %xmm0, vf128(%rip)
+; GNU-NEXT: popq %rax
+; GNU-NEXT: retq
;
; X86-LABEL: Test128Cos:
; X86: # %bb.0: # %entry
@@ -460,13 +646,21 @@ entry:
declare fp128 @llvm.cos.f128(fp128)
define dso_local void @Test128Ceil(fp128 %d1) nounwind {
-; CHECK-LABEL: Test128Ceil:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: pushq %rax
-; CHECK-NEXT: callq ceilf128
-; CHECK-NEXT: movaps %xmm0, vf128(%rip)
-; CHECK-NEXT: popq %rax
-; CHECK-NEXT: retq
+; ANDROID-LABEL: Test128Ceil:
+; ANDROID: # %bb.0: # %entry
+; ANDROID-NEXT: pushq %rax
+; ANDROID-NEXT: callq ceill@PLT
+; ANDROID-NEXT: movaps %xmm0, vf128(%rip)
+; ANDROID-NEXT: popq %rax
+; ANDROID-NEXT: retq
+;
+; GNU-LABEL: Test128Ceil:
+; GNU: # %bb.0: # %entry
+; GNU-NEXT: pushq %rax
+; GNU-NEXT: callq ceilf128@PLT
+; GNU-NEXT: movaps %xmm0, vf128(%rip)
+; GNU-NEXT: popq %rax
+; GNU-NEXT: retq
;
; X86-LABEL: Test128Ceil:
; X86: # %bb.0: # %entry
@@ -491,13 +685,21 @@ entry:
declare fp128 @llvm.ceil.f128(fp128)
define dso_local void @Test128Floor(fp128 %d1) nounwind {
-; CHECK-LABEL: Test128Floor:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: pushq %rax
-; CHECK-NEXT: callq floorf128
-; CHECK-NEXT: movaps %xmm0, vf128(%rip)
-; CHECK-NEXT: popq %rax
-; CHECK-NEXT: retq
+; ANDROID-LABEL: Test128Floor:
+; ANDROID: # %bb.0: # %entry
+; ANDROID-NEXT: pushq %rax
+; ANDROID-NEXT: callq floorl@PLT
+; ANDROID-NEXT: movaps %xmm0, vf128(%rip)
+; ANDROID-NEXT: popq %rax
+; ANDROID-NEXT: retq
+;
+; GNU-LABEL: Test128Floor:
+; GNU: # %bb.0: # %entry
+; GNU-NEXT: pushq %rax
+; GNU-NEXT: callq floorf128@PLT
+; GNU-NEXT: movaps %xmm0, vf128(%rip)
+; GNU-NEXT: popq %rax
+; GNU-NEXT: retq
;
; X86-LABEL: Test128Floor:
; X86: # %bb.0: # %entry
@@ -522,13 +724,21 @@ entry:
declare fp128 @llvm.floor.f128(fp128)
define dso_local void @Test128Trunc(fp128 %d1) nounwind {
-; CHECK-LABEL: Test128Trunc:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: pushq %rax
-; CHECK-NEXT: callq truncf128
-; CHECK-NEXT: movaps %xmm0, vf128(%rip)
-; CHECK-NEXT: popq %rax
-; CHECK-NEXT: retq
+; ANDROID-LABEL: Test128Trunc:
+; ANDROID: # %bb.0: # %entry
+; ANDROID-NEXT: pushq %rax
+; ANDROID-NEXT: callq truncl@PLT
+; ANDROID-NEXT: movaps %xmm0, vf128(%rip)
+; ANDROID-NEXT: popq %rax
+; ANDROID-NEXT: retq
+;
+; GNU-LABEL: Test128Trunc:
+; GNU: # %bb.0: # %entry
+; GNU-NEXT: pushq %rax
+; GNU-NEXT: callq truncf128@PLT
+; GNU-NEXT: movaps %xmm0, vf128(%rip)
+; GNU-NEXT: popq %rax
+; GNU-NEXT: retq
;
; X86-LABEL: Test128Trunc:
; X86: # %bb.0: # %entry
@@ -553,13 +763,21 @@ entry:
declare fp128 @llvm.trunc.f128(fp128)
define dso_local void @Test128Nearbyint(fp128 %d1) nounwind {
-; CHECK-LABEL: Test128Nearbyint:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: pushq %rax
-; CHECK-NEXT: callq nearbyintf128
-; CHECK-NEXT: movaps %xmm0, vf128(%rip)
-; CHECK-NEXT: popq %rax
-; CHECK-NEXT: retq
+; ANDROID-LABEL: Test128Nearbyint:
+; ANDROID: # %bb.0: # %entry
+; ANDROID-NEXT: pushq %rax
+; ANDROID-NEXT: callq nearbyintl@PLT
+; ANDROID-NEXT: movaps %xmm0, vf128(%rip)
+; ANDROID-NEXT: popq %rax
+; ANDROID-NEXT: retq
+;
+; GNU-LABEL: Test128Nearbyint:
+; GNU: # %bb.0: # %entry
+; GNU-NEXT: pushq %rax
+; GNU-NEXT: callq nearbyintf128@PLT
+; GNU-NEXT: movaps %xmm0, vf128(%rip)
+; GNU-NEXT: popq %rax
+; GNU-NEXT: retq
;
; X86-LABEL: Test128Nearbyint:
; X86: # %bb.0: # %entry
@@ -584,13 +802,21 @@ entry:
declare fp128 @llvm.nearbyint.f128(fp128)
define dso_local void @Test128Rint(fp128 %d1) nounwind {
-; CHECK-LABEL: Test128Rint:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: pushq %rax
-; CHECK-NEXT: callq rintf128
-; CHECK-NEXT: movaps %xmm0, vf128(%rip)
-; CHECK-NEXT: popq %rax
-; CHECK-NEXT: retq
+; ANDROID-LABEL: Test128Rint:
+; ANDROID: # %bb.0: # %entry
+; ANDROID-NEXT: pushq %rax
+; ANDROID-NEXT: callq rintl@PLT
+; ANDROID-NEXT: movaps %xmm0, vf128(%rip)
+; ANDROID-NEXT: popq %rax
+; ANDROID-NEXT: retq
+;
+; GNU-LABEL: Test128Rint:
+; GNU: # %bb.0: # %entry
+; GNU-NEXT: pushq %rax
+; GNU-NEXT: callq rintf128@PLT
+; GNU-NEXT: movaps %xmm0, vf128(%rip)
+; GNU-NEXT: popq %rax
+; GNU-NEXT: retq
;
; X86-LABEL: Test128Rint:
; X86: # %bb.0: # %entry
@@ -615,13 +841,21 @@ entry:
declare fp128 @llvm.rint.f128(fp128)
define dso_local void @Test128Round(fp128 %d1) nounwind {
-; CHECK-LABEL: Test128Round:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: pushq %rax
-; CHECK-NEXT: callq roundf128
-; CHECK-NEXT: movaps %xmm0, vf128(%rip)
-; CHECK-NEXT: popq %rax
-; CHECK-NEXT: retq
+; ANDROID-LABEL: Test128Round:
+; ANDROID: # %bb.0: # %entry
+; ANDROID-NEXT: pushq %rax
+; ANDROID-NEXT: callq roundl@PLT
+; ANDROID-NEXT: movaps %xmm0, vf128(%rip)
+; ANDROID-NEXT: popq %rax
+; ANDROID-NEXT: retq
+;
+; GNU-LABEL: Test128Round:
+; GNU: # %bb.0: # %entry
+; GNU-NEXT: pushq %rax
+; GNU-NEXT: callq roundf128@PLT
+; GNU-NEXT: movaps %xmm0, vf128(%rip)
+; GNU-NEXT: popq %rax
+; GNU-NEXT: retq
;
; X86-LABEL: Test128Round:
; X86: # %bb.0: # %entry
@@ -646,9 +880,13 @@ entry:
declare fp128 @llvm.round.f128(fp128)
define fp128 @Test128FMA(fp128 %a, fp128 %b, fp128 %c) nounwind {
-; CHECK-LABEL: Test128FMA:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: jmp fmaf128@PLT # TAILCALL
+; ANDROID-LABEL: Test128FMA:
+; ANDROID: # %bb.0: # %entry
+; ANDROID-NEXT: jmp fmal@PLT # TAILCALL
+;
+; GNU-LABEL: Test128FMA:
+; GNU: # %bb.0: # %entry
+; GNU-NEXT: jmp fmaf128@PLT # TAILCALL
;
; X86-LABEL: Test128FMA:
; X86: # %bb.0: # %entry
diff --git a/llvm/test/CodeGen/X86/load-combine.ll b/llvm/test/CodeGen/X86/load-combine.ll
index 7e4e11f..b5f3e78 100644
--- a/llvm/test/CodeGen/X86/load-combine.ll
+++ b/llvm/test/CodeGen/X86/load-combine.ll
@@ -1283,7 +1283,6 @@ define i32 @zext_load_i32_by_i8_bswap_shl_16(ptr %arg) {
ret i32 %tmp8
}
-; FIXME: This is a miscompile.
define i32 @pr80911_vector_load_multiuse(ptr %ptr, ptr %clobber) nounwind {
; CHECK-LABEL: pr80911_vector_load_multiuse:
; CHECK: # %bb.0:
@@ -1299,9 +1298,9 @@ define i32 @pr80911_vector_load_multiuse(ptr %ptr, ptr %clobber) nounwind {
;
; CHECK64-LABEL: pr80911_vector_load_multiuse:
; CHECK64: # %bb.0:
+; CHECK64-NEXT: movl (%rdi), %ecx
; CHECK64-NEXT: movzwl (%rdi), %eax
; CHECK64-NEXT: movl $0, (%rsi)
-; CHECK64-NEXT: movl (%rdi), %ecx
; CHECK64-NEXT: movl %ecx, (%rdi)
; CHECK64-NEXT: retq
%load = load <4 x i8>, ptr %ptr, align 16
diff --git a/llvm/test/CodeGen/X86/statepoint-relocate-undef.ll b/llvm/test/CodeGen/X86/statepoint-relocate-undef.ll
new file mode 100644
index 0000000..69e6976
--- /dev/null
+++ b/llvm/test/CodeGen/X86/statepoint-relocate-undef.ll
@@ -0,0 +1,32 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -verify-machineinstrs < %s | FileCheck %s
+
+target datalayout = "e-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-pc-linux-gnu"
+
+declare void @use(...)
+declare void @f()
+declare token @llvm.experimental.gc.statepoint.p0(i64, i32, ptr, i32, i32, ...)
+declare ptr addrspace(1) @llvm.experimental.gc.relocate.p1(token, i32, i32)
+
+;; Check that llvm doesn't crash if relocate with undef base/derived ptr survives until isel
+define void @test_gcrelocate_undef(ptr addrspace(1) %ptr) gc "statepoint-example" {
+; CHECK-LABEL: test_gcrelocate_undef:
+; CHECK: # %bb.0:
+; CHECK-NEXT: pushq %rax
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: movq %rdi, (%rsp)
+; CHECK-NEXT: callq f@PLT
+; CHECK-NEXT: .Ltmp0:
+; CHECK-NEXT: movl $4278124286, %edi # imm = 0xFEFEFEFE
+; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: callq use@PLT
+; CHECK-NEXT: popq %rax
+; CHECK-NEXT: .cfi_def_cfa_offset 8
+; CHECK-NEXT: retq
+ %tok = tail call token (i64, i32, ptr, i32, i32, ...)
+ @llvm.experimental.gc.statepoint.p0(i64 0, i32 0, ptr elementtype(void ()) @f, i32 0, i32 0, i32 0, i32 0) ["gc-live" (ptr addrspace(1) %ptr, ptr addrspace(1) undef), "deopt" (ptr addrspace(1) %ptr, i32 undef)]
+ %a = call ptr addrspace(1) @llvm.experimental.gc.relocate.p1(token %tok, i32 0, i32 1)
+ call void (...) @use(ptr addrspace(1) %a)
+ ret void
+}
diff --git a/llvm/test/CodeGen/X86/vector-reduce-ctpop.ll b/llvm/test/CodeGen/X86/vector-reduce-ctpop.ll
index aced5e0..e218d91 100644
--- a/llvm/test/CodeGen/X86/vector-reduce-ctpop.ll
+++ b/llvm/test/CodeGen/X86/vector-reduce-ctpop.ll
@@ -505,6 +505,466 @@ define i32 @reduce_ctpop_v8i32(<8 x i32> %a0) {
ret i32 %r0
}
+define i64 @reduce_ctpop_v8i64(<8 x i64> %a0) {
+; SSE42-LABEL: reduce_ctpop_v8i64:
+; SSE42: # %bb.0:
+; SSE42-NEXT: movdqa {{.*#+}} xmm5 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; SSE42-NEXT: movdqa %xmm2, %xmm6
+; SSE42-NEXT: pand %xmm5, %xmm6
+; SSE42-NEXT: movdqa {{.*#+}} xmm4 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
+; SSE42-NEXT: movdqa %xmm4, %xmm7
+; SSE42-NEXT: pshufb %xmm6, %xmm7
+; SSE42-NEXT: psrlw $4, %xmm2
+; SSE42-NEXT: pand %xmm5, %xmm2
+; SSE42-NEXT: movdqa %xmm4, %xmm8
+; SSE42-NEXT: pshufb %xmm2, %xmm8
+; SSE42-NEXT: paddb %xmm7, %xmm8
+; SSE42-NEXT: pxor %xmm2, %xmm2
+; SSE42-NEXT: psadbw %xmm2, %xmm8
+; SSE42-NEXT: movdqa %xmm0, %xmm6
+; SSE42-NEXT: pand %xmm5, %xmm6
+; SSE42-NEXT: movdqa %xmm4, %xmm7
+; SSE42-NEXT: pshufb %xmm6, %xmm7
+; SSE42-NEXT: psrlw $4, %xmm0
+; SSE42-NEXT: pand %xmm5, %xmm0
+; SSE42-NEXT: movdqa %xmm4, %xmm6
+; SSE42-NEXT: pshufb %xmm0, %xmm6
+; SSE42-NEXT: paddb %xmm7, %xmm6
+; SSE42-NEXT: psadbw %xmm2, %xmm6
+; SSE42-NEXT: paddq %xmm8, %xmm6
+; SSE42-NEXT: movdqa %xmm3, %xmm0
+; SSE42-NEXT: pand %xmm5, %xmm0
+; SSE42-NEXT: movdqa %xmm4, %xmm7
+; SSE42-NEXT: pshufb %xmm0, %xmm7
+; SSE42-NEXT: psrlw $4, %xmm3
+; SSE42-NEXT: pand %xmm5, %xmm3
+; SSE42-NEXT: movdqa %xmm4, %xmm0
+; SSE42-NEXT: pshufb %xmm3, %xmm0
+; SSE42-NEXT: paddb %xmm7, %xmm0
+; SSE42-NEXT: psadbw %xmm2, %xmm0
+; SSE42-NEXT: movdqa %xmm1, %xmm3
+; SSE42-NEXT: pand %xmm5, %xmm3
+; SSE42-NEXT: movdqa %xmm4, %xmm7
+; SSE42-NEXT: pshufb %xmm3, %xmm7
+; SSE42-NEXT: psrlw $4, %xmm1
+; SSE42-NEXT: pand %xmm5, %xmm1
+; SSE42-NEXT: pshufb %xmm1, %xmm4
+; SSE42-NEXT: paddb %xmm7, %xmm4
+; SSE42-NEXT: psadbw %xmm2, %xmm4
+; SSE42-NEXT: paddq %xmm0, %xmm4
+; SSE42-NEXT: paddq %xmm6, %xmm4
+; SSE42-NEXT: pshufd {{.*#+}} xmm0 = xmm4[2,3,2,3]
+; SSE42-NEXT: paddq %xmm4, %xmm0
+; SSE42-NEXT: movq %xmm0, %rax
+; SSE42-NEXT: retq
+;
+; AVX2-LABEL: reduce_ctpop_v8i64:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpbroadcastb {{.*#+}} ymm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX2-NEXT: vpand %ymm2, %ymm1, %ymm3
+; AVX2-NEXT: vbroadcasti128 {{.*#+}} ymm4 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
+; AVX2-NEXT: # ymm4 = mem[0,1,0,1]
+; AVX2-NEXT: vpshufb %ymm3, %ymm4, %ymm3
+; AVX2-NEXT: vpsrlw $4, %ymm1, %ymm1
+; AVX2-NEXT: vpand %ymm2, %ymm1, %ymm1
+; AVX2-NEXT: vpshufb %ymm1, %ymm4, %ymm1
+; AVX2-NEXT: vpaddb %ymm3, %ymm1, %ymm1
+; AVX2-NEXT: vpxor %xmm3, %xmm3, %xmm3
+; AVX2-NEXT: vpsadbw %ymm3, %ymm1, %ymm1
+; AVX2-NEXT: vpand %ymm2, %ymm0, %ymm5
+; AVX2-NEXT: vpshufb %ymm5, %ymm4, %ymm5
+; AVX2-NEXT: vpsrlw $4, %ymm0, %ymm0
+; AVX2-NEXT: vpand %ymm2, %ymm0, %ymm0
+; AVX2-NEXT: vpshufb %ymm0, %ymm4, %ymm0
+; AVX2-NEXT: vpaddb %ymm5, %ymm0, %ymm0
+; AVX2-NEXT: vpsadbw %ymm3, %ymm0, %ymm0
+; AVX2-NEXT: vpaddq %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
+; AVX2-NEXT: vpaddq %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
+; AVX2-NEXT: vpaddq %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vmovq %xmm0, %rax
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+;
+; AVX512VL-LABEL: reduce_ctpop_v8i64:
+; AVX512VL: # %bb.0:
+; AVX512VL-NEXT: vpbroadcastb {{.*#+}} zmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX512VL-NEXT: vpandq %zmm1, %zmm0, %zmm2
+; AVX512VL-NEXT: vbroadcasti32x4 {{.*#+}} zmm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
+; AVX512VL-NEXT: # zmm3 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512VL-NEXT: vpshufb %zmm2, %zmm3, %zmm2
+; AVX512VL-NEXT: vpsrlw $4, %zmm0, %zmm0
+; AVX512VL-NEXT: vpandq %zmm1, %zmm0, %zmm0
+; AVX512VL-NEXT: vpshufb %zmm0, %zmm3, %zmm0
+; AVX512VL-NEXT: vpaddb %zmm2, %zmm0, %zmm0
+; AVX512VL-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX512VL-NEXT: vpsadbw %zmm1, %zmm0, %zmm0
+; AVX512VL-NEXT: vpmovqb %zmm0, %xmm0
+; AVX512VL-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX512VL-NEXT: vpsadbw %xmm1, %xmm0, %xmm0
+; AVX512VL-NEXT: vmovq %xmm0, %rax
+; AVX512VL-NEXT: vzeroupper
+; AVX512VL-NEXT: retq
+;
+; AVX512VPOPCNT-LABEL: reduce_ctpop_v8i64:
+; AVX512VPOPCNT: # %bb.0:
+; AVX512VPOPCNT-NEXT: vpopcntq %zmm0, %zmm0
+; AVX512VPOPCNT-NEXT: vpmovqb %zmm0, %xmm0
+; AVX512VPOPCNT-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX512VPOPCNT-NEXT: vpsadbw %xmm1, %xmm0, %xmm0
+; AVX512VPOPCNT-NEXT: vmovq %xmm0, %rax
+; AVX512VPOPCNT-NEXT: vzeroupper
+; AVX512VPOPCNT-NEXT: retq
+ %p0 = tail call <8 x i64> @llvm.ctpop.v8i64(<8 x i64> %a0)
+ %r0 = tail call i64 @llvm.vector.reduce.add.v8i64(<8 x i64> %p0)
+ ret i64 %r0
+}
+
+define i32 @reduce_ctpop_v16i32(<16 x i32> %a0) {
+; SSE42-LABEL: reduce_ctpop_v16i32:
+; SSE42: # %bb.0:
+; SSE42-NEXT: movdqa {{.*#+}} xmm5 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; SSE42-NEXT: movdqa %xmm2, %xmm6
+; SSE42-NEXT: pand %xmm5, %xmm6
+; SSE42-NEXT: movdqa {{.*#+}} xmm4 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
+; SSE42-NEXT: movdqa %xmm4, %xmm7
+; SSE42-NEXT: pshufb %xmm6, %xmm7
+; SSE42-NEXT: psrlw $4, %xmm2
+; SSE42-NEXT: pand %xmm5, %xmm2
+; SSE42-NEXT: movdqa %xmm4, %xmm6
+; SSE42-NEXT: pshufb %xmm2, %xmm6
+; SSE42-NEXT: paddb %xmm7, %xmm6
+; SSE42-NEXT: pxor %xmm2, %xmm2
+; SSE42-NEXT: pmovzxdq {{.*#+}} xmm7 = xmm6[0],zero,xmm6[1],zero
+; SSE42-NEXT: punpckhdq {{.*#+}} xmm6 = xmm6[2],xmm2[2],xmm6[3],xmm2[3]
+; SSE42-NEXT: psadbw %xmm2, %xmm6
+; SSE42-NEXT: psadbw %xmm2, %xmm7
+; SSE42-NEXT: packuswb %xmm6, %xmm7
+; SSE42-NEXT: movdqa %xmm0, %xmm6
+; SSE42-NEXT: pand %xmm5, %xmm6
+; SSE42-NEXT: movdqa %xmm4, %xmm8
+; SSE42-NEXT: pshufb %xmm6, %xmm8
+; SSE42-NEXT: psrlw $4, %xmm0
+; SSE42-NEXT: pand %xmm5, %xmm0
+; SSE42-NEXT: movdqa %xmm4, %xmm6
+; SSE42-NEXT: pshufb %xmm0, %xmm6
+; SSE42-NEXT: paddb %xmm8, %xmm6
+; SSE42-NEXT: pmovzxdq {{.*#+}} xmm0 = xmm6[0],zero,xmm6[1],zero
+; SSE42-NEXT: punpckhdq {{.*#+}} xmm6 = xmm6[2],xmm2[2],xmm6[3],xmm2[3]
+; SSE42-NEXT: psadbw %xmm2, %xmm6
+; SSE42-NEXT: psadbw %xmm2, %xmm0
+; SSE42-NEXT: packuswb %xmm6, %xmm0
+; SSE42-NEXT: paddd %xmm7, %xmm0
+; SSE42-NEXT: movdqa %xmm3, %xmm6
+; SSE42-NEXT: pand %xmm5, %xmm6
+; SSE42-NEXT: movdqa %xmm4, %xmm7
+; SSE42-NEXT: pshufb %xmm6, %xmm7
+; SSE42-NEXT: psrlw $4, %xmm3
+; SSE42-NEXT: pand %xmm5, %xmm3
+; SSE42-NEXT: movdqa %xmm4, %xmm6
+; SSE42-NEXT: pshufb %xmm3, %xmm6
+; SSE42-NEXT: paddb %xmm7, %xmm6
+; SSE42-NEXT: pmovzxdq {{.*#+}} xmm3 = xmm6[0],zero,xmm6[1],zero
+; SSE42-NEXT: punpckhdq {{.*#+}} xmm6 = xmm6[2],xmm2[2],xmm6[3],xmm2[3]
+; SSE42-NEXT: psadbw %xmm2, %xmm6
+; SSE42-NEXT: psadbw %xmm2, %xmm3
+; SSE42-NEXT: packuswb %xmm6, %xmm3
+; SSE42-NEXT: movdqa %xmm1, %xmm6
+; SSE42-NEXT: pand %xmm5, %xmm6
+; SSE42-NEXT: movdqa %xmm4, %xmm7
+; SSE42-NEXT: pshufb %xmm6, %xmm7
+; SSE42-NEXT: psrlw $4, %xmm1
+; SSE42-NEXT: pand %xmm5, %xmm1
+; SSE42-NEXT: pshufb %xmm1, %xmm4
+; SSE42-NEXT: paddb %xmm7, %xmm4
+; SSE42-NEXT: pmovzxdq {{.*#+}} xmm1 = xmm4[0],zero,xmm4[1],zero
+; SSE42-NEXT: punpckhdq {{.*#+}} xmm4 = xmm4[2],xmm2[2],xmm4[3],xmm2[3]
+; SSE42-NEXT: psadbw %xmm2, %xmm4
+; SSE42-NEXT: psadbw %xmm2, %xmm1
+; SSE42-NEXT: packuswb %xmm4, %xmm1
+; SSE42-NEXT: paddd %xmm3, %xmm1
+; SSE42-NEXT: paddd %xmm0, %xmm1
+; SSE42-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,2,3]
+; SSE42-NEXT: paddd %xmm1, %xmm0
+; SSE42-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
+; SSE42-NEXT: paddd %xmm0, %xmm1
+; SSE42-NEXT: movd %xmm1, %eax
+; SSE42-NEXT: retq
+;
+; AVX2-LABEL: reduce_ctpop_v16i32:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpbroadcastb {{.*#+}} ymm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX2-NEXT: vpand %ymm2, %ymm1, %ymm3
+; AVX2-NEXT: vbroadcasti128 {{.*#+}} ymm4 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
+; AVX2-NEXT: # ymm4 = mem[0,1,0,1]
+; AVX2-NEXT: vpshufb %ymm3, %ymm4, %ymm3
+; AVX2-NEXT: vpsrlw $4, %ymm1, %ymm1
+; AVX2-NEXT: vpand %ymm2, %ymm1, %ymm1
+; AVX2-NEXT: vpshufb %ymm1, %ymm4, %ymm1
+; AVX2-NEXT: vpaddb %ymm3, %ymm1, %ymm1
+; AVX2-NEXT: vpxor %xmm3, %xmm3, %xmm3
+; AVX2-NEXT: vpunpckhdq {{.*#+}} ymm5 = ymm1[2],ymm3[2],ymm1[3],ymm3[3],ymm1[6],ymm3[6],ymm1[7],ymm3[7]
+; AVX2-NEXT: vpsadbw %ymm3, %ymm5, %ymm5
+; AVX2-NEXT: vpunpckldq {{.*#+}} ymm1 = ymm1[0],ymm3[0],ymm1[1],ymm3[1],ymm1[4],ymm3[4],ymm1[5],ymm3[5]
+; AVX2-NEXT: vpsadbw %ymm3, %ymm1, %ymm1
+; AVX2-NEXT: vpackuswb %ymm5, %ymm1, %ymm1
+; AVX2-NEXT: vpand %ymm2, %ymm0, %ymm5
+; AVX2-NEXT: vpshufb %ymm5, %ymm4, %ymm5
+; AVX2-NEXT: vpsrlw $4, %ymm0, %ymm0
+; AVX2-NEXT: vpand %ymm2, %ymm0, %ymm0
+; AVX2-NEXT: vpshufb %ymm0, %ymm4, %ymm0
+; AVX2-NEXT: vpaddb %ymm5, %ymm0, %ymm0
+; AVX2-NEXT: vpunpckhdq {{.*#+}} ymm2 = ymm0[2],ymm3[2],ymm0[3],ymm3[3],ymm0[6],ymm3[6],ymm0[7],ymm3[7]
+; AVX2-NEXT: vpsadbw %ymm3, %ymm2, %ymm2
+; AVX2-NEXT: vpunpckldq {{.*#+}} ymm0 = ymm0[0],ymm3[0],ymm0[1],ymm3[1],ymm0[4],ymm3[4],ymm0[5],ymm3[5]
+; AVX2-NEXT: vpsadbw %ymm3, %ymm0, %ymm0
+; AVX2-NEXT: vpackuswb %ymm2, %ymm0, %ymm0
+; AVX2-NEXT: vpaddd %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
+; AVX2-NEXT: vpaddd %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
+; AVX2-NEXT: vpaddd %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
+; AVX2-NEXT: vpaddd %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vmovd %xmm0, %eax
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+;
+; AVX512VL-LABEL: reduce_ctpop_v16i32:
+; AVX512VL: # %bb.0:
+; AVX512VL-NEXT: vpbroadcastb {{.*#+}} zmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX512VL-NEXT: vpandq %zmm1, %zmm0, %zmm2
+; AVX512VL-NEXT: vbroadcasti32x4 {{.*#+}} zmm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
+; AVX512VL-NEXT: # zmm3 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512VL-NEXT: vpshufb %zmm2, %zmm3, %zmm2
+; AVX512VL-NEXT: vpsrlw $4, %zmm0, %zmm0
+; AVX512VL-NEXT: vpandq %zmm1, %zmm0, %zmm0
+; AVX512VL-NEXT: vpshufb %zmm0, %zmm3, %zmm0
+; AVX512VL-NEXT: vpaddb %zmm2, %zmm0, %zmm0
+; AVX512VL-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX512VL-NEXT: vpunpckhdq {{.*#+}} zmm2 = zmm0[2],zmm1[2],zmm0[3],zmm1[3],zmm0[6],zmm1[6],zmm0[7],zmm1[7],zmm0[10],zmm1[10],zmm0[11],zmm1[11],zmm0[14],zmm1[14],zmm0[15],zmm1[15]
+; AVX512VL-NEXT: vpsadbw %zmm1, %zmm2, %zmm2
+; AVX512VL-NEXT: vpunpckldq {{.*#+}} zmm0 = zmm0[0],zmm1[0],zmm0[1],zmm1[1],zmm0[4],zmm1[4],zmm0[5],zmm1[5],zmm0[8],zmm1[8],zmm0[9],zmm1[9],zmm0[12],zmm1[12],zmm0[13],zmm1[13]
+; AVX512VL-NEXT: vpsadbw %zmm1, %zmm0, %zmm0
+; AVX512VL-NEXT: vpackuswb %zmm2, %zmm0, %zmm0
+; AVX512VL-NEXT: vpmovdb %zmm0, %xmm0
+; AVX512VL-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX512VL-NEXT: vpsadbw %xmm1, %xmm0, %xmm0
+; AVX512VL-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
+; AVX512VL-NEXT: vpaddq %xmm1, %xmm0, %xmm0
+; AVX512VL-NEXT: vmovd %xmm0, %eax
+; AVX512VL-NEXT: vzeroupper
+; AVX512VL-NEXT: retq
+;
+; AVX512VPOPCNT-LABEL: reduce_ctpop_v16i32:
+; AVX512VPOPCNT: # %bb.0:
+; AVX512VPOPCNT-NEXT: vpopcntd %zmm0, %zmm0
+; AVX512VPOPCNT-NEXT: vpmovdb %zmm0, %xmm0
+; AVX512VPOPCNT-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX512VPOPCNT-NEXT: vpsadbw %xmm1, %xmm0, %xmm0
+; AVX512VPOPCNT-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
+; AVX512VPOPCNT-NEXT: vpaddq %xmm1, %xmm0, %xmm0
+; AVX512VPOPCNT-NEXT: vmovd %xmm0, %eax
+; AVX512VPOPCNT-NEXT: vzeroupper
+; AVX512VPOPCNT-NEXT: retq
+ %p0 = tail call <16 x i32> @llvm.ctpop.v16i32(<16 x i32> %a0)
+ %r0 = tail call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> %p0)
+ ret i32 %r0
+}
+
+define i64 @reduce_ctpop_v16i64(<16 x i64> %a0) {
+; SSE42-LABEL: reduce_ctpop_v16i64:
+; SSE42: # %bb.0:
+; SSE42-NEXT: movdqa {{.*#+}} xmm9 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; SSE42-NEXT: movdqa %xmm5, %xmm10
+; SSE42-NEXT: pand %xmm9, %xmm10
+; SSE42-NEXT: movdqa {{.*#+}} xmm8 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
+; SSE42-NEXT: movdqa %xmm8, %xmm11
+; SSE42-NEXT: pshufb %xmm10, %xmm11
+; SSE42-NEXT: psrlw $4, %xmm5
+; SSE42-NEXT: pand %xmm9, %xmm5
+; SSE42-NEXT: movdqa %xmm8, %xmm12
+; SSE42-NEXT: pshufb %xmm5, %xmm12
+; SSE42-NEXT: paddb %xmm11, %xmm12
+; SSE42-NEXT: pxor %xmm5, %xmm5
+; SSE42-NEXT: psadbw %xmm5, %xmm12
+; SSE42-NEXT: movdqa %xmm1, %xmm10
+; SSE42-NEXT: pand %xmm9, %xmm10
+; SSE42-NEXT: movdqa %xmm8, %xmm11
+; SSE42-NEXT: pshufb %xmm10, %xmm11
+; SSE42-NEXT: psrlw $4, %xmm1
+; SSE42-NEXT: pand %xmm9, %xmm1
+; SSE42-NEXT: movdqa %xmm8, %xmm10
+; SSE42-NEXT: pshufb %xmm1, %xmm10
+; SSE42-NEXT: paddb %xmm11, %xmm10
+; SSE42-NEXT: psadbw %xmm5, %xmm10
+; SSE42-NEXT: paddq %xmm12, %xmm10
+; SSE42-NEXT: movdqa %xmm7, %xmm1
+; SSE42-NEXT: pand %xmm9, %xmm1
+; SSE42-NEXT: movdqa %xmm8, %xmm11
+; SSE42-NEXT: pshufb %xmm1, %xmm11
+; SSE42-NEXT: psrlw $4, %xmm7
+; SSE42-NEXT: pand %xmm9, %xmm7
+; SSE42-NEXT: movdqa %xmm8, %xmm12
+; SSE42-NEXT: pshufb %xmm7, %xmm12
+; SSE42-NEXT: paddb %xmm11, %xmm12
+; SSE42-NEXT: psadbw %xmm5, %xmm12
+; SSE42-NEXT: movdqa %xmm3, %xmm1
+; SSE42-NEXT: pand %xmm9, %xmm1
+; SSE42-NEXT: movdqa %xmm8, %xmm7
+; SSE42-NEXT: pshufb %xmm1, %xmm7
+; SSE42-NEXT: psrlw $4, %xmm3
+; SSE42-NEXT: pand %xmm9, %xmm3
+; SSE42-NEXT: movdqa %xmm8, %xmm1
+; SSE42-NEXT: pshufb %xmm3, %xmm1
+; SSE42-NEXT: paddb %xmm7, %xmm1
+; SSE42-NEXT: psadbw %xmm5, %xmm1
+; SSE42-NEXT: paddq %xmm12, %xmm1
+; SSE42-NEXT: paddq %xmm10, %xmm1
+; SSE42-NEXT: movdqa %xmm4, %xmm3
+; SSE42-NEXT: pand %xmm9, %xmm3
+; SSE42-NEXT: movdqa %xmm8, %xmm7
+; SSE42-NEXT: pshufb %xmm3, %xmm7
+; SSE42-NEXT: psrlw $4, %xmm4
+; SSE42-NEXT: pand %xmm9, %xmm4
+; SSE42-NEXT: movdqa %xmm8, %xmm10
+; SSE42-NEXT: pshufb %xmm4, %xmm10
+; SSE42-NEXT: paddb %xmm7, %xmm10
+; SSE42-NEXT: psadbw %xmm5, %xmm10
+; SSE42-NEXT: movdqa %xmm0, %xmm3
+; SSE42-NEXT: pand %xmm9, %xmm3
+; SSE42-NEXT: movdqa %xmm8, %xmm4
+; SSE42-NEXT: pshufb %xmm3, %xmm4
+; SSE42-NEXT: psrlw $4, %xmm0
+; SSE42-NEXT: pand %xmm9, %xmm0
+; SSE42-NEXT: movdqa %xmm8, %xmm3
+; SSE42-NEXT: pshufb %xmm0, %xmm3
+; SSE42-NEXT: paddb %xmm4, %xmm3
+; SSE42-NEXT: psadbw %xmm5, %xmm3
+; SSE42-NEXT: paddq %xmm10, %xmm3
+; SSE42-NEXT: movdqa %xmm6, %xmm0
+; SSE42-NEXT: pand %xmm9, %xmm0
+; SSE42-NEXT: movdqa %xmm8, %xmm4
+; SSE42-NEXT: pshufb %xmm0, %xmm4
+; SSE42-NEXT: psrlw $4, %xmm6
+; SSE42-NEXT: pand %xmm9, %xmm6
+; SSE42-NEXT: movdqa %xmm8, %xmm0
+; SSE42-NEXT: pshufb %xmm6, %xmm0
+; SSE42-NEXT: paddb %xmm4, %xmm0
+; SSE42-NEXT: psadbw %xmm5, %xmm0
+; SSE42-NEXT: movdqa %xmm2, %xmm4
+; SSE42-NEXT: pand %xmm9, %xmm4
+; SSE42-NEXT: movdqa %xmm8, %xmm6
+; SSE42-NEXT: pshufb %xmm4, %xmm6
+; SSE42-NEXT: psrlw $4, %xmm2
+; SSE42-NEXT: pand %xmm9, %xmm2
+; SSE42-NEXT: pshufb %xmm2, %xmm8
+; SSE42-NEXT: paddb %xmm6, %xmm8
+; SSE42-NEXT: psadbw %xmm5, %xmm8
+; SSE42-NEXT: paddq %xmm0, %xmm8
+; SSE42-NEXT: paddq %xmm3, %xmm8
+; SSE42-NEXT: paddq %xmm1, %xmm8
+; SSE42-NEXT: pshufd {{.*#+}} xmm0 = xmm8[2,3,2,3]
+; SSE42-NEXT: paddq %xmm8, %xmm0
+; SSE42-NEXT: movq %xmm0, %rax
+; SSE42-NEXT: retq
+;
+; AVX2-LABEL: reduce_ctpop_v16i64:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpbroadcastb {{.*#+}} ymm4 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX2-NEXT: vpand %ymm4, %ymm2, %ymm5
+; AVX2-NEXT: vbroadcasti128 {{.*#+}} ymm6 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
+; AVX2-NEXT: # ymm6 = mem[0,1,0,1]
+; AVX2-NEXT: vpshufb %ymm5, %ymm6, %ymm5
+; AVX2-NEXT: vpsrlw $4, %ymm2, %ymm2
+; AVX2-NEXT: vpand %ymm4, %ymm2, %ymm2
+; AVX2-NEXT: vpshufb %ymm2, %ymm6, %ymm2
+; AVX2-NEXT: vpaddb %ymm5, %ymm2, %ymm2
+; AVX2-NEXT: vpxor %xmm5, %xmm5, %xmm5
+; AVX2-NEXT: vpsadbw %ymm5, %ymm2, %ymm2
+; AVX2-NEXT: vpand %ymm4, %ymm0, %ymm7
+; AVX2-NEXT: vpshufb %ymm7, %ymm6, %ymm7
+; AVX2-NEXT: vpsrlw $4, %ymm0, %ymm0
+; AVX2-NEXT: vpand %ymm4, %ymm0, %ymm0
+; AVX2-NEXT: vpshufb %ymm0, %ymm6, %ymm0
+; AVX2-NEXT: vpaddb %ymm7, %ymm0, %ymm0
+; AVX2-NEXT: vpsadbw %ymm5, %ymm0, %ymm0
+; AVX2-NEXT: vpaddq %ymm2, %ymm0, %ymm0
+; AVX2-NEXT: vpand %ymm4, %ymm3, %ymm2
+; AVX2-NEXT: vpshufb %ymm2, %ymm6, %ymm2
+; AVX2-NEXT: vpsrlw $4, %ymm3, %ymm3
+; AVX2-NEXT: vpand %ymm4, %ymm3, %ymm3
+; AVX2-NEXT: vpshufb %ymm3, %ymm6, %ymm3
+; AVX2-NEXT: vpaddb %ymm2, %ymm3, %ymm2
+; AVX2-NEXT: vpsadbw %ymm5, %ymm2, %ymm2
+; AVX2-NEXT: vpand %ymm4, %ymm1, %ymm3
+; AVX2-NEXT: vpshufb %ymm3, %ymm6, %ymm3
+; AVX2-NEXT: vpsrlw $4, %ymm1, %ymm1
+; AVX2-NEXT: vpand %ymm4, %ymm1, %ymm1
+; AVX2-NEXT: vpshufb %ymm1, %ymm6, %ymm1
+; AVX2-NEXT: vpaddb %ymm3, %ymm1, %ymm1
+; AVX2-NEXT: vpsadbw %ymm5, %ymm1, %ymm1
+; AVX2-NEXT: vpaddq %ymm2, %ymm1, %ymm1
+; AVX2-NEXT: vpaddq %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
+; AVX2-NEXT: vpaddq %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
+; AVX2-NEXT: vpaddq %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vmovq %xmm0, %rax
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+;
+; AVX512VL-LABEL: reduce_ctpop_v16i64:
+; AVX512VL: # %bb.0:
+; AVX512VL-NEXT: vpbroadcastb {{.*#+}} zmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX512VL-NEXT: vpandq %zmm2, %zmm0, %zmm3
+; AVX512VL-NEXT: vbroadcasti32x4 {{.*#+}} zmm4 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
+; AVX512VL-NEXT: # zmm4 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512VL-NEXT: vpshufb %zmm3, %zmm4, %zmm3
+; AVX512VL-NEXT: vpsrlw $4, %zmm0, %zmm0
+; AVX512VL-NEXT: vpandq %zmm2, %zmm0, %zmm0
+; AVX512VL-NEXT: vpshufb %zmm0, %zmm4, %zmm0
+; AVX512VL-NEXT: vpaddb %zmm3, %zmm0, %zmm0
+; AVX512VL-NEXT: vpxor %xmm3, %xmm3, %xmm3
+; AVX512VL-NEXT: vpsadbw %zmm3, %zmm0, %zmm0
+; AVX512VL-NEXT: vpandq %zmm2, %zmm1, %zmm5
+; AVX512VL-NEXT: vpshufb %zmm5, %zmm4, %zmm5
+; AVX512VL-NEXT: vpsrlw $4, %zmm1, %zmm1
+; AVX512VL-NEXT: vpandq %zmm2, %zmm1, %zmm1
+; AVX512VL-NEXT: vpshufb %zmm1, %zmm4, %zmm1
+; AVX512VL-NEXT: vpaddb %zmm5, %zmm1, %zmm1
+; AVX512VL-NEXT: vpsadbw %zmm3, %zmm1, %zmm1
+; AVX512VL-NEXT: vpmovqb %zmm1, %xmm1
+; AVX512VL-NEXT: vpmovqb %zmm0, %xmm0
+; AVX512VL-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; AVX512VL-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX512VL-NEXT: vpsadbw %xmm1, %xmm0, %xmm0
+; AVX512VL-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
+; AVX512VL-NEXT: vpaddq %xmm1, %xmm0, %xmm0
+; AVX512VL-NEXT: vmovq %xmm0, %rax
+; AVX512VL-NEXT: vzeroupper
+; AVX512VL-NEXT: retq
+;
+; AVX512VPOPCNT-LABEL: reduce_ctpop_v16i64:
+; AVX512VPOPCNT: # %bb.0:
+; AVX512VPOPCNT-NEXT: vpopcntq %zmm0, %zmm0
+; AVX512VPOPCNT-NEXT: vpopcntq %zmm1, %zmm1
+; AVX512VPOPCNT-NEXT: vpmovqb %zmm1, %xmm1
+; AVX512VPOPCNT-NEXT: vpmovqb %zmm0, %xmm0
+; AVX512VPOPCNT-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; AVX512VPOPCNT-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX512VPOPCNT-NEXT: vpsadbw %xmm1, %xmm0, %xmm0
+; AVX512VPOPCNT-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
+; AVX512VPOPCNT-NEXT: vpaddq %xmm1, %xmm0, %xmm0
+; AVX512VPOPCNT-NEXT: vmovq %xmm0, %rax
+; AVX512VPOPCNT-NEXT: vzeroupper
+; AVX512VPOPCNT-NEXT: retq
+ %p0 = tail call <16 x i64> @llvm.ctpop.v16i64(<16 x i64> %a0)
+ %r0 = tail call i64 @llvm.vector.reduce.add.v16i64(<16 x i64> %p0)
+ ret i64 %r0
+}
+
;
; Vector of reductions of per-element ctpop results (create vector of each count all bits in each vector)
;
diff --git a/llvm/test/DebugInfo/AArch64/select-optimize-trailing-dbg-records.ll b/llvm/test/DebugInfo/AArch64/select-optimize-trailing-dbg-records.ll
new file mode 100644
index 0000000..4ae1fb4
--- /dev/null
+++ b/llvm/test/DebugInfo/AArch64/select-optimize-trailing-dbg-records.ll
@@ -0,0 +1,63 @@
+; RUN: opt %s -passes='require<profile-summary>,function(select-optimize)' -o - -S \
+; RUN: | FileCheck %s
+; RUN: opt %s --try-experimental-debuginfo-iterators -passes='require<profile-summary>,function(select-optimize)' -o - -S \
+; RUN: | FileCheck %s
+
+;; Check that the dbg.value is moved into the start of the end-block of the
+;; inserted if-block.
+
+; CHECK: select.end:
+; CHECK-NEXT: %[[PHI:.*]] = phi i32
+; CHECK-NEXT: dbg.value(metadata i32 %[[PHI]],
+
+source_filename = "test.ll"
+target datalayout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128"
+target triple = "aarch64-unknown-fuchsia"
+
+%struct.hb_glyph_info_t = type { i32, i32, i32, %union._hb_var_int_t, %union._hb_var_int_t }
+%union._hb_var_int_t = type { i32 }
+
+define void @_Z22_hb_ot_shape_normalizePK18hb_ot_shape_plan_tP11hb_buffer_tP9hb_font_t() {
+entry:
+ br label %while.body193
+
+while.body193: ; preds = %while.body193, %entry
+ %starter.0337 = phi i32 [ %spec.select322, %while.body193 ], [ 0, %entry ]
+ %idxprom207 = zext i32 %starter.0337 to i64
+ %arrayidx208 = getelementptr %struct.hb_glyph_info_t, ptr null, i64 %idxprom207
+ %0 = load i32, ptr %arrayidx208, align 4
+ %call247.val = load i16, ptr null, align 4
+ %cmp249327 = icmp ult i16 %call247.val, 0
+ %cmp249 = select i1 false, i1 false, i1 %cmp249327
+ %spec.select322 = select i1 %cmp249, i32 0, i32 %starter.0337
+ tail call void @llvm.dbg.value(metadata i32 %spec.select322, metadata !13, metadata !DIExpression()), !dbg !20
+ br label %while.body193
+}
+
+declare void @llvm.dbg.value(metadata, metadata, metadata)
+
+
+!llvm.dbg.cu = !{!0}
+!llvm.module.flags = !{!12}
+
+!0 = distinct !DICompileUnit(language: DW_LANG_C_plus_plus_14, file: !1, isOptimized: true, runtimeVersion: 0, emissionKind: FullDebug, enums: !2, retainedTypes: !2, globals: !3, imports: !2, splitDebugInlining: false, nameTableKind: GNU)
+!1 = !DIFile(filename: "../../third_party/harfbuzz-ng/src/src/hb-ot-shape-normalize.cc", directory: ".")
+!2 = !{}
+!3 = !{!4, !9}
+!4 = !DIGlobalVariableExpression(var: !5, expr: !DIExpression())
+!5 = distinct !DIGlobalVariable(scope: null, file: !1, line: 383, type: !6, isLocal: true, isDefinition: true)
+!6 = !DICompositeType(tag: DW_TAG_array_type, baseType: !7, size: 112, elements: !2)
+!7 = !DIDerivedType(tag: DW_TAG_const_type, baseType: !8)
+!8 = !DIBasicType(name: "char", size: 8, encoding: DW_ATE_unsigned_char)
+!9 = !DIGlobalVariableExpression(var: !10, expr: !DIExpression())
+!10 = distinct !DIGlobalVariable(scope: null, file: !1, line: 410, type: !11, isLocal: true, isDefinition: true)
+!11 = !DICompositeType(tag: DW_TAG_array_type, baseType: !7, size: 96, elements: !2)
+!12 = !{i32 2, !"Debug Info Version", i32 3}
+!13 = !DILocalVariable(name: "starter", scope: !14, file: !1, line: 441, type: !19)
+!14 = distinct !DILexicalBlock(scope: !15, file: !1, line: 435, column: 3)
+!15 = distinct !DILexicalBlock(scope: !16, file: !1, line: 431, column: 7)
+!16 = distinct !DISubprogram(name: "_hb_ot_shape_normalize", linkageName: "_Z22_hb_ot_shape_normalizePK18hb_ot_shape_plan_tP11hb_buffer_tP9hb_font_t", scope: !1, file: !1, line: 291, type: !17, scopeLine: 294, flags: DIFlagPrototyped | DIFlagAllCallsDescribed, spFlags: DISPFlagDefinition | DISPFlagOptimized, unit: !0, retainedNodes: !2)
+!17 = distinct !DISubroutineType(types: !18)
+!18 = !{null}
+!19 = !DIBasicType(name: "unsigned int", size: 32, encoding: DW_ATE_unsigned)
+!20 = !DILocation(line: 0, scope: !14)
diff --git a/llvm/test/DebugInfo/Generic/assignment-tracking/declare-to-assign/hwasan.ll b/llvm/test/DebugInfo/Generic/assignment-tracking/declare-to-assign/hwasan.ll
index c4b209d..f7f126c 100644
--- a/llvm/test/DebugInfo/Generic/assignment-tracking/declare-to-assign/hwasan.ll
+++ b/llvm/test/DebugInfo/Generic/assignment-tracking/declare-to-assign/hwasan.ll
@@ -1,6 +1,7 @@
; RUN: opt %s -S -passes=declare-to-assign -o - | FileCheck %s
+; RUN: opt --try-experimental-debuginfo-iterators %s -S -passes=declare-to-assign -o - | FileCheck %s
-; CHECK: call void @llvm.dbg.declare
+; CHECK: call void @llvm.dbg.assign
define dso_local void @f() sanitize_hwaddress !dbg !9 {
entry:
diff --git a/llvm/test/DebugInfo/Generic/ipsccp-remap-assign-id.ll b/llvm/test/DebugInfo/Generic/ipsccp-remap-assign-id.ll
new file mode 100644
index 0000000..13ac88d
--- /dev/null
+++ b/llvm/test/DebugInfo/Generic/ipsccp-remap-assign-id.ll
@@ -0,0 +1,59 @@
+; RUN: opt -passes=ipsccp %s -S -o - | FileCheck %s
+; RUN: opt --try-experimental-debuginfo-iterators -passes=ipsccp %s -S -o - | FileCheck %s
+
+;; Check the dbg.assign DIAssignID operand gets remapped after cloning.
+
+; CHECK: %tmp = alloca [4096 x i32], i32 0, align 16, !DIAssignID ![[ID1:[0-9]+]]
+; CHECK-NEXT: dbg.assign(metadata i1 undef, metadata !{{.*}}, metadata !DIExpression(), metadata ![[ID1]], metadata ptr %tmp, metadata !DIExpression())
+;
+; CHECK: %tmp = alloca [4096 x i32], i32 0, align 16, !DIAssignID ![[ID2:[0-9]+]]
+; CHECK-NEXT: dbg.assign(metadata i1 undef, metadata !{{.*}}, metadata !DIExpression(), metadata ![[ID2]], metadata ptr %tmp, metadata !DIExpression())
+
+; Function Attrs: mustprogress nocallback nofree nosync nounwind speculatable willreturn memory(none)
+declare void @llvm.dbg.declare(metadata, metadata, metadata)
+
+define void @inv_txfm_add_dct_dct_4x4_c() {
+entry:
+ call void @inv_txfm_add_c(ptr @dav1d_inv_dct4_1d_c)
+ ret void
+}
+
+declare void @llvm.memset.p0.i64(ptr nocapture writeonly, i8, i64, i1 immarg)
+
+; Function Attrs: noinline
+define void @inv_txfm_add_c(ptr %first_1d_fn) #2 {
+entry:
+ %tmp = alloca [4096 x i32], i32 0, align 16, !DIAssignID !5
+ tail call void @llvm.dbg.assign(metadata i1 undef, metadata !6, metadata !DIExpression(), metadata !5, metadata ptr %tmp, metadata !DIExpression()), !dbg !16
+ call void @llvm.memset.p0.i64(ptr %tmp, i8 0, i64 0, i1 false), !DIAssignID !17
+ call void %first_1d_fn(ptr null, i64 0, i32 0, i32 0)
+ ret void
+}
+
+declare void @dav1d_inv_dct4_1d_c(ptr, i64, i32, i32)
+
+declare void @llvm.dbg.assign(metadata, metadata, metadata, metadata, metadata, metadata)
+
+attributes #2 = { noinline }
+
+!llvm.dbg.cu = !{!0}
+!llvm.module.flags = !{!3, !4}
+
+!0 = distinct !DICompileUnit(language: DW_LANG_C11, file: !1, isOptimized: true, runtimeVersion: 0, emissionKind: FullDebug, enums: !2, retainedTypes: !2, splitDebugInlining: false, nameTableKind: GNU)
+!1 = !DIFile(filename: "itx_tmpl.c", directory: ".")
+!2 = !{}
+!3 = !{i32 2, !"Debug Info Version", i32 3}
+!4 = !{i32 7, !"debug-info-assignment-tracking", i1 true}
+!5 = distinct !DIAssignID()
+!6 = !DILocalVariable(name: "tmp", scope: !7, file: !1, line: 78, type: !10)
+!7 = distinct !DISubprogram(name: "inv_txfm_add_c", scope: !1, file: !1, line: 41, type: !8, scopeLine: 45, flags: DIFlagPrototyped | DIFlagAllCallsDescribed, spFlags: DISPFlagLocalToUnit | DISPFlagDefinition | DISPFlagOptimized, unit: !0, retainedNodes: !2)
+!8 = distinct !DISubroutineType(types: !9)
+!9 = !{null}
+!10 = !DICompositeType(tag: DW_TAG_array_type, baseType: !11, size: 131072, elements: !2)
+!11 = !DIDerivedType(tag: DW_TAG_typedef, name: "int32_t", file: !12, line: 26, baseType: !13)
+!12 = !DIFile(filename: "stdint-intn.h", directory: ".")
+!13 = !DIDerivedType(tag: DW_TAG_typedef, name: "__int32_t", file: !14, line: 41, baseType: !15)
+!14 = !DIFile(filename: "types.h", directory: ".")
+!15 = !DIBasicType(name: "int", size: 32, encoding: DW_ATE_signed)
+!16 = !DILocation(line: 0, scope: !7)
+!17 = distinct !DIAssignID()
diff --git a/llvm/test/DebugInfo/X86/debug-names-dwarf64.ll b/llvm/test/DebugInfo/X86/debug-names-dwarf64.ll
index c15e2ad..9a5fd07 100644
--- a/llvm/test/DebugInfo/X86/debug-names-dwarf64.ll
+++ b/llvm/test/DebugInfo/X86/debug-names-dwarf64.ll
@@ -30,11 +30,6 @@
; CHECK-NEXT: CU[0]: 0x00000000
; CHECK-NEXT: ]
; CHECK-NEXT: Abbreviations [
-; CHECK-NEXT: Abbreviation [[ABBREV_LABEL:0x[0-9a-f]*]] {
-; CHECK-NEXT: Tag: DW_TAG_label
-; CHECK-NEXT: DW_IDX_die_offset: DW_FORM_ref4
-; CHECK-NEXT: DW_IDX_parent: DW_FORM_ref4
-; CHECK-NEXT: }
; CHECK-NEXT: Abbreviation [[ABBREV:0x[0-9a-f]*]] {
; CHECK-NEXT: Tag: DW_TAG_base_type
; CHECK-NEXT: DW_IDX_die_offset: DW_FORM_ref4
@@ -50,6 +45,11 @@
; CHECK-NEXT: DW_IDX_die_offset: DW_FORM_ref4
; CHECK-NEXT: DW_IDX_parent: DW_FORM_flag_present
; CHECK-NEXT: }
+; CHECK-NEXT: Abbreviation [[ABBREV_LABEL:0x[0-9a-f]*]] {
+; CHECK-NEXT: Tag: DW_TAG_label
+; CHECK-NEXT: DW_IDX_die_offset: DW_FORM_ref4
+; CHECK-NEXT: DW_IDX_parent: DW_FORM_ref4
+; CHECK-NEXT: }
; CHECK-NEXT: ]
; CHECK-NEXT: Bucket 0 [
; CHECK-NEXT: Name 1 {
diff --git a/llvm/test/DebugInfo/X86/debug-names-types.ll b/llvm/test/DebugInfo/X86/debug-names-types.ll
index f41bb55..ff0d4d5 100644
--- a/llvm/test/DebugInfo/X86/debug-names-types.ll
+++ b/llvm/test/DebugInfo/X86/debug-names-types.ll
@@ -37,20 +37,14 @@
; CHECK-NEXT: LocalTU[0]: 0x00000000
; CHECK-NEXT: ]
; CHECK: Abbreviations [
-; CHECK-NEXT: Abbreviation [[ABBREV3:0x[0-9a-f]*]] {
-; CHECK-NEXT: Tag: DW_TAG_structure_type
-; CHECK-NEXT: DW_IDX_type_unit: DW_FORM_data1
-; CHECK-NEXT: DW_IDX_die_offset: DW_FORM_ref4
-; CHECK-NEXT: DW_IDX_parent: DW_FORM_flag_present
-; CHECK-NEXT: }
-; CHECK-NEXT: Abbreviation [[ABBREV4:0x[0-9a-f]*]] {
+; CHECK-NEXT: Abbreviation [[ABBREV:0x[0-9a-f]*]] {
; CHECK-NEXT: Tag: DW_TAG_base_type
-; CHECK-NEXT: DW_IDX_type_unit: DW_FORM_data1
; CHECK-NEXT: DW_IDX_die_offset: DW_FORM_ref4
; CHECK-NEXT: DW_IDX_parent: DW_FORM_flag_present
; CHECK-NEXT: }
-; CHECK-NEXT: Abbreviation [[ABBREV:0x[0-9a-f]*]] {
-; CHECK-NEXT: Tag: DW_TAG_base_type
+; CHECK-NEXT: Abbreviation [[ABBREV3:0x[0-9a-f]*]] {
+; CHECK-NEXT: Tag: DW_TAG_structure_type
+; CHECK-NEXT: DW_IDX_type_unit: DW_FORM_data1
; CHECK-NEXT: DW_IDX_die_offset: DW_FORM_ref4
; CHECK-NEXT: DW_IDX_parent: DW_FORM_flag_present
; CHECK-NEXT: }
@@ -64,6 +58,12 @@
; CHECK-NEXT: DW_IDX_die_offset: DW_FORM_ref4
; CHECK-NEXT: DW_IDX_parent: DW_FORM_flag_present
; CHECK-NEXT: }
+; CHECK-NEXT: Abbreviation [[ABBREV4:0x[0-9a-f]*]] {
+; CHECK-NEXT: Tag: DW_TAG_base_type
+; CHECK-NEXT: DW_IDX_type_unit: DW_FORM_data1
+; CHECK-NEXT: DW_IDX_die_offset: DW_FORM_ref4
+; CHECK-NEXT: DW_IDX_parent: DW_FORM_flag_present
+; CHECK-NEXT: }
; CHECK-NEXT: ]
; CHECK-NEXT: Bucket 0 [
; CHECK-NEXT: Name 1 {
@@ -130,7 +130,7 @@
; CHECK-SPLIT: Foreign TU count: 1
; CHECK-SPLIT-NEXT: Bucket count: 4
; CHECK-SPLIT-NEXT: Name count: 4
-; CHECK-SPLIT-NEXT: Abbreviations table size: 0x32
+; CHECK-SPLIT-NEXT: Abbreviations table size: 0x2D
; CHECK-SPLIT-NEXT: Augmentation: 'LLVM0700'
; CHECK-SPLIT-NEXT: }
; CHECK-SPLIT-NEXT: Compilation Unit offsets [
@@ -140,20 +140,14 @@
; CHECK-SPLIT-NEXT: ForeignTU[0]: 0x675d23e4f33235f2
; CHECK-SPLIT-NEXT: ]
; CHECK-SPLIT-NEXT: Abbreviations [
-; CHECK-SPLIT-NEXT: Abbreviation [[ABBREV1:0x[0-9a-f]*]] {
-; CHECK-SPLIT-NEXT: Tag: DW_TAG_structure_type
-; CHECK-SPLIT-NEXT: DW_IDX_type_unit: DW_FORM_data1
-; CHECK-SPLIT-NEXT: DW_IDX_die_offset: DW_FORM_ref4
-; CHECK-SPLIT-NEXT: DW_IDX_parent: DW_FORM_flag_present
-; CHECK-SPLIT-NEXT: }
-; CHECK-SPLIT-NEXT: Abbreviation [[ABBREV4:0x[0-9a-f]*]] {
+; CHECK-SPLIT-NEXT: Abbreviation [[ABBREV2:0x[0-9a-f]*]] {
; CHECK-SPLIT-NEXT: Tag: DW_TAG_base_type
-; CHECK-SPLIT-NEXT: DW_IDX_type_unit: DW_FORM_data1
; CHECK-SPLIT-NEXT: DW_IDX_die_offset: DW_FORM_ref4
; CHECK-SPLIT-NEXT: DW_IDX_parent: DW_FORM_flag_present
; CHECK-SPLIT-NEXT: }
-; CHECK-SPLIT-NEXT: Abbreviation [[ABBREV2:0x[0-9a-f]*]] {
-; CHECK-SPLIT-NEXT: Tag: DW_TAG_base_type
+; CHECK-SPLIT-NEXT: Abbreviation [[ABBREV1:0x[0-9a-f]*]] {
+; CHECK-SPLIT-NEXT: Tag: DW_TAG_structure_type
+; CHECK-SPLIT-NEXT: DW_IDX_type_unit: DW_FORM_data1
; CHECK-SPLIT-NEXT: DW_IDX_die_offset: DW_FORM_ref4
; CHECK-SPLIT-NEXT: DW_IDX_parent: DW_FORM_flag_present
; CHECK-SPLIT-NEXT: }
@@ -167,6 +161,12 @@
; CHECK-SPLIT-NEXT: DW_IDX_die_offset: DW_FORM_ref4
; CHECK-SPLIT-NEXT: DW_IDX_parent: DW_FORM_flag_present
; CHECK-SPLIT-NEXT: }
+; CHECK-SPLIT-NEXT: Abbreviation [[ABBREV4:0x[0-9a-f]*]] {
+; CHECK-SPLIT-NEXT: Tag: DW_TAG_base_type
+; CHECK-SPLIT-NEXT: DW_IDX_type_unit: DW_FORM_data1
+; CHECK-SPLIT-NEXT: DW_IDX_die_offset: DW_FORM_ref4
+; CHECK-SPLIT-NEXT: DW_IDX_parent: DW_FORM_flag_present
+; CHECK-SPLIT-NEXT: }
; CHECK-SPLIT-NEXT: ]
; CHECK-SPLIT-NEXT: Bucket 0 [
; CHECK-SPLIT-NEXT: Name 1 {
diff --git a/llvm/test/DebugInfo/X86/dwarfdump-str-offsets.s b/llvm/test/DebugInfo/X86/dwarfdump-str-offsets.s
index 1725813..66dfb5f 100644
--- a/llvm/test/DebugInfo/X86/dwarfdump-str-offsets.s
+++ b/llvm/test/DebugInfo/X86/dwarfdump-str-offsets.s
@@ -1,6 +1,9 @@
# RUN: llvm-mc -triple x86_64-unknown-linux %s -filetype=obj -o %t.o
# RUN: llvm-dwarfdump -v %t.o 2> %t.err | FileCheck --check-prefixes=COMMON,SPLIT,OFFSETS %s
-# RUN: llvm-dwarfdump -verify %t.o | FileCheck --check-prefix=VERIFY %s
+
+# FIXME: the verifier does not accept padding between debug-str-offset
+# sections, which this test uses.
+# RUN: llvm-dwarfdump -verify --debug-info %t.o | FileCheck --check-prefix=VERIFY %s
# RUN: llvm-dwarfdump -debug-str-offsets %t.o | FileCheck --check-prefix=OFFSETS %s
#
# Check that we don't report an error on a non-existent range list table.
diff --git a/llvm/test/DebugInfo/X86/skeleton-unit-verify.s b/llvm/test/DebugInfo/X86/skeleton-unit-verify.s
index 92a3df4..6aaac18 100644
--- a/llvm/test/DebugInfo/X86/skeleton-unit-verify.s
+++ b/llvm/test/DebugInfo/X86/skeleton-unit-verify.s
@@ -11,6 +11,8 @@
# CHECK-NEXT: DW_TAG_skeleton_unit
# CHECK-NEXT: error: Skeleton compilation unit has children.
# CHECK-NEXT: Verifying dwo Units...
+# CHECK-NEXT: Verifying .debug_line...
+# CHECK-NEXT: Verifying .debug_str_offsets...
# CHECK-NEXT: Errors detected.
.section .debug_abbrev,"",@progbits
diff --git a/llvm/test/Instrumentation/HWAddressSanitizer/dbg-assign-tag-offset.ll b/llvm/test/Instrumentation/HWAddressSanitizer/dbg-assign-tag-offset.ll
new file mode 100644
index 0000000..ec8d034
--- /dev/null
+++ b/llvm/test/Instrumentation/HWAddressSanitizer/dbg-assign-tag-offset.ll
@@ -0,0 +1,60 @@
+; RUN: opt -passes=hwasan -S -o - %s | FileCheck %s
+; RUN: opt --try-experimental-debuginfo-iterators -passes=hwasan -S -o - %s | FileCheck %s
+
+source_filename = "test.ll"
+target datalayout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128"
+target triple = "aarch64--linux-android"
+
+declare void @g(ptr, ptr, ptr, ptr, ptr, ptr)
+
+; Function Attrs: sanitize_hwaddress
+define void @f() #0 !dbg !7 {
+entry:
+ %nodebug0 = alloca ptr, align 8
+ %nodebug1 = alloca ptr, align 8
+ %nodebug2 = alloca ptr, align 8
+ %nodebug3 = alloca ptr, align 8
+ ; CHECK: %a = alloca{{.*}} !DIAssignID ![[ID1:[0-9]+]]
+ %a = alloca ptr, align 8, !DIAssignID !13
+ ; CHECK: @llvm.dbg.assign{{.*}} metadata ![[ID1]]{{.*}} !DIExpression(DW_OP_LLVM_tag_offset, 32)
+ call void @llvm.dbg.assign(metadata i1 undef, metadata !14, metadata !DIExpression(), metadata !13, metadata ptr %a, metadata !DIExpression()), !dbg !15
+ ; CHECK: %b = alloca{{.*}} !DIAssignID ![[ID2:[0-9]+]]
+ %b = alloca ptr, align 8, !DIAssignID !16
+ ; CHECK: @llvm.dbg.assign{{.*}} metadata ![[ID2]]{{.*}} !DIExpression(DW_OP_LLVM_tag_offset, 96)
+ call void @llvm.dbg.assign(metadata i1 undef, metadata !17, metadata !DIExpression(), metadata !16, metadata ptr %b, metadata !DIExpression()), !dbg !15
+ call void @g(ptr %nodebug0, ptr %nodebug1, ptr %nodebug2, ptr %nodebug3, ptr %a, ptr %b)
+ ret void, !dbg !18
+}
+
+; Function Attrs: nocallback nofree nosync nounwind speculatable willreturn memory(none)
+declare void @llvm.dbg.declare(metadata, metadata, metadata) #1
+
+; Function Attrs: nocallback nofree nosync nounwind speculatable willreturn memory(none)
+declare void @llvm.dbg.assign(metadata, metadata, metadata, metadata, metadata, metadata) #1
+
+attributes #0 = { sanitize_hwaddress }
+attributes #1 = { nocallback nofree nosync nounwind speculatable willreturn memory(none) }
+
+!llvm.dbg.cu = !{!0}
+!llvm.module.flags = !{!3, !4, !5}
+!llvm.ident = !{!6}
+
+!0 = distinct !DICompileUnit(language: DW_LANG_C99, file: !1, producer: "clang", isOptimized: false, runtimeVersion: 0, emissionKind: FullDebug, enums: !2)
+!1 = !DIFile(filename: "x.c", directory: "/")
+!2 = !{}
+!3 = !{i32 2, !"Dwarf Version", i32 4}
+!4 = !{i32 2, !"Debug Info Version", i32 3}
+!5 = !{i32 7, !"debug-info-assignment-tracking", i1 true}
+!6 = !{!"clang"}
+!7 = distinct !DISubprogram(name: "f", scope: !1, file: !1, line: 1, type: !8, scopeLine: 1, flags: DIFlagPrototyped, spFlags: DISPFlagDefinition, unit: !0, retainedNodes: !2)
+!8 = !DISubroutineType(types: !9)
+!9 = !{null, !10}
+!10 = !DIDerivedType(tag: DW_TAG_pointer_type, baseType: !11, size: 64)
+!11 = !DIDerivedType(tag: DW_TAG_const_type, baseType: !12)
+!12 = !DIBasicType(name: "char", size: 8, encoding: DW_ATE_signed_char)
+!13 = distinct !DIAssignID()
+!14 = !DILocalVariable(name: "a", scope: !7, file: !1, line: 1, type: !10)
+!15 = !DILocation(line: 0, scope: !7)
+!16 = distinct !DIAssignID()
+!17 = !DILocalVariable(name: "b", scope: !7, file: !1, line: 1, type: !10)
+!18 = !DILocation(line: 1, column: 37, scope: !7)
diff --git a/llvm/test/MC/BPF/insn-unit.s b/llvm/test/MC/BPF/insn-unit.s
index 58342cd..224eb73 100644
--- a/llvm/test/MC/BPF/insn-unit.s
+++ b/llvm/test/MC/BPF/insn-unit.s
@@ -61,6 +61,9 @@
// CHECK-32: c3 92 10 00 00 00 00 00 lock *(u32 *)(r2 + 16) += w9
// CHECK: db a3 e2 ff 00 00 00 00 lock *(u64 *)(r3 - 30) += r10
+ callx r2
+// CHECK: 8d 02 00 00 00 00 00 00 callx r2
+
// ======== BPF_JMP Class ========
if r1 & r2 goto Llabel0 // BPF_JSET | BPF_X
if r1 & 0xffff goto Llabel0 // BPF_JSET | BPF_K
diff --git a/llvm/test/MC/Disassembler/AMDGPU/gfx11_dasm_ldsdir.txt b/llvm/test/MC/Disassembler/AMDGPU/gfx11_dasm_ldsdir.txt
index d9803fd..0dd97bc 100644
--- a/llvm/test/MC/Disassembler/AMDGPU/gfx11_dasm_ldsdir.txt
+++ b/llvm/test/MC/Disassembler/AMDGPU/gfx11_dasm_ldsdir.txt
@@ -1,5 +1,4 @@
# RUN: llvm-mc -triple=amdgcn -mcpu=gfx1100 -disassemble -show-encoding < %s | FileCheck -check-prefix=GFX11 %s
-# RUN: llvm-mc -triple=amdgcn -mcpu=gfx1100 -mattr=-WavefrontSize32,+WavefrontSize64 -disassemble -show-encoding < %s | FileCheck -check-prefix=GFX11 %s
# GFX11: lds_direct_load v10 wait_vdst:6 ; encoding: [0x0a,0x00,0x16,0xce]
0x0a,0x00,0x16,0xce
diff --git a/llvm/test/MC/Disassembler/AMDGPU/gfx12_dasm_vdsdir.txt b/llvm/test/MC/Disassembler/AMDGPU/gfx12_dasm_vdsdir.txt
index b7c0394..705dd64 100644
--- a/llvm/test/MC/Disassembler/AMDGPU/gfx12_dasm_vdsdir.txt
+++ b/llvm/test/MC/Disassembler/AMDGPU/gfx12_dasm_vdsdir.txt
@@ -1,5 +1,4 @@
# RUN: llvm-mc -arch=amdgcn -mcpu=gfx1200 -disassemble -show-encoding < %s | FileCheck --strict-whitespace -check-prefix=GFX12 %s
-# RUN: llvm-mc -arch=amdgcn -mcpu=gfx1200 -mattr=-WavefrontSize32,+WavefrontSize64 -disassemble -show-encoding < %s | FileCheck --strict-whitespace -check-prefix=GFX12 %s
# GFX12: ds_direct_load v10 wait_va_vdst:6 wait_vm_vsrc:0 ; encoding: [0x0a,0x00,0x16,0xce]
0x0a,0x00,0x16,0xce
diff --git a/llvm/test/MC/Mips/macro-la-pic.s b/llvm/test/MC/Mips/macro-la-pic.s
index 2303f34..1875952 100644
--- a/llvm/test/MC/Mips/macro-la-pic.s
+++ b/llvm/test/MC/Mips/macro-la-pic.s
@@ -255,3 +255,25 @@ la $25, 2f
# XN32: lw $25, %got_disp(.Ltmp1)($gp) # encoding: [0x8f,0x99,A,A]
# XN32: # fixup A - offset: 0, value: %got_disp(.Ltmp1), kind: fixup_Mips_GOT_DISP
2:
+
+la $2,.Lstr
+# O32: lw $2, %got(.Lstr)($gp) # encoding: [0x8f,0x82,A,A]
+# O32-NEXT: # fixup A - offset: 0, value: %got(.Lstr), kind: fixup_Mips_GOT
+# O32-NEXT: addiu $2, $2, %lo(.Lstr) # encoding: [0x24,0x42,A,A]
+# O32-NEXT: # fixup A - offset: 0, value: %lo(.Lstr), kind: fixup_Mips_LO16
+
+# N32: lw $2, %got_disp(.Lstr)($gp) # encoding: [0x8f,0x82,A,A]
+# N32-NEXT: # fixup A - offset: 0, value: %got_disp(.Lstr), kind: fixup_Mips_GOT_DISP
+
+la $2,$str2
+# O32: lw $2, %got($str2)($gp) # encoding: [0x8f,0x82,A,A]
+# O32-NEXT: # fixup A - offset: 0, value: %got($str2), kind: fixup_Mips_GOT
+# O32-NEXT: addiu $2, $2, %lo($str2) # encoding: [0x24,0x42,A,A]
+# O32-NEXT: # fixup A - offset: 0, value: %lo($str2), kind: fixup_Mips_LO16
+
+# N32: lw $2, %got_disp($str2)($gp) # encoding: [0x8f,0x82,A,A]
+# N32-NEXT: # fixup A - offset: 0, value: %got_disp($str2), kind: fixup_Mips_GOT_DISP
+
+.rodata
+.Lstr: .4byte 0
+$str2: .4byte 0
diff --git a/llvm/test/TableGen/GlobalISelCombinerEmitter/builtins/match-table-replacerreg.td b/llvm/test/TableGen/GlobalISelCombinerEmitter/builtins/match-table-replacerreg.td
index cf57a24..622d1df 100644
--- a/llvm/test/TableGen/GlobalISelCombinerEmitter/builtins/match-table-replacerreg.td
+++ b/llvm/test/TableGen/GlobalISelCombinerEmitter/builtins/match-table-replacerreg.td
@@ -26,13 +26,13 @@ def MyCombiner: GICombiner<"GenMyCombiner", [
ReplaceTemp
]>;
-// CHECK: const uint8_t *GenMyCombiner::getMatchTable() const {
+// CHECK: const uint8_t *GenMyCombiner::getMatchTable() const {
// CHECK-NEXT: constexpr static uint8_t MatchTable0[] = {
-// CHECK-NEXT: GIM_SwitchOpcode, /*MI*/0, /*[*/GIMT_Encode2(65), GIMT_Encode2(181), /*)*//*default:*//*Label 2*/ GIMT_Encode4(558),
-// CHECK-NEXT: /*TargetOpcode::G_UNMERGE_VALUES*//*Label 0*/ GIMT_Encode4(474), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0),
-// CHECK-NEXT: /*TargetOpcode::G_FNEG*//*Label 1*/ GIMT_Encode4(526),
-// CHECK-NEXT: // Label 0: @474
-// CHECK-NEXT: GIM_Try, /*On fail goto*//*Label 3*/ GIMT_Encode4(525), // Rule ID 1 //
+// CHECK-NEXT: GIM_SwitchOpcode, /*MI*/0, /*[*/GIMT_Encode2(65), GIMT_Encode2(182), /*)*//*default:*//*Label 2*/ GIMT_Encode4(562),
+// CHECK-NEXT: /*TargetOpcode::G_UNMERGE_VALUES*//*Label 0*/ GIMT_Encode4(478), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0),
+// CHECK-NEXT: /*TargetOpcode::G_FNEG*//*Label 1*/ GIMT_Encode4(530),
+// CHECK-NEXT: // Label 0: @478
+// CHECK-NEXT: GIM_Try, /*On fail goto*//*Label 3*/ GIMT_Encode4(529), // Rule ID 1 //
// CHECK-NEXT: GIM_CheckSimplePredicate, GIMT_Encode2(GICXXPred_Simple_IsRule1Enabled),
// CHECK-NEXT: GIM_CheckNumOperands, /*MI*/0, /*Expected*/3,
// CHECK-NEXT: // MIs[0] a
@@ -57,10 +57,10 @@ def MyCombiner: GICombiner<"GenMyCombiner", [
// CHECK-NEXT: GIR_ReplaceRegWithTempReg, /*OldInsnID*/0, /*OldOpIdx*/1, /*TempRegID*/0,
// CHECK-NEXT: GIR_EraseFromParent, /*InsnID*/0,
// CHECK-NEXT: GIR_Done,
-// CHECK-NEXT: // Label 3: @525
+// CHECK-NEXT: // Label 3: @529
// CHECK-NEXT: GIM_Reject,
-// CHECK-NEXT: // Label 1: @526
-// CHECK-NEXT: GIM_Try, /*On fail goto*//*Label 4*/ GIMT_Encode4(557), // Rule ID 0 //
+// CHECK-NEXT: // Label 1: @530
+// CHECK-NEXT: GIM_Try, /*On fail goto*//*Label 4*/ GIMT_Encode4(561), // Rule ID 0 //
// CHECK-NEXT: GIM_CheckSimplePredicate, GIMT_Encode2(GICXXPred_Simple_IsRule0Enabled),
// CHECK-NEXT: // MIs[0] dst
// CHECK-NEXT: // No operand predicates
@@ -75,10 +75,10 @@ def MyCombiner: GICombiner<"GenMyCombiner", [
// CHECK-NEXT: GIR_ReplaceReg, /*OldInsnID*/0, /*OldOpIdx*/0, /*NewInsnId*/1, /*NewOpIdx*/1,
// CHECK-NEXT: GIR_EraseFromParent, /*InsnID*/0,
// CHECK-NEXT: GIR_Done,
-// CHECK-NEXT: // Label 4: @557
+// CHECK-NEXT: // Label 4: @561
// CHECK-NEXT: GIM_Reject,
-// CHECK-NEXT: // Label 2: @558
+// CHECK-NEXT: // Label 2: @562
// CHECK-NEXT: GIM_Reject,
-// CHECK-NEXT: }; // Size: 559 bytes
+// CHECK-NEXT: }; // Size: 563 bytes
// CHECK-NEXT: return MatchTable0;
// CHECK-NEXT: }
diff --git a/llvm/test/TableGen/GlobalISelCombinerEmitter/match-table-imms.td b/llvm/test/TableGen/GlobalISelCombinerEmitter/match-table-imms.td
index d0c0eba..f0ca65a 100644
--- a/llvm/test/TableGen/GlobalISelCombinerEmitter/match-table-imms.td
+++ b/llvm/test/TableGen/GlobalISelCombinerEmitter/match-table-imms.td
@@ -32,14 +32,14 @@ def MyCombiner: GICombiner<"GenMyCombiner", [
CImmInstTest1
]>;
-// CHECK: const uint8_t *GenMyCombiner::getMatchTable() const {
+// CHECK: const uint8_t *GenMyCombiner::getMatchTable() const {
// CHECK-NEXT: constexpr static uint8_t MatchTable0[] = {
-// CHECK-NEXT: GIM_SwitchOpcode, /*MI*/0, /*[*/GIMT_Encode2(19), GIMT_Encode2(127), /*)*//*default:*//*Label 3*/ GIMT_Encode4(559),
-// CHECK-NEXT: /*TargetOpcode::COPY*//*Label 0*/ GIMT_Encode4(442), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0),
-// CHECK-NEXT: /*TargetOpcode::G_CONSTANT*//*Label 1*/ GIMT_Encode4(473), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0),
-// CHECK-NEXT: /*TargetOpcode::G_ZEXT*//*Label 2*/ GIMT_Encode4(519),
-// CHECK-NEXT: // Label 0: @442
-// CHECK-NEXT: GIM_Try, /*On fail goto*//*Label 4*/ GIMT_Encode4(472), // Rule ID 0 //
+// CHECK-NEXT: GIM_SwitchOpcode, /*MI*/0, /*[*/GIMT_Encode2(19), GIMT_Encode2(128), /*)*//*default:*//*Label 3*/ GIMT_Encode4(563),
+// CHECK-NEXT: /*TargetOpcode::COPY*//*Label 0*/ GIMT_Encode4(446), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0),
+// CHECK-NEXT: /*TargetOpcode::G_CONSTANT*//*Label 1*/ GIMT_Encode4(477), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0),
+// CHECK-NEXT: /*TargetOpcode::G_ZEXT*//*Label 2*/ GIMT_Encode4(523),
+// CHECK-NEXT: // Label 0: @446
+// CHECK-NEXT: GIM_Try, /*On fail goto*//*Label 4*/ GIMT_Encode4(476), // Rule ID 0 //
// CHECK-NEXT: GIM_CheckSimplePredicate, GIMT_Encode2(GICXXPred_Simple_IsRule0Enabled),
// CHECK-NEXT: GIM_CheckType, /*MI*/0, /*Op*/1, /*Type*/GILLT_s32,
// CHECK-NEXT: // MIs[0] a
@@ -51,10 +51,10 @@ def MyCombiner: GICombiner<"GenMyCombiner", [
// CHECK-NEXT: GIR_AddImm8, /*InsnID*/0, /*Imm*/0,
// CHECK-NEXT: GIR_EraseFromParent, /*InsnID*/0,
// CHECK-NEXT: GIR_Done,
-// CHECK-NEXT: // Label 4: @472
+// CHECK-NEXT: // Label 4: @476
// CHECK-NEXT: GIM_Reject,
-// CHECK-NEXT: // Label 1: @473
-// CHECK-NEXT: GIM_Try, /*On fail goto*//*Label 5*/ GIMT_Encode4(518), // Rule ID 2 //
+// CHECK-NEXT: // Label 1: @477
+// CHECK-NEXT: GIM_Try, /*On fail goto*//*Label 5*/ GIMT_Encode4(522), // Rule ID 2 //
// CHECK-NEXT: GIM_CheckSimplePredicate, GIMT_Encode2(GICXXPred_Simple_IsRule2Enabled),
// CHECK-NEXT: GIM_CheckType, /*MI*/0, /*Op*/1, /*Type*/GILLT_s32,
// CHECK-NEXT: // MIs[0] a
@@ -66,10 +66,10 @@ def MyCombiner: GICombiner<"GenMyCombiner", [
// CHECK-NEXT: GIR_AddCImm, /*InsnID*/0, /*Type*/GILLT_s32, /*Imm*/GIMT_Encode8(42),
// CHECK-NEXT: GIR_EraseFromParent, /*InsnID*/0,
// CHECK-NEXT: GIR_Done,
-// CHECK-NEXT: // Label 5: @518
+// CHECK-NEXT: // Label 5: @522
// CHECK-NEXT: GIM_Reject,
-// CHECK-NEXT: // Label 2: @519
-// CHECK-NEXT: GIM_Try, /*On fail goto*//*Label 6*/ GIMT_Encode4(558), // Rule ID 1 //
+// CHECK-NEXT: // Label 2: @523
+// CHECK-NEXT: GIM_Try, /*On fail goto*//*Label 6*/ GIMT_Encode4(562), // Rule ID 1 //
// CHECK-NEXT: GIM_CheckSimplePredicate, GIMT_Encode2(GICXXPred_Simple_IsRule1Enabled),
// CHECK-NEXT: // MIs[0] a
// CHECK-NEXT: // No operand predicates
@@ -83,10 +83,10 @@ def MyCombiner: GICombiner<"GenMyCombiner", [
// CHECK-NEXT: GIR_AddSimpleTempRegister, /*InsnID*/0, /*TempRegID*/0,
// CHECK-NEXT: GIR_EraseFromParent, /*InsnID*/0,
// CHECK-NEXT: GIR_Done,
-// CHECK-NEXT: // Label 6: @558
+// CHECK-NEXT: // Label 6: @562
// CHECK-NEXT: GIM_Reject,
-// CHECK-NEXT: // Label 3: @559
+// CHECK-NEXT: // Label 3: @563
// CHECK-NEXT: GIM_Reject,
-// CHECK-NEXT: }; // Size: 560 bytes
+// CHECK-NEXT: }; // Size: 564 bytes
// CHECK-NEXT: return MatchTable0;
// CHECK-NEXT: }
diff --git a/llvm/test/TableGen/GlobalISelCombinerEmitter/match-table-intrinsics.td b/llvm/test/TableGen/GlobalISelCombinerEmitter/match-table-intrinsics.td
index 94cc3e5..a446fb7 100644
--- a/llvm/test/TableGen/GlobalISelCombinerEmitter/match-table-intrinsics.td
+++ b/llvm/test/TableGen/GlobalISelCombinerEmitter/match-table-intrinsics.td
@@ -27,10 +27,9 @@ def MyCombiner: GICombiner<"GenMyCombiner", [
SpecialIntrins
]>;
-
-// CHECK: const uint8_t *GenMyCombiner::getMatchTable() const {
+// CHECK: const uint8_t *GenMyCombiner::getMatchTable() const {
// CHECK-NEXT: constexpr static uint8_t MatchTable0[] = {
-// CHECK-NEXT: GIM_SwitchOpcode, /*MI*/0, /*[*/GIMT_Encode2(114), GIMT_Encode2(116), /*)*//*default:*//*Label 2*/ GIMT_Encode4(132),
+// CHECK-NEXT: GIM_SwitchOpcode, /*MI*/0, /*[*/GIMT_Encode2(115), GIMT_Encode2(117), /*)*//*default:*//*Label 2*/ GIMT_Encode4(132),
// CHECK-NEXT: /*TargetOpcode::G_INTRINSIC*//*Label 0*/ GIMT_Encode4(18),
// CHECK-NEXT: /*TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS*//*Label 1*/ GIMT_Encode4(73),
// CHECK-NEXT: // Label 0: @18
diff --git a/llvm/test/TableGen/GlobalISelCombinerEmitter/match-table-patfrag-root.td b/llvm/test/TableGen/GlobalISelCombinerEmitter/match-table-patfrag-root.td
index fdcb31e..d3c202c 100644
--- a/llvm/test/TableGen/GlobalISelCombinerEmitter/match-table-patfrag-root.td
+++ b/llvm/test/TableGen/GlobalISelCombinerEmitter/match-table-patfrag-root.td
@@ -26,9 +26,9 @@ def MyCombiner: GICombiner<"GenMyCombiner", [
Test0
]>;
-// CHECK: const uint8_t *GenMyCombiner::getMatchTable() const {
+// CHECK: const uint8_t *GenMyCombiner::getMatchTable() const {
// CHECK-NEXT: constexpr static uint8_t MatchTable0[] = {
-// CHECK-NEXT: GIM_SwitchOpcode, /*MI*/0, /*[*/GIMT_Encode2(119), GIMT_Encode2(182), /*)*//*default:*//*Label 3*/ GIMT_Encode4(380),
+// CHECK-NEXT: GIM_SwitchOpcode, /*MI*/0, /*[*/GIMT_Encode2(120), GIMT_Encode2(183), /*)*//*default:*//*Label 3*/ GIMT_Encode4(380),
// CHECK-NEXT: /*TargetOpcode::G_TRUNC*//*Label 0*/ GIMT_Encode4(262), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0),
// CHECK-NEXT: /*TargetOpcode::G_ZEXT*//*Label 1*/ GIMT_Encode4(298), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0),
// CHECK-NEXT: /*TargetOpcode::G_FPEXT*//*Label 2*/ GIMT_Encode4(344),
diff --git a/llvm/test/TableGen/GlobalISelCombinerEmitter/match-table.td b/llvm/test/TableGen/GlobalISelCombinerEmitter/match-table.td
index 5ec44b5..57ad000 100644
--- a/llvm/test/TableGen/GlobalISelCombinerEmitter/match-table.td
+++ b/llvm/test/TableGen/GlobalISelCombinerEmitter/match-table.td
@@ -132,15 +132,15 @@ def MyCombiner: GICombiner<"GenMyCombiner", [
// Verify match table.
// CHECK: const uint8_t *GenMyCombiner::getMatchTable() const {
// CHECK-NEXT: constexpr static uint8_t MatchTable0[] = {
-// CHECK-NEXT: GIM_SwitchOpcode, /*MI*/0, /*[*/GIMT_Encode2(19), GIMT_Encode2(127), /*)*//*default:*//*Label 6*/ GIMT_Encode4(657),
-// CHECK-NEXT: /*TargetOpcode::COPY*//*Label 0*/ GIMT_Encode4(442), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0),
-// CHECK-NEXT: /*TargetOpcode::G_AND*//*Label 1*/ GIMT_Encode4(484), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0),
-// CHECK-NEXT: /*TargetOpcode::G_STORE*//*Label 2*/ GIMT_Encode4(537), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0),
-// CHECK-NEXT: /*TargetOpcode::G_TRUNC*//*Label 3*/ GIMT_Encode4(579), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0),
-// CHECK-NEXT: /*TargetOpcode::G_SEXT*//*Label 4*/ GIMT_Encode4(604), GIMT_Encode4(0),
-// CHECK-NEXT: /*TargetOpcode::G_ZEXT*//*Label 5*/ GIMT_Encode4(617),
-// CHECK-NEXT: // Label 0: @442
-// CHECK-NEXT: GIM_Try, /*On fail goto*//*Label 7*/ GIMT_Encode4(471), // Rule ID 4 //
+// CHECK-NEXT: GIM_SwitchOpcode, /*MI*/0, /*[*/GIMT_Encode2(19), GIMT_Encode2(128), /*)*//*default:*//*Label 6*/ GIMT_Encode4(661),
+// CHECK-NEXT: /*TargetOpcode::COPY*//*Label 0*/ GIMT_Encode4(446), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0),
+// CHECK-NEXT: /*TargetOpcode::G_AND*//*Label 1*/ GIMT_Encode4(488), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0),
+// CHECK-NEXT: /*TargetOpcode::G_STORE*//*Label 2*/ GIMT_Encode4(541), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0),
+// CHECK-NEXT: /*TargetOpcode::G_TRUNC*//*Label 3*/ GIMT_Encode4(583), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0),
+// CHECK-NEXT: /*TargetOpcode::G_SEXT*//*Label 4*/ GIMT_Encode4(608), GIMT_Encode4(0),
+// CHECK-NEXT: /*TargetOpcode::G_ZEXT*//*Label 5*/ GIMT_Encode4(621),
+// CHECK-NEXT: // Label 0: @446
+// CHECK-NEXT: GIM_Try, /*On fail goto*//*Label 7*/ GIMT_Encode4(475), // Rule ID 4 //
// CHECK-NEXT: GIM_CheckFeatures, GIMT_Encode2(GIFBS_HasAnswerToEverything),
// CHECK-NEXT: GIM_CheckSimplePredicate, GIMT_Encode2(GICXXPred_Simple_IsRule3Enabled),
// CHECK-NEXT: // MIs[0] a
@@ -155,8 +155,8 @@ def MyCombiner: GICombiner<"GenMyCombiner", [
// CHECK-NEXT: // Combiner Rule #3: InstTest1
// CHECK-NEXT: GIR_CustomAction, GIMT_Encode2(GICXXCustomAction_CombineApplyGICombiner0),
// CHECK-NEXT: GIR_Done,
-// CHECK-NEXT: // Label 7: @471
-// CHECK-NEXT: GIM_Try, /*On fail goto*//*Label 8*/ GIMT_Encode4(483), // Rule ID 3 //
+// CHECK-NEXT: // Label 7: @475
+// CHECK-NEXT: GIM_Try, /*On fail goto*//*Label 8*/ GIMT_Encode4(487), // Rule ID 3 //
// CHECK-NEXT: GIM_CheckSimplePredicate, GIMT_Encode2(GICXXPred_Simple_IsRule2Enabled),
// CHECK-NEXT: // MIs[0] a
// CHECK-NEXT: // No operand predicates
@@ -165,10 +165,10 @@ def MyCombiner: GICombiner<"GenMyCombiner", [
// CHECK-NEXT: // Combiner Rule #2: InstTest0
// CHECK-NEXT: GIR_CustomAction, GIMT_Encode2(GICXXCustomAction_CombineApplyGICombiner1),
// CHECK-NEXT: GIR_Done,
-// CHECK-NEXT: // Label 8: @483
+// CHECK-NEXT: // Label 8: @487
// CHECK-NEXT: GIM_Reject,
-// CHECK-NEXT: // Label 1: @484
-// CHECK-NEXT: GIM_Try, /*On fail goto*//*Label 9*/ GIMT_Encode4(536), // Rule ID 6 //
+// CHECK-NEXT: // Label 1: @488
+// CHECK-NEXT: GIM_Try, /*On fail goto*//*Label 9*/ GIMT_Encode4(540), // Rule ID 6 //
// CHECK-NEXT: GIM_CheckSimplePredicate, GIMT_Encode2(GICXXPred_Simple_IsRule5Enabled),
// CHECK-NEXT: GIM_CheckType, /*MI*/0, /*Op*/2, /*Type*/GILLT_s32,
// CHECK-NEXT: // MIs[0] dst
@@ -186,10 +186,10 @@ def MyCombiner: GICombiner<"GenMyCombiner", [
// CHECK-NEXT: GIR_Copy, /*NewInsnID*/0, /*OldInsnID*/1, /*OpIdx*/1, // z
// CHECK-NEXT: GIR_EraseFromParent, /*InsnID*/0,
// CHECK-NEXT: GIR_Done,
-// CHECK-NEXT: // Label 9: @536
+// CHECK-NEXT: // Label 9: @540
// CHECK-NEXT: GIM_Reject,
-// CHECK-NEXT: // Label 2: @537
-// CHECK-NEXT: GIM_Try, /*On fail goto*//*Label 10*/ GIMT_Encode4(578), // Rule ID 5 //
+// CHECK-NEXT: // Label 2: @541
+// CHECK-NEXT: GIM_Try, /*On fail goto*//*Label 10*/ GIMT_Encode4(582), // Rule ID 5 //
// CHECK-NEXT: GIM_CheckSimplePredicate, GIMT_Encode2(GICXXPred_Simple_IsRule4Enabled),
// CHECK-NEXT: // MIs[0] tmp
// CHECK-NEXT: GIM_RecordInsnIgnoreCopies, /*DefineMI*/1, /*MI*/0, /*OpIdx*/0, // MIs[1]
@@ -207,32 +207,32 @@ def MyCombiner: GICombiner<"GenMyCombiner", [
// CHECK-NEXT: GIR_CustomAction, GIMT_Encode2(GICXXCustomAction_CombineApplyGICombiner2),
// CHECK-NEXT: GIR_EraseFromParent, /*InsnID*/0,
// CHECK-NEXT: GIR_Done,
-// CHECK-NEXT: // Label 10: @578
+// CHECK-NEXT: // Label 10: @582
// CHECK-NEXT: GIM_Reject,
-// CHECK-NEXT: // Label 3: @579
-// CHECK-NEXT: GIM_Try, /*On fail goto*//*Label 11*/ GIMT_Encode4(591), // Rule ID 0 //
+// CHECK-NEXT: // Label 3: @583
+// CHECK-NEXT: GIM_Try, /*On fail goto*//*Label 11*/ GIMT_Encode4(595), // Rule ID 0 //
// CHECK-NEXT: GIM_CheckSimplePredicate, GIMT_Encode2(GICXXPred_Simple_IsRule0Enabled),
// CHECK-NEXT: // Combiner Rule #0: WipOpcodeTest0; wip_match_opcode 'G_TRUNC'
// CHECK-NEXT: GIR_CustomAction, GIMT_Encode2(GICXXCustomAction_CombineApplyGICombiner0),
// CHECK-NEXT: GIR_Done,
-// CHECK-NEXT: // Label 11: @591
-// CHECK-NEXT: GIM_Try, /*On fail goto*//*Label 12*/ GIMT_Encode4(603), // Rule ID 1 //
+// CHECK-NEXT: // Label 11: @595
+// CHECK-NEXT: GIM_Try, /*On fail goto*//*Label 12*/ GIMT_Encode4(607), // Rule ID 1 //
// CHECK-NEXT: GIM_CheckSimplePredicate, GIMT_Encode2(GICXXPred_Simple_IsRule1Enabled),
// CHECK-NEXT: // Combiner Rule #1: WipOpcodeTest1; wip_match_opcode 'G_TRUNC'
// CHECK-NEXT: GIR_CustomAction, GIMT_Encode2(GICXXCustomAction_CombineApplyGICombiner0),
// CHECK-NEXT: GIR_Done,
-// CHECK-NEXT: // Label 12: @603
+// CHECK-NEXT: // Label 12: @607
// CHECK-NEXT: GIM_Reject,
-// CHECK-NEXT: // Label 4: @604
-// CHECK-NEXT: GIM_Try, /*On fail goto*//*Label 13*/ GIMT_Encode4(616), // Rule ID 2 //
+// CHECK-NEXT: // Label 4: @608
+// CHECK-NEXT: GIM_Try, /*On fail goto*//*Label 13*/ GIMT_Encode4(620), // Rule ID 2 //
// CHECK-NEXT: GIM_CheckSimplePredicate, GIMT_Encode2(GICXXPred_Simple_IsRule1Enabled),
// CHECK-NEXT: // Combiner Rule #1: WipOpcodeTest1; wip_match_opcode 'G_SEXT'
// CHECK-NEXT: GIR_CustomAction, GIMT_Encode2(GICXXCustomAction_CombineApplyGICombiner0),
// CHECK-NEXT: GIR_Done,
-// CHECK-NEXT: // Label 13: @616
+// CHECK-NEXT: // Label 13: @620
// CHECK-NEXT: GIM_Reject,
-// CHECK-NEXT: // Label 5: @617
-// CHECK-NEXT: GIM_Try, /*On fail goto*//*Label 14*/ GIMT_Encode4(656), // Rule ID 7 //
+// CHECK-NEXT: // Label 5: @621
+// CHECK-NEXT: GIM_Try, /*On fail goto*//*Label 14*/ GIMT_Encode4(660), // Rule ID 7 //
// CHECK-NEXT: GIM_CheckSimplePredicate, GIMT_Encode2(GICXXPred_Simple_IsRule6Enabled),
// CHECK-NEXT: // MIs[0] dst
// CHECK-NEXT: // No operand predicates
@@ -247,10 +247,10 @@ def MyCombiner: GICombiner<"GenMyCombiner", [
// CHECK-NEXT: GIR_AddSimpleTempRegister, /*InsnID*/0, /*TempRegID*/0,
// CHECK-NEXT: GIR_EraseFromParent, /*InsnID*/0,
// CHECK-NEXT: GIR_Done,
-// CHECK-NEXT: // Label 14: @656
+// CHECK-NEXT: // Label 14: @660
// CHECK-NEXT: GIM_Reject,
-// CHECK-NEXT: // Label 6: @657
+// CHECK-NEXT: // Label 6: @661
// CHECK-NEXT: GIM_Reject,
-// CHECK-NEXT: }; // Size: 658 bytes
+// CHECK-NEXT: }; // Size: 662 bytes
// CHECK-NEXT: return MatchTable0;
// CHECK-NEXT: }
diff --git a/llvm/test/TableGen/GlobalISelEmitter.td b/llvm/test/TableGen/GlobalISelEmitter.td
index 3e65126..f79b792 100644
--- a/llvm/test/TableGen/GlobalISelEmitter.td
+++ b/llvm/test/TableGen/GlobalISelEmitter.td
@@ -518,7 +518,7 @@ def : Pat<(frag GPR32:$src1, complex:$src2, complex:$src3),
// R00O-NEXT: GIM_Reject,
// R00O: // Label [[DEFAULT_NUM]]: @[[DEFAULT]]
// R00O-NEXT: GIM_Reject,
-// R00O-NEXT: }; // Size: 2019 bytes
+// R00O-NEXT: }; // Size: 2023 bytes
def INSNBOB : I<(outs GPR32:$dst), (ins GPR32:$src1, GPR32:$src2, GPR32:$src3, GPR32:$src4),
[(set GPR32:$dst,
diff --git a/llvm/test/Transforms/CallSiteSplitting/callsite-split-or-phi.ll b/llvm/test/Transforms/CallSiteSplitting/callsite-split-or-phi.ll
index ee8ce72..6e57cc0 100644
--- a/llvm/test/Transforms/CallSiteSplitting/callsite-split-or-phi.ll
+++ b/llvm/test/Transforms/CallSiteSplitting/callsite-split-or-phi.ll
@@ -2,7 +2,7 @@
; RUN: opt < %s -passes='function(callsite-splitting)' -S | FileCheck %s
target datalayout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128"
-target triple = "aarch64-linaro-linux-gnueabi"
+target triple = "aarch64"
;CHECK-LABEL: @test_eq_eq
diff --git a/llvm/test/Transforms/CallSiteSplitting/callsite-split.ll b/llvm/test/Transforms/CallSiteSplitting/callsite-split.ll
index 78b8f5b..256261d 100644
--- a/llvm/test/Transforms/CallSiteSplitting/callsite-split.ll
+++ b/llvm/test/Transforms/CallSiteSplitting/callsite-split.ll
@@ -1,7 +1,7 @@
; RUN: opt < %s -passes='function(callsite-splitting),cgscc(inline),function(instcombine,jump-threading)' -S | FileCheck %s
target datalayout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128"
-target triple = "aarch64-linaro-linux-gnueabi"
+target triple = "aarch64"
%struct.bitmap = type { i32, ptr }
diff --git a/llvm/test/Transforms/CalledValuePropagation/simple-arguments.ll b/llvm/test/Transforms/CalledValuePropagation/simple-arguments.ll
index d9739af..150c56c 100644
--- a/llvm/test/Transforms/CalledValuePropagation/simple-arguments.ll
+++ b/llvm/test/Transforms/CalledValuePropagation/simple-arguments.ll
@@ -1,6 +1,6 @@
; RUN: opt -passes=called-value-propagation -S < %s | FileCheck %s
-target triple = "aarch64-unknown-linux-gnueabi"
+target triple = "aarch64"
; This test checks that we propagate the functions through arguments and attach
diff --git a/llvm/test/Transforms/CalledValuePropagation/simple-memory.ll b/llvm/test/Transforms/CalledValuePropagation/simple-memory.ll
index 899bed8..106ed84 100644
--- a/llvm/test/Transforms/CalledValuePropagation/simple-memory.ll
+++ b/llvm/test/Transforms/CalledValuePropagation/simple-memory.ll
@@ -1,6 +1,6 @@
; RUN: opt -passes=called-value-propagation -S < %s | FileCheck %s
-target triple = "aarch64-unknown-linux-gnueabi"
+target triple = "aarch64"
@global_function = internal unnamed_addr global ptr null, align 8
@global_array = common unnamed_addr global ptr null, align 8
diff --git a/llvm/test/Transforms/CalledValuePropagation/simple-select.ll b/llvm/test/Transforms/CalledValuePropagation/simple-select.ll
index e4d35b8..b2ea20a 100644
--- a/llvm/test/Transforms/CalledValuePropagation/simple-select.ll
+++ b/llvm/test/Transforms/CalledValuePropagation/simple-select.ll
@@ -1,6 +1,6 @@
; RUN: opt -passes=called-value-propagation -S < %s | FileCheck %s
-target triple = "aarch64-unknown-linux-gnueabi"
+target triple = "aarch64"
@global_function = internal unnamed_addr global ptr null, align 8
@global_scalar = internal unnamed_addr global i64 zeroinitializer
diff --git a/llvm/test/Transforms/ConstantHoisting/AArch64/const-hoist-gep.ll b/llvm/test/Transforms/ConstantHoisting/AArch64/const-hoist-gep.ll
index da0b2e6..f74dc3e 100644
--- a/llvm/test/Transforms/ConstantHoisting/AArch64/const-hoist-gep.ll
+++ b/llvm/test/Transforms/ConstantHoisting/AArch64/const-hoist-gep.ll
@@ -1,7 +1,7 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 2
; RUN: opt -passes=consthoist -consthoist-gep -S -o - %s | FileCheck %s
-target triple = "aarch64-none--musleabi"
+target triple = "aarch64"
%0 = type { %1, %2, [9 x i16], %6, %7 }
%1 = type { i32, i32, i32, i32, i32, i32, i16, i16, i8, i8, i16, i32, i32, i16, i8, i8 }
diff --git a/llvm/test/Transforms/CorrelatedValuePropagation/basic.ll b/llvm/test/Transforms/CorrelatedValuePropagation/basic.ll
index 93ed008..8dce9ef 100644
--- a/llvm/test/Transforms/CorrelatedValuePropagation/basic.ll
+++ b/llvm/test/Transforms/CorrelatedValuePropagation/basic.ll
@@ -442,7 +442,7 @@ define i32 @switch_range(i32 %cond) {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[S:%.*]] = urem i32 [[COND:%.*]], 3
; CHECK-NEXT: [[S1:%.*]] = add nuw nsw i32 [[S]], 1
-; CHECK-NEXT: switch i32 [[S1]], label [[DEFAULT_UNREACHABLE:%.*]] [
+; CHECK-NEXT: switch i32 [[S1]], label [[UNREACHABLE:%.*]] [
; CHECK-NEXT: i32 1, label [[EXIT1:%.*]]
; CHECK-NEXT: i32 2, label [[EXIT2:%.*]]
; CHECK-NEXT: i32 3, label [[EXIT1]]
@@ -451,8 +451,6 @@ define i32 @switch_range(i32 %cond) {
; CHECK-NEXT: ret i32 1
; CHECK: exit2:
; CHECK-NEXT: ret i32 2
-; CHECK: default.unreachable:
-; CHECK-NEXT: unreachable
; CHECK: unreachable:
; CHECK-NEXT: ret i32 0
;
@@ -515,9 +513,10 @@ define i8 @switch_defaultdest_multipleuse(i8 %t0) {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[O:%.*]] = or i8 [[T0:%.*]], 1
; CHECK-NEXT: [[R:%.*]] = srem i8 1, [[O]]
-; CHECK-NEXT: br label [[EXIT:%.*]]
-; CHECK: default.unreachable:
-; CHECK-NEXT: unreachable
+; CHECK-NEXT: switch i8 [[R]], label [[EXIT:%.*]] [
+; CHECK-NEXT: i8 0, label [[EXIT]]
+; CHECK-NEXT: i8 1, label [[EXIT]]
+; CHECK-NEXT: ]
; CHECK: exit:
; CHECK-NEXT: ret i8 0
;
diff --git a/llvm/test/Transforms/CorrelatedValuePropagation/switch.ll b/llvm/test/Transforms/CorrelatedValuePropagation/switch.ll
deleted file mode 100644
index a0794d5..0000000
--- a/llvm/test/Transforms/CorrelatedValuePropagation/switch.ll
+++ /dev/null
@@ -1,301 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
-; RUN: opt < %s -passes=correlated-propagation -S | FileCheck %s
-
-define i32 @test_unreachable_default(i32 noundef %num) {
-; CHECK-LABEL: define i32 @test_unreachable_default(
-; CHECK-SAME: i32 noundef [[NUM:%.*]]) {
-; CHECK-NEXT: entry:
-; CHECK-NEXT: [[SUB:%.*]] = add i32 [[NUM]], -120
-; CHECK-NEXT: [[CMP:%.*]] = icmp ult i32 [[SUB]], 3
-; CHECK-NEXT: [[COND:%.*]] = select i1 [[CMP]], i32 [[SUB]], i32 2
-; CHECK-NEXT: switch i32 [[COND]], label [[DEFAULT_UNREACHABLE:%.*]] [
-; CHECK-NEXT: i32 0, label [[SW_BB:%.*]]
-; CHECK-NEXT: i32 1, label [[SW_BB2:%.*]]
-; CHECK-NEXT: i32 2, label [[SW_BB4:%.*]]
-; CHECK-NEXT: ]
-; CHECK: sw.bb:
-; CHECK-NEXT: [[CALL:%.*]] = call i32 @call0()
-; CHECK-NEXT: br label [[CLEANUP:%.*]]
-; CHECK: sw.bb2:
-; CHECK-NEXT: [[CALL3:%.*]] = call i32 @call1()
-; CHECK-NEXT: br label [[CLEANUP]]
-; CHECK: sw.bb4:
-; CHECK-NEXT: [[CALL5:%.*]] = call i32 @call2()
-; CHECK-NEXT: br label [[CLEANUP]]
-; CHECK: default.unreachable:
-; CHECK-NEXT: unreachable
-; CHECK: sw.default:
-; CHECK-NEXT: [[CALL6:%.*]] = call i32 @call3()
-; CHECK-NEXT: br label [[CLEANUP]]
-; CHECK: cleanup:
-; CHECK-NEXT: [[RETVAL_0:%.*]] = phi i32 [ [[CALL6]], [[SW_DEFAULT:%.*]] ], [ [[CALL5]], [[SW_BB4]] ], [ [[CALL3]], [[SW_BB2]] ], [ [[CALL]], [[SW_BB]] ]
-; CHECK-NEXT: ret i32 [[RETVAL_0]]
-;
-entry:
- %sub = add i32 %num, -120
- %cmp = icmp ult i32 %sub, 3
- %cond = select i1 %cmp, i32 %sub, i32 2
- switch i32 %cond, label %sw.default [
- i32 0, label %sw.bb
- i32 1, label %sw.bb2
- i32 2, label %sw.bb4
- ]
-
-sw.bb:
- %call = call i32 @call0()
- br label %cleanup
-
-sw.bb2:
- %call3 = call i32 @call1()
- br label %cleanup
-
-sw.bb4:
- %call5 = call i32 @call2()
- br label %cleanup
-
-sw.default:
- %call6 = call i32 @call3()
- br label %cleanup
-
-cleanup:
- %retval.0 = phi i32 [ %call6, %sw.default ], [ %call5, %sw.bb4 ], [ %call3, %sw.bb2 ], [ %call, %sw.bb ]
- ret i32 %retval.0
-}
-
-define i32 @test_unreachable_default_shared_edge(i32 noundef %num) {
-; CHECK-LABEL: define i32 @test_unreachable_default_shared_edge(
-; CHECK-SAME: i32 noundef [[NUM:%.*]]) {
-; CHECK-NEXT: entry:
-; CHECK-NEXT: [[SUB:%.*]] = add i32 [[NUM]], -120
-; CHECK-NEXT: [[CMP:%.*]] = icmp ult i32 [[SUB]], 3
-; CHECK-NEXT: [[COND:%.*]] = select i1 [[CMP]], i32 [[SUB]], i32 2
-; CHECK-NEXT: switch i32 [[COND]], label [[DEFAULT_UNREACHABLE:%.*]] [
-; CHECK-NEXT: i32 0, label [[SW_BB:%.*]]
-; CHECK-NEXT: i32 1, label [[SW_BB2:%.*]]
-; CHECK-NEXT: i32 2, label [[SW_BB4:%.*]]
-; CHECK-NEXT: ]
-; CHECK: sw.bb:
-; CHECK-NEXT: [[CALL:%.*]] = call i32 @call0()
-; CHECK-NEXT: br label [[CLEANUP:%.*]]
-; CHECK: sw.bb2:
-; CHECK-NEXT: [[CALL3:%.*]] = call i32 @call1()
-; CHECK-NEXT: br label [[CLEANUP]]
-; CHECK: default.unreachable:
-; CHECK-NEXT: unreachable
-; CHECK: sw.bb4:
-; CHECK-NEXT: [[CALL5:%.*]] = call i32 @call4(i32 [[SUB]])
-; CHECK-NEXT: br label [[CLEANUP]]
-; CHECK: cleanup:
-; CHECK-NEXT: [[RETVAL_0:%.*]] = phi i32 [ [[CALL5]], [[SW_BB4]] ], [ [[CALL3]], [[SW_BB2]] ], [ [[CALL]], [[SW_BB]] ]
-; CHECK-NEXT: ret i32 [[RETVAL_0]]
-;
-entry:
- %sub = add i32 %num, -120
- %cmp = icmp ult i32 %sub, 3
- %cond = select i1 %cmp, i32 %sub, i32 2
- switch i32 %cond, label %sw.bb4 [
- i32 0, label %sw.bb
- i32 1, label %sw.bb2
- i32 2, label %sw.bb4
- ]
-
-sw.bb:
- %call = call i32 @call0()
- br label %cleanup
-
-sw.bb2:
- %call3 = call i32 @call1()
- br label %cleanup
-
-sw.bb4:
- %val = phi i32 [ %sub, %entry ], [ %sub, %entry ]
- %call5 = call i32 @call4(i32 %val)
- br label %cleanup
-
-cleanup:
- %retval.0 = phi i32 [ %call5, %sw.bb4 ], [ %call3, %sw.bb2 ], [ %call, %sw.bb ]
- ret i32 %retval.0
-}
-
-; Negative tests
-
-define i32 @test_reachable_default(i32 noundef %num) {
-; CHECK-LABEL: define i32 @test_reachable_default(
-; CHECK-SAME: i32 noundef [[NUM:%.*]]) {
-; CHECK-NEXT: entry:
-; CHECK-NEXT: [[SUB:%.*]] = add i32 [[NUM]], -120
-; CHECK-NEXT: [[CMP:%.*]] = icmp ult i32 [[SUB]], 3
-; CHECK-NEXT: [[COND:%.*]] = select i1 [[CMP]], i32 [[SUB]], i32 4
-; CHECK-NEXT: switch i32 [[COND]], label [[SW_DEFAULT:%.*]] [
-; CHECK-NEXT: i32 0, label [[SW_BB:%.*]]
-; CHECK-NEXT: i32 1, label [[SW_BB2:%.*]]
-; CHECK-NEXT: i32 2, label [[SW_BB4:%.*]]
-; CHECK-NEXT: ]
-; CHECK: sw.bb:
-; CHECK-NEXT: [[CALL:%.*]] = call i32 @call0()
-; CHECK-NEXT: br label [[CLEANUP:%.*]]
-; CHECK: sw.bb2:
-; CHECK-NEXT: [[CALL3:%.*]] = call i32 @call1()
-; CHECK-NEXT: br label [[CLEANUP]]
-; CHECK: sw.bb4:
-; CHECK-NEXT: [[CALL5:%.*]] = call i32 @call2()
-; CHECK-NEXT: br label [[CLEANUP]]
-; CHECK: sw.default:
-; CHECK-NEXT: [[CALL6:%.*]] = call i32 @call3()
-; CHECK-NEXT: br label [[CLEANUP]]
-; CHECK: cleanup:
-; CHECK-NEXT: [[RETVAL_0:%.*]] = phi i32 [ [[CALL6]], [[SW_DEFAULT]] ], [ [[CALL5]], [[SW_BB4]] ], [ [[CALL3]], [[SW_BB2]] ], [ [[CALL]], [[SW_BB]] ]
-; CHECK-NEXT: ret i32 [[RETVAL_0]]
-;
-entry:
- %sub = add i32 %num, -120
- %cmp = icmp ult i32 %sub, 3
- %cond = select i1 %cmp, i32 %sub, i32 4
- switch i32 %cond, label %sw.default [
- i32 0, label %sw.bb
- i32 1, label %sw.bb2
- i32 2, label %sw.bb4
- ]
-
-sw.bb:
- %call = call i32 @call0()
- br label %cleanup
-
-sw.bb2:
- %call3 = call i32 @call1()
- br label %cleanup
-
-sw.bb4:
- %call5 = call i32 @call2()
- br label %cleanup
-
-sw.default:
- %call6 = call i32 @call3()
- br label %cleanup
-
-cleanup:
- %retval.0 = phi i32 [ %call6, %sw.default ], [ %call5, %sw.bb4 ], [ %call3, %sw.bb2 ], [ %call, %sw.bb ]
- ret i32 %retval.0
-}
-
-define i32 @test_unreachable_default_cond_may_be_undef(i32 %num) {
-; CHECK-LABEL: define i32 @test_unreachable_default_cond_may_be_undef(
-; CHECK-SAME: i32 [[NUM:%.*]]) {
-; CHECK-NEXT: entry:
-; CHECK-NEXT: [[SUB:%.*]] = add i32 [[NUM]], -120
-; CHECK-NEXT: [[CMP:%.*]] = icmp ult i32 [[SUB]], 3
-; CHECK-NEXT: [[COND:%.*]] = select i1 [[CMP]], i32 [[SUB]], i32 2
-; CHECK-NEXT: switch i32 [[COND]], label [[SW_DEFAULT:%.*]] [
-; CHECK-NEXT: i32 0, label [[SW_BB:%.*]]
-; CHECK-NEXT: i32 1, label [[SW_BB2:%.*]]
-; CHECK-NEXT: i32 2, label [[SW_BB4:%.*]]
-; CHECK-NEXT: ]
-; CHECK: sw.bb:
-; CHECK-NEXT: [[CALL:%.*]] = call i32 @call0()
-; CHECK-NEXT: br label [[CLEANUP:%.*]]
-; CHECK: sw.bb2:
-; CHECK-NEXT: [[CALL3:%.*]] = call i32 @call1()
-; CHECK-NEXT: br label [[CLEANUP]]
-; CHECK: sw.bb4:
-; CHECK-NEXT: [[CALL5:%.*]] = call i32 @call2()
-; CHECK-NEXT: br label [[CLEANUP]]
-; CHECK: sw.default:
-; CHECK-NEXT: [[CALL6:%.*]] = call i32 @call3()
-; CHECK-NEXT: br label [[CLEANUP]]
-; CHECK: cleanup:
-; CHECK-NEXT: [[RETVAL_0:%.*]] = phi i32 [ [[CALL6]], [[SW_DEFAULT]] ], [ [[CALL5]], [[SW_BB4]] ], [ [[CALL3]], [[SW_BB2]] ], [ [[CALL]], [[SW_BB]] ]
-; CHECK-NEXT: ret i32 [[RETVAL_0]]
-;
-entry:
- %sub = add i32 %num, -120
- %cmp = icmp ult i32 %sub, 3
- %cond = select i1 %cmp, i32 %sub, i32 2
- switch i32 %cond, label %sw.default [
- i32 0, label %sw.bb
- i32 1, label %sw.bb2
- i32 2, label %sw.bb4
- ]
-
-sw.bb:
- %call = call i32 @call0()
- br label %cleanup
-
-sw.bb2:
- %call3 = call i32 @call1()
- br label %cleanup
-
-sw.bb4:
- %call5 = call i32 @call2()
- br label %cleanup
-
-sw.default:
- %call6 = call i32 @call3()
- br label %cleanup
-
-cleanup:
- %retval.0 = phi i32 [ %call6, %sw.default ], [ %call5, %sw.bb4 ], [ %call3, %sw.bb2 ], [ %call, %sw.bb ]
- ret i32 %retval.0
-}
-
-define i32 @test_default_is_already_unreachable(i32 %num) {
-; CHECK-LABEL: define i32 @test_default_is_already_unreachable(
-; CHECK-SAME: i32 [[NUM:%.*]]) {
-; CHECK-NEXT: entry:
-; CHECK-NEXT: [[SUB:%.*]] = add i32 [[NUM]], -120
-; CHECK-NEXT: [[CMP:%.*]] = icmp ult i32 [[SUB]], 3
-; CHECK-NEXT: [[COND:%.*]] = select i1 [[CMP]], i32 [[SUB]], i32 2
-; CHECK-NEXT: switch i32 [[COND]], label [[SW_DEFAULT:%.*]] [
-; CHECK-NEXT: i32 0, label [[SW_BB:%.*]]
-; CHECK-NEXT: i32 1, label [[SW_BB2:%.*]]
-; CHECK-NEXT: i32 2, label [[SW_BB4:%.*]]
-; CHECK-NEXT: ]
-; CHECK: sw.bb:
-; CHECK-NEXT: [[CALL:%.*]] = call i32 @call0()
-; CHECK-NEXT: br label [[CLEANUP:%.*]]
-; CHECK: sw.bb2:
-; CHECK-NEXT: [[CALL3:%.*]] = call i32 @call1()
-; CHECK-NEXT: br label [[CLEANUP]]
-; CHECK: sw.bb4:
-; CHECK-NEXT: [[CALL5:%.*]] = call i32 @call2()
-; CHECK-NEXT: br label [[CLEANUP]]
-; CHECK: sw.default:
-; CHECK-NEXT: unreachable
-; CHECK: cleanup:
-; CHECK-NEXT: [[RETVAL_0:%.*]] = phi i32 [ [[CALL5]], [[SW_BB4]] ], [ [[CALL3]], [[SW_BB2]] ], [ [[CALL]], [[SW_BB]] ]
-; CHECK-NEXT: ret i32 [[RETVAL_0]]
-;
-entry:
- %sub = add i32 %num, -120
- %cmp = icmp ult i32 %sub, 3
- %cond = select i1 %cmp, i32 %sub, i32 2
- switch i32 %cond, label %sw.default [
- i32 0, label %sw.bb
- i32 1, label %sw.bb2
- i32 2, label %sw.bb4
- ]
-
-sw.bb:
- %call = call i32 @call0()
- br label %cleanup
-
-sw.bb2:
- %call3 = call i32 @call1()
- br label %cleanup
-
-sw.bb4:
- %call5 = call i32 @call2()
- br label %cleanup
-
-sw.default:
- unreachable
-
-cleanup:
- %retval.0 = phi i32 [ %call5, %sw.bb4 ], [ %call3, %sw.bb2 ], [ %call, %sw.bb ]
- ret i32 %retval.0
-}
-
-declare i32 @call0()
-declare i32 @call1()
-declare i32 @call2()
-declare i32 @call3()
-declare i32 @call4(i32)
diff --git a/llvm/test/Transforms/DeadStoreElimination/noop-stores.ll b/llvm/test/Transforms/DeadStoreElimination/noop-stores.ll
index 3703b8d..9fc20d7 100644
--- a/llvm/test/Transforms/DeadStoreElimination/noop-stores.ll
+++ b/llvm/test/Transforms/DeadStoreElimination/noop-stores.ll
@@ -795,3 +795,360 @@ join:
store i8 %v, ptr %q, align 1
ret void
}
+
+; Dominating condition implies value already exists, optimize store
+define void @remove_tautological_store_eq(ptr %x) {
+; CHECK-LABEL: @remove_tautological_store_eq(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[VAL:%.*]] = load i32, ptr [[X:%.*]], align 4
+; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[VAL]], 4
+; CHECK-NEXT: br i1 [[CMP]], label [[IF_EQ:%.*]], label [[END:%.*]]
+; CHECK: if.eq:
+; CHECK-NEXT: br label [[END]]
+; CHECK: end:
+; CHECK-NEXT: ret void
+;
+entry:
+ %val = load i32, ptr %x, align 4
+ %cmp = icmp eq i32 %val, 4
+ br i1 %cmp, label %if.eq, label %end
+
+if.eq:
+ store i32 4, ptr %x, align 4
+ br label %end
+
+end:
+ ret void
+}
+
+; Dominating condition implies value already exists, optimize store
+define void @remove_tautological_store_var(ptr %x, ptr %y) {
+; CHECK-LABEL: @remove_tautological_store_var(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[VALX:%.*]] = load i32, ptr [[X:%.*]], align 4
+; CHECK-NEXT: [[VALY:%.*]] = load i32, ptr [[Y:%.*]], align 4
+; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[VALX]], [[VALY]]
+; CHECK-NEXT: br i1 [[CMP]], label [[IF_EQ:%.*]], label [[END:%.*]]
+; CHECK: if.eq:
+; CHECK-NEXT: br label [[END]]
+; CHECK: end:
+; CHECK-NEXT: ret void
+;
+entry:
+ %valx = load i32, ptr %x, align 4
+ %valy = load i32, ptr %y, align 4
+ %cmp = icmp eq i32 %valx, %valy
+ br i1 %cmp, label %if.eq, label %end
+
+if.eq:
+ store i32 %valy, ptr %x, align 4
+ br label %end
+
+end:
+ ret void
+}
+
+; Dominating condition implies value already exists, optimize store
+define void @remove_tautological_store_ne(ptr %x) {
+; CHECK-LABEL: @remove_tautological_store_ne(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[VAL:%.*]] = load i32, ptr [[X:%.*]], align 4
+; CHECK-NEXT: [[CMP:%.*]] = icmp ne i32 [[VAL]], 4
+; CHECK-NEXT: br i1 [[CMP]], label [[IF_NE:%.*]], label [[IF_ELSE:%.*]]
+; CHECK: if.ne:
+; CHECK-NEXT: br label [[END:%.*]]
+; CHECK: if.else:
+; CHECK-NEXT: br label [[END]]
+; CHECK: end:
+; CHECK-NEXT: ret void
+;
+entry:
+ %val = load i32, ptr %x, align 4
+ %cmp = icmp ne i32 %val, 4
+ br i1 %cmp, label %if.ne, label %if.else
+
+if.ne:
+ br label %end
+
+if.else:
+ store i32 4, ptr %x, align 4
+ br label %end
+
+end:
+ ret void
+}
+
+; Dominating condition implies value already exists, optimize store
+; Optimizes unordered atomic stores
+define void @remove_tautological_store_atomic_unordered(ptr %x) {
+; CHECK-LABEL: @remove_tautological_store_atomic_unordered(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[VAL:%.*]] = load i32, ptr [[X:%.*]], align 4
+; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[VAL]], 4
+; CHECK-NEXT: br i1 [[CMP]], label [[IF_EQ:%.*]], label [[END:%.*]]
+; CHECK: if.eq:
+; CHECK-NEXT: br label [[END]]
+; CHECK: end:
+; CHECK-NEXT: ret void
+;
+entry:
+ %val = load i32, ptr %x, align 4
+ %cmp = icmp eq i32 %val, 4
+ br i1 %cmp, label %if.eq, label %end
+
+if.eq:
+ store atomic i32 4, ptr %x unordered, align 4
+ br label %end
+
+end:
+ ret void
+}
+
+; Should not optimize ordered atomic stores
+define void @remove_tautological_store_atomic_monotonic(ptr %x) {
+; CHECK-LABEL: @remove_tautological_store_atomic_monotonic(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[VAL:%.*]] = load i32, ptr [[X:%.*]], align 4
+; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[VAL]], 4
+; CHECK-NEXT: br i1 [[CMP]], label [[IF_EQ:%.*]], label [[END:%.*]]
+; CHECK: if.eq:
+; CHECK-NEXT: store atomic i32 4, ptr [[X]] monotonic, align 4
+; CHECK-NEXT: br label [[END]]
+; CHECK: end:
+; CHECK-NEXT: ret void
+;
+entry:
+ %val = load i32, ptr %x, align 4
+ %cmp = icmp eq i32 %val, 4
+ br i1 %cmp, label %if.eq, label %end
+
+if.eq:
+ store atomic i32 4, ptr %x monotonic, align 4
+ br label %end
+
+end:
+ ret void
+}
+
+; Should not optimize since the store is in incorrect branch
+define void @remove_tautological_store_eq_wrong_branch(ptr %x, ptr %y) {
+; CHECK-LABEL: @remove_tautological_store_eq_wrong_branch(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[VALX:%.*]] = load i32, ptr [[X:%.*]], align 4
+; CHECK-NEXT: [[VALY:%.*]] = load i32, ptr [[Y:%.*]], align 4
+; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[VALX]], [[VALY]]
+; CHECK-NEXT: br i1 [[CMP]], label [[IF_EQ:%.*]], label [[END:%.*]]
+; CHECK: if.eq:
+; CHECK-NEXT: br label [[END]]
+; CHECK: end:
+; CHECK-NEXT: store i32 [[VALY]], ptr [[X]], align 4
+; CHECK-NEXT: ret void
+;
+entry:
+ %valx = load i32, ptr %x, align 4
+ %valy = load i32, ptr %y, align 4
+ %cmp = icmp eq i32 %valx, %valy
+ br i1 %cmp, label %if.eq, label %end
+
+if.eq:
+ br label %end
+
+end:
+ store i32 %valy, ptr %x, align 4
+ ret void
+}
+
+; Should not optimize since the store is in incorrect branch
+define void @remove_tautological_store_ne_wrong_branch(ptr %x) {
+; CHECK-LABEL: @remove_tautological_store_ne_wrong_branch(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[VAL:%.*]] = load i32, ptr [[X:%.*]], align 4
+; CHECK-NEXT: [[CMP:%.*]] = icmp ne i32 [[VAL]], 4
+; CHECK-NEXT: br i1 [[CMP]], label [[IF_NE:%.*]], label [[END:%.*]]
+; CHECK: if.ne:
+; CHECK-NEXT: store i32 4, ptr [[X]], align 4
+; CHECK-NEXT: br label [[END]]
+; CHECK: end:
+; CHECK-NEXT: ret void
+;
+entry:
+ %val = load i32, ptr %x, align 4
+ %cmp = icmp ne i32 %val, 4
+ br i1 %cmp, label %if.ne, label %end
+
+if.ne:
+ store i32 4, ptr %x, align 4
+ br label %end
+
+end:
+ ret void
+}
+
+; Dominating condition implies value already exists, optimize store
+; Should not optimize since we cannot determine if we should when both
+; branches are the same
+define void @remove_tautological_store_same_branch(ptr %x) {
+; CHECK-LABEL: @remove_tautological_store_same_branch(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[VAL:%.*]] = load i32, ptr [[X:%.*]], align 4
+; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[VAL]], 4
+; CHECK-NEXT: br i1 [[CMP]], label [[IF_EQ:%.*]], label [[IF_EQ]]
+; CHECK: if.eq:
+; CHECK-NEXT: store i32 4, ptr [[X]], align 4
+; CHECK-NEXT: ret void
+;
+entry:
+ %val = load i32, ptr %x, align 4
+ %cmp = icmp eq i32 %val, 4
+ br i1 %cmp, label %if.eq, label %if.eq
+
+if.eq:
+ store i32 4, ptr %x, align 4
+ ret void
+}
+
+; Dominating condition implies value already exists, optimize store
+; Should not optimize since value being stored is different from cond check
+define void @remove_tautological_store_wrong_value(ptr %x) {
+; CHECK-LABEL: @remove_tautological_store_wrong_value(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[VAL:%.*]] = load i32, ptr [[X:%.*]], align 4
+; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[VAL]], 4
+; CHECK-NEXT: br i1 [[CMP]], label [[IF_EQ:%.*]], label [[END:%.*]]
+; CHECK: if.eq:
+; CHECK-NEXT: store i32 5, ptr [[X]], align 4
+; CHECK-NEXT: br label [[END]]
+; CHECK: end:
+; CHECK-NEXT: ret void
+;
+entry:
+ %val = load i32, ptr %x, align 4
+ %cmp = icmp eq i32 %val, 4
+ br i1 %cmp, label %if.eq, label %end
+
+if.eq:
+ store i32 5, ptr %x, align 4
+ br label %end
+
+end:
+ ret void
+}
+
+; Should not optimize since there is a clobbering acc after load
+define void @remove_tautological_store_clobber(ptr %x) {
+; CHECK-LABEL: @remove_tautological_store_clobber(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[VAL:%.*]] = load i32, ptr [[X:%.*]], align 4
+; CHECK-NEXT: store i32 5, ptr [[X]], align 4
+; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[VAL]], 4
+; CHECK-NEXT: br i1 [[CMP]], label [[IF_EQ:%.*]], label [[END:%.*]]
+; CHECK: if.eq:
+; CHECK-NEXT: store i32 4, ptr [[X]], align 4
+; CHECK-NEXT: br label [[END]]
+; CHECK: end:
+; CHECK-NEXT: ret void
+;
+entry:
+ %val = load i32, ptr %x, align 4
+ store i32 5, ptr %x, align 4
+ %cmp = icmp eq i32 %val, 4
+ br i1 %cmp, label %if.eq, label %end
+
+if.eq:
+ store i32 4, ptr %x, align 4
+ br label %end
+
+end:
+ ret void
+}
+
+; Should not optimize since the condition does not dominate the store
+define void @remove_tautological_store_no_dom(ptr %x) {
+; CHECK-LABEL: @remove_tautological_store_no_dom(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[VAL:%.*]] = load i32, ptr [[X:%.*]], align 4
+; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[VAL]], 4
+; CHECK-NEXT: br i1 [[CMP]], label [[IF_EQ:%.*]], label [[IF_ELSE:%.*]]
+; CHECK: if.eq:
+; CHECK-NEXT: br label [[END:%.*]]
+; CHECK: if.else:
+; CHECK-NEXT: br label [[END]]
+; CHECK: end:
+; CHECK-NEXT: store i32 4, ptr [[X]], align 4
+; CHECK-NEXT: ret void
+;
+entry:
+ %val = load i32, ptr %x, align 4
+ store i32 5, ptr %x, align 4
+ %cmp = icmp eq i32 %val, 4
+ br i1 %cmp, label %if.eq, label %if.else
+
+if.eq:
+ br label %end
+
+if.else:
+ br label %end
+
+end:
+ store i32 4, ptr %x, align 4
+ ret void
+}
+
+; Should not optimize volatile stores
+define void @remove_tautological_store_volatile(ptr %x) {
+; CHECK-LABEL: @remove_tautological_store_volatile(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[VAL:%.*]] = load i32, ptr [[X:%.*]], align 4
+; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[VAL]], 4
+; CHECK-NEXT: br i1 [[CMP]], label [[IF_EQ:%.*]], label [[END:%.*]]
+; CHECK: if.eq:
+; CHECK-NEXT: store volatile i32 4, ptr [[X]], align 4
+; CHECK-NEXT: br label [[END]]
+; CHECK: end:
+; CHECK-NEXT: ret void
+;
+entry:
+ %val = load i32, ptr %x, align 4
+ %cmp = icmp eq i32 %val, 4
+ br i1 %cmp, label %if.eq, label %end
+
+if.eq:
+ store volatile i32 4, ptr %x, align 4
+ br label %end
+
+end:
+ ret void
+}
+
+; Should not optimize stores where the edge from branch inst to
+; conditional block does not dominate the conditional block.
+; (A conditional block post dominates the branch inst.)
+define void @remove_tautological_store_no_edge_domination(ptr %x) {
+; CHECK-LABEL: @remove_tautological_store_no_edge_domination(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[X1:%.*]] = load ptr, ptr [[X:%.*]], align 8
+; CHECK-NEXT: [[CMP:%.*]] = icmp eq ptr [[X1]], null
+; CHECK-NEXT: br i1 [[CMP]], label [[IF_EQ:%.*]], label [[IF_ELSE:%.*]]
+; CHECK: if.eq:
+; CHECK-NEXT: store ptr null, ptr [[X]], align 8
+; CHECK-NEXT: br label [[END:%.*]]
+; CHECK: if.else:
+; CHECK-NEXT: br label [[IF_EQ]]
+; CHECK: end:
+; CHECK-NEXT: ret void
+;
+entry:
+ %x1 = load ptr, ptr %x, align 8
+ %cmp = icmp eq ptr %x1, null
+ br i1 %cmp, label %if.eq, label %if.else
+
+if.eq:
+ store ptr null, ptr %x, align 8
+ br label %end
+
+if.else:
+ br label %if.eq
+
+end:
+ ret void
+}
diff --git a/llvm/test/Transforms/InstCombine/canonicalize-low-bit-mask-v4-and-icmp-eq-to-icmp-ule.ll b/llvm/test/Transforms/InstCombine/canonicalize-low-bit-mask-v4-and-icmp-eq-to-icmp-ule.ll
index 81f9fe4..d13129c 100644
--- a/llvm/test/Transforms/InstCombine/canonicalize-low-bit-mask-v4-and-icmp-eq-to-icmp-ule.ll
+++ b/llvm/test/Transforms/InstCombine/canonicalize-low-bit-mask-v4-and-icmp-eq-to-icmp-ule.ll
@@ -22,7 +22,7 @@ define i1 @p0(i8 %x, i8 %y) {
; CHECK-LABEL: @p0(
; CHECK-NEXT: [[T0:%.*]] = shl nsw i8 -1, [[Y:%.*]]
; CHECK-NEXT: call void @use8(i8 [[T0]])
-; CHECK-NEXT: [[T1:%.*]] = lshr exact i8 [[T0]], [[Y]]
+; CHECK-NEXT: [[T1:%.*]] = lshr i8 -1, [[Y]]
; CHECK-NEXT: [[RET:%.*]] = icmp uge i8 [[T1]], [[X:%.*]]
; CHECK-NEXT: ret i1 [[RET]]
;
@@ -42,7 +42,7 @@ define <2 x i1> @p1_vec(<2 x i8> %x, <2 x i8> %y) {
; CHECK-LABEL: @p1_vec(
; CHECK-NEXT: [[T0:%.*]] = shl nsw <2 x i8> <i8 -1, i8 -1>, [[Y:%.*]]
; CHECK-NEXT: call void @use2i8(<2 x i8> [[T0]])
-; CHECK-NEXT: [[T1:%.*]] = lshr exact <2 x i8> [[T0]], [[Y]]
+; CHECK-NEXT: [[T1:%.*]] = lshr <2 x i8> <i8 -1, i8 -1>, [[Y]]
; CHECK-NEXT: [[RET:%.*]] = icmp uge <2 x i8> [[T1]], [[X:%.*]]
; CHECK-NEXT: ret <2 x i1> [[RET]]
;
@@ -58,7 +58,7 @@ define <3 x i1> @p2_vec_undef0(<3 x i8> %x, <3 x i8> %y) {
; CHECK-LABEL: @p2_vec_undef0(
; CHECK-NEXT: [[T0:%.*]] = shl <3 x i8> <i8 -1, i8 undef, i8 -1>, [[Y:%.*]]
; CHECK-NEXT: call void @use3i8(<3 x i8> [[T0]])
-; CHECK-NEXT: [[T1:%.*]] = lshr exact <3 x i8> [[T0]], [[Y]]
+; CHECK-NEXT: [[T1:%.*]] = lshr <3 x i8> <i8 -1, i8 -1, i8 -1>, [[Y]]
; CHECK-NEXT: [[RET:%.*]] = icmp uge <3 x i8> [[T1]], [[X:%.*]]
; CHECK-NEXT: ret <3 x i1> [[RET]]
;
@@ -80,7 +80,7 @@ define i1 @c0(i8 %y) {
; CHECK-LABEL: @c0(
; CHECK-NEXT: [[T0:%.*]] = shl nsw i8 -1, [[Y:%.*]]
; CHECK-NEXT: call void @use8(i8 [[T0]])
-; CHECK-NEXT: [[T1:%.*]] = lshr exact i8 [[T0]], [[Y]]
+; CHECK-NEXT: [[T1:%.*]] = lshr i8 -1, [[Y]]
; CHECK-NEXT: [[X:%.*]] = call i8 @gen8()
; CHECK-NEXT: [[RET:%.*]] = icmp ule i8 [[X]], [[T1]]
; CHECK-NEXT: ret i1 [[RET]]
@@ -98,7 +98,7 @@ define i1 @c1(i8 %y) {
; CHECK-LABEL: @c1(
; CHECK-NEXT: [[T0:%.*]] = shl nsw i8 -1, [[Y:%.*]]
; CHECK-NEXT: call void @use8(i8 [[T0]])
-; CHECK-NEXT: [[T1:%.*]] = lshr exact i8 [[T0]], [[Y]]
+; CHECK-NEXT: [[T1:%.*]] = lshr i8 -1, [[Y]]
; CHECK-NEXT: [[X:%.*]] = call i8 @gen8()
; CHECK-NEXT: [[RET:%.*]] = icmp ule i8 [[X]], [[T1]]
; CHECK-NEXT: ret i1 [[RET]]
@@ -116,7 +116,7 @@ define i1 @c2(i8 %y) {
; CHECK-LABEL: @c2(
; CHECK-NEXT: [[T0:%.*]] = shl nsw i8 -1, [[Y:%.*]]
; CHECK-NEXT: call void @use8(i8 [[T0]])
-; CHECK-NEXT: [[T1:%.*]] = lshr exact i8 [[T0]], [[Y]]
+; CHECK-NEXT: [[T1:%.*]] = lshr i8 -1, [[Y]]
; CHECK-NEXT: [[X:%.*]] = call i8 @gen8()
; CHECK-NEXT: [[RET:%.*]] = icmp ule i8 [[X]], [[T1]]
; CHECK-NEXT: ret i1 [[RET]]
@@ -138,7 +138,7 @@ define i1 @oneuse0(i8 %x, i8 %y) {
; CHECK-LABEL: @oneuse0(
; CHECK-NEXT: [[T0:%.*]] = shl nsw i8 -1, [[Y:%.*]]
; CHECK-NEXT: call void @use8(i8 [[T0]])
-; CHECK-NEXT: [[T1:%.*]] = lshr exact i8 [[T0]], [[Y]]
+; CHECK-NEXT: [[T1:%.*]] = lshr i8 -1, [[Y]]
; CHECK-NEXT: call void @use8(i8 [[T1]])
; CHECK-NEXT: [[RET:%.*]] = icmp uge i8 [[T1]], [[X:%.*]]
; CHECK-NEXT: ret i1 [[RET]]
@@ -156,7 +156,7 @@ define i1 @oneuse1(i8 %x, i8 %y) {
; CHECK-LABEL: @oneuse1(
; CHECK-NEXT: [[T0:%.*]] = shl nsw i8 -1, [[Y:%.*]]
; CHECK-NEXT: call void @use8(i8 [[T0]])
-; CHECK-NEXT: [[T1:%.*]] = lshr exact i8 [[T0]], [[Y]]
+; CHECK-NEXT: [[T1:%.*]] = lshr i8 -1, [[Y]]
; CHECK-NEXT: [[T2:%.*]] = and i8 [[T1]], [[X:%.*]]
; CHECK-NEXT: call void @use8(i8 [[T2]])
; CHECK-NEXT: [[RET:%.*]] = icmp uge i8 [[T1]], [[X]]
@@ -175,7 +175,7 @@ define i1 @oneuse2(i8 %x, i8 %y) {
; CHECK-LABEL: @oneuse2(
; CHECK-NEXT: [[T0:%.*]] = shl nsw i8 -1, [[Y:%.*]]
; CHECK-NEXT: call void @use8(i8 [[T0]])
-; CHECK-NEXT: [[T1:%.*]] = lshr exact i8 [[T0]], [[Y]]
+; CHECK-NEXT: [[T1:%.*]] = lshr i8 -1, [[Y]]
; CHECK-NEXT: call void @use8(i8 [[T1]])
; CHECK-NEXT: [[T2:%.*]] = and i8 [[T1]], [[X:%.*]]
; CHECK-NEXT: call void @use8(i8 [[T2]])
@@ -200,7 +200,7 @@ define i1 @n0(i8 %x, i8 %y, i8 %notx) {
; CHECK-LABEL: @n0(
; CHECK-NEXT: [[T0:%.*]] = shl nsw i8 -1, [[Y:%.*]]
; CHECK-NEXT: call void @use8(i8 [[T0]])
-; CHECK-NEXT: [[T1:%.*]] = lshr exact i8 [[T0]], [[Y]]
+; CHECK-NEXT: [[T1:%.*]] = lshr i8 -1, [[Y]]
; CHECK-NEXT: [[T2:%.*]] = and i8 [[T1]], [[X:%.*]]
; CHECK-NEXT: [[RET:%.*]] = icmp eq i8 [[T2]], [[NOTX:%.*]]
; CHECK-NEXT: ret i1 [[RET]]
diff --git a/llvm/test/Transforms/InstCombine/canonicalize-low-bit-mask-v4-and-icmp-ne-to-icmp-ugt.ll b/llvm/test/Transforms/InstCombine/canonicalize-low-bit-mask-v4-and-icmp-ne-to-icmp-ugt.ll
index 321a115..a1517b36 100644
--- a/llvm/test/Transforms/InstCombine/canonicalize-low-bit-mask-v4-and-icmp-ne-to-icmp-ugt.ll
+++ b/llvm/test/Transforms/InstCombine/canonicalize-low-bit-mask-v4-and-icmp-ne-to-icmp-ugt.ll
@@ -22,7 +22,7 @@ define i1 @p0(i8 %x, i8 %y) {
; CHECK-LABEL: @p0(
; CHECK-NEXT: [[T0:%.*]] = shl nsw i8 -1, [[Y:%.*]]
; CHECK-NEXT: call void @use8(i8 [[T0]])
-; CHECK-NEXT: [[T1:%.*]] = lshr exact i8 [[T0]], [[Y]]
+; CHECK-NEXT: [[T1:%.*]] = lshr i8 -1, [[Y]]
; CHECK-NEXT: [[RET:%.*]] = icmp ult i8 [[T1]], [[X:%.*]]
; CHECK-NEXT: ret i1 [[RET]]
;
@@ -42,7 +42,7 @@ define <2 x i1> @p1_vec(<2 x i8> %x, <2 x i8> %y) {
; CHECK-LABEL: @p1_vec(
; CHECK-NEXT: [[T0:%.*]] = shl nsw <2 x i8> <i8 -1, i8 -1>, [[Y:%.*]]
; CHECK-NEXT: call void @use2i8(<2 x i8> [[T0]])
-; CHECK-NEXT: [[T1:%.*]] = lshr exact <2 x i8> [[T0]], [[Y]]
+; CHECK-NEXT: [[T1:%.*]] = lshr <2 x i8> <i8 -1, i8 -1>, [[Y]]
; CHECK-NEXT: [[RET:%.*]] = icmp ult <2 x i8> [[T1]], [[X:%.*]]
; CHECK-NEXT: ret <2 x i1> [[RET]]
;
@@ -58,7 +58,7 @@ define <3 x i1> @p2_vec_undef0(<3 x i8> %x, <3 x i8> %y) {
; CHECK-LABEL: @p2_vec_undef0(
; CHECK-NEXT: [[T0:%.*]] = shl <3 x i8> <i8 -1, i8 undef, i8 -1>, [[Y:%.*]]
; CHECK-NEXT: call void @use3i8(<3 x i8> [[T0]])
-; CHECK-NEXT: [[T1:%.*]] = lshr exact <3 x i8> [[T0]], [[Y]]
+; CHECK-NEXT: [[T1:%.*]] = lshr <3 x i8> <i8 -1, i8 -1, i8 -1>, [[Y]]
; CHECK-NEXT: [[RET:%.*]] = icmp ult <3 x i8> [[T1]], [[X:%.*]]
; CHECK-NEXT: ret <3 x i1> [[RET]]
;
@@ -80,7 +80,7 @@ define i1 @c0(i8 %y) {
; CHECK-LABEL: @c0(
; CHECK-NEXT: [[T0:%.*]] = shl nsw i8 -1, [[Y:%.*]]
; CHECK-NEXT: call void @use8(i8 [[T0]])
-; CHECK-NEXT: [[T1:%.*]] = lshr exact i8 [[T0]], [[Y]]
+; CHECK-NEXT: [[T1:%.*]] = lshr i8 -1, [[Y]]
; CHECK-NEXT: [[X:%.*]] = call i8 @gen8()
; CHECK-NEXT: [[RET:%.*]] = icmp ugt i8 [[X]], [[T1]]
; CHECK-NEXT: ret i1 [[RET]]
@@ -98,7 +98,7 @@ define i1 @c1(i8 %y) {
; CHECK-LABEL: @c1(
; CHECK-NEXT: [[T0:%.*]] = shl nsw i8 -1, [[Y:%.*]]
; CHECK-NEXT: call void @use8(i8 [[T0]])
-; CHECK-NEXT: [[T1:%.*]] = lshr exact i8 [[T0]], [[Y]]
+; CHECK-NEXT: [[T1:%.*]] = lshr i8 -1, [[Y]]
; CHECK-NEXT: [[X:%.*]] = call i8 @gen8()
; CHECK-NEXT: [[RET:%.*]] = icmp ugt i8 [[X]], [[T1]]
; CHECK-NEXT: ret i1 [[RET]]
@@ -116,7 +116,7 @@ define i1 @c2(i8 %y) {
; CHECK-LABEL: @c2(
; CHECK-NEXT: [[T0:%.*]] = shl nsw i8 -1, [[Y:%.*]]
; CHECK-NEXT: call void @use8(i8 [[T0]])
-; CHECK-NEXT: [[T1:%.*]] = lshr exact i8 [[T0]], [[Y]]
+; CHECK-NEXT: [[T1:%.*]] = lshr i8 -1, [[Y]]
; CHECK-NEXT: [[X:%.*]] = call i8 @gen8()
; CHECK-NEXT: [[RET:%.*]] = icmp ugt i8 [[X]], [[T1]]
; CHECK-NEXT: ret i1 [[RET]]
@@ -138,7 +138,7 @@ define i1 @oneuse0(i8 %x, i8 %y) {
; CHECK-LABEL: @oneuse0(
; CHECK-NEXT: [[T0:%.*]] = shl nsw i8 -1, [[Y:%.*]]
; CHECK-NEXT: call void @use8(i8 [[T0]])
-; CHECK-NEXT: [[T1:%.*]] = lshr exact i8 [[T0]], [[Y]]
+; CHECK-NEXT: [[T1:%.*]] = lshr i8 -1, [[Y]]
; CHECK-NEXT: call void @use8(i8 [[T1]])
; CHECK-NEXT: [[RET:%.*]] = icmp ult i8 [[T1]], [[X:%.*]]
; CHECK-NEXT: ret i1 [[RET]]
@@ -156,7 +156,7 @@ define i1 @oneuse1(i8 %x, i8 %y) {
; CHECK-LABEL: @oneuse1(
; CHECK-NEXT: [[T0:%.*]] = shl nsw i8 -1, [[Y:%.*]]
; CHECK-NEXT: call void @use8(i8 [[T0]])
-; CHECK-NEXT: [[T1:%.*]] = lshr exact i8 [[T0]], [[Y]]
+; CHECK-NEXT: [[T1:%.*]] = lshr i8 -1, [[Y]]
; CHECK-NEXT: [[T2:%.*]] = and i8 [[T1]], [[X:%.*]]
; CHECK-NEXT: call void @use8(i8 [[T2]])
; CHECK-NEXT: [[RET:%.*]] = icmp ult i8 [[T1]], [[X]]
@@ -175,7 +175,7 @@ define i1 @oneuse2(i8 %x, i8 %y) {
; CHECK-LABEL: @oneuse2(
; CHECK-NEXT: [[T0:%.*]] = shl nsw i8 -1, [[Y:%.*]]
; CHECK-NEXT: call void @use8(i8 [[T0]])
-; CHECK-NEXT: [[T1:%.*]] = lshr exact i8 [[T0]], [[Y]]
+; CHECK-NEXT: [[T1:%.*]] = lshr i8 -1, [[Y]]
; CHECK-NEXT: call void @use8(i8 [[T1]])
; CHECK-NEXT: [[T2:%.*]] = and i8 [[T1]], [[X:%.*]]
; CHECK-NEXT: call void @use8(i8 [[T2]])
@@ -200,7 +200,7 @@ define i1 @n0(i8 %x, i8 %y, i8 %notx) {
; CHECK-LABEL: @n0(
; CHECK-NEXT: [[T0:%.*]] = shl nsw i8 -1, [[Y:%.*]]
; CHECK-NEXT: call void @use8(i8 [[T0]])
-; CHECK-NEXT: [[T1:%.*]] = lshr exact i8 [[T0]], [[Y]]
+; CHECK-NEXT: [[T1:%.*]] = lshr i8 -1, [[Y]]
; CHECK-NEXT: [[T2:%.*]] = and i8 [[T1]], [[X:%.*]]
; CHECK-NEXT: [[RET:%.*]] = icmp ne i8 [[T2]], [[NOTX:%.*]]
; CHECK-NEXT: ret i1 [[RET]]
diff --git a/llvm/test/Transforms/InstCombine/fpclass-check-idioms.ll b/llvm/test/Transforms/InstCombine/fpclass-check-idioms.ll
index d2b4536..42c6506 100644
--- a/llvm/test/Transforms/InstCombine/fpclass-check-idioms.ll
+++ b/llvm/test/Transforms/InstCombine/fpclass-check-idioms.ll
@@ -14,6 +14,18 @@ define i1 @f32_fcnan_fcinf(float %a) {
ret i1 %cmp
}
+define i1 @f32_fcnan_fcinf_strictfp(float %a) strictfp {
+; CHECK-LABEL: define i1 @f32_fcnan_fcinf_strictfp(
+; CHECK-SAME: float [[A:%.*]]) #[[ATTR0:[0-9]+]] {
+; CHECK-NEXT: [[CMP:%.*]] = call i1 @llvm.is.fpclass.f32(float [[A]], i32 519)
+; CHECK-NEXT: ret i1 [[CMP]]
+;
+ %i32 = bitcast float %a to i32
+ %and = and i32 %i32, 2139095040
+ %cmp = icmp eq i32 %and, 2139095040
+ ret i1 %cmp
+}
+
define i1 @f32_not_fcnan_fcinf(float %a) {
; CHECK-LABEL: define i1 @f32_not_fcnan_fcinf(
; CHECK-SAME: float [[A:%.*]]) {
@@ -27,6 +39,18 @@ define i1 @f32_not_fcnan_fcinf(float %a) {
ret i1 %cmp
}
+define i1 @f32_not_fcnan_fcinf_strictfp(float %a) strictfp {
+; CHECK-LABEL: define i1 @f32_not_fcnan_fcinf_strictfp(
+; CHECK-SAME: float [[A:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[CMP:%.*]] = call i1 @llvm.is.fpclass.f32(float [[A]], i32 504)
+; CHECK-NEXT: ret i1 [[CMP]]
+;
+ %i32 = bitcast float %a to i32
+ %and = and i32 %i32, 2139095040
+ %cmp = icmp ne i32 %and, 2139095040
+ ret i1 %cmp
+}
+
define i1 @f64_fcnan_fcinf(double %a) {
; CHECK-LABEL: define i1 @f64_fcnan_fcinf(
; CHECK-SAME: double [[A:%.*]]) {
@@ -40,6 +64,18 @@ define i1 @f64_fcnan_fcinf(double %a) {
ret i1 %cmp
}
+define i1 @f64_fcnan_fcinf_strictfp(double %a) strictfp {
+; CHECK-LABEL: define i1 @f64_fcnan_fcinf_strictfp(
+; CHECK-SAME: double [[A:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[CMP:%.*]] = call i1 @llvm.is.fpclass.f64(double [[A]], i32 519)
+; CHECK-NEXT: ret i1 [[CMP]]
+;
+ %i64 = bitcast double %a to i64
+ %and = and i64 %i64, 9218868437227405312
+ %cmp = icmp eq i64 %and, 9218868437227405312
+ ret i1 %cmp
+}
+
define i1 @f32_fcinf(float %a) {
; CHECK-LABEL: define i1 @f32_fcinf(
; CHECK-SAME: float [[A:%.*]]) {
@@ -53,6 +89,18 @@ define i1 @f32_fcinf(float %a) {
ret i1 %cmp
}
+define i1 @f32_fcinf_strictfp(float %a) strictfp {
+; CHECK-LABEL: define i1 @f32_fcinf_strictfp(
+; CHECK-SAME: float [[A:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[CMP:%.*]] = call i1 @llvm.is.fpclass.f32(float [[A]], i32 516)
+; CHECK-NEXT: ret i1 [[CMP]]
+;
+ %i32 = bitcast float %a to i32
+ %and = and i32 %i32, 2147483647
+ %cmp = icmp eq i32 %and, 2139095040
+ ret i1 %cmp
+}
+
define i1 @f32_fcposinf(float %a) {
; CHECK-LABEL: define i1 @f32_fcposinf(
; CHECK-SAME: float [[A:%.*]]) {
@@ -64,6 +112,17 @@ define i1 @f32_fcposinf(float %a) {
ret i1 %cmp
}
+define i1 @f32_fcposinf_strictfp(float %a) strictfp {
+; CHECK-LABEL: define i1 @f32_fcposinf_strictfp(
+; CHECK-SAME: float [[A:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[CMP:%.*]] = call i1 @llvm.is.fpclass.f32(float [[A]], i32 512)
+; CHECK-NEXT: ret i1 [[CMP]]
+;
+ %i32 = bitcast float %a to i32
+ %cmp = icmp eq i32 %i32, 2139095040
+ ret i1 %cmp
+}
+
define i1 @f32_fcneginf(float %a) {
; CHECK-LABEL: define i1 @f32_fcneginf(
; CHECK-SAME: float [[A:%.*]]) {
@@ -75,6 +134,17 @@ define i1 @f32_fcneginf(float %a) {
ret i1 %cmp
}
+define i1 @f32_fcneginf_strictfp(float %a) strictfp {
+; CHECK-LABEL: define i1 @f32_fcneginf_strictfp(
+; CHECK-SAME: float [[A:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[CMP:%.*]] = call i1 @llvm.is.fpclass.f32(float [[A]], i32 4)
+; CHECK-NEXT: ret i1 [[CMP]]
+;
+ %i32 = bitcast float %a to i32
+ %cmp = icmp eq i32 %i32, 4286578688
+ ret i1 %cmp
+}
+
define i1 @f32_fcposzero(float %a) {
; CHECK-LABEL: define i1 @f32_fcposzero(
; CHECK-SAME: float [[A:%.*]]) {
@@ -86,6 +156,17 @@ define i1 @f32_fcposzero(float %a) {
ret i1 %cmp
}
+define i1 @f32_fcposzero_strictfp(float %a) strictfp {
+; CHECK-LABEL: define i1 @f32_fcposzero_strictfp(
+; CHECK-SAME: float [[A:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[CMP:%.*]] = call i1 @llvm.is.fpclass.f32(float [[A]], i32 64)
+; CHECK-NEXT: ret i1 [[CMP]]
+;
+ %i32 = bitcast float %a to i32
+ %cmp = icmp eq i32 %i32, 0
+ ret i1 %cmp
+}
+
define i1 @f32_fcnegzero(float %a) {
; CHECK-LABEL: define i1 @f32_fcnegzero(
; CHECK-SAME: float [[A:%.*]]) {
@@ -97,6 +178,17 @@ define i1 @f32_fcnegzero(float %a) {
ret i1 %cmp
}
+define i1 @f32_fcnegzero_strictfp(float %a) strictfp {
+; CHECK-LABEL: define i1 @f32_fcnegzero_strictfp(
+; CHECK-SAME: float [[A:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[CMP:%.*]] = call i1 @llvm.is.fpclass.f32(float [[A]], i32 32)
+; CHECK-NEXT: ret i1 [[CMP]]
+;
+ %i32 = bitcast float %a to i32
+ %cmp = icmp eq i32 %i32, 2147483648
+ ret i1 %cmp
+}
+
define i1 @f32_fczero(float %a) {
; CHECK-LABEL: define i1 @f32_fczero(
; CHECK-SAME: float [[A:%.*]]) {
@@ -109,6 +201,18 @@ define i1 @f32_fczero(float %a) {
ret i1 %cmp
}
+define i1 @f32_fczero_strictfp(float %a) strictfp {
+; CHECK-LABEL: define i1 @f32_fczero_strictfp(
+; CHECK-SAME: float [[A:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[CMP:%.*]] = call i1 @llvm.is.fpclass.f32(float [[A]], i32 96)
+; CHECK-NEXT: ret i1 [[CMP]]
+;
+ %i32 = bitcast float %a to i32
+ %and = and i32 %i32, 2147483647
+ %cmp = icmp eq i32 %and, 0
+ ret i1 %cmp
+}
+
; TODO: handle more fpclass check idioms
define i1 @f32_fcnan(float %a) {
; CHECK-LABEL: define i1 @f32_fcnan(
@@ -130,17 +234,24 @@ define i1 @f32_fcnan(float %a) {
ret i1 %res
}
-define i1 @f32_fcnan_fcinf_strictfp(float %a) strictfp {
-; CHECK-LABEL: define i1 @f32_fcnan_fcinf_strictfp(
-; CHECK-SAME: float [[A:%.*]]) #[[ATTR0:[0-9]+]] {
-; CHECK-NEXT: [[TMP1:%.*]] = call float @llvm.fabs.f32(float [[A]])
-; CHECK-NEXT: [[CMP:%.*]] = fcmp ueq float [[TMP1]], 0x7FF0000000000000
-; CHECK-NEXT: ret i1 [[CMP]]
+define i1 @f32_fcnan_strictfp(float %a) strictfp {
+; CHECK-LABEL: define i1 @f32_fcnan_strictfp(
+; CHECK-SAME: float [[A:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[I32:%.*]] = bitcast float [[A]] to i32
+; CHECK-NEXT: [[AND1:%.*]] = and i32 [[I32]], 2139095040
+; CHECK-NEXT: [[CMP1:%.*]] = icmp eq i32 [[AND1]], 2139095040
+; CHECK-NEXT: [[AND2:%.*]] = and i32 [[I32]], 8388607
+; CHECK-NEXT: [[CMP2:%.*]] = icmp ne i32 [[AND2]], 0
+; CHECK-NEXT: [[RES:%.*]] = and i1 [[CMP1]], [[CMP2]]
+; CHECK-NEXT: ret i1 [[RES]]
;
%i32 = bitcast float %a to i32
- %and = and i32 %i32, 2139095040
- %cmp = icmp eq i32 %and, 2139095040
- ret i1 %cmp
+ %and1 = and i32 %i32, 2139095040
+ %cmp1 = icmp eq i32 %and1, 2139095040
+ %and2 = and i32 %i32, 8388607
+ %cmp2 = icmp ne i32 %and2, 0
+ %res = and i1 %cmp1, %cmp2
+ ret i1 %res
}
define <2 x i1> @f32_fcnan_fcinf_vec(<2 x float> %a) {
@@ -156,6 +267,18 @@ define <2 x i1> @f32_fcnan_fcinf_vec(<2 x float> %a) {
ret <2 x i1> %cmp
}
+define <2 x i1> @f32_fcnan_fcinf_vec_strictfp(<2 x float> %a) strictfp {
+; CHECK-LABEL: define <2 x i1> @f32_fcnan_fcinf_vec_strictfp(
+; CHECK-SAME: <2 x float> [[A:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[CMP:%.*]] = call <2 x i1> @llvm.is.fpclass.v2f32(<2 x float> [[A]], i32 519)
+; CHECK-NEXT: ret <2 x i1> [[CMP]]
+;
+ %i32 = bitcast <2 x float> %a to <2 x i32>
+ %and = and <2 x i32> %i32, <i32 2139095040, i32 2139095040>
+ %cmp = icmp eq <2 x i32> %and, <i32 2139095040, i32 2139095040>
+ ret <2 x i1> %cmp
+}
+
define <2 x i1> @f32_fcinf_vec(<2 x float> %a) {
; CHECK-LABEL: define <2 x i1> @f32_fcinf_vec(
; CHECK-SAME: <2 x float> [[A:%.*]]) {
@@ -169,6 +292,18 @@ define <2 x i1> @f32_fcinf_vec(<2 x float> %a) {
ret <2 x i1> %cmp
}
+define <2 x i1> @f32_fcinf_vec_strictfp(<2 x float> %a) strictfp {
+; CHECK-LABEL: define <2 x i1> @f32_fcinf_vec_strictfp(
+; CHECK-SAME: <2 x float> [[A:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[CMP:%.*]] = call <2 x i1> @llvm.is.fpclass.v2f32(<2 x float> [[A]], i32 516)
+; CHECK-NEXT: ret <2 x i1> [[CMP]]
+;
+ %i32 = bitcast <2 x float> %a to <2 x i32>
+ %and = and <2 x i32> %i32, <i32 2147483647, i32 2147483647>
+ %cmp = icmp eq <2 x i32> %and, <i32 2139095040, i32 2139095040>
+ ret <2 x i1> %cmp
+}
+
; Negative tests
define i1 @f32_fcnan_fcinf_wrong_mask1(float %a) {
@@ -185,6 +320,20 @@ define i1 @f32_fcnan_fcinf_wrong_mask1(float %a) {
ret i1 %cmp
}
+define i1 @f32_fcnan_fcinf_wrong_mask1_strictfp(float %a) strictfp {
+; CHECK-LABEL: define i1 @f32_fcnan_fcinf_wrong_mask1_strictfp(
+; CHECK-SAME: float [[A:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[I32:%.*]] = bitcast float [[A]] to i32
+; CHECK-NEXT: [[AND:%.*]] = and i32 [[I32]], 2139095041
+; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[AND]], 2139095040
+; CHECK-NEXT: ret i1 [[CMP]]
+;
+ %i32 = bitcast float %a to i32
+ %and = and i32 %i32, 2139095041
+ %cmp = icmp eq i32 %and, 2139095040
+ ret i1 %cmp
+}
+
define i1 @f32_fcnan_fcinf_wrong_mask2(float %a) {
; CHECK-LABEL: define i1 @f32_fcnan_fcinf_wrong_mask2(
; CHECK-SAME: float [[A:%.*]]) {
@@ -199,6 +348,20 @@ define i1 @f32_fcnan_fcinf_wrong_mask2(float %a) {
ret i1 %cmp
}
+define i1 @f32_fcnan_fcinf_wrong_mask2_strictfp(float %a) strictfp {
+; CHECK-LABEL: define i1 @f32_fcnan_fcinf_wrong_mask2_strictfp(
+; CHECK-SAME: float [[A:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[I32:%.*]] = bitcast float [[A]] to i32
+; CHECK-NEXT: [[AND:%.*]] = and i32 [[I32]], 2139095040
+; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[AND]], 2130706432
+; CHECK-NEXT: ret i1 [[CMP]]
+;
+ %i32 = bitcast float %a to i32
+ %and = and i32 %i32, 2139095040
+ %cmp = icmp eq i32 %and, 2130706432
+ ret i1 %cmp
+}
+
define i1 @f64_fcnan_fcinf_wrong_mask3(double %a) {
; CHECK-LABEL: define i1 @f64_fcnan_fcinf_wrong_mask3(
; CHECK-SAME: double [[A:%.*]]) {
@@ -213,6 +376,20 @@ define i1 @f64_fcnan_fcinf_wrong_mask3(double %a) {
ret i1 %cmp
}
+define i1 @f64_fcnan_fcinf_wrong_mask3_strictfp(double %a) strictfp {
+; CHECK-LABEL: define i1 @f64_fcnan_fcinf_wrong_mask3_strictfp(
+; CHECK-SAME: double [[A:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[I64:%.*]] = bitcast double [[A]] to i64
+; CHECK-NEXT: [[AND:%.*]] = and i64 [[I64]], 2139095040
+; CHECK-NEXT: [[CMP:%.*]] = icmp eq i64 [[AND]], 2139095040
+; CHECK-NEXT: ret i1 [[CMP]]
+;
+ %i64 = bitcast double %a to i64
+ %and = and i64 %i64, 2139095040
+ %cmp = icmp eq i64 %and, 2139095040
+ ret i1 %cmp
+}
+
define i1 @f32_fcnan_fcinf_wrong_pred(float %a) {
; CHECK-LABEL: define i1 @f32_fcnan_fcinf_wrong_pred(
; CHECK-SAME: float [[A:%.*]]) {
@@ -226,6 +403,18 @@ define i1 @f32_fcnan_fcinf_wrong_pred(float %a) {
ret i1 %cmp
}
+define i1 @f32_fcnan_fcinf_wrong_pred_strictfp(float %a) strictfp {
+; CHECK-LABEL: define i1 @f32_fcnan_fcinf_wrong_pred_strictfp(
+; CHECK-SAME: float [[A:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[CMP:%.*]] = call i1 @llvm.is.fpclass.f32(float [[A]], i32 504)
+; CHECK-NEXT: ret i1 [[CMP]]
+;
+ %i32 = bitcast float %a to i32
+ %and = and i32 %i32, 2139095040
+ %cmp = icmp slt i32 %and, 2139095040
+ ret i1 %cmp
+}
+
define i1 @f32_fcposzero_wrong_pred(float %a) {
; CHECK-LABEL: define i1 @f32_fcposzero_wrong_pred(
; CHECK-SAME: float [[A:%.*]]) {
@@ -238,6 +427,18 @@ define i1 @f32_fcposzero_wrong_pred(float %a) {
ret i1 %cmp
}
+define i1 @f32_fcposzero_wrong_pred_strictfp(float %a) strictfp {
+; CHECK-LABEL: define i1 @f32_fcposzero_wrong_pred_strictfp(
+; CHECK-SAME: float [[A:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[I32:%.*]] = bitcast float [[A]] to i32
+; CHECK-NEXT: [[CMP:%.*]] = icmp slt i32 [[I32]], 0
+; CHECK-NEXT: ret i1 [[CMP]]
+;
+ %i32 = bitcast float %a to i32
+ %cmp = icmp slt i32 %i32, 0
+ ret i1 %cmp
+}
+
define i1 @f32_fcnan_fcinf_wrong_type1(<2 x float> %a) {
; CHECK-LABEL: define i1 @f32_fcnan_fcinf_wrong_type1(
; CHECK-SAME: <2 x float> [[A:%.*]]) {
@@ -252,6 +453,20 @@ define i1 @f32_fcnan_fcinf_wrong_type1(<2 x float> %a) {
ret i1 %cmp
}
+define i1 @f32_fcnan_fcinf_wrong_type1_strictfp(<2 x float> %a) strictfp {
+; CHECK-LABEL: define i1 @f32_fcnan_fcinf_wrong_type1_strictfp(
+; CHECK-SAME: <2 x float> [[A:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[I64:%.*]] = bitcast <2 x float> [[A]] to i64
+; CHECK-NEXT: [[AND:%.*]] = and i64 [[I64]], 2139095040
+; CHECK-NEXT: [[CMP:%.*]] = icmp eq i64 [[AND]], 2139095040
+; CHECK-NEXT: ret i1 [[CMP]]
+;
+ %i64 = bitcast <2 x float> %a to i64
+ %and = and i64 %i64, 2139095040
+ %cmp = icmp eq i64 %and, 2139095040
+ ret i1 %cmp
+}
+
define i1 @f32_fcposinf_wrong_type1(<2 x float> %a) {
; CHECK-LABEL: define i1 @f32_fcposinf_wrong_type1(
; CHECK-SAME: <2 x float> [[A:%.*]]) {
@@ -264,6 +479,18 @@ define i1 @f32_fcposinf_wrong_type1(<2 x float> %a) {
ret i1 %cmp
}
+define i1 @f32_fcposinf_wrong_type1_strictfp(<2 x float> %a) strictfp {
+; CHECK-LABEL: define i1 @f32_fcposinf_wrong_type1_strictfp(
+; CHECK-SAME: <2 x float> [[A:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[I64:%.*]] = bitcast <2 x float> [[A]] to i64
+; CHECK-NEXT: [[CMP:%.*]] = icmp eq i64 [[I64]], 2139095040
+; CHECK-NEXT: ret i1 [[CMP]]
+;
+ %i64 = bitcast <2 x float> %a to i64
+ %cmp = icmp eq i64 %i64, 2139095040
+ ret i1 %cmp
+}
+
define i1 @f32_fcnan_fcinf_wrong_type2(x86_fp80 %a) {
; CHECK-LABEL: define i1 @f32_fcnan_fcinf_wrong_type2(
; CHECK-SAME: x86_fp80 [[A:%.*]]) {
@@ -278,6 +505,20 @@ define i1 @f32_fcnan_fcinf_wrong_type2(x86_fp80 %a) {
ret i1 %cmp
}
+define i1 @f32_fcnan_fcinf_wrong_type2_strictfp(x86_fp80 %a) strictfp {
+; CHECK-LABEL: define i1 @f32_fcnan_fcinf_wrong_type2_strictfp(
+; CHECK-SAME: x86_fp80 [[A:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[I80:%.*]] = bitcast x86_fp80 [[A]] to i80
+; CHECK-NEXT: [[AND:%.*]] = and i80 [[I80]], 2139095040
+; CHECK-NEXT: [[CMP:%.*]] = icmp eq i80 [[AND]], 2139095040
+; CHECK-NEXT: ret i1 [[CMP]]
+;
+ %i80 = bitcast x86_fp80 %a to i80
+ %and = and i80 %i80, 2139095040
+ %cmp = icmp eq i80 %and, 2139095040
+ ret i1 %cmp
+}
+
define i1 @f32_fcposzero_wrong_type2(x86_fp80 %a) {
; CHECK-LABEL: define i1 @f32_fcposzero_wrong_type2(
; CHECK-SAME: x86_fp80 [[A:%.*]]) {
@@ -290,6 +531,18 @@ define i1 @f32_fcposzero_wrong_type2(x86_fp80 %a) {
ret i1 %cmp
}
+define i1 @f32_fcposzero_wrong_type2_strictfp(x86_fp80 %a) strictfp {
+; CHECK-LABEL: define i1 @f32_fcposzero_wrong_type2_strictfp(
+; CHECK-SAME: x86_fp80 [[A:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[I80:%.*]] = bitcast x86_fp80 [[A]] to i80
+; CHECK-NEXT: [[CMP:%.*]] = icmp eq i80 [[I80]], 0
+; CHECK-NEXT: ret i1 [[CMP]]
+;
+ %i80 = bitcast x86_fp80 %a to i80
+ %cmp = icmp eq i80 %i80, 0
+ ret i1 %cmp
+}
+
define i1 @f32_fcnan_fcinf_noimplicitfloat(float %a) #0 {
; CHECK-LABEL: define i1 @f32_fcnan_fcinf_noimplicitfloat(
; CHECK-SAME: float [[A:%.*]]) #[[ATTR1:[0-9]+]] {
@@ -304,6 +557,20 @@ define i1 @f32_fcnan_fcinf_noimplicitfloat(float %a) #0 {
ret i1 %cmp
}
+define i1 @f32_fcnan_fcinf_noimplicitfloat_strictfp(float %a) strictfp #0 {
+; CHECK-LABEL: define i1 @f32_fcnan_fcinf_noimplicitfloat_strictfp(
+; CHECK-SAME: float [[A:%.*]]) #[[ATTR2:[0-9]+]] {
+; CHECK-NEXT: [[I32:%.*]] = bitcast float [[A]] to i32
+; CHECK-NEXT: [[AND:%.*]] = and i32 [[I32]], 2139095040
+; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[AND]], 2139095040
+; CHECK-NEXT: ret i1 [[CMP]]
+;
+ %i32 = bitcast float %a to i32
+ %and = and i32 %i32, 2139095040
+ %cmp = icmp eq i32 %and, 2139095040
+ ret i1 %cmp
+}
+
define i1 @f32_fcposinf_noimplicitfloat(float %a) #0 {
; CHECK-LABEL: define i1 @f32_fcposinf_noimplicitfloat(
; CHECK-SAME: float [[A:%.*]]) #[[ATTR1]] {
@@ -316,6 +583,18 @@ define i1 @f32_fcposinf_noimplicitfloat(float %a) #0 {
ret i1 %cmp
}
+define i1 @f32_fcposinf_noimplicitfloat_strictfp(float %a) strictfp #0 {
+; CHECK-LABEL: define i1 @f32_fcposinf_noimplicitfloat_strictfp(
+; CHECK-SAME: float [[A:%.*]]) #[[ATTR2]] {
+; CHECK-NEXT: [[I32:%.*]] = bitcast float [[A]] to i32
+; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[I32]], 2139095040
+; CHECK-NEXT: ret i1 [[CMP]]
+;
+ %i32 = bitcast float %a to i32
+ %cmp = icmp eq i32 %i32, 2139095040
+ ret i1 %cmp
+}
+
define i1 @f32_fcposnan(float %a) {
; CHECK-LABEL: define i1 @f32_fcposnan(
; CHECK-SAME: float [[A:%.*]]) {
@@ -328,6 +607,18 @@ define i1 @f32_fcposnan(float %a) {
ret i1 %cmp
}
+define i1 @f32_fcposnan_strictfp(float %a) strictfp {
+; CHECK-LABEL: define i1 @f32_fcposnan_strictfp(
+; CHECK-SAME: float [[A:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[I32:%.*]] = bitcast float [[A]] to i32
+; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[I32]], 2139095041
+; CHECK-NEXT: ret i1 [[CMP]]
+;
+ %i32 = bitcast float %a to i32
+ %cmp = icmp eq i32 %i32, 2139095041
+ ret i1 %cmp
+}
+
define i1 @f32_fcposinf_multiuse(float %a) {
; CHECK-LABEL: define i1 @f32_fcposinf_multiuse(
; CHECK-SAME: float [[A:%.*]]) {
@@ -342,6 +633,20 @@ define i1 @f32_fcposinf_multiuse(float %a) {
ret i1 %cmp
}
+define i1 @f32_fcposinf_multiuse_strictfp(float %a) strictfp {
+; CHECK-LABEL: define i1 @f32_fcposinf_multiuse_strictfp(
+; CHECK-SAME: float [[A:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[I32:%.*]] = bitcast float [[A]] to i32
+; CHECK-NEXT: call void @usei32(i32 [[I32]])
+; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[I32]], 2139095040
+; CHECK-NEXT: ret i1 [[CMP]]
+;
+ %i32 = bitcast float %a to i32
+ call void @usei32(i32 %i32)
+ %cmp = icmp eq i32 %i32, 2139095040
+ ret i1 %cmp
+}
+
declare void @usei32(i32)
attributes #0 = { noimplicitfloat }
diff --git a/llvm/test/Transforms/InstCombine/fpclass-from-dom-cond.ll b/llvm/test/Transforms/InstCombine/fpclass-from-dom-cond.ll
new file mode 100644
index 0000000..d40cd7f
--- /dev/null
+++ b/llvm/test/Transforms/InstCombine/fpclass-from-dom-cond.ll
@@ -0,0 +1,437 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
+; RUN: opt -S -passes=instcombine < %s | FileCheck %s
+
+define i1 @test1(float %x) {
+; CHECK-LABEL: define i1 @test1(
+; CHECK-SAME: float [[X:%.*]]) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[COND:%.*]] = fcmp ueq float [[X]], 0.000000e+00
+; CHECK-NEXT: br i1 [[COND]], label [[IF_THEN:%.*]], label [[IF_ELSE:%.*]]
+; CHECK: if.then:
+; CHECK-NEXT: ret i1 false
+; CHECK: if.else:
+; CHECK-NEXT: [[RET:%.*]] = call i1 @llvm.is.fpclass.f32(float [[X]], i32 780)
+; CHECK-NEXT: ret i1 [[RET]]
+;
+entry:
+ %cond = fcmp ueq float %x, 0.000000e+00
+ br i1 %cond, label %if.then, label %if.else
+
+if.then:
+ ret i1 false
+
+if.else:
+ %ret = call i1 @llvm.is.fpclass.f32(float %x, i32 783)
+ ret i1 %ret
+}
+
+define i1 @test2(double %x) {
+; CHECK-LABEL: define i1 @test2(
+; CHECK-SAME: double [[X:%.*]]) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[CMP:%.*]] = fcmp olt double [[X]], 0x3EB0C6F7A0000000
+; CHECK-NEXT: br i1 [[CMP]], label [[IF_THEN:%.*]], label [[IF_END:%.*]]
+; CHECK: if.then:
+; CHECK-NEXT: ret i1 false
+; CHECK: if.end:
+; CHECK-NEXT: ret i1 false
+;
+entry:
+ %cmp = fcmp olt double %x, 0x3EB0C6F7A0000000
+ br i1 %cmp, label %if.then, label %if.end
+if.then:
+ ret i1 false
+if.end:
+ %cmp.i = fcmp oeq double %x, 0.000000e+00
+ ret i1 %cmp.i
+}
+
+define i1 @test3(float %x) {
+; CHECK-LABEL: define i1 @test3(
+; CHECK-SAME: float [[X:%.*]]) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[CMP:%.*]] = fcmp ogt float [[X]], 3.000000e+00
+; CHECK-NEXT: br i1 [[CMP]], label [[IF_THEN:%.*]], label [[IF_ELSE:%.*]]
+; CHECK: if.then:
+; CHECK-NEXT: [[RET:%.*]] = fcmp oeq float [[X]], 0x7FF0000000000000
+; CHECK-NEXT: ret i1 [[RET]]
+; CHECK: if.else:
+; CHECK-NEXT: ret i1 false
+;
+entry:
+ %cmp = fcmp ogt float %x, 3.000000e+00
+ br i1 %cmp, label %if.then, label %if.else
+if.then:
+ %abs = call float @llvm.fabs.f32(float %x)
+ %ret = fcmp oeq float %abs, 0x7FF0000000000000
+ ret i1 %ret
+if.else:
+ ret i1 false
+}
+
+define float @test4(float %x) {
+; CHECK-LABEL: define float @test4(
+; CHECK-SAME: float [[X:%.*]]) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[CMP:%.*]] = fcmp olt float [[X]], 0x3EB0C6F7A0000000
+; CHECK-NEXT: br i1 [[CMP]], label [[IF_THEN:%.*]], label [[IF_END:%.*]]
+; CHECK: if.then:
+; CHECK-NEXT: ret float 1.000000e+00
+; CHECK: if.end:
+; CHECK-NEXT: [[DIV:%.*]] = fdiv float 1.000000e+00, [[X]]
+; CHECK-NEXT: ret float [[DIV]]
+;
+entry:
+ %cmp = fcmp olt float %x, 0x3EB0C6F7A0000000
+ br i1 %cmp, label %if.then, label %if.end
+
+if.then:
+ ret float 1.0
+
+if.end:
+ %cmp.i = fcmp oeq float %x, 0.000000e+00
+ %div = fdiv float 1.000000e+00, %x
+ %ret = select i1 %cmp.i, float 1.000000e+00, float %div
+ ret float %ret
+}
+
+define i1 @test5(double %x, i1 %cond) {
+; CHECK-LABEL: define i1 @test5(
+; CHECK-SAME: double [[X:%.*]], i1 [[COND:%.*]]) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: br i1 [[COND]], label [[IF:%.*]], label [[EXIT:%.*]]
+; CHECK: if:
+; CHECK-NEXT: [[CMP:%.*]] = fcmp uno double [[X]], 0.000000e+00
+; CHECK-NEXT: br i1 [[CMP]], label [[IF_THEN:%.*]], label [[IF_END:%.*]]
+; CHECK: if.then:
+; CHECK-NEXT: ret i1 false
+; CHECK: if.end:
+; CHECK-NEXT: br label [[EXIT]]
+; CHECK: exit:
+; CHECK-NEXT: [[Y:%.*]] = phi double [ -1.000000e+00, [[ENTRY:%.*]] ], [ [[X]], [[IF_END]] ]
+; CHECK-NEXT: [[RET:%.*]] = tail call i1 @llvm.is.fpclass.f64(double [[Y]], i32 408)
+; CHECK-NEXT: ret i1 [[RET]]
+;
+entry:
+ br i1 %cond, label %if, label %exit
+if:
+ %cmp = fcmp uno double %x, 0.000000e+00
+ br i1 %cmp, label %if.then, label %if.end
+if.then:
+ ret i1 false
+if.end:
+ br label %exit
+exit:
+ %y = phi double [ -1.000000e+00, %entry ], [ %x, %if.end ]
+ %ret = tail call i1 @llvm.is.fpclass.f64(double %y, i32 411)
+ ret i1 %ret
+}
+
+define i1 @test6(double %x) {
+; CHECK-LABEL: define i1 @test6(
+; CHECK-SAME: double [[X:%.*]]) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[CMP:%.*]] = fcmp ogt double [[X]], 0.000000e+00
+; CHECK-NEXT: br i1 [[CMP]], label [[LAND_RHS:%.*]], label [[LAND_END:%.*]]
+; CHECK: land.rhs:
+; CHECK-NEXT: [[CMP_I:%.*]] = fcmp oeq double [[X]], 0x7FF0000000000000
+; CHECK-NEXT: br label [[LAND_END]]
+; CHECK: land.end:
+; CHECK-NEXT: [[RET:%.*]] = phi i1 [ false, [[ENTRY:%.*]] ], [ [[CMP_I]], [[LAND_RHS]] ]
+; CHECK-NEXT: ret i1 [[RET]]
+;
+entry:
+ %cmp = fcmp ogt double %x, 0.000000e+00
+ br i1 %cmp, label %land.rhs, label %land.end
+
+land.rhs:
+ %abs = tail call double @llvm.fabs.f64(double %x)
+ %and.i = bitcast double %abs to i64
+ %cmp.i = icmp eq i64 %and.i, 9218868437227405312
+ br label %land.end
+
+land.end:
+ %ret = phi i1 [ false, %entry ], [ %cmp.i, %land.rhs ]
+ ret i1 %ret
+}
+
+define i1 @test7(float %x) {
+; CHECK-LABEL: define i1 @test7(
+; CHECK-SAME: float [[X:%.*]]) {
+; CHECK-NEXT: [[COND:%.*]] = call i1 @llvm.is.fpclass.f32(float [[X]], i32 345)
+; CHECK-NEXT: br i1 [[COND]], label [[IF_THEN:%.*]], label [[IF_ELSE:%.*]]
+; CHECK: if.then:
+; CHECK-NEXT: [[RET1:%.*]] = call i1 @llvm.is.fpclass.f32(float [[X]], i32 328)
+; CHECK-NEXT: ret i1 [[RET1]]
+; CHECK: if.else:
+; CHECK-NEXT: [[RET2:%.*]] = call i1 @llvm.is.fpclass.f32(float [[X]], i32 128)
+; CHECK-NEXT: ret i1 [[RET2]]
+;
+ %cond = call i1 @llvm.is.fpclass.f32(float %x, i32 345)
+ br i1 %cond, label %if.then, label %if.else
+if.then:
+ %ret1 = call i1 @llvm.is.fpclass.f32(float %x, i32 456)
+ ret i1 %ret1
+if.else:
+ %ret2 = call i1 @llvm.is.fpclass.f32(float %x, i32 456)
+ ret i1 %ret2
+}
+
+; TODO: These two is.fpclass can be simplified.
+define i1 @test8(float %x) {
+; CHECK-LABEL: define i1 @test8(
+; CHECK-SAME: float [[X:%.*]]) {
+; CHECK-NEXT: [[ABS:%.*]] = call float @llvm.fabs.f32(float [[X]])
+; CHECK-NEXT: [[COND:%.*]] = fcmp oeq float [[ABS]], 0x7FF0000000000000
+; CHECK-NEXT: br i1 [[COND]], label [[IF_THEN:%.*]], label [[IF_ELSE:%.*]]
+; CHECK: if.then:
+; CHECK-NEXT: [[RET1:%.*]] = call i1 @llvm.is.fpclass.f32(float [[X]], i32 575)
+; CHECK-NEXT: ret i1 [[RET1]]
+; CHECK: if.else:
+; CHECK-NEXT: [[RET2:%.*]] = call i1 @llvm.is.fpclass.f32(float [[X]], i32 575)
+; CHECK-NEXT: ret i1 [[RET2]]
+;
+ %abs = call float @llvm.fabs.f32(float %x)
+ %cond = fcmp oeq float %abs, 0x7FF0000000000000
+ br i1 %cond, label %if.then, label %if.else
+if.then:
+ %ret1 = call i1 @llvm.is.fpclass.f32(float %x, i32 575)
+ ret i1 %ret1
+if.else:
+ %ret2 = call i1 @llvm.is.fpclass.f32(float %x, i32 575)
+ ret i1 %ret2
+}
+
+define i1 @test9(float %x) {
+; CHECK-LABEL: define i1 @test9(
+; CHECK-SAME: float [[X:%.*]]) {
+; CHECK-NEXT: [[COND:%.*]] = fcmp olt float [[X]], -1.000000e+00
+; CHECK-NEXT: br i1 [[COND]], label [[IF_THEN:%.*]], label [[IF_ELSE:%.*]]
+; CHECK: if.then:
+; CHECK-NEXT: ret i1 false
+; CHECK: if.else:
+; CHECK-NEXT: ret i1 false
+;
+ %cond = fcmp olt float %x, -1.0
+ br i1 %cond, label %if.then, label %if.else
+if.then:
+ %ret1 = fcmp oeq float %x, 0x7FF0000000000000
+ ret i1 %ret1
+if.else:
+ ret i1 false
+}
+
+define i1 @test10(float %x) {
+; CHECK-LABEL: define i1 @test10(
+; CHECK-SAME: float [[X:%.*]]) {
+; CHECK-NEXT: [[COND:%.*]] = fcmp olt float [[X]], -1.000000e+00
+; CHECK-NEXT: br i1 [[COND]], label [[IF_THEN:%.*]], label [[IF_ELSE:%.*]]
+; CHECK: if.then:
+; CHECK-NEXT: ret i1 false
+; CHECK: if.else:
+; CHECK-NEXT: ret i1 false
+;
+ %cond = fcmp olt float %x, -1.0
+ %neg = fneg float %x
+ br i1 %cond, label %if.then, label %if.else
+if.then:
+ %ret1 = fcmp oeq float %neg, 0xFFF0000000000000
+ ret i1 %ret1
+if.else:
+ ret i1 false
+}
+
+; TODO: handle and/or conditions
+define i1 @test11_and(float %x, i1 %cond2) {
+; CHECK-LABEL: define i1 @test11_and(
+; CHECK-SAME: float [[X:%.*]], i1 [[COND2:%.*]]) {
+; CHECK-NEXT: [[COND:%.*]] = fcmp olt float [[X]], -1.000000e+00
+; CHECK-NEXT: [[AND:%.*]] = and i1 [[COND]], [[COND2]]
+; CHECK-NEXT: br i1 [[AND]], label [[IF_THEN:%.*]], label [[IF_ELSE:%.*]]
+; CHECK: if.then:
+; CHECK-NEXT: [[RET1:%.*]] = fcmp oeq float [[X]], 0x7FF0000000000000
+; CHECK-NEXT: ret i1 [[RET1]]
+; CHECK: if.else:
+; CHECK-NEXT: ret i1 false
+;
+ %cond = fcmp olt float %x, -1.0
+ %neg = fneg float %x
+ %and = and i1 %cond, %cond2
+ br i1 %and, label %if.then, label %if.else
+if.then:
+ %ret1 = fcmp oeq float %neg, 0xFFF0000000000000
+ ret i1 %ret1
+if.else:
+ ret i1 false
+}
+
+; TODO: handle and/or conditions
+define i1 @test12_or(float %x, i1 %cond2) {
+; CHECK-LABEL: define i1 @test12_or(
+; CHECK-SAME: float [[X:%.*]], i1 [[COND2:%.*]]) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[COND:%.*]] = fcmp ueq float [[X]], 0.000000e+00
+; CHECK-NEXT: [[OR:%.*]] = or i1 [[COND]], [[COND2]]
+; CHECK-NEXT: br i1 [[OR]], label [[IF_THEN:%.*]], label [[IF_ELSE:%.*]]
+; CHECK: if.then:
+; CHECK-NEXT: ret i1 false
+; CHECK: if.else:
+; CHECK-NEXT: [[RET:%.*]] = call i1 @llvm.is.fpclass.f32(float [[X]], i32 783)
+; CHECK-NEXT: ret i1 [[RET]]
+;
+entry:
+ %cond = fcmp ueq float %x, 0.000000e+00
+ %or = or i1 %cond, %cond2
+ br i1 %or, label %if.then, label %if.else
+
+if.then:
+ ret i1 false
+
+if.else:
+ %ret = call i1 @llvm.is.fpclass.f32(float %x, i32 783)
+ ret i1 %ret
+}
+
+define i1 @test1_no_dominating(float %x, i1 %c) {
+; CHECK-LABEL: define i1 @test1_no_dominating(
+; CHECK-SAME: float [[X:%.*]], i1 [[C:%.*]]) {
+; CHECK-NEXT: entry0:
+; CHECK-NEXT: br i1 [[C]], label [[ENTRY:%.*]], label [[IF_ELSE:%.*]]
+; CHECK: entry:
+; CHECK-NEXT: [[COND:%.*]] = fcmp ueq float [[X]], 0.000000e+00
+; CHECK-NEXT: br i1 [[COND]], label [[IF_THEN:%.*]], label [[IF_ELSE]]
+; CHECK: if.then:
+; CHECK-NEXT: ret i1 false
+; CHECK: if.else:
+; CHECK-NEXT: [[RET:%.*]] = call i1 @llvm.is.fpclass.f32(float [[X]], i32 783)
+; CHECK-NEXT: ret i1 [[RET]]
+;
+entry0:
+ br i1 %c, label %entry, label %if.else
+
+entry:
+ %cond = fcmp ueq float %x, 0.000000e+00
+ br i1 %cond, label %if.then, label %if.else
+
+if.then:
+ ret i1 false
+
+if.else:
+ %ret = call i1 @llvm.is.fpclass.f32(float %x, i32 783)
+ ret i1 %ret
+}
+
+define float @test_signbit_check(float %x, i1 %cond) {
+; CHECK-LABEL: define float @test_signbit_check(
+; CHECK-SAME: float [[X:%.*]], i1 [[COND:%.*]]) {
+; CHECK-NEXT: [[I32:%.*]] = bitcast float [[X]] to i32
+; CHECK-NEXT: [[CMP:%.*]] = icmp slt i32 [[I32]], 0
+; CHECK-NEXT: br i1 [[CMP]], label [[IF_THEN1:%.*]], label [[IF_ELSE:%.*]]
+; CHECK: if.then1:
+; CHECK-NEXT: [[FNEG:%.*]] = fneg float [[X]]
+; CHECK-NEXT: br label [[IF_END:%.*]]
+; CHECK: if.else:
+; CHECK-NEXT: br i1 [[COND]], label [[IF_THEN2:%.*]], label [[IF_END]]
+; CHECK: if.then2:
+; CHECK-NEXT: br label [[IF_END]]
+; CHECK: if.end:
+; CHECK-NEXT: [[VALUE:%.*]] = phi float [ [[FNEG]], [[IF_THEN1]] ], [ [[X]], [[IF_THEN2]] ], [ [[X]], [[IF_ELSE]] ]
+; CHECK-NEXT: ret float [[VALUE]]
+;
+ %i32 = bitcast float %x to i32
+ %cmp = icmp slt i32 %i32, 0
+ br i1 %cmp, label %if.then1, label %if.else
+
+if.then1:
+ %fneg = fneg float %x
+ br label %if.end
+
+if.else:
+ br i1 %cond, label %if.then2, label %if.end
+
+if.then2:
+ br label %if.end
+
+if.end:
+ %value = phi float [ %fneg, %if.then1 ], [ %x, %if.then2 ], [ %x, %if.else ]
+ %ret = call float @llvm.fabs.f32(float %value)
+ ret float %ret
+}
+
+define float @test_signbit_check_fail(float %x, i1 %cond) {
+; CHECK-LABEL: define float @test_signbit_check_fail(
+; CHECK-SAME: float [[X:%.*]], i1 [[COND:%.*]]) {
+; CHECK-NEXT: [[I32:%.*]] = bitcast float [[X]] to i32
+; CHECK-NEXT: [[CMP:%.*]] = icmp slt i32 [[I32]], 0
+; CHECK-NEXT: br i1 [[CMP]], label [[IF_THEN1:%.*]], label [[IF_ELSE:%.*]]
+; CHECK: if.then1:
+; CHECK-NEXT: [[FNEG:%.*]] = fneg float [[X]]
+; CHECK-NEXT: br label [[IF_END:%.*]]
+; CHECK: if.else:
+; CHECK-NEXT: br i1 [[COND]], label [[IF_THEN2:%.*]], label [[IF_END]]
+; CHECK: if.then2:
+; CHECK-NEXT: [[FNEG2:%.*]] = fneg float [[X]]
+; CHECK-NEXT: br label [[IF_END]]
+; CHECK: if.end:
+; CHECK-NEXT: [[VALUE:%.*]] = phi float [ [[FNEG]], [[IF_THEN1]] ], [ [[FNEG2]], [[IF_THEN2]] ], [ [[X]], [[IF_ELSE]] ]
+; CHECK-NEXT: [[RET:%.*]] = call float @llvm.fabs.f32(float [[VALUE]])
+; CHECK-NEXT: ret float [[RET]]
+;
+ %i32 = bitcast float %x to i32
+ %cmp = icmp slt i32 %i32, 0
+ br i1 %cmp, label %if.then1, label %if.else
+
+if.then1:
+ %fneg = fneg float %x
+ br label %if.end
+
+if.else:
+ br i1 %cond, label %if.then2, label %if.end
+
+if.then2:
+ %fneg2 = fneg float %x
+ br label %if.end
+
+if.end:
+ %value = phi float [ %fneg, %if.then1 ], [ %fneg2, %if.then2 ], [ %x, %if.else ]
+ %ret = call float @llvm.fabs.f32(float %value)
+ ret float %ret
+}
+
+define <2 x float> @test_signbit_check_wrong_type(<2 x float> %x, i1 %cond) {
+; CHECK-LABEL: define <2 x float> @test_signbit_check_wrong_type(
+; CHECK-SAME: <2 x float> [[X:%.*]], i1 [[COND:%.*]]) {
+; CHECK-NEXT: [[I32:%.*]] = bitcast <2 x float> [[X]] to i64
+; CHECK-NEXT: [[CMP:%.*]] = icmp slt i64 [[I32]], 0
+; CHECK-NEXT: br i1 [[CMP]], label [[IF_THEN1:%.*]], label [[IF_ELSE:%.*]]
+; CHECK: if.then1:
+; CHECK-NEXT: [[FNEG:%.*]] = fneg <2 x float> [[X]]
+; CHECK-NEXT: br label [[IF_END:%.*]]
+; CHECK: if.else:
+; CHECK-NEXT: br i1 [[COND]], label [[IF_THEN2:%.*]], label [[IF_END]]
+; CHECK: if.then2:
+; CHECK-NEXT: br label [[IF_END]]
+; CHECK: if.end:
+; CHECK-NEXT: [[VALUE:%.*]] = phi <2 x float> [ [[FNEG]], [[IF_THEN1]] ], [ [[X]], [[IF_THEN2]] ], [ [[X]], [[IF_ELSE]] ]
+; CHECK-NEXT: [[RET:%.*]] = call <2 x float> @llvm.fabs.v2f32(<2 x float> [[VALUE]])
+; CHECK-NEXT: ret <2 x float> [[RET]]
+;
+ %i32 = bitcast <2 x float> %x to i64
+ %cmp = icmp slt i64 %i32, 0
+ br i1 %cmp, label %if.then1, label %if.else
+
+if.then1:
+ %fneg = fneg <2 x float> %x
+ br label %if.end
+
+if.else:
+ br i1 %cond, label %if.then2, label %if.end
+
+if.then2:
+ br label %if.end
+
+if.end:
+ %value = phi <2 x float> [ %fneg, %if.then1 ], [ %x, %if.then2 ], [ %x, %if.else ]
+ %ret = call <2 x float> @llvm.fabs.v2f32(<2 x float> %value)
+ ret <2 x float> %ret
+}
diff --git a/llvm/test/Transforms/InstCombine/partally-redundant-left-shift-input-masking-after-truncation-variant-d.ll b/llvm/test/Transforms/InstCombine/partally-redundant-left-shift-input-masking-after-truncation-variant-d.ll
index 6b7061f..4887385 100644
--- a/llvm/test/Transforms/InstCombine/partally-redundant-left-shift-input-masking-after-truncation-variant-d.ll
+++ b/llvm/test/Transforms/InstCombine/partally-redundant-left-shift-input-masking-after-truncation-variant-d.ll
@@ -18,7 +18,7 @@ define i32 @t0_basic(i64 %x, i32 %nbits) {
; CHECK-LABEL: @t0_basic(
; CHECK-NEXT: [[T0:%.*]] = zext i32 [[NBITS:%.*]] to i64
; CHECK-NEXT: [[T1:%.*]] = shl nsw i64 -1, [[T0]]
-; CHECK-NEXT: [[T2:%.*]] = lshr exact i64 [[T1]], [[T0]]
+; CHECK-NEXT: [[T2:%.*]] = lshr i64 -1, [[T0]]
; CHECK-NEXT: [[T3:%.*]] = add i32 [[NBITS]], -33
; CHECK-NEXT: call void @use64(i64 [[T0]])
; CHECK-NEXT: call void @use64(i64 [[T1]])
@@ -54,7 +54,7 @@ define <8 x i32> @t1_vec_splat(<8 x i64> %x, <8 x i32> %nbits) {
; CHECK-LABEL: @t1_vec_splat(
; CHECK-NEXT: [[T0:%.*]] = zext <8 x i32> [[NBITS:%.*]] to <8 x i64>
; CHECK-NEXT: [[T1:%.*]] = shl nsw <8 x i64> <i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1>, [[T0]]
-; CHECK-NEXT: [[T2:%.*]] = lshr exact <8 x i64> [[T1]], [[T0]]
+; CHECK-NEXT: [[T2:%.*]] = lshr <8 x i64> <i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1>, [[T0]]
; CHECK-NEXT: [[T3:%.*]] = add <8 x i32> [[NBITS]], <i32 -33, i32 -33, i32 -33, i32 -33, i32 -33, i32 -33, i32 -33, i32 -33>
; CHECK-NEXT: call void @use8xi64(<8 x i64> [[T0]])
; CHECK-NEXT: call void @use8xi64(<8 x i64> [[T1]])
@@ -85,7 +85,7 @@ define <8 x i32> @t2_vec_splat_undef(<8 x i64> %x, <8 x i32> %nbits) {
; CHECK-LABEL: @t2_vec_splat_undef(
; CHECK-NEXT: [[T0:%.*]] = zext <8 x i32> [[NBITS:%.*]] to <8 x i64>
; CHECK-NEXT: [[T1:%.*]] = shl <8 x i64> <i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 undef, i64 -1>, [[T0]]
-; CHECK-NEXT: [[T2:%.*]] = lshr exact <8 x i64> [[T1]], [[T0]]
+; CHECK-NEXT: [[T2:%.*]] = lshr <8 x i64> <i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1>, [[T0]]
; CHECK-NEXT: [[T3:%.*]] = add <8 x i32> [[NBITS]], <i32 -33, i32 -33, i32 -33, i32 -33, i32 -33, i32 -33, i32 undef, i32 -33>
; CHECK-NEXT: call void @use8xi64(<8 x i64> [[T0]])
; CHECK-NEXT: call void @use8xi64(<8 x i64> [[T1]])
@@ -116,7 +116,7 @@ define <8 x i32> @t3_vec_nonsplat(<8 x i64> %x, <8 x i32> %nbits) {
; CHECK-LABEL: @t3_vec_nonsplat(
; CHECK-NEXT: [[T0:%.*]] = zext <8 x i32> [[NBITS:%.*]] to <8 x i64>
; CHECK-NEXT: [[T1:%.*]] = shl <8 x i64> <i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 undef, i64 -1>, [[T0]]
-; CHECK-NEXT: [[T2:%.*]] = lshr exact <8 x i64> [[T1]], [[T0]]
+; CHECK-NEXT: [[T2:%.*]] = lshr <8 x i64> <i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1>, [[T0]]
; CHECK-NEXT: [[T3:%.*]] = add <8 x i32> [[NBITS]], <i32 -64, i32 -63, i32 -33, i32 -32, i32 63, i32 64, i32 undef, i32 65>
; CHECK-NEXT: call void @use8xi64(<8 x i64> [[T0]])
; CHECK-NEXT: call void @use8xi64(<8 x i64> [[T1]])
@@ -149,7 +149,7 @@ define i32 @n4_extrause0(i64 %x, i32 %nbits) {
; CHECK-LABEL: @n4_extrause0(
; CHECK-NEXT: [[T0:%.*]] = zext i32 [[NBITS:%.*]] to i64
; CHECK-NEXT: [[T1:%.*]] = shl nsw i64 -1, [[T0]]
-; CHECK-NEXT: [[T2:%.*]] = lshr exact i64 [[T1]], [[T0]]
+; CHECK-NEXT: [[T2:%.*]] = lshr i64 -1, [[T0]]
; CHECK-NEXT: [[T3:%.*]] = add i32 [[NBITS]], -33
; CHECK-NEXT: call void @use64(i64 [[T0]])
; CHECK-NEXT: call void @use64(i64 [[T1]])
@@ -182,7 +182,7 @@ define i32 @n5_extrause1(i64 %x, i32 %nbits) {
; CHECK-LABEL: @n5_extrause1(
; CHECK-NEXT: [[T0:%.*]] = zext i32 [[NBITS:%.*]] to i64
; CHECK-NEXT: [[T1:%.*]] = shl nsw i64 -1, [[T0]]
-; CHECK-NEXT: [[T2:%.*]] = lshr exact i64 [[T1]], [[T0]]
+; CHECK-NEXT: [[T2:%.*]] = lshr i64 -1, [[T0]]
; CHECK-NEXT: [[T3:%.*]] = add i32 [[NBITS]], -33
; CHECK-NEXT: call void @use64(i64 [[T0]])
; CHECK-NEXT: call void @use64(i64 [[T1]])
@@ -215,7 +215,7 @@ define i32 @n6_extrause2(i64 %x, i32 %nbits) {
; CHECK-LABEL: @n6_extrause2(
; CHECK-NEXT: [[T0:%.*]] = zext i32 [[NBITS:%.*]] to i64
; CHECK-NEXT: [[T1:%.*]] = shl nsw i64 -1, [[T0]]
-; CHECK-NEXT: [[T2:%.*]] = lshr exact i64 [[T1]], [[T0]]
+; CHECK-NEXT: [[T2:%.*]] = lshr i64 -1, [[T0]]
; CHECK-NEXT: [[T3:%.*]] = add i32 [[NBITS]], -33
; CHECK-NEXT: call void @use64(i64 [[T0]])
; CHECK-NEXT: call void @use64(i64 [[T1]])
diff --git a/llvm/test/Transforms/InstCombine/partally-redundant-left-shift-input-masking-variant-d.ll b/llvm/test/Transforms/InstCombine/partally-redundant-left-shift-input-masking-variant-d.ll
index 92805c6..9c096d1 100644
--- a/llvm/test/Transforms/InstCombine/partally-redundant-left-shift-input-masking-variant-d.ll
+++ b/llvm/test/Transforms/InstCombine/partally-redundant-left-shift-input-masking-variant-d.ll
@@ -16,7 +16,7 @@ declare void @use32(i32)
define i32 @t0_basic(i32 %x, i32 %nbits) {
; CHECK-LABEL: @t0_basic(
; CHECK-NEXT: [[T0:%.*]] = shl nsw i32 -1, [[NBITS:%.*]]
-; CHECK-NEXT: [[T1:%.*]] = lshr exact i32 [[T0]], [[NBITS]]
+; CHECK-NEXT: [[T1:%.*]] = lshr i32 -1, [[NBITS]]
; CHECK-NEXT: [[T3:%.*]] = add i32 [[NBITS]], -1
; CHECK-NEXT: call void @use32(i32 [[T0]])
; CHECK-NEXT: call void @use32(i32 [[T1]])
@@ -43,7 +43,7 @@ declare void @use8xi32(<8 x i32>)
define <8 x i32> @t2_vec_splat(<8 x i32> %x, <8 x i32> %nbits) {
; CHECK-LABEL: @t2_vec_splat(
; CHECK-NEXT: [[T0:%.*]] = shl nsw <8 x i32> <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>, [[NBITS:%.*]]
-; CHECK-NEXT: [[T1:%.*]] = lshr exact <8 x i32> [[T0]], [[NBITS]]
+; CHECK-NEXT: [[T1:%.*]] = lshr <8 x i32> <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>, [[NBITS]]
; CHECK-NEXT: [[T3:%.*]] = add <8 x i32> [[NBITS]], <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>
; CHECK-NEXT: call void @use8xi32(<8 x i32> [[T0]])
; CHECK-NEXT: call void @use8xi32(<8 x i32> [[T1]])
@@ -66,7 +66,7 @@ define <8 x i32> @t2_vec_splat(<8 x i32> %x, <8 x i32> %nbits) {
define <8 x i32> @t2_vec_splat_undef(<8 x i32> %x, <8 x i32> %nbits) {
; CHECK-LABEL: @t2_vec_splat_undef(
; CHECK-NEXT: [[T0:%.*]] = shl <8 x i32> <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 undef, i32 -1>, [[NBITS:%.*]]
-; CHECK-NEXT: [[T1:%.*]] = lshr exact <8 x i32> [[T0]], [[NBITS]]
+; CHECK-NEXT: [[T1:%.*]] = lshr <8 x i32> <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>, [[NBITS]]
; CHECK-NEXT: [[T3:%.*]] = add <8 x i32> [[NBITS]], <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 undef, i32 -1>
; CHECK-NEXT: call void @use8xi32(<8 x i32> [[T0]])
; CHECK-NEXT: call void @use8xi32(<8 x i32> [[T1]])
@@ -89,7 +89,7 @@ define <8 x i32> @t2_vec_splat_undef(<8 x i32> %x, <8 x i32> %nbits) {
define <8 x i32> @t2_vec_nonsplat(<8 x i32> %x, <8 x i32> %nbits) {
; CHECK-LABEL: @t2_vec_nonsplat(
; CHECK-NEXT: [[T0:%.*]] = shl nsw <8 x i32> <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>, [[NBITS:%.*]]
-; CHECK-NEXT: [[T1:%.*]] = lshr exact <8 x i32> [[T0]], [[NBITS]]
+; CHECK-NEXT: [[T1:%.*]] = lshr <8 x i32> <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>, [[NBITS]]
; CHECK-NEXT: [[T3:%.*]] = add <8 x i32> [[NBITS]], <i32 -32, i32 -31, i32 -1, i32 0, i32 1, i32 31, i32 32, i32 33>
; CHECK-NEXT: call void @use8xi32(<8 x i32> [[T0]])
; CHECK-NEXT: call void @use8xi32(<8 x i32> [[T1]])
@@ -114,7 +114,7 @@ define <8 x i32> @t2_vec_nonsplat(<8 x i32> %x, <8 x i32> %nbits) {
define i32 @n3_extrause(i32 %x, i32 %nbits) {
; CHECK-LABEL: @n3_extrause(
; CHECK-NEXT: [[T0:%.*]] = shl nsw i32 -1, [[NBITS:%.*]]
-; CHECK-NEXT: [[T1:%.*]] = lshr exact i32 [[T0]], [[NBITS]]
+; CHECK-NEXT: [[T1:%.*]] = lshr i32 -1, [[NBITS]]
; CHECK-NEXT: [[T2:%.*]] = and i32 [[T1]], [[X:%.*]]
; CHECK-NEXT: [[T3:%.*]] = add i32 [[NBITS]], -1
; CHECK-NEXT: call void @use32(i32 [[T0]])
diff --git a/llvm/test/Transforms/InstCombine/redundant-left-shift-input-masking-after-truncation-variant-d.ll b/llvm/test/Transforms/InstCombine/redundant-left-shift-input-masking-after-truncation-variant-d.ll
index bdc7beb..1a977f6 100644
--- a/llvm/test/Transforms/InstCombine/redundant-left-shift-input-masking-after-truncation-variant-d.ll
+++ b/llvm/test/Transforms/InstCombine/redundant-left-shift-input-masking-after-truncation-variant-d.ll
@@ -18,7 +18,7 @@ define i32 @t0_basic(i64 %x, i32 %nbits) {
; CHECK-LABEL: @t0_basic(
; CHECK-NEXT: [[T0:%.*]] = zext i32 [[NBITS:%.*]] to i64
; CHECK-NEXT: [[T1:%.*]] = shl nsw i64 -1, [[T0]]
-; CHECK-NEXT: [[T2:%.*]] = lshr exact i64 [[T1]], [[T0]]
+; CHECK-NEXT: [[T2:%.*]] = lshr i64 -1, [[T0]]
; CHECK-NEXT: [[T3:%.*]] = add i32 [[NBITS]], -32
; CHECK-NEXT: [[T4:%.*]] = and i64 [[T2]], [[X:%.*]]
; CHECK-NEXT: call void @use64(i64 [[T0]])
@@ -56,7 +56,7 @@ define <8 x i32> @t1_vec_splat(<8 x i64> %x, <8 x i32> %nbits) {
; CHECK-LABEL: @t1_vec_splat(
; CHECK-NEXT: [[T0:%.*]] = zext <8 x i32> [[NBITS:%.*]] to <8 x i64>
; CHECK-NEXT: [[T1:%.*]] = shl nsw <8 x i64> <i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1>, [[T0]]
-; CHECK-NEXT: [[T2:%.*]] = lshr exact <8 x i64> [[T1]], [[T0]]
+; CHECK-NEXT: [[T2:%.*]] = lshr <8 x i64> <i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1>, [[T0]]
; CHECK-NEXT: [[T3:%.*]] = add <8 x i32> [[NBITS]], <i32 -32, i32 -32, i32 -32, i32 -32, i32 -32, i32 -32, i32 -32, i32 -32>
; CHECK-NEXT: [[T4:%.*]] = and <8 x i64> [[T2]], [[X:%.*]]
; CHECK-NEXT: call void @use8xi64(<8 x i64> [[T0]])
@@ -89,7 +89,7 @@ define <8 x i32> @t2_vec_splat_undef(<8 x i64> %x, <8 x i32> %nbits) {
; CHECK-LABEL: @t2_vec_splat_undef(
; CHECK-NEXT: [[T0:%.*]] = zext <8 x i32> [[NBITS:%.*]] to <8 x i64>
; CHECK-NEXT: [[T1:%.*]] = shl <8 x i64> <i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 undef, i64 -1>, [[T0]]
-; CHECK-NEXT: [[T2:%.*]] = lshr exact <8 x i64> [[T1]], [[T0]]
+; CHECK-NEXT: [[T2:%.*]] = lshr <8 x i64> <i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1>, [[T0]]
; CHECK-NEXT: [[T3:%.*]] = add <8 x i32> [[NBITS]], <i32 -32, i32 -32, i32 -32, i32 -32, i32 -32, i32 -32, i32 undef, i32 -32>
; CHECK-NEXT: [[T4:%.*]] = and <8 x i64> [[T2]], [[X:%.*]]
; CHECK-NEXT: call void @use8xi64(<8 x i64> [[T0]])
@@ -122,7 +122,7 @@ define <8 x i32> @t3_vec_nonsplat(<8 x i64> %x, <8 x i32> %nbits) {
; CHECK-LABEL: @t3_vec_nonsplat(
; CHECK-NEXT: [[T0:%.*]] = zext <8 x i32> [[NBITS:%.*]] to <8 x i64>
; CHECK-NEXT: [[T1:%.*]] = shl <8 x i64> <i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 undef, i64 -1>, [[T0]]
-; CHECK-NEXT: [[T2:%.*]] = lshr exact <8 x i64> [[T1]], [[T0]]
+; CHECK-NEXT: [[T2:%.*]] = lshr <8 x i64> <i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1>, [[T0]]
; CHECK-NEXT: [[T3:%.*]] = add <8 x i32> [[NBITS]], <i32 -32, i32 -1, i32 0, i32 1, i32 31, i32 32, i32 undef, i32 64>
; CHECK-NEXT: [[T4:%.*]] = and <8 x i64> [[T2]], [[X:%.*]]
; CHECK-NEXT: call void @use8xi64(<8 x i64> [[T0]])
@@ -157,7 +157,7 @@ define i32 @n4_extrause(i64 %x, i32 %nbits) {
; CHECK-LABEL: @n4_extrause(
; CHECK-NEXT: [[T0:%.*]] = zext i32 [[NBITS:%.*]] to i64
; CHECK-NEXT: [[T1:%.*]] = shl nsw i64 -1, [[T0]]
-; CHECK-NEXT: [[T2:%.*]] = lshr exact i64 [[T1]], [[T0]]
+; CHECK-NEXT: [[T2:%.*]] = lshr i64 -1, [[T0]]
; CHECK-NEXT: [[T3:%.*]] = add i32 [[NBITS]], -32
; CHECK-NEXT: [[T4:%.*]] = and i64 [[T2]], [[X:%.*]]
; CHECK-NEXT: call void @use64(i64 [[T0]])
diff --git a/llvm/test/Transforms/InstCombine/redundant-left-shift-input-masking-variant-d.ll b/llvm/test/Transforms/InstCombine/redundant-left-shift-input-masking-variant-d.ll
index c91e5a0..549729f 100644
--- a/llvm/test/Transforms/InstCombine/redundant-left-shift-input-masking-variant-d.ll
+++ b/llvm/test/Transforms/InstCombine/redundant-left-shift-input-masking-variant-d.ll
@@ -18,7 +18,7 @@ declare void @use32(i32)
define i32 @t0_basic(i32 %x, i32 %nbits) {
; CHECK-LABEL: @t0_basic(
; CHECK-NEXT: [[T0:%.*]] = shl nsw i32 -1, [[NBITS:%.*]]
-; CHECK-NEXT: [[T1:%.*]] = lshr exact i32 [[T0]], [[NBITS]]
+; CHECK-NEXT: [[T1:%.*]] = lshr i32 -1, [[NBITS]]
; CHECK-NEXT: [[T2:%.*]] = and i32 [[T1]], [[X:%.*]]
; CHECK-NEXT: call void @use32(i32 [[T0]])
; CHECK-NEXT: call void @use32(i32 [[T1]])
@@ -39,7 +39,7 @@ define i32 @t0_basic(i32 %x, i32 %nbits) {
define i32 @t1_bigger_shift(i32 %x, i32 %nbits) {
; CHECK-LABEL: @t1_bigger_shift(
; CHECK-NEXT: [[T0:%.*]] = shl nsw i32 -1, [[NBITS:%.*]]
-; CHECK-NEXT: [[T1:%.*]] = lshr exact i32 [[T0]], [[NBITS]]
+; CHECK-NEXT: [[T1:%.*]] = lshr i32 -1, [[NBITS]]
; CHECK-NEXT: [[T2:%.*]] = and i32 [[T1]], [[X:%.*]]
; CHECK-NEXT: [[T3:%.*]] = add i32 [[NBITS]], 1
; CHECK-NEXT: call void @use32(i32 [[T0]])
@@ -68,7 +68,7 @@ declare void @use3xi32(<3 x i32>)
define <3 x i32> @t2_vec_splat(<3 x i32> %x, <3 x i32> %nbits) {
; CHECK-LABEL: @t2_vec_splat(
; CHECK-NEXT: [[T0:%.*]] = shl nsw <3 x i32> <i32 -1, i32 -1, i32 -1>, [[NBITS:%.*]]
-; CHECK-NEXT: [[T1:%.*]] = lshr exact <3 x i32> [[T0]], [[NBITS]]
+; CHECK-NEXT: [[T1:%.*]] = lshr <3 x i32> <i32 -1, i32 -1, i32 -1>, [[NBITS]]
; CHECK-NEXT: [[T2:%.*]] = and <3 x i32> [[T1]], [[X:%.*]]
; CHECK-NEXT: [[T3:%.*]] = add <3 x i32> [[NBITS]], <i32 1, i32 1, i32 1>
; CHECK-NEXT: call void @use3xi32(<3 x i32> [[T0]])
@@ -93,7 +93,7 @@ define <3 x i32> @t2_vec_splat(<3 x i32> %x, <3 x i32> %nbits) {
define <3 x i32> @t3_vec_nonsplat(<3 x i32> %x, <3 x i32> %nbits) {
; CHECK-LABEL: @t3_vec_nonsplat(
; CHECK-NEXT: [[T0:%.*]] = shl nsw <3 x i32> <i32 -1, i32 -1, i32 -1>, [[NBITS:%.*]]
-; CHECK-NEXT: [[T1:%.*]] = lshr exact <3 x i32> [[T0]], [[NBITS]]
+; CHECK-NEXT: [[T1:%.*]] = lshr <3 x i32> <i32 -1, i32 -1, i32 -1>, [[NBITS]]
; CHECK-NEXT: [[T2:%.*]] = and <3 x i32> [[T1]], [[X:%.*]]
; CHECK-NEXT: [[T3:%.*]] = add <3 x i32> [[NBITS]], <i32 1, i32 0, i32 2>
; CHECK-NEXT: call void @use3xi32(<3 x i32> [[T0]])
@@ -118,7 +118,7 @@ define <3 x i32> @t3_vec_nonsplat(<3 x i32> %x, <3 x i32> %nbits) {
define <3 x i32> @t4_vec_undef(<3 x i32> %x, <3 x i32> %nbits) {
; CHECK-LABEL: @t4_vec_undef(
; CHECK-NEXT: [[T0:%.*]] = shl <3 x i32> <i32 -1, i32 undef, i32 -1>, [[NBITS:%.*]]
-; CHECK-NEXT: [[T1:%.*]] = lshr exact <3 x i32> [[T0]], [[NBITS]]
+; CHECK-NEXT: [[T1:%.*]] = lshr <3 x i32> <i32 -1, i32 -1, i32 -1>, [[NBITS]]
; CHECK-NEXT: [[T2:%.*]] = and <3 x i32> [[T1]], [[X:%.*]]
; CHECK-NEXT: call void @use3xi32(<3 x i32> [[T0]])
; CHECK-NEXT: call void @use3xi32(<3 x i32> [[T1]])
@@ -147,7 +147,7 @@ define i32 @t5_commutativity0(i32 %nbits) {
; CHECK-LABEL: @t5_commutativity0(
; CHECK-NEXT: [[X:%.*]] = call i32 @gen32()
; CHECK-NEXT: [[T0:%.*]] = shl nsw i32 -1, [[NBITS:%.*]]
-; CHECK-NEXT: [[T1:%.*]] = lshr exact i32 [[T0]], [[NBITS]]
+; CHECK-NEXT: [[T1:%.*]] = lshr i32 -1, [[NBITS]]
; CHECK-NEXT: [[T2:%.*]] = and i32 [[X]], [[T1]]
; CHECK-NEXT: call void @use32(i32 [[T0]])
; CHECK-NEXT: call void @use32(i32 [[T1]])
@@ -169,7 +169,7 @@ define i32 @t5_commutativity0(i32 %nbits) {
define i32 @t6_commutativity1(i32 %nbits0, i32 %nbits1) {
; CHECK-LABEL: @t6_commutativity1(
; CHECK-NEXT: [[T0:%.*]] = shl nsw i32 -1, [[NBITS0:%.*]]
-; CHECK-NEXT: [[T1:%.*]] = lshr exact i32 [[T0]], [[NBITS0]]
+; CHECK-NEXT: [[T1:%.*]] = lshr i32 -1, [[NBITS0]]
; CHECK-NEXT: [[T2:%.*]] = shl nsw i32 -1, [[NBITS1:%.*]]
; CHECK-NEXT: [[T3:%.*]] = lshr i32 [[T0]], [[NBITS1]]
; CHECK-NEXT: [[T4:%.*]] = and i32 [[T3]], [[T1]]
@@ -197,7 +197,7 @@ define i32 @t6_commutativity1(i32 %nbits0, i32 %nbits1) {
define i32 @t7_commutativity2(i32 %nbits0, i32 %nbits1) {
; CHECK-LABEL: @t7_commutativity2(
; CHECK-NEXT: [[T0:%.*]] = shl nsw i32 -1, [[NBITS0:%.*]]
-; CHECK-NEXT: [[T1:%.*]] = lshr exact i32 [[T0]], [[NBITS0]]
+; CHECK-NEXT: [[T1:%.*]] = lshr i32 -1, [[NBITS0]]
; CHECK-NEXT: [[T2:%.*]] = shl nsw i32 -1, [[NBITS1:%.*]]
; CHECK-NEXT: [[T3:%.*]] = lshr i32 [[T0]], [[NBITS1]]
; CHECK-NEXT: [[T4:%.*]] = and i32 [[T3]], [[T1]]
@@ -228,7 +228,7 @@ define i32 @t7_commutativity2(i32 %nbits0, i32 %nbits1) {
define i32 @t8_nuw(i32 %x, i32 %nbits) {
; CHECK-LABEL: @t8_nuw(
; CHECK-NEXT: [[T0:%.*]] = shl nsw i32 -1, [[NBITS:%.*]]
-; CHECK-NEXT: [[T1:%.*]] = lshr exact i32 [[T0]], [[NBITS]]
+; CHECK-NEXT: [[T1:%.*]] = lshr i32 -1, [[NBITS]]
; CHECK-NEXT: [[T2:%.*]] = and i32 [[T1]], [[X:%.*]]
; CHECK-NEXT: call void @use32(i32 [[T0]])
; CHECK-NEXT: call void @use32(i32 [[T1]])
@@ -249,7 +249,7 @@ define i32 @t8_nuw(i32 %x, i32 %nbits) {
define i32 @t9_nsw(i32 %x, i32 %nbits) {
; CHECK-LABEL: @t9_nsw(
; CHECK-NEXT: [[T0:%.*]] = shl nsw i32 -1, [[NBITS:%.*]]
-; CHECK-NEXT: [[T1:%.*]] = lshr exact i32 [[T0]], [[NBITS]]
+; CHECK-NEXT: [[T1:%.*]] = lshr i32 -1, [[NBITS]]
; CHECK-NEXT: [[T2:%.*]] = and i32 [[T1]], [[X:%.*]]
; CHECK-NEXT: call void @use32(i32 [[T0]])
; CHECK-NEXT: call void @use32(i32 [[T1]])
@@ -270,7 +270,7 @@ define i32 @t9_nsw(i32 %x, i32 %nbits) {
define i32 @t10_nuw_nsw(i32 %x, i32 %nbits) {
; CHECK-LABEL: @t10_nuw_nsw(
; CHECK-NEXT: [[T0:%.*]] = shl nsw i32 -1, [[NBITS:%.*]]
-; CHECK-NEXT: [[T1:%.*]] = lshr exact i32 [[T0]], [[NBITS]]
+; CHECK-NEXT: [[T1:%.*]] = lshr i32 -1, [[NBITS]]
; CHECK-NEXT: [[T2:%.*]] = and i32 [[T1]], [[X:%.*]]
; CHECK-NEXT: call void @use32(i32 [[T0]])
; CHECK-NEXT: call void @use32(i32 [[T1]])
@@ -298,7 +298,7 @@ define i32 @t11_assume_uge(i32 %x, i32 %masknbits, i32 %shiftnbits) {
; CHECK-NEXT: [[CMP:%.*]] = icmp uge i32 [[SHIFTNBITS:%.*]], [[MASKNBITS:%.*]]
; CHECK-NEXT: call void @llvm.assume(i1 [[CMP]])
; CHECK-NEXT: [[T0:%.*]] = shl nsw i32 -1, [[MASKNBITS]]
-; CHECK-NEXT: [[T1:%.*]] = lshr exact i32 [[T0]], [[MASKNBITS]]
+; CHECK-NEXT: [[T1:%.*]] = lshr i32 -1, [[MASKNBITS]]
; CHECK-NEXT: [[T2:%.*]] = and i32 [[T1]], [[X:%.*]]
; CHECK-NEXT: call void @use32(i32 [[T0]])
; CHECK-NEXT: call void @use32(i32 [[T1]])
diff --git a/llvm/test/Transforms/InstCombine/simplify-demanded-fpclass.ll b/llvm/test/Transforms/InstCombine/simplify-demanded-fpclass.ll
index dd9b714..5dfeb07 100644
--- a/llvm/test/Transforms/InstCombine/simplify-demanded-fpclass.ll
+++ b/llvm/test/Transforms/InstCombine/simplify-demanded-fpclass.ll
@@ -1,5 +1,5 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 2
-; RUN: opt -S -passes=instcombine -instcombine-simplify-demanded-fp-class < %s | FileCheck %s
+; RUN: opt -S -passes=instcombine < %s | FileCheck %s
declare float @llvm.fabs.f32(float)
declare float @llvm.copysign.f32(float, float)
diff --git a/llvm/test/Transforms/InstSimplify/compare.ll b/llvm/test/Transforms/InstSimplify/compare.ll
index ac2ebf5..1e90f0e 100644
--- a/llvm/test/Transforms/InstSimplify/compare.ll
+++ b/llvm/test/Transforms/InstSimplify/compare.ll
@@ -2453,36 +2453,6 @@ define i1 @icmp_nsw_2(i32 %V) {
ret i1 %cmp
}
-define i1 @icmp_nsw_commute(i32 %V) {
-; CHECK-LABEL: @icmp_nsw_commute(
-; CHECK-NEXT: ret i1 true
-;
- %add5 = add i32 5, %V
- %add6 = add nsw i32 %V, 6
- %cmp = icmp slt i32 %add5, %add6
- ret i1 %cmp
-}
-
-define i1 @icmp_nsw_commute2(i32 %V) {
-; CHECK-LABEL: @icmp_nsw_commute2(
-; CHECK-NEXT: ret i1 true
-;
- %add5 = add i32 %V, 5
- %add6 = add nsw i32 6, %V
- %cmp = icmp slt i32 %add5, %add6
- ret i1 %cmp
-}
-
-define i1 @icmp_nsw_commute3(i32 %V) {
-; CHECK-LABEL: @icmp_nsw_commute3(
-; CHECK-NEXT: ret i1 true
-;
- %add5 = add i32 5, %V
- %add6 = add nsw i32 6, %V
- %cmp = icmp slt i32 %add5, %add6
- ret i1 %cmp
-}
-
define i1 @icmp_nsw_22(i32 %V) {
; CHECK-LABEL: @icmp_nsw_22(
; CHECK-NEXT: ret i1 true
diff --git a/llvm/test/Transforms/InstSimplify/gc_relocate.ll b/llvm/test/Transforms/InstSimplify/gc_relocate.ll
index 3f6de8b..894e5ed 100644
--- a/llvm/test/Transforms/InstSimplify/gc_relocate.ll
+++ b/llvm/test/Transforms/InstSimplify/gc_relocate.ll
@@ -11,9 +11,34 @@ define void @dead_relocate(ptr addrspace(1) %in) gc "statepoint-example" {
;
entry:
%safepoint_token = call token (i64, i32, ptr, i32, i32, ...) @llvm.experimental.gc.statepoint.p0(i64 0, i32 0, ptr elementtype(void ()) @func, i32 0, i32 0, i32 0, i32 0) ["gc-live"(ptr addrspace(1) %in)]
- %a = call ptr addrspace(1) @llvm.experimental.gc.relocate.p1(token %safepoint_token, i32 0, i32 0)
+ %a = call ptr addrspace(1) @llvm.experimental.gc.relocate.p1(token %safepoint_token, i32 0, i32 0)
ret void
}
+define ptr addrspace(1) @relocate_undef() gc "statepoint-example" {
+; CHECK-LABEL: @relocate_undef(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[SAFEPOINT_TOKEN:%.*]] = call token (i64, i32, ptr, i32, i32, ...) @llvm.experimental.gc.statepoint.p0(i64 0, i32 0, ptr elementtype(void ()) @func, i32 0, i32 0, i32 0, i32 0) [ "gc-live"(ptr addrspace(1) undef) ]
+; CHECK-NEXT: ret ptr addrspace(1) undef
+;
+entry:
+ %safepoint_token = call token (i64, i32, ptr, i32, i32, ...) @llvm.experimental.gc.statepoint.p0(i64 0, i32 0, ptr elementtype(void ()) @func, i32 0, i32 0, i32 0, i32 0) ["gc-live"(ptr addrspace(1) undef)]
+ %a = call ptr addrspace(1) @llvm.experimental.gc.relocate.p1(token %safepoint_token, i32 0, i32 0)
+ ret ptr addrspace(1) %a
+}
+
+define ptr addrspace(1) @relocate_null() gc "statepoint-example" {
+; CHECK-LABEL: @relocate_null(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[SAFEPOINT_TOKEN:%.*]] = call token (i64, i32, ptr, i32, i32, ...) @llvm.experimental.gc.statepoint.p0(i64 0, i32 0, ptr elementtype(void ()) @func, i32 0, i32 0, i32 0, i32 0) [ "gc-live"(ptr addrspace(1) null) ]
+; CHECK-NEXT: ret ptr addrspace(1) null
+;
+entry:
+ %safepoint_token = call token (i64, i32, ptr, i32, i32, ...) @llvm.experimental.gc.statepoint.p0(i64 0, i32 0, ptr elementtype(void ()) @func, i32 0, i32 0, i32 0, i32 0) ["gc-live"(ptr addrspace(1) null)]
+ %a = call ptr addrspace(1) @llvm.experimental.gc.relocate.p1(token %safepoint_token, i32 0, i32 0)
+ ret ptr addrspace(1) %a
+}
+
+
declare token @llvm.experimental.gc.statepoint.p0(i64, i32, ptr, i32, i32, ...)
declare ptr addrspace(1) @llvm.experimental.gc.relocate.p1(token, i32, i32)
diff --git a/llvm/test/Transforms/LICM/expr-reassociate-int.ll b/llvm/test/Transforms/LICM/expr-reassociate-int.ll
new file mode 100644
index 0000000..6354897
--- /dev/null
+++ b/llvm/test/Transforms/LICM/expr-reassociate-int.ll
@@ -0,0 +1,364 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 2
+; RUN: opt -passes='loop-mssa(licm)' -S < %s | FileCheck %s --check-prefixes=CHECK,NOT_CONSTRAINED
+; RUN: opt -passes='loop-mssa(licm)' -licm-max-num-int-reassociations=1 -S < %s | FileCheck %s --check-prefixes=CHECK,CONSTRAINED
+
+;
+; A simple loop:
+;
+; int j;
+;
+; for (j = 0; j <= i; j++)
+; cells[j] = d1 * cells[j + 1] * delta;
+;
+; ...should be transformed by the LICM pass into this:
+;
+; int j;
+; const uint64_t d1d = d1 * delta;
+;
+; for (j = 0; j <= i; j++)
+; cells[j] = d1d * cells[j + 1];
+;
+
+define void @innermost_loop_1d_shouldhoist(i32 %i, i64 %d1, i64 %delta, ptr %cells) {
+; CHECK-LABEL: define void @innermost_loop_1d_shouldhoist
+; CHECK-SAME: (i32 [[I:%.*]], i64 [[D1:%.*]], i64 [[DELTA:%.*]], ptr [[CELLS:%.*]]) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[MUL_1:%.*]] = mul i64 [[DELTA]], [[D1]]
+; CHECK-NEXT: br label [[FOR_COND:%.*]]
+; CHECK: for.cond:
+; CHECK-NEXT: [[J:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[ADD_J_1:%.*]], [[FOR_BODY:%.*]] ]
+; CHECK-NEXT: [[CMP_NOT:%.*]] = icmp sgt i32 [[J]], [[I]]
+; CHECK-NEXT: br i1 [[CMP_NOT]], label [[FOR_END:%.*]], label [[FOR_BODY]]
+; CHECK: for.body:
+; CHECK-NEXT: [[ADD_J_1]] = add nuw nsw i32 [[J]], 1
+; CHECK-NEXT: [[IDXPROM_J_1:%.*]] = zext i32 [[ADD_J_1]] to i64
+; CHECK-NEXT: [[ARRAYIDX_J_1:%.*]] = getelementptr inbounds i64, ptr [[CELLS]], i64 [[IDXPROM_J_1]]
+; CHECK-NEXT: [[CELL_1:%.*]] = load i64, ptr [[ARRAYIDX_J_1]], align 8
+; CHECK-NEXT: [[MUL_2:%.*]] = mul i64 [[MUL_1]], [[CELL_1]]
+; CHECK-NEXT: [[IDXPROM_J:%.*]] = zext i32 [[J]] to i64
+; CHECK-NEXT: [[ARRAYIDX_J:%.*]] = getelementptr inbounds i64, ptr [[CELLS]], i64 [[IDXPROM_J]]
+; CHECK-NEXT: store i64 [[MUL_2]], ptr [[ARRAYIDX_J]], align 8
+; CHECK-NEXT: br label [[FOR_COND]]
+; CHECK: for.end:
+; CHECK-NEXT: ret void
+;
+entry:
+ br label %for.cond
+
+for.cond:
+ %j = phi i32 [ 0, %entry ], [ %add.j.1, %for.body ]
+ %cmp.not = icmp sgt i32 %j, %i
+ br i1 %cmp.not, label %for.end, label %for.body
+
+for.body:
+ %add.j.1 = add nuw nsw i32 %j, 1
+ %idxprom.j.1 = zext i32 %add.j.1 to i64
+ %arrayidx.j.1 = getelementptr inbounds i64, ptr %cells, i64 %idxprom.j.1
+ %cell.1 = load i64, ptr %arrayidx.j.1, align 8
+ %mul.1 = mul i64 %delta, %d1
+ %mul.2 = mul i64 %mul.1, %cell.1
+ %idxprom.j = zext i32 %j to i64
+ %arrayidx.j = getelementptr inbounds i64, ptr %cells, i64 %idxprom.j
+ store i64 %mul.2, ptr %arrayidx.j, align 8
+ br label %for.cond
+
+for.end:
+ ret void
+}
+
+;
+; The following loop will be modified by the 'Reassociate expressions' pass,
+;
+; int j;
+; const uint64_t d1d = d1 * delta;
+; const uint64_t d2d = d2 * delta;
+;
+; for (j = 0; j <= i; j++)
+; cells[j] = d1d * cells[j + 1] + d2d * cells[j];
+;
+; ...into this:
+;
+; int j;
+;
+; for (j = 0; j <= i; j++)
+; cells[j] = (d1 * cells[j + 1] + d2 * cells[j]) * delta;
+;
+; We expect the LICM pass to undo this transformation.
+;
+
+define void @innermost_loop_2d(i32 %i, i64 %d1, i64 %d2, i64 %delta, ptr %cells) {
+; NOT_CONSTRAINED-LABEL: define void @innermost_loop_2d
+; NOT_CONSTRAINED-SAME: (i32 [[I:%.*]], i64 [[D1:%.*]], i64 [[D2:%.*]], i64 [[DELTA:%.*]], ptr [[CELLS:%.*]]) {
+; NOT_CONSTRAINED-NEXT: entry:
+; NOT_CONSTRAINED-NEXT: [[FACTOR_OP_MUL:%.*]] = mul i64 [[D1]], [[DELTA]]
+; NOT_CONSTRAINED-NEXT: [[FACTOR_OP_MUL1:%.*]] = mul i64 [[D2]], [[DELTA]]
+; NOT_CONSTRAINED-NEXT: br label [[FOR_COND:%.*]]
+; NOT_CONSTRAINED: for.cond:
+; NOT_CONSTRAINED-NEXT: [[J:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[ADD_J_1:%.*]], [[FOR_BODY:%.*]] ]
+; NOT_CONSTRAINED-NEXT: [[CMP_NOT:%.*]] = icmp sgt i32 [[J]], [[I]]
+; NOT_CONSTRAINED-NEXT: br i1 [[CMP_NOT]], label [[FOR_END:%.*]], label [[FOR_BODY]]
+; NOT_CONSTRAINED: for.body:
+; NOT_CONSTRAINED-NEXT: [[ADD_J_1]] = add nuw nsw i32 [[J]], 1
+; NOT_CONSTRAINED-NEXT: [[IDXPROM_J_1:%.*]] = zext i32 [[ADD_J_1]] to i64
+; NOT_CONSTRAINED-NEXT: [[ARRAYIDX_J_1:%.*]] = getelementptr inbounds i64, ptr [[CELLS]], i64 [[IDXPROM_J_1]]
+; NOT_CONSTRAINED-NEXT: [[CELL_1:%.*]] = load i64, ptr [[ARRAYIDX_J_1]], align 8
+; NOT_CONSTRAINED-NEXT: [[MUL_1:%.*]] = mul i64 [[CELL_1]], [[FACTOR_OP_MUL]]
+; NOT_CONSTRAINED-NEXT: [[IDXPROM_J:%.*]] = zext i32 [[J]] to i64
+; NOT_CONSTRAINED-NEXT: [[ARRAYIDX_J:%.*]] = getelementptr inbounds i64, ptr [[CELLS]], i64 [[IDXPROM_J]]
+; NOT_CONSTRAINED-NEXT: [[CELL_2:%.*]] = load i64, ptr [[ARRAYIDX_J]], align 8
+; NOT_CONSTRAINED-NEXT: [[MUL_2:%.*]] = mul i64 [[CELL_2]], [[FACTOR_OP_MUL1]]
+; NOT_CONSTRAINED-NEXT: [[REASS_ADD:%.*]] = add i64 [[MUL_2]], [[MUL_1]]
+; NOT_CONSTRAINED-NEXT: store i64 [[REASS_ADD]], ptr [[ARRAYIDX_J]], align 8
+; NOT_CONSTRAINED-NEXT: br label [[FOR_COND]]
+; NOT_CONSTRAINED: for.end:
+; NOT_CONSTRAINED-NEXT: ret void
+;
+; CONSTRAINED-LABEL: define void @innermost_loop_2d
+; CONSTRAINED-SAME: (i32 [[I:%.*]], i64 [[D1:%.*]], i64 [[D2:%.*]], i64 [[DELTA:%.*]], ptr [[CELLS:%.*]]) {
+; CONSTRAINED-NEXT: entry:
+; CONSTRAINED-NEXT: br label [[FOR_COND:%.*]]
+; CONSTRAINED: for.cond:
+; CONSTRAINED-NEXT: [[J:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[ADD_J_1:%.*]], [[FOR_BODY:%.*]] ]
+; CONSTRAINED-NEXT: [[CMP_NOT:%.*]] = icmp sgt i32 [[J]], [[I]]
+; CONSTRAINED-NEXT: br i1 [[CMP_NOT]], label [[FOR_END:%.*]], label [[FOR_BODY]]
+; CONSTRAINED: for.body:
+; CONSTRAINED-NEXT: [[ADD_J_1]] = add nuw nsw i32 [[J]], 1
+; CONSTRAINED-NEXT: [[IDXPROM_J_1:%.*]] = zext i32 [[ADD_J_1]] to i64
+; CONSTRAINED-NEXT: [[ARRAYIDX_J_1:%.*]] = getelementptr inbounds i64, ptr [[CELLS]], i64 [[IDXPROM_J_1]]
+; CONSTRAINED-NEXT: [[CELL_1:%.*]] = load i64, ptr [[ARRAYIDX_J_1]], align 8
+; CONSTRAINED-NEXT: [[MUL_1:%.*]] = mul i64 [[CELL_1]], [[D1]]
+; CONSTRAINED-NEXT: [[IDXPROM_J:%.*]] = zext i32 [[J]] to i64
+; CONSTRAINED-NEXT: [[ARRAYIDX_J:%.*]] = getelementptr inbounds i64, ptr [[CELLS]], i64 [[IDXPROM_J]]
+; CONSTRAINED-NEXT: [[CELL_2:%.*]] = load i64, ptr [[ARRAYIDX_J]], align 8
+; CONSTRAINED-NEXT: [[MUL_2:%.*]] = mul i64 [[CELL_2]], [[D2]]
+; CONSTRAINED-NEXT: [[REASS_ADD:%.*]] = add i64 [[MUL_2]], [[MUL_1]]
+; CONSTRAINED-NEXT: [[REASS_MUL:%.*]] = mul i64 [[REASS_ADD]], [[DELTA]]
+; CONSTRAINED-NEXT: store i64 [[REASS_MUL]], ptr [[ARRAYIDX_J]], align 8
+; CONSTRAINED-NEXT: br label [[FOR_COND]]
+; CONSTRAINED: for.end:
+; CONSTRAINED-NEXT: ret void
+;
+entry:
+ br label %for.cond
+
+for.cond:
+ %j = phi i32 [ 0, %entry ], [ %add.j.1, %for.body ]
+ %cmp.not = icmp sgt i32 %j, %i
+ br i1 %cmp.not, label %for.end, label %for.body
+
+for.body:
+ %add.j.1 = add nuw nsw i32 %j, 1
+ %idxprom.j.1 = zext i32 %add.j.1 to i64
+ %arrayidx.j.1 = getelementptr inbounds i64, ptr %cells, i64 %idxprom.j.1
+ %cell.1 = load i64, ptr %arrayidx.j.1, align 8
+ %mul.1 = mul i64 %cell.1, %d1
+ %idxprom.j = zext i32 %j to i64
+ %arrayidx.j = getelementptr inbounds i64, ptr %cells, i64 %idxprom.j
+ %cell.2 = load i64, ptr %arrayidx.j, align 8
+ %mul.2 = mul i64 %cell.2, %d2
+ %reass.add = add i64 %mul.2, %mul.1
+ %reass.mul = mul i64 %reass.add, %delta
+ store i64 %reass.mul, ptr %arrayidx.j, align 8
+ br label %for.cond
+
+for.end:
+ ret void
+}
+
+;
+; The following loop will be modified by the 'Reassociate expressions' pass,
+;
+; int j;
+; const uint64_t d1d = d1 * delta;
+; const uint64_t d2d = d2 * delta;
+; const uint64_t d3d = d3 * delta;
+;
+; for (j = 0; j <= i; j++)
+; cells[j] = d1d * cells[j + 1] + d2d * cells[j] + d3d * cells[j + 2];
+;
+; ...into this:
+;
+; int j;
+;
+; for (j = 0; j <= i; j++)
+; cells[j] = (d1 * cells[j + 1] + d2 * cells[j] + d3 * cells[j + 2]) * delta;
+;
+; We expect the LICM pass to undo this transformation.
+;
+
+
+define void @innermost_loop_3d(i32 %i, i64 %d1, i64 %d2, i64 %d3, i64 %delta, ptr %cells) {
+; NOT_CONSTRAINED-LABEL: define void @innermost_loop_3d
+; NOT_CONSTRAINED-SAME: (i32 [[I:%.*]], i64 [[D1:%.*]], i64 [[D2:%.*]], i64 [[D3:%.*]], i64 [[DELTA:%.*]], ptr [[CELLS:%.*]]) {
+; NOT_CONSTRAINED-NEXT: entry:
+; NOT_CONSTRAINED-NEXT: [[FACTOR_OP_MUL:%.*]] = mul i64 [[D3]], [[DELTA]]
+; NOT_CONSTRAINED-NEXT: [[FACTOR_OP_MUL1:%.*]] = mul i64 [[D1]], [[DELTA]]
+; NOT_CONSTRAINED-NEXT: [[FACTOR_OP_MUL2:%.*]] = mul i64 [[D2]], [[DELTA]]
+; NOT_CONSTRAINED-NEXT: br label [[FOR_COND:%.*]]
+; NOT_CONSTRAINED: for.cond:
+; NOT_CONSTRAINED-NEXT: [[J:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[ADD_J_1:%.*]], [[FOR_BODY:%.*]] ]
+; NOT_CONSTRAINED-NEXT: [[CMP_NOT:%.*]] = icmp sgt i32 [[J]], [[I]]
+; NOT_CONSTRAINED-NEXT: br i1 [[CMP_NOT]], label [[FOR_END:%.*]], label [[FOR_BODY]]
+; NOT_CONSTRAINED: for.body:
+; NOT_CONSTRAINED-NEXT: [[ADD_J_1]] = add nuw nsw i32 [[J]], 1
+; NOT_CONSTRAINED-NEXT: [[IDXPROM_J_1:%.*]] = zext i32 [[ADD_J_1]] to i64
+; NOT_CONSTRAINED-NEXT: [[ARRAYIDX_J_1:%.*]] = getelementptr inbounds i64, ptr [[CELLS]], i64 [[IDXPROM_J_1]]
+; NOT_CONSTRAINED-NEXT: [[CELL_1:%.*]] = load i64, ptr [[ARRAYIDX_J_1]], align 8
+; NOT_CONSTRAINED-NEXT: [[MUL_1:%.*]] = mul i64 [[CELL_1]], [[FACTOR_OP_MUL1]]
+; NOT_CONSTRAINED-NEXT: [[IDXPROM_J:%.*]] = zext i32 [[J]] to i64
+; NOT_CONSTRAINED-NEXT: [[ARRAYIDX_J:%.*]] = getelementptr inbounds i64, ptr [[CELLS]], i64 [[IDXPROM_J]]
+; NOT_CONSTRAINED-NEXT: [[CELL_2:%.*]] = load i64, ptr [[ARRAYIDX_J]], align 8
+; NOT_CONSTRAINED-NEXT: [[MUL_2:%.*]] = mul i64 [[CELL_2]], [[FACTOR_OP_MUL2]]
+; NOT_CONSTRAINED-NEXT: [[ADD_J_2:%.*]] = add nuw nsw i32 [[J]], 2
+; NOT_CONSTRAINED-NEXT: [[IDXPROM_J_2:%.*]] = zext i32 [[ADD_J_2]] to i64
+; NOT_CONSTRAINED-NEXT: [[ARRAYIDX_J_2:%.*]] = getelementptr inbounds i64, ptr [[CELLS]], i64 [[IDXPROM_J_2]]
+; NOT_CONSTRAINED-NEXT: [[CELL_3:%.*]] = load i64, ptr [[ARRAYIDX_J_2]], align 8
+; NOT_CONSTRAINED-NEXT: [[MUL_3:%.*]] = mul i64 [[CELL_3]], [[FACTOR_OP_MUL]]
+; NOT_CONSTRAINED-NEXT: [[REASS_ADD:%.*]] = add i64 [[MUL_2]], [[MUL_1]]
+; NOT_CONSTRAINED-NEXT: [[REASS_ADD1:%.*]] = add i64 [[REASS_ADD]], [[MUL_3]]
+; NOT_CONSTRAINED-NEXT: store i64 [[REASS_ADD1]], ptr [[ARRAYIDX_J_2]], align 8
+; NOT_CONSTRAINED-NEXT: br label [[FOR_COND]]
+; NOT_CONSTRAINED: for.end:
+; NOT_CONSTRAINED-NEXT: ret void
+;
+; CONSTRAINED-LABEL: define void @innermost_loop_3d
+; CONSTRAINED-SAME: (i32 [[I:%.*]], i64 [[D1:%.*]], i64 [[D2:%.*]], i64 [[D3:%.*]], i64 [[DELTA:%.*]], ptr [[CELLS:%.*]]) {
+; CONSTRAINED-NEXT: entry:
+; CONSTRAINED-NEXT: br label [[FOR_COND:%.*]]
+; CONSTRAINED: for.cond:
+; CONSTRAINED-NEXT: [[J:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[ADD_J_1:%.*]], [[FOR_BODY:%.*]] ]
+; CONSTRAINED-NEXT: [[CMP_NOT:%.*]] = icmp sgt i32 [[J]], [[I]]
+; CONSTRAINED-NEXT: br i1 [[CMP_NOT]], label [[FOR_END:%.*]], label [[FOR_BODY]]
+; CONSTRAINED: for.body:
+; CONSTRAINED-NEXT: [[ADD_J_1]] = add nuw nsw i32 [[J]], 1
+; CONSTRAINED-NEXT: [[IDXPROM_J_1:%.*]] = zext i32 [[ADD_J_1]] to i64
+; CONSTRAINED-NEXT: [[ARRAYIDX_J_1:%.*]] = getelementptr inbounds i64, ptr [[CELLS]], i64 [[IDXPROM_J_1]]
+; CONSTRAINED-NEXT: [[CELL_1:%.*]] = load i64, ptr [[ARRAYIDX_J_1]], align 8
+; CONSTRAINED-NEXT: [[MUL_1:%.*]] = mul i64 [[CELL_1]], [[D1]]
+; CONSTRAINED-NEXT: [[IDXPROM_J:%.*]] = zext i32 [[J]] to i64
+; CONSTRAINED-NEXT: [[ARRAYIDX_J:%.*]] = getelementptr inbounds i64, ptr [[CELLS]], i64 [[IDXPROM_J]]
+; CONSTRAINED-NEXT: [[CELL_2:%.*]] = load i64, ptr [[ARRAYIDX_J]], align 8
+; CONSTRAINED-NEXT: [[MUL_2:%.*]] = mul i64 [[CELL_2]], [[D2]]
+; CONSTRAINED-NEXT: [[ADD_J_2:%.*]] = add nuw nsw i32 [[J]], 2
+; CONSTRAINED-NEXT: [[IDXPROM_J_2:%.*]] = zext i32 [[ADD_J_2]] to i64
+; CONSTRAINED-NEXT: [[ARRAYIDX_J_2:%.*]] = getelementptr inbounds i64, ptr [[CELLS]], i64 [[IDXPROM_J_2]]
+; CONSTRAINED-NEXT: [[CELL_3:%.*]] = load i64, ptr [[ARRAYIDX_J_2]], align 8
+; CONSTRAINED-NEXT: [[MUL_3:%.*]] = mul i64 [[CELL_3]], [[D3]]
+; CONSTRAINED-NEXT: [[REASS_ADD:%.*]] = add i64 [[MUL_2]], [[MUL_1]]
+; CONSTRAINED-NEXT: [[REASS_ADD1:%.*]] = add i64 [[REASS_ADD]], [[MUL_3]]
+; CONSTRAINED-NEXT: [[REASS_MUL:%.*]] = mul i64 [[REASS_ADD1]], [[DELTA]]
+; CONSTRAINED-NEXT: store i64 [[REASS_MUL]], ptr [[ARRAYIDX_J_2]], align 8
+; CONSTRAINED-NEXT: br label [[FOR_COND]]
+; CONSTRAINED: for.end:
+; CONSTRAINED-NEXT: ret void
+;
+entry:
+ br label %for.cond
+
+for.cond:
+ %j = phi i32 [ 0, %entry ], [ %add.j.1, %for.body ]
+ %cmp.not = icmp sgt i32 %j, %i
+ br i1 %cmp.not, label %for.end, label %for.body
+
+for.body:
+ %add.j.1 = add nuw nsw i32 %j, 1
+ %idxprom.j.1 = zext i32 %add.j.1 to i64
+ %arrayidx.j.1 = getelementptr inbounds i64, ptr %cells, i64 %idxprom.j.1
+ %cell.1 = load i64, ptr %arrayidx.j.1, align 8
+ %mul.1 = mul i64 %cell.1, %d1
+ %idxprom.j = zext i32 %j to i64
+ %arrayidx.j = getelementptr inbounds i64, ptr %cells, i64 %idxprom.j
+ %cell.2 = load i64, ptr %arrayidx.j, align 8
+ %mul.2 = mul i64 %cell.2, %d2
+ %add.j.2 = add nuw nsw i32 %j, 2
+ %idxprom.j.2 = zext i32 %add.j.2 to i64
+ %arrayidx.j.2 = getelementptr inbounds i64, ptr %cells, i64 %idxprom.j.2
+ %cell.3 = load i64, ptr %arrayidx.j.2, align 8
+ %mul.3 = mul i64 %cell.3, %d3
+ %reass.add = add i64 %mul.2, %mul.1
+ %reass.add1 = add i64 %reass.add, %mul.3
+ %reass.mul = mul i64 %reass.add1, %delta
+ store i64 %reass.mul, ptr %arrayidx.j.2, align 8
+ br label %for.cond
+
+for.end:
+ ret void
+}
+
+;
+; The following loop will not be modified by the LICM pass:
+;
+; int j;
+;
+; for (j = 0; j <= i; j++)
+; cells[j] = (d1 * cells[j + 1] + d2 * cells[j] +
+; cells[j] * cells[j + 1]) * delta;
+;
+; This case differs as one of the multiplications involves no invariants.
+;
+
+define void @innermost_loop_3d_reassociated_different(i32 %i, i64 %d1, i64 %d2, i64 %delta, ptr %cells) {
+; CHECK-LABEL: define void @innermost_loop_3d_reassociated_different
+; CHECK-SAME: (i32 [[I:%.*]], i64 [[D1:%.*]], i64 [[D2:%.*]], i64 [[DELTA:%.*]], ptr [[CELLS:%.*]]) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: br label [[FOR_COND:%.*]]
+; CHECK: for.cond:
+; CHECK-NEXT: [[J:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[ADD_J_1:%.*]], [[FOR_BODY:%.*]] ]
+; CHECK-NEXT: [[CMP_NOT:%.*]] = icmp sgt i32 [[J]], [[I]]
+; CHECK-NEXT: br i1 [[CMP_NOT]], label [[FOR_END:%.*]], label [[FOR_BODY]]
+; CHECK: for.body:
+; CHECK-NEXT: [[ADD_J_1]] = add nuw nsw i32 [[J]], 1
+; CHECK-NEXT: [[IDXPROM_J_1:%.*]] = zext i32 [[ADD_J_1]] to i64
+; CHECK-NEXT: [[ARRAYIDX_J_1:%.*]] = getelementptr inbounds i64, ptr [[CELLS]], i64 [[IDXPROM_J_1]]
+; CHECK-NEXT: [[CELL_1:%.*]] = load i64, ptr [[ARRAYIDX_J_1]], align 8
+; CHECK-NEXT: [[IDXPROM_J_2:%.*]] = zext i32 [[ADD_J_1]] to i64
+; CHECK-NEXT: [[ARRAYIDX_J_2:%.*]] = getelementptr inbounds i64, ptr [[CELLS]], i64 [[IDXPROM_J_2]]
+; CHECK-NEXT: [[CELL_2:%.*]] = load i64, ptr [[ARRAYIDX_J_2]], align 8
+; CHECK-NEXT: [[CELL_3:%.*]] = load i64, ptr [[ARRAYIDX_J_2]], align 8
+; CHECK-NEXT: [[IDXPROM_J:%.*]] = zext i32 [[J]] to i64
+; CHECK-NEXT: [[ARRAYIDX_J:%.*]] = getelementptr inbounds i64, ptr [[CELLS]], i64 [[IDXPROM_J]]
+; CHECK-NEXT: [[CELL_4:%.*]] = load i64, ptr [[ARRAYIDX_J]], align 8
+; CHECK-NEXT: [[MUL_1:%.*]] = mul i64 [[CELL_1]], [[D1]]
+; CHECK-NEXT: [[MUL_2:%.*]] = mul i64 [[CELL_4]], [[D2]]
+; CHECK-NEXT: [[EXTRA_MUL:%.*]] = mul i64 [[CELL_3]], [[CELL_2]]
+; CHECK-NEXT: [[REASS_ADD:%.*]] = add i64 [[EXTRA_MUL]], [[MUL_1]]
+; CHECK-NEXT: [[EXTRA_ADD:%.*]] = add i64 [[REASS_ADD]], [[MUL_2]]
+; CHECK-NEXT: [[REASS_MUL:%.*]] = mul i64 [[EXTRA_ADD]], [[DELTA]]
+; CHECK-NEXT: store i64 [[REASS_MUL]], ptr [[ARRAYIDX_J]], align 8
+; CHECK-NEXT: br label [[FOR_COND]]
+; CHECK: for.end:
+; CHECK-NEXT: ret void
+;
+entry:
+ br label %for.cond
+
+for.cond:
+ %j = phi i32 [ 0, %entry ], [ %add.j.1, %for.body ]
+ %cmp.not = icmp sgt i32 %j, %i
+ br i1 %cmp.not, label %for.end, label %for.body
+
+for.body:
+ %add.j.1 = add nuw nsw i32 %j, 1
+ %idxprom.j.1 = zext i32 %add.j.1 to i64
+ %arrayidx.j.1 = getelementptr inbounds i64, ptr %cells, i64 %idxprom.j.1
+ %cell.1 = load i64, ptr %arrayidx.j.1, align 8
+ %idxprom.j.2 = zext i32 %add.j.1 to i64
+ %arrayidx.j.2 = getelementptr inbounds i64, ptr %cells, i64 %idxprom.j.2
+ %cell.2 = load i64, ptr %arrayidx.j.2, align 8
+ %idxprom.j.3 = zext i32 %add.j.1 to i64
+ %cell.3 = load i64, ptr %arrayidx.j.2, align 8
+ %idxprom.j = zext i32 %j to i64
+ %arrayidx.j = getelementptr inbounds i64, ptr %cells, i64 %idxprom.j
+ %cell.4 = load i64, ptr %arrayidx.j, align 8
+ %mul.1 = mul i64 %cell.1, %d1
+ %mul.2 = mul i64 %cell.4, %d2
+ %extra.mul = mul i64 %cell.3, %cell.2
+ %reass.add = add i64 %extra.mul, %mul.1
+ %extra.add = add i64 %reass.add, %mul.2
+ %reass.mul = mul i64 %extra.add, %delta
+ store i64 %reass.mul, ptr %arrayidx.j, align 8
+ br label %for.cond
+
+for.end:
+ ret void
+}
diff --git a/llvm/test/Transforms/LICM/sink-foldable.ll b/llvm/test/Transforms/LICM/sink-foldable.ll
index bf2cc77..38577a5 100644
--- a/llvm/test/Transforms/LICM/sink-foldable.ll
+++ b/llvm/test/Transforms/LICM/sink-foldable.ll
@@ -3,7 +3,7 @@
; RUN: opt < %s -passes=licm -S | FileCheck %s
-target triple = "aarch64--linux-gnueabi"
+target triple = "aarch64"
define ptr @test1(i32 %j, ptr readonly %P, ptr readnone %Q) {
; CHECK-LABEL: @test1(
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/arbitrary-induction-step.ll b/llvm/test/Transforms/LoopVectorize/AArch64/arbitrary-induction-step.ll
index cb9ba1b..22aaa56 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/arbitrary-induction-step.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/arbitrary-induction-step.ll
@@ -2,7 +2,7 @@
; RUN: opt -S < %s -passes=loop-vectorize -force-vector-interleave=1 -force-vector-width=2 | FileCheck %s --check-prefix=FORCE-VEC
target datalayout = "e-m:e-i64:64-i128:128-n32:64-S128"
-target triple = "aarch64--linux-gnueabi"
+target triple = "aarch64"
; Test integer induction variable of step 2:
; for (int i = 0; i < 1024; i+=2) {
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/backedge-overflow.ll b/llvm/test/Transforms/LoopVectorize/AArch64/backedge-overflow.ll
index c47a630..4f71959 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/backedge-overflow.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/backedge-overflow.ll
@@ -1,4 +1,4 @@
-; RUN: opt -mtriple=aarch64--linux-gnueabi -passes=loop-vectorize -force-vector-width=4 -force-vector-interleave=1 < %s -S | FileCheck %s
+; RUN: opt -mtriple=aarch64 -passes=loop-vectorize -force-vector-width=4 -force-vector-interleave=1 < %s -S | FileCheck %s
; The following tests contain loops for which SCEV cannot determine the backedge
; taken count. This is because the backedge taken condition is produced by an
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/interleaved_cost.ll b/llvm/test/Transforms/LoopVectorize/AArch64/interleaved_cost.ll
index bf9146b..7879872 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/interleaved_cost.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/interleaved_cost.ll
@@ -5,7 +5,7 @@
; REQUIRES: asserts
target datalayout = "e-m:e-i64:64-i128:128-n32:64-S128"
-target triple = "aarch64--linux-gnueabi"
+target triple = "aarch64"
%i8.2 = type {i8, i8}
define void @i8_factor_2(ptr %data, i64 %n) {
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/low_trip_memcheck_cost.ll b/llvm/test/Transforms/LoopVectorize/AArch64/low_trip_memcheck_cost.ll
index 8a796bb..800c55d 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/low_trip_memcheck_cost.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/low_trip_memcheck_cost.ll
@@ -177,6 +177,43 @@ outer.exit:
}
+define void @outer_pgo_minus1(ptr nocapture noundef %a, ptr nocapture noundef readonly %b, i64 noundef %m, i64 noundef %n) {
+; CHECK-LABEL: LV: Checking a loop in 'outer_pgo_minus1'
+; CHECK: Calculating cost of runtime checks:
+; CHECK-NOT: We expect runtime memory checks to be hoisted out of the outer loop. Cost reduced
+; CHECK: Total cost of runtime checks: 6
+; CHECK-NEXT: LV: Minimum required TC for runtime checks to be profitable:16
+entry:
+ br label %outer.loop
+
+outer.loop:
+ %outer.iv = phi i64 [ %outer.iv.next, %inner.exit ], [ 0, %entry ]
+ %mul.us = mul nsw i64 %outer.iv, %n
+ br label %inner.loop
+
+inner.loop:
+ %inner.iv = phi i64 [ 0, %outer.loop ], [ %inner.iv.next, %inner.loop ]
+ %add.us = add nuw nsw i64 %inner.iv, %mul.us
+ %arrayidx.us = getelementptr inbounds i8, ptr %b, i64 %add.us
+ %0 = load i8, ptr %arrayidx.us, align 1
+ %arrayidx7.us = getelementptr inbounds i8, ptr %a, i64 %add.us
+ %1 = load i8, ptr %arrayidx7.us, align 1
+ %add9.us = add i8 %1, %0
+ store i8 %add9.us, ptr %arrayidx7.us, align 1
+ %inner.iv.next = add nuw nsw i64 %inner.iv, 1
+ %exitcond.not = icmp eq i64 %inner.iv.next, %n
+ br i1 %exitcond.not, label %inner.exit, label %inner.loop
+
+inner.exit:
+ %outer.iv.next = add nuw nsw i64 %outer.iv, 1
+ %exitcond26.not = icmp eq i64 %outer.iv.next, %m
+ br i1 %exitcond26.not, label %outer.exit, label %outer.loop, !prof !1
+
+outer.exit:
+ ret void
+}
+
+
define void @outer_known_tc3_full_range_checks(ptr nocapture noundef %dst, ptr nocapture noundef readonly %src, i64 noundef %n) {
; CHECK-LABEL: LV: Checking a loop in 'outer_known_tc3_full_range_checks'
; CHECK: Calculating cost of runtime checks:
@@ -215,3 +252,4 @@ outer.exit:
!0 = !{!"branch_weights", i32 10, i32 20}
+!1 = !{!"branch_weights", i32 1, i32 -1}
diff --git a/llvm/test/Transforms/LoopVectorize/X86/amdlibm-calls-finite.ll b/llvm/test/Transforms/LoopVectorize/X86/amdlibm-calls-finite.ll
new file mode 100644
index 0000000..54bb935
--- /dev/null
+++ b/llvm/test/Transforms/LoopVectorize/X86/amdlibm-calls-finite.ll
@@ -0,0 +1,332 @@
+; RUN: opt -vector-library=AMDLIBM -passes=inject-tli-mappings,loop-vectorize -S < %s | FileCheck %s
+
+; Test to verify that when math headers are built with
+; __FINITE_MATH_ONLY__ enabled, causing use of __<func>_finite
+; function versions, vectorization can map these to vector versions.
+
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+declare float @__expf_finite(float) #0
+
+; CHECK-LABEL: @exp_f32
+; CHECK: <4 x float> @amd_vrs4_expf
+; CHECK: ret
+define void @exp_f32(ptr nocapture %varray) {
+entry:
+ br label %for.body
+
+for.body: ; preds = %for.body, %entry
+ %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
+ %tmp = trunc i64 %indvars.iv to i32
+ %conv = sitofp i32 %tmp to float
+ %call = tail call fast float @__expf_finite(float %conv)
+ %arrayidx = getelementptr inbounds float, ptr %varray, i64 %indvars.iv
+ store float %call, ptr %arrayidx, align 4
+ %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+ %exitcond = icmp eq i64 %indvars.iv.next, 1000
+ br i1 %exitcond, label %for.end, label %for.body, !llvm.loop !1
+
+for.end: ; preds = %for.body
+ ret void
+}
+
+!1 = distinct !{!1, !2, !3}
+!2 = !{!"llvm.loop.vectorize.width", i32 4}
+!3 = !{!"llvm.loop.vectorize.enable", i1 true}
+
+
+declare double @__exp_finite(double) #0
+
+; CHECK-LABEL: @exp_f64
+; CHECK: <4 x double> @amd_vrd4_exp
+; CHECK: ret
+define void @exp_f64(ptr nocapture %varray) {
+entry:
+ br label %for.body
+
+for.body: ; preds = %for.body, %entry
+ %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
+ %tmp = trunc i64 %indvars.iv to i32
+ %conv = sitofp i32 %tmp to double
+ %call = tail call fast double @__exp_finite(double %conv)
+ %arrayidx = getelementptr inbounds double, ptr %varray, i64 %indvars.iv
+ store double %call, ptr %arrayidx, align 4
+ %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+ %exitcond = icmp eq i64 %indvars.iv.next, 1000
+ br i1 %exitcond, label %for.end, label %for.body, !llvm.loop !11
+
+for.end: ; preds = %for.body
+ ret void
+}
+
+!11 = distinct !{!11, !12, !13}
+!12 = !{!"llvm.loop.vectorize.width", i32 4}
+!13 = !{!"llvm.loop.vectorize.enable", i1 true}
+
+
+
+
+declare float @__logf_finite(float) #0
+
+; CHECK-LABEL: @log_f32
+; CHECK: <4 x float> @amd_vrs4_logf
+; CHECK: ret
+define void @log_f32(ptr nocapture %varray) {
+entry:
+ br label %for.body
+
+for.body: ; preds = %for.body, %entry
+ %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
+ %tmp = trunc i64 %indvars.iv to i32
+ %conv = sitofp i32 %tmp to float
+ %call = tail call fast float @__logf_finite(float %conv)
+ %arrayidx = getelementptr inbounds float, ptr %varray, i64 %indvars.iv
+ store float %call, ptr %arrayidx, align 4
+ %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+ %exitcond = icmp eq i64 %indvars.iv.next, 1000
+ br i1 %exitcond, label %for.end, label %for.body, !llvm.loop !21
+
+for.end: ; preds = %for.body
+ ret void
+}
+
+!21 = distinct !{!21, !22, !23}
+!22 = !{!"llvm.loop.vectorize.width", i32 4}
+!23 = !{!"llvm.loop.vectorize.enable", i1 true}
+
+
+declare double @__log_finite(double) #0
+
+; CHECK-LABEL: @log_f64
+; CHECK: <4 x double> @amd_vrd4_log
+; CHECK: ret
+define void @log_f64(ptr nocapture %varray) {
+entry:
+ br label %for.body
+
+for.body: ; preds = %for.body, %entry
+ %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
+ %tmp = trunc i64 %indvars.iv to i32
+ %conv = sitofp i32 %tmp to double
+ %call = tail call fast double @__log_finite(double %conv)
+ %arrayidx = getelementptr inbounds double, ptr %varray, i64 %indvars.iv
+ store double %call, ptr %arrayidx, align 4
+ %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+ %exitcond = icmp eq i64 %indvars.iv.next, 1000
+ br i1 %exitcond, label %for.end, label %for.body, !llvm.loop !31
+
+for.end: ; preds = %for.body
+ ret void
+}
+
+!31 = distinct !{!31, !32, !33}
+!32 = !{!"llvm.loop.vectorize.width", i32 4}
+!33 = !{!"llvm.loop.vectorize.enable", i1 true}
+
+
+declare float @__powf_finite(float, float) #0
+
+; CHECK-LABEL: @pow_f32
+; CHECK: <4 x float> @amd_vrs4_powf
+; CHECK: ret
+define void @pow_f32(ptr nocapture %varray, ptr nocapture readonly %exp) {
+entry:
+ br label %for.body
+
+for.body: ; preds = %for.body, %entry
+ %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
+ %tmp = trunc i64 %indvars.iv to i32
+ %conv = sitofp i32 %tmp to float
+ %arrayidx = getelementptr inbounds float, ptr %exp, i64 %indvars.iv
+ %tmp1 = load float, ptr %arrayidx, align 4
+ %tmp2 = tail call fast float @__powf_finite(float %conv, float %tmp1)
+ %arrayidx2 = getelementptr inbounds float, ptr %varray, i64 %indvars.iv
+ store float %tmp2, ptr %arrayidx2, align 4
+ %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+ %exitcond = icmp eq i64 %indvars.iv.next, 1000
+ br i1 %exitcond, label %for.end, label %for.body, !llvm.loop !41
+
+for.end: ; preds = %for.body
+ ret void
+}
+
+!41 = distinct !{!41, !42, !43}
+!42 = !{!"llvm.loop.vectorize.width", i32 4}
+!43 = !{!"llvm.loop.vectorize.enable", i1 true}
+
+
+declare double @__pow_finite(double, double) #0
+
+; CHECK-LABEL: @pow_f64
+; CHECK: <4 x double> @amd_vrd4_pow
+; CHECK: ret
+define void @pow_f64(ptr nocapture %varray, ptr nocapture readonly %exp) {
+entry:
+ br label %for.body
+
+for.body: ; preds = %for.body, %entry
+ %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
+ %tmp = trunc i64 %indvars.iv to i32
+ %conv = sitofp i32 %tmp to double
+ %arrayidx = getelementptr inbounds double, ptr %exp, i64 %indvars.iv
+ %tmp1 = load double, ptr %arrayidx, align 4
+ %tmp2 = tail call fast double @__pow_finite(double %conv, double %tmp1)
+ %arrayidx2 = getelementptr inbounds double, ptr %varray, i64 %indvars.iv
+ store double %tmp2, ptr %arrayidx2, align 4
+ %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+ %exitcond = icmp eq i64 %indvars.iv.next, 1000
+ br i1 %exitcond, label %for.end, label %for.body, !llvm.loop !51
+
+for.end: ; preds = %for.body
+ ret void
+}
+
+!51 = distinct !{!51, !52, !53}
+!52 = !{!"llvm.loop.vectorize.width", i32 4}
+!53 = !{!"llvm.loop.vectorize.enable", i1 true}
+
+declare float @__exp2f_finite(float) #0
+
+define void @exp2f_finite(ptr nocapture %varray) {
+; CHECK-LABEL: @exp2f_finite(
+; CHECK: call <4 x float> @amd_vrs4_exp2f(<4 x float> %{{.*}})
+; CHECK: ret void
+;
+entry:
+ br label %for.body
+
+for.body:
+ %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
+ %tmp = trunc i64 %iv to i32
+ %conv = sitofp i32 %tmp to float
+ %call = tail call float @__exp2f_finite(float %conv)
+ %arrayidx = getelementptr inbounds float, ptr %varray, i64 %iv
+ store float %call, ptr %arrayidx, align 4
+ %iv.next = add nuw nsw i64 %iv, 1
+ %exitcond = icmp eq i64 %iv.next, 1000
+ br i1 %exitcond, label %for.end, label %for.body, !llvm.loop !61
+
+for.end:
+ ret void
+}
+
+!61 = distinct !{!61, !62, !63}
+!62 = !{!"llvm.loop.vectorize.width", i32 4}
+!63 = !{!"llvm.loop.vectorize.enable", i1 true}
+
+declare double @__exp2_finite(double) #0
+
+define void @exp2_finite(ptr nocapture %varray) {
+; CHECK-LABEL: @exp2_finite(
+; CHECK: call <4 x double> @amd_vrd4_exp2(<4 x double> {{.*}})
+; CHECK: ret void
+;
+entry:
+ br label %for.body
+
+for.body:
+ %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
+ %tmp = trunc i64 %iv to i32
+ %conv = sitofp i32 %tmp to double
+ %call = tail call double @__exp2_finite(double %conv)
+ %arrayidx = getelementptr inbounds double, ptr %varray, i64 %iv
+ store double %call, ptr %arrayidx, align 4
+ %iv.next = add nuw nsw i64 %iv, 1
+ %exitcond = icmp eq i64 %iv.next, 1000
+ br i1 %exitcond, label %for.end, label %for.body, !llvm.loop !71
+
+for.end:
+ ret void
+}
+
+!71 = distinct !{!71, !72, !73}
+!72 = !{!"llvm.loop.vectorize.width", i32 4}
+!73 = !{!"llvm.loop.vectorize.enable", i1 true}
+
+declare float @__log2f_finite(float) #0
+
+; CHECK-LABEL: @log2_f32
+; CHECK: <4 x float> @amd_vrs4_log2f
+; CHECK: ret
+define void @log2_f32(ptr nocapture %varray) {
+entry:
+ br label %for.body
+
+for.body: ; preds = %for.body, %entry
+ %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
+ %tmp = trunc i64 %indvars.iv to i32
+ %conv = sitofp i32 %tmp to float
+ %call = tail call fast float @__log2f_finite(float %conv)
+ %arrayidx = getelementptr inbounds float, ptr %varray, i64 %indvars.iv
+ store float %call, ptr %arrayidx, align 4
+ %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+ %exitcond = icmp eq i64 %indvars.iv.next, 1000
+ br i1 %exitcond, label %for.end, label %for.body, !llvm.loop !21
+
+for.end: ; preds = %for.body
+ ret void
+}
+
+!81 = distinct !{!21, !22, !23}
+!82 = !{!"llvm.loop.vectorize.width", i32 4}
+!83 = !{!"llvm.loop.vectorize.enable", i1 true}
+
+
+declare double @__log2_finite(double) #0
+
+; CHECK-LABEL: @log2_f64
+; CHECK: <4 x double> @amd_vrd4_log2
+; CHECK: ret
+define void @log2_f64(ptr nocapture %varray) {
+entry:
+ br label %for.body
+
+for.body: ; preds = %for.body, %entry
+ %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
+ %tmp = trunc i64 %indvars.iv to i32
+ %conv = sitofp i32 %tmp to double
+ %call = tail call fast double @__log2_finite(double %conv)
+ %arrayidx = getelementptr inbounds double, ptr %varray, i64 %indvars.iv
+ store double %call, ptr %arrayidx, align 4
+ %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+ %exitcond = icmp eq i64 %indvars.iv.next, 1000
+ br i1 %exitcond, label %for.end, label %for.body, !llvm.loop !31
+
+for.end: ; preds = %for.body
+ ret void
+}
+
+!91 = distinct !{!31, !32, !33}
+!92 = !{!"llvm.loop.vectorize.width", i32 4}
+!93 = !{!"llvm.loop.vectorize.enable", i1 true}
+
+declare float @__log10f_finite(float) #0
+
+; CHECK-LABEL: @log10_f32
+; CHECK: <4 x float> @amd_vrs4_log10f
+; CHECK: ret
+define void @log10_f32(ptr nocapture %varray) {
+entry:
+ br label %for.body
+
+for.body: ; preds = %for.body, %entry
+ %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
+ %tmp = trunc i64 %indvars.iv to i32
+ %conv = sitofp i32 %tmp to float
+ %call = tail call fast float @__log10f_finite(float %conv)
+ %arrayidx = getelementptr inbounds float, ptr %varray, i64 %indvars.iv
+ store float %call, ptr %arrayidx, align 4
+ %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+ %exitcond = icmp eq i64 %indvars.iv.next, 1000
+ br i1 %exitcond, label %for.end, label %for.body, !llvm.loop !21
+
+for.end: ; preds = %for.body
+ ret void
+}
+
+!101 = distinct !{!21, !22, !23}
+!102 = !{!"llvm.loop.vectorize.width", i32 4}
+!103 = !{!"llvm.loop.vectorize.enable", i1 true}
+
+
diff --git a/llvm/test/Transforms/LoopVectorize/X86/amdlibm-calls.ll b/llvm/test/Transforms/LoopVectorize/X86/amdlibm-calls.ll
new file mode 100644
index 0000000..8d2820a
--- /dev/null
+++ b/llvm/test/Transforms/LoopVectorize/X86/amdlibm-calls.ll
@@ -0,0 +1,869 @@
+; RUN: opt -vector-library=AMDLIBM -passes=inject-tli-mappings,loop-vectorize -force-vector-width=4 -force-vector-interleave=1 -mattr=avx -S < %s | FileCheck %s
+; RUN: opt -vector-library=AMDLIBM -passes=inject-tli-mappings,loop-vectorize -force-vector-width=8 -force-vector-interleave=1 -mattr=+avx512f -S < %s | FileCheck %s --check-prefix=CHECK-AVX512-VF8
+; RUN: opt -vector-library=AMDLIBM -passes=inject-tli-mappings,loop-vectorize -force-vector-width=16 -force-vector-interleave=1 -mattr=+avx512f -S < %s | FileCheck %s --check-prefix=CHECK-AVX512-VF16
+
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+declare double @sin(double) #0
+declare float @sinf(float) #0
+declare double @llvm.sin.f64(double) #0
+declare float @llvm.sin.f32(float) #0
+
+declare double @cos(double) #0
+declare float @cosf(float) #0
+declare double @llvm.cos.f64(double) #0
+declare float @llvm.cos.f32(float) #0
+
+declare double @pow(double, double) #0
+declare float @powf(float, float) #0
+declare double @llvm.pow.f64(double, double) #0
+declare float @llvm.pow.f32(float, float) #0
+
+declare double @exp(double) #0
+declare float @expf(float) #0
+declare double @llvm.exp.f64(double) #0
+declare float @llvm.exp.f32(float) #0
+
+declare double @log(double) #0
+declare float @logf(float) #0
+declare double @llvm.log.f64(double) #0
+declare float @llvm.log.f32(float) #0
+
+declare double @log2(double) #0
+declare float @log2f(float) #0
+declare double @llvm.log2.f64(double) #0
+declare float @llvm.log2.f32(float) #0
+
+declare double @log10(double) #0
+declare float @log10f(float) #0
+declare double @llvm.log10.f64(double) #0
+declare float @llvm.log10.f32(float) #0
+
+declare double @sqrt(double) #0
+declare float @sqrtf(float) #0
+
+declare double @exp2(double) #0
+declare float @exp2f(float) #0
+declare double @llvm.exp2.f64(double) #0
+declare float @llvm.exp2.f32(float) #0
+
+define void @sin_f64(ptr nocapture %varray) {
+; CHECK-LABEL: @sin_f64(
+; CHECK: [[TMP5:%.*]] = call <4 x double> @amd_vrd4_sin(<4 x double> [[TMP4:%.*]])
+; CHECK: ret void
+;
+; CHECK-AVX512-VF8-LABEL: @sin_f64(
+; CHECK-AVX512-VF8: [[TMP5:%.*]] = call <8 x double> @amd_vrd8_sin(<8 x double> [[TMP4:%.*]])
+; CHECK-AVX512-VF8: ret void
+;
+entry:
+ br label %for.body
+
+for.body:
+ %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
+ %tmp = trunc i64 %iv to i32
+ %conv = sitofp i32 %tmp to double
+ %call = tail call double @sin(double %conv)
+ %arrayidx = getelementptr inbounds double, ptr %varray, i64 %iv
+ store double %call, ptr %arrayidx, align 4
+ %iv.next = add nuw nsw i64 %iv, 1
+ %exitcond = icmp eq i64 %iv.next, 1000
+ br i1 %exitcond, label %for.end, label %for.body
+
+for.end:
+ ret void
+}
+
+define void @sin_f32(ptr nocapture %varray) {
+; CHECK-LABEL: @sin_f32(
+; CHECK: [[TMP5:%.*]] = call <4 x float> @amd_vrs4_sinf(<4 x float> [[TMP4:%.*]])
+; CHECK: ret void
+;
+; CHECK-AVX512-VF16-LABEL: @sin_f32(
+; CHECK-AVX512-VF16: [[TMP5:%.*]] = call <16 x float> @amd_vrs16_sinf(<16 x float> [[TMP4:%.*]])
+; CHECK-AVX512-VF16: ret void
+;
+entry:
+ br label %for.body
+
+for.body:
+ %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
+ %tmp = trunc i64 %iv to i32
+ %conv = sitofp i32 %tmp to float
+ %call = tail call float @sinf(float %conv)
+ %arrayidx = getelementptr inbounds float, ptr %varray, i64 %iv
+ store float %call, ptr %arrayidx, align 4
+ %iv.next = add nuw nsw i64 %iv, 1
+ %exitcond = icmp eq i64 %iv.next, 1000
+ br i1 %exitcond, label %for.end, label %for.body
+
+for.end:
+ ret void
+}
+
+define void @sin_f64_intrinsic(ptr nocapture %varray) {
+; CHECK-LABEL: @sin_f64_intrinsic(
+; CHECK: [[TMP5:%.*]] = call <4 x double> @amd_vrd4_sin(<4 x double> [[TMP4:%.*]])
+; CHECK: ret void
+;
+; CHECK-AVX512-VF8-LABEL: @sin_f64_intrinsic(
+; CHECK-AVX512-VF8: [[TMP5:%.*]] = call <8 x double> @amd_vrd8_sin(<8 x double> [[TMP4:%.*]])
+; CHECK-AVX512-VF8: ret void
+;
+entry:
+ br label %for.body
+
+for.body:
+ %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
+ %tmp = trunc i64 %iv to i32
+ %conv = sitofp i32 %tmp to double
+ %call = tail call double @llvm.sin.f64(double %conv)
+ %arrayidx = getelementptr inbounds double, ptr %varray, i64 %iv
+ store double %call, ptr %arrayidx, align 4
+ %iv.next = add nuw nsw i64 %iv, 1
+ %exitcond = icmp eq i64 %iv.next, 1000
+ br i1 %exitcond, label %for.end, label %for.body
+
+for.end:
+ ret void
+}
+
+define void @sin_f32_intrinsic(ptr nocapture %varray) {
+; CHECK-LABEL: @sin_f32_intrinsic(
+; CHECK: [[TMP5:%.*]] = call <4 x float> @amd_vrs4_sinf(<4 x float> [[TMP4:%.*]])
+; CHECK: ret void
+;
+; CHECK-AVX512-VF16-LABEL: @sin_f32_intrinsic(
+; CHECK-AVX512-VF16: [[TMP5:%.*]] = call <16 x float> @amd_vrs16_sinf(<16 x float> [[TMP4:%.*]])
+; CHECK-AVX512-VF16: ret void
+;
+entry:
+ br label %for.body
+
+for.body:
+ %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
+ %tmp = trunc i64 %iv to i32
+ %conv = sitofp i32 %tmp to float
+ %call = tail call float @llvm.sin.f32(float %conv)
+ %arrayidx = getelementptr inbounds float, ptr %varray, i64 %iv
+ store float %call, ptr %arrayidx, align 4
+ %iv.next = add nuw nsw i64 %iv, 1
+ %exitcond = icmp eq i64 %iv.next, 1000
+ br i1 %exitcond, label %for.end, label %for.body
+
+for.end:
+ ret void
+}
+
+define void @cos_f64(ptr nocapture %varray) {
+; CHECK-LABEL: @cos_f64(
+; CHECK: [[TMP5:%.*]] = call <4 x double> @amd_vrd4_cos(<4 x double> [[TMP4:%.*]])
+; CHECK: ret void
+;
+; CHECK-AVX512-VF8-LABEL: @cos_f64(
+; CHECK-AVX512-VF8: [[TMP5:%.*]] = call <8 x double> @amd_vrd8_cos(<8 x double> [[TMP4:%.*]])
+; CHECK-AVX512-VF8: ret void
+;
+entry:
+ br label %for.body
+
+for.body:
+ %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
+ %tmp = trunc i64 %iv to i32
+ %conv = sitofp i32 %tmp to double
+ %call = tail call double @cos(double %conv)
+ %arrayidx = getelementptr inbounds double, ptr %varray, i64 %iv
+ store double %call, ptr %arrayidx, align 4
+ %iv.next = add nuw nsw i64 %iv, 1
+ %exitcond = icmp eq i64 %iv.next, 1000
+ br i1 %exitcond, label %for.end, label %for.body
+
+for.end:
+ ret void
+}
+
+define void @cos_f32(ptr nocapture %varray) {
+; CHECK-LABEL: @cos_f32(
+; CHECK: [[TMP5:%.*]] = call <4 x float> @amd_vrs4_cosf(<4 x float> [[TMP4:%.*]])
+; CHECK: ret void
+;
+; CHECK-AVX512-VF16-LABEL: @cos_f32(
+; CHECK-AVX512-VF16: [[TMP5:%.*]] = call <16 x float> @amd_vrs16_cosf(<16 x float> [[TMP4:%.*]])
+; CHECK-AVX512-VF16: ret void
+;
+entry:
+ br label %for.body
+
+for.body:
+ %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
+ %tmp = trunc i64 %iv to i32
+ %conv = sitofp i32 %tmp to float
+ %call = tail call float @cosf(float %conv)
+ %arrayidx = getelementptr inbounds float, ptr %varray, i64 %iv
+ store float %call, ptr %arrayidx, align 4
+ %iv.next = add nuw nsw i64 %iv, 1
+ %exitcond = icmp eq i64 %iv.next, 1000
+ br i1 %exitcond, label %for.end, label %for.body
+
+for.end:
+ ret void
+}
+
+define void @cos_f64_intrinsic(ptr nocapture %varray) {
+; CHECK-LABEL: @cos_f64_intrinsic(
+; CHECK: [[TMP5:%.*]] = call <4 x double> @amd_vrd4_cos(<4 x double> [[TMP4:%.*]])
+; CHECK: ret void
+;
+; CHECK-AVX512-VF8-LABEL: @cos_f64_intrinsic(
+; CHECK-AVX512-VF8: [[TMP5:%.*]] = call <8 x double> @amd_vrd8_cos(<8 x double> [[TMP4:%.*]])
+; CHECK-AVX512-VF8: ret void
+;
+entry:
+ br label %for.body
+
+for.body:
+ %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
+ %tmp = trunc i64 %iv to i32
+ %conv = sitofp i32 %tmp to double
+ %call = tail call double @llvm.cos.f64(double %conv)
+ %arrayidx = getelementptr inbounds double, ptr %varray, i64 %iv
+ store double %call, ptr %arrayidx, align 4
+ %iv.next = add nuw nsw i64 %iv, 1
+ %exitcond = icmp eq i64 %iv.next, 1000
+ br i1 %exitcond, label %for.end, label %for.body
+
+for.end:
+ ret void
+}
+
+define void @cos_f32_intrinsic(ptr nocapture %varray) {
+; CHECK-LABEL: @cos_f32_intrinsic(
+; CHECK: [[TMP5:%.*]] = call <4 x float> @amd_vrs4_cosf(<4 x float> [[TMP4:%.*]])
+; CHECK: ret void
+;
+; CHECK-AVX512-VF16-LABEL: @cos_f32_intrinsic(
+; CHECK-AVX512-VF16: [[TMP5:%.*]] = call <16 x float> @amd_vrs16_cosf(<16 x float> [[TMP4:%.*]])
+; CHECK-AVX512-VF16: ret void
+;
+entry:
+ br label %for.body
+
+for.body:
+ %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
+ %tmp = trunc i64 %iv to i32
+ %conv = sitofp i32 %tmp to float
+ %call = tail call float @llvm.cos.f32(float %conv)
+ %arrayidx = getelementptr inbounds float, ptr %varray, i64 %iv
+ store float %call, ptr %arrayidx, align 4
+ %iv.next = add nuw nsw i64 %iv, 1
+ %exitcond = icmp eq i64 %iv.next, 1000
+ br i1 %exitcond, label %for.end, label %for.body
+
+for.end:
+ ret void
+}
+
+define void @pow_f64(ptr nocapture %varray, ptr nocapture readonly %exp) {
+; CHECK-LABEL: @pow_f64(
+; CHECK: [[TMP8:%.*]] = call <4 x double> @amd_vrd4_pow(<4 x double> [[TMP4:%.*]], <4 x double> [[WIDE_LOAD:%.*]])
+; CHECK: ret void
+;
+; CHECK-AVX512-VF8-LABEL: @pow_f64(
+; CHECK-AVX512-VF8: [[TMP8:%.*]] = call <8 x double> @amd_vrd8_pow(<8 x double> [[TMP4:%.*]], <8 x double> [[WIDE_LOAD:%.*]])
+; CHECK-AVX512-VF8: ret void
+;
+entry:
+ br label %for.body
+
+for.body:
+ %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
+ %tmp = trunc i64 %iv to i32
+ %conv = sitofp i32 %tmp to double
+ %arrayidx = getelementptr inbounds double, ptr %exp, i64 %iv
+ %tmp1 = load double, ptr %arrayidx, align 4
+ %tmp2 = tail call double @pow(double %conv, double %tmp1)
+ %arrayidx2 = getelementptr inbounds double, ptr %varray, i64 %iv
+ store double %tmp2, ptr %arrayidx2, align 4
+ %iv.next = add nuw nsw i64 %iv, 1
+ %exitcond = icmp eq i64 %iv.next, 1000
+ br i1 %exitcond, label %for.end, label %for.body
+
+for.end:
+ ret void
+}
+
+define void @pow_f64_intrinsic(ptr nocapture %varray, ptr nocapture readonly %exp) {
+; CHECK-LABEL: @pow_f64_intrinsic(
+; CHECK: [[TMP8:%.*]] = call <4 x double> @amd_vrd4_pow(<4 x double> [[TMP4:%.*]], <4 x double> [[WIDE_LOAD:%.*]])
+; CHECK: ret void
+;
+; CHECK-AVX512-VF8-LABEL: @pow_f64_intrinsic(
+; CHECK-AVX512-VF8: [[TMP8:%.*]] = call <8 x double> @amd_vrd8_pow(<8 x double> [[TMP4:%.*]], <8 x double> [[WIDE_LOAD:%.*]])
+; CHECK-AVX512-VF8: ret void
+;
+entry:
+ br label %for.body
+
+for.body:
+ %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
+ %tmp = trunc i64 %iv to i32
+ %conv = sitofp i32 %tmp to double
+ %arrayidx = getelementptr inbounds double, ptr %exp, i64 %iv
+ %tmp1 = load double, ptr %arrayidx, align 4
+ %tmp2 = tail call double @llvm.pow.f64(double %conv, double %tmp1)
+ %arrayidx2 = getelementptr inbounds double, ptr %varray, i64 %iv
+ store double %tmp2, ptr %arrayidx2, align 4
+ %iv.next = add nuw nsw i64 %iv, 1
+ %exitcond = icmp eq i64 %iv.next, 1000
+ br i1 %exitcond, label %for.end, label %for.body
+
+for.end:
+ ret void
+}
+
+define void @pow_f32(ptr nocapture %varray, ptr nocapture readonly %exp) {
+; CHECK-LABEL: @pow_f32(
+; CHECK: [[TMP8:%.*]] = call <4 x float> @amd_vrs4_powf(<4 x float> [[TMP4:%.*]], <4 x float> [[WIDE_LOAD:%.*]])
+; CHECK: ret void
+;
+; CHECK-AVX512-VF16-LABEL: @pow_f32(
+; CHECK-AVX512-VF16: [[TMP8:%.*]] = call <16 x float> @amd_vrs16_powf(<16 x float> [[TMP4:%.*]], <16 x float> [[WIDE_LOAD:%.*]])
+; CHECK-AVX512-VF16: ret void
+;
+entry:
+ br label %for.body
+
+for.body:
+ %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
+ %tmp = trunc i64 %iv to i32
+ %conv = sitofp i32 %tmp to float
+ %arrayidx = getelementptr inbounds float, ptr %exp, i64 %iv
+ %tmp1 = load float, ptr %arrayidx, align 4
+ %tmp2 = tail call float @powf(float %conv, float %tmp1)
+ %arrayidx2 = getelementptr inbounds float, ptr %varray, i64 %iv
+ store float %tmp2, ptr %arrayidx2, align 4
+ %iv.next = add nuw nsw i64 %iv, 1
+ %exitcond = icmp eq i64 %iv.next, 1000
+ br i1 %exitcond, label %for.end, label %for.body
+
+for.end:
+ ret void
+}
+
+define void @pow_f32_intrinsic(ptr nocapture %varray, ptr nocapture readonly %exp) {
+; CHECK-LABEL: @pow_f32_intrinsic(
+; CHECK: [[TMP8:%.*]] = call <4 x float> @amd_vrs4_powf(<4 x float> [[TMP4:%.*]], <4 x float> [[WIDE_LOAD:%.*]])
+; CHECK: ret void
+;
+; CHECK-AVX512-VF16-LABEL: @pow_f32_intrinsic(
+; CHECK-AVX512-VF16: [[TMP8:%.*]] = call <16 x float> @amd_vrs16_powf(<16 x float> [[TMP4:%.*]], <16 x float> [[WIDE_LOAD:%.*]])
+; CHECK-AVX512-VF16: ret void
+;
+entry:
+ br label %for.body
+
+for.body:
+ %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
+ %tmp = trunc i64 %iv to i32
+ %conv = sitofp i32 %tmp to float
+ %arrayidx = getelementptr inbounds float, ptr %exp, i64 %iv
+ %tmp1 = load float, ptr %arrayidx, align 4
+ %tmp2 = tail call float @llvm.pow.f32(float %conv, float %tmp1)
+ %arrayidx2 = getelementptr inbounds float, ptr %varray, i64 %iv
+ store float %tmp2, ptr %arrayidx2, align 4
+ %iv.next = add nuw nsw i64 %iv, 1
+ %exitcond = icmp eq i64 %iv.next, 1000
+ br i1 %exitcond, label %for.end, label %for.body
+
+for.end:
+ ret void
+}
+
+define void @exp_f64(ptr nocapture %varray) {
+; CHECK-LABEL: @exp_f64(
+; CHECK: [[TMP5:%.*]] = call <4 x double> @amd_vrd4_exp(<4 x double> [[TMP4:%.*]])
+; CHECK: ret void
+;
+; CHECK-AVX512-VF8-LABEL: @exp_f64(
+; CHECK-AVX512-VF8: [[TMP5:%.*]] = call <8 x double> @amd_vrd8_exp(<8 x double> [[TMP4:%.*]])
+; CHECK-AVX512-VF8: ret void
+;
+entry:
+ br label %for.body
+
+for.body:
+ %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
+ %tmp = trunc i64 %iv to i32
+ %conv = sitofp i32 %tmp to double
+ %call = tail call double @exp(double %conv)
+ %arrayidx = getelementptr inbounds double, ptr %varray, i64 %iv
+ store double %call, ptr %arrayidx, align 4
+ %iv.next = add nuw nsw i64 %iv, 1
+ %exitcond = icmp eq i64 %iv.next, 1000
+ br i1 %exitcond, label %for.end, label %for.body
+
+for.end:
+ ret void
+}
+
+define void @exp_f32(ptr nocapture %varray) {
+; CHECK-LABEL: @exp_f32(
+; CHECK: [[TMP5:%.*]] = call <4 x float> @amd_vrs4_expf(<4 x float> [[TMP4:%.*]])
+; CHECK: ret void
+;
+; CHECK-AVX512-VF16-LABEL: @exp_f32(
+; CHECK-AVX512-VF16: [[TMP5:%.*]] = call <16 x float> @amd_vrs16_expf(<16 x float> [[TMP4:%.*]])
+; CHECK-AVX512-VF16: ret void
+;
+entry:
+ br label %for.body
+
+for.body:
+ %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
+ %tmp = trunc i64 %iv to i32
+ %conv = sitofp i32 %tmp to float
+ %call = tail call float @expf(float %conv)
+ %arrayidx = getelementptr inbounds float, ptr %varray, i64 %iv
+ store float %call, ptr %arrayidx, align 4
+ %iv.next = add nuw nsw i64 %iv, 1
+ %exitcond = icmp eq i64 %iv.next, 1000
+ br i1 %exitcond, label %for.end, label %for.body
+
+for.end:
+ ret void
+}
+
+define void @exp_f64_intrinsic(ptr nocapture %varray) {
+; CHECK-LABEL: @exp_f64_intrinsic(
+; CHECK: [[TMP5:%.*]] = call <4 x double> @amd_vrd4_exp(<4 x double> [[TMP4:%.*]])
+; CHECK: ret void
+;
+; CHECK-AVX512-VF8-LABEL: @exp_f64_intrinsic(
+; CHECK-AVX512-VF8: [[TMP5:%.*]] = call <8 x double> @amd_vrd8_exp(<8 x double> [[TMP4:%.*]])
+; CHECK-AVX512-VF8: ret void
+;
+entry:
+ br label %for.body
+
+for.body:
+ %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
+ %tmp = trunc i64 %iv to i32
+ %conv = sitofp i32 %tmp to double
+ %call = tail call double @llvm.exp.f64(double %conv)
+ %arrayidx = getelementptr inbounds double, ptr %varray, i64 %iv
+ store double %call, ptr %arrayidx, align 4
+ %iv.next = add nuw nsw i64 %iv, 1
+ %exitcond = icmp eq i64 %iv.next, 1000
+ br i1 %exitcond, label %for.end, label %for.body
+
+for.end:
+ ret void
+}
+
+define void @exp_f32_intrinsic(ptr nocapture %varray) {
+; CHECK-LABEL: @exp_f32_intrinsic(
+; CHECK: [[TMP5:%.*]] = call <4 x float> @amd_vrs4_expf(<4 x float> [[TMP4:%.*]])
+; CHECK: ret void
+;
+; CHECK-AVX512-VF16-LABEL: @exp_f32_intrinsic(
+; CHECK-AVX512-VF16: [[TMP5:%.*]] = call <16 x float> @amd_vrs16_expf(<16 x float> [[TMP4:%.*]])
+; CHECK-AVX512-VF16: ret void
+;
+entry:
+ br label %for.body
+
+for.body:
+ %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
+ %tmp = trunc i64 %iv to i32
+ %conv = sitofp i32 %tmp to float
+ %call = tail call float @llvm.exp.f32(float %conv)
+ %arrayidx = getelementptr inbounds float, ptr %varray, i64 %iv
+ store float %call, ptr %arrayidx, align 4
+ %iv.next = add nuw nsw i64 %iv, 1
+ %exitcond = icmp eq i64 %iv.next, 1000
+ br i1 %exitcond, label %for.end, label %for.body
+
+for.end:
+ ret void
+}
+
+define void @log_f64(ptr nocapture %varray) {
+; CHECK-LABEL: @log_f64(
+; CHECK: [[TMP5:%.*]] = call <4 x double> @amd_vrd4_log(<4 x double> [[TMP4:%.*]])
+; CHECK: ret void
+;
+; CHECK-AVX512-VF8-LABEL: @log_f64(
+; CHECK-AVX512-VF8: [[TMP5:%.*]] = call <8 x double> @amd_vrd8_log(<8 x double> [[TMP4:%.*]])
+; CHECK-AVX512-VF8: ret void
+;
+entry:
+ br label %for.body
+
+for.body:
+ %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
+ %tmp = trunc i64 %iv to i32
+ %conv = sitofp i32 %tmp to double
+ %call = tail call double @log(double %conv)
+ %arrayidx = getelementptr inbounds double, ptr %varray, i64 %iv
+ store double %call, ptr %arrayidx, align 4
+ %iv.next = add nuw nsw i64 %iv, 1
+ %exitcond = icmp eq i64 %iv.next, 1000
+ br i1 %exitcond, label %for.end, label %for.body
+
+for.end:
+ ret void
+}
+
+define void @log_f32(ptr nocapture %varray) {
+; CHECK-LABEL: @log_f32(
+; CHECK: [[TMP5:%.*]] = call <4 x float> @amd_vrs4_logf(<4 x float> [[TMP4:%.*]])
+; CHECK: ret void
+;
+; CHECK-AVX512-VF16-LABEL: @log_f32(
+; CHECK-AVX512-VF16: [[TMP5:%.*]] = call <16 x float> @amd_vrs16_logf(<16 x float> [[TMP4:%.*]])
+; CHECK-AVX512-VF16: ret void
+;
+entry:
+ br label %for.body
+
+for.body:
+ %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
+ %tmp = trunc i64 %iv to i32
+ %conv = sitofp i32 %tmp to float
+ %call = tail call float @logf(float %conv)
+ %arrayidx = getelementptr inbounds float, ptr %varray, i64 %iv
+ store float %call, ptr %arrayidx, align 4
+ %iv.next = add nuw nsw i64 %iv, 1
+ %exitcond = icmp eq i64 %iv.next, 1000
+ br i1 %exitcond, label %for.end, label %for.body
+
+for.end:
+ ret void
+}
+
+define void @log_f64_intrinsic(ptr nocapture %varray) {
+; CHECK-LABEL: @log_f64_intrinsic(
+; CHECK: [[TMP5:%.*]] = call <4 x double> @amd_vrd4_log(<4 x double> [[TMP4:%.*]])
+; CHECK: ret void
+;
+; CHECK-AVX512-VF8-LABEL: @log_f64_intrinsic(
+; CHECK-AVX512-VF8: [[TMP5:%.*]] = call <8 x double> @amd_vrd8_log(<8 x double> [[TMP4:%.*]])
+; CHECK-AVX512-VF8: ret void
+;
+entry:
+ br label %for.body
+
+for.body:
+ %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
+ %tmp = trunc i64 %iv to i32
+ %conv = sitofp i32 %tmp to double
+ %call = tail call double @llvm.log.f64(double %conv)
+ %arrayidx = getelementptr inbounds double, ptr %varray, i64 %iv
+ store double %call, ptr %arrayidx, align 4
+ %iv.next = add nuw nsw i64 %iv, 1
+ %exitcond = icmp eq i64 %iv.next, 1000
+ br i1 %exitcond, label %for.end, label %for.body
+
+for.end:
+ ret void
+}
+
+define void @log_f32_intrinsic(ptr nocapture %varray) {
+; CHECK-LABEL: @log_f32_intrinsic(
+; CHECK: [[TMP5:%.*]] = call <4 x float> @amd_vrs4_logf(<4 x float> [[TMP4:%.*]])
+; CHECK: ret void
+;
+; CHECK-AVX512-VF16-LABEL: @log_f32_intrinsic(
+; CHECK-AVX512-VF16: [[TMP5:%.*]] = call <16 x float> @amd_vrs16_logf(<16 x float> [[TMP4:%.*]])
+; CHECK-AVX512-VF16: ret void
+;
+entry:
+ br label %for.body
+
+for.body:
+ %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
+ %tmp = trunc i64 %iv to i32
+ %conv = sitofp i32 %tmp to float
+ %call = tail call float @llvm.log.f32(float %conv)
+ %arrayidx = getelementptr inbounds float, ptr %varray, i64 %iv
+ store float %call, ptr %arrayidx, align 4
+ %iv.next = add nuw nsw i64 %iv, 1
+ %exitcond = icmp eq i64 %iv.next, 1000
+ br i1 %exitcond, label %for.end, label %for.body
+
+for.end:
+ ret void
+}
+
+define void @log2_f64(ptr nocapture %varray) {
+; CHECK-LABEL: @log2_f64(
+; CHECK: [[TMP5:%.*]] = call <4 x double> @amd_vrd4_log2(<4 x double> [[TMP4:%.*]])
+; CHECK: ret void
+;
+; CHECK-AVX512-VF8-LABEL: @log2_f64(
+; CHECK-AVX512-VF8: [[TMP5:%.*]] = call <8 x double> @amd_vrd8_log2(<8 x double> [[TMP4:%.*]])
+; CHECK-AVX512-VF8: ret void
+;
+entry:
+ br label %for.body
+
+for.body:
+ %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
+ %tmp = trunc i64 %iv to i32
+ %conv = sitofp i32 %tmp to double
+ %call = tail call double @log2(double %conv)
+ %arrayidx = getelementptr inbounds double, ptr %varray, i64 %iv
+ store double %call, ptr %arrayidx, align 4
+ %iv.next = add nuw nsw i64 %iv, 1
+ %exitcond = icmp eq i64 %iv.next, 1000
+ br i1 %exitcond, label %for.end, label %for.body
+
+for.end:
+ ret void
+}
+
+define void @log2_f32(ptr nocapture %varray) {
+; CHECK-LABEL: @log2_f32(
+; CHECK: [[TMP5:%.*]] = call <4 x float> @amd_vrs4_log2f(<4 x float> [[TMP4:%.*]])
+; CHECK: ret void
+;
+; CHECK-AVX512-VF16-LABEL: @log2_f32(
+; CHECK-AVX512-VF16: [[TMP5:%.*]] = call <16 x float> @amd_vrs16_log2f(<16 x float> [[TMP4:%.*]])
+; CHECK-AVX512-VF16: ret void
+;
+entry:
+ br label %for.body
+
+for.body:
+ %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
+ %tmp = trunc i64 %iv to i32
+ %conv = sitofp i32 %tmp to float
+ %call = tail call float @log2f(float %conv)
+ %arrayidx = getelementptr inbounds float, ptr %varray, i64 %iv
+ store float %call, ptr %arrayidx, align 4
+ %iv.next = add nuw nsw i64 %iv, 1
+ %exitcond = icmp eq i64 %iv.next, 1000
+ br i1 %exitcond, label %for.end, label %for.body
+
+for.end:
+ ret void
+}
+
+define void @log2_f64_intrinsic(ptr nocapture %varray) {
+; CHECK-LABEL: @log2_f64_intrinsic(
+; CHECK: [[TMP5:%.*]] = call <4 x double> @amd_vrd4_log2(<4 x double> [[TMP4:%.*]])
+; CHECK: ret void
+;
+; CHECK-AVX512-VF8-LABEL: @log2_f64_intrinsic(
+; CHECK-AVX512-VF8: [[TMP5:%.*]] = call <8 x double> @amd_vrd8_log2(<8 x double> [[TMP4:%.*]])
+; CHECK-AVX512-VF8: ret void
+;
+entry:
+ br label %for.body
+
+for.body:
+ %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
+ %tmp = trunc i64 %iv to i32
+ %conv = sitofp i32 %tmp to double
+ %call = tail call double @llvm.log2.f64(double %conv)
+ %arrayidx = getelementptr inbounds double, ptr %varray, i64 %iv
+ store double %call, ptr %arrayidx, align 4
+ %iv.next = add nuw nsw i64 %iv, 1
+ %exitcond = icmp eq i64 %iv.next, 1000
+ br i1 %exitcond, label %for.end, label %for.body
+
+for.end:
+ ret void
+}
+
+define void @log2_f32_intrinsic(ptr nocapture %varray) {
+; CHECK-LABEL: @log2_f32_intrinsic(
+; CHECK: [[TMP5:%.*]] = call <4 x float> @amd_vrs4_log2f(<4 x float> [[TMP4:%.*]])
+; CHECK: ret void
+;
+; CHECK-AVX512-VF16-LABEL: @log2_f32_intrinsic(
+; CHECK-AVX512-VF16: [[TMP5:%.*]] = call <16 x float> @amd_vrs16_log2f(<16 x float> [[TMP4:%.*]])
+; CHECK-AVX512-VF16: ret void
+;
+entry:
+ br label %for.body
+
+for.body:
+ %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
+ %tmp = trunc i64 %iv to i32
+ %conv = sitofp i32 %tmp to float
+ %call = tail call float @llvm.log2.f32(float %conv)
+ %arrayidx = getelementptr inbounds float, ptr %varray, i64 %iv
+ store float %call, ptr %arrayidx, align 4
+ %iv.next = add nuw nsw i64 %iv, 1
+ %exitcond = icmp eq i64 %iv.next, 1000
+ br i1 %exitcond, label %for.end, label %for.body
+
+for.end:
+ ret void
+}
+
+define void @log10_f32(ptr nocapture %varray) {
+; CHECK-LABEL: @log10_f32(
+; CHECK: [[TMP5:%.*]] = call <4 x float> @amd_vrs4_log10f(<4 x float> [[TMP4:%.*]])
+; CHECK: ret void
+;
+; CHECK-AVX512-VF16-LABEL: @log10_f32(
+; CHECK-AVX512-VF16: [[TMP5:%.*]] = call <16 x float> @amd_vrs16_log10f(<16 x float> [[TMP4:%.*]])
+; CHECK-AVX512-VF16: ret void
+;
+entry:
+ br label %for.body
+
+for.body:
+ %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
+ %tmp = trunc i64 %iv to i32
+ %conv = sitofp i32 %tmp to float
+ %call = tail call float @log10f(float %conv)
+ %arrayidx = getelementptr inbounds float, ptr %varray, i64 %iv
+ store float %call, ptr %arrayidx, align 4
+ %iv.next = add nuw nsw i64 %iv, 1
+ %exitcond = icmp eq i64 %iv.next, 1000
+ br i1 %exitcond, label %for.end, label %for.body
+
+for.end:
+ ret void
+}
+
+define void @log10_f32_intrinsic(ptr nocapture %varray) {
+; CHECK-LABEL: @log10_f32_intrinsic(
+; CHECK: [[TMP5:%.*]] = call <4 x float> @amd_vrs4_log10f(<4 x float> [[TMP4:%.*]])
+; CHECK: ret void
+;
+; CHECK-AVX512-VF16-LABEL: @log10_f32_intrinsic(
+; CHECK-AVX512-VF16: [[TMP5:%.*]] = call <16 x float> @amd_vrs16_log10f(<16 x float> [[TMP4:%.*]])
+; CHECK-AVX512-VF16: ret void
+;
+entry:
+ br label %for.body
+
+for.body:
+ %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
+ %tmp = trunc i64 %iv to i32
+ %conv = sitofp i32 %tmp to float
+ %call = tail call float @llvm.log10.f32(float %conv)
+ %arrayidx = getelementptr inbounds float, ptr %varray, i64 %iv
+ store float %call, ptr %arrayidx, align 4
+ %iv.next = add nuw nsw i64 %iv, 1
+ %exitcond = icmp eq i64 %iv.next, 1000
+ br i1 %exitcond, label %for.end, label %for.body
+
+for.end:
+ ret void
+}
+
+define void @exp2_f64(ptr nocapture %varray) {
+; CHECK-LABEL: @exp2_f64(
+; CHECK: [[TMP5:%.*]] = call <4 x double> @amd_vrd4_exp2(<4 x double> [[TMP4:%.*]])
+; CHECK: ret void
+;
+; CHECK-AVX512-VF8-LABEL: @exp2_f64(
+; CHECK-AVX512-VF8: [[TMP5:%.*]] = call <8 x double> @amd_vrd8_exp2(<8 x double> [[TMP4:%.*]])
+; CHECK-AVX512-VF8: ret void
+;
+entry:
+ br label %for.body
+
+for.body:
+ %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
+ %tmp = trunc i64 %iv to i32
+ %conv = sitofp i32 %tmp to double
+ %call = tail call double @exp2(double %conv)
+ %arrayidx = getelementptr inbounds double, ptr %varray, i64 %iv
+ store double %call, ptr %arrayidx, align 4
+ %iv.next = add nuw nsw i64 %iv, 1
+ %exitcond = icmp eq i64 %iv.next, 1000
+ br i1 %exitcond, label %for.end, label %for.body
+
+for.end:
+ ret void
+}
+
+define void @exp2_f32(ptr nocapture %varray) {
+; CHECK-LABEL: @exp2_f32(
+; CHECK: [[TMP5:%.*]] = call <4 x float> @amd_vrs4_exp2f(<4 x float> [[TMP4:%.*]])
+; CHECK: ret void
+;
+; CHECK-AVX512-VF16-LABEL: @exp2_f32(
+; CHECK-AVX512-VF16: [[TMP5:%.*]] = call <16 x float> @amd_vrs16_exp2f(<16 x float> [[TMP4:%.*]])
+; CHECK-AVX512-VF16: ret void
+;
+entry:
+ br label %for.body
+
+for.body:
+ %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
+ %tmp = trunc i64 %iv to i32
+ %conv = sitofp i32 %tmp to float
+ %call = tail call float @exp2f(float %conv)
+ %arrayidx = getelementptr inbounds float, ptr %varray, i64 %iv
+ store float %call, ptr %arrayidx, align 4
+ %iv.next = add nuw nsw i64 %iv, 1
+ %exitcond = icmp eq i64 %iv.next, 1000
+ br i1 %exitcond, label %for.end, label %for.body
+
+for.end:
+ ret void
+}
+
+define void @exp2_f64_intrinsic(ptr nocapture %varray) {
+; CHECK-LABEL: @exp2_f64_intrinsic(
+; CHECK: [[TMP5:%.*]] = call <4 x double> @amd_vrd4_exp2(<4 x double> [[TMP4:%.*]])
+; CHECK: ret void
+;
+; CHECK-AVX512-VF8-LABEL: @exp2_f64_intrinsic(
+; CHECK-AVX512-VF8: [[TMP5:%.*]] = call <8 x double> @amd_vrd8_exp2(<8 x double> [[TMP4:%.*]])
+; CHECK-AVX512-VF8: ret void
+;
+entry:
+ br label %for.body
+
+for.body:
+ %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
+ %tmp = trunc i64 %iv to i32
+ %conv = sitofp i32 %tmp to double
+ %call = tail call double @llvm.exp2.f64(double %conv)
+ %arrayidx = getelementptr inbounds double, ptr %varray, i64 %iv
+ store double %call, ptr %arrayidx, align 4
+ %iv.next = add nuw nsw i64 %iv, 1
+ %exitcond = icmp eq i64 %iv.next, 1000
+ br i1 %exitcond, label %for.end, label %for.body
+
+for.end:
+ ret void
+}
+
+define void @exp2_f32_intrinsic(ptr nocapture %varray) {
+; CHECK-LABEL: @exp2_f32_intrinsic(
+; CHECK: [[TMP5:%.*]] = call <4 x float> @amd_vrs4_exp2f(<4 x float> [[TMP4:%.*]])
+; CHECK: ret void
+;
+; CHECK-AVX512-VF16-LABEL: @exp2_f32_intrinsic(
+; CHECK-AVX512-VF16: [[TMP5:%.*]] = call <16 x float> @amd_vrs16_exp2f(<16 x float> [[TMP4:%.*]])
+; CHECK-AVX512-VF16: ret void
+;
+entry:
+ br label %for.body
+
+for.body:
+ %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
+ %tmp = trunc i64 %iv to i32
+ %conv = sitofp i32 %tmp to float
+ %call = tail call float @llvm.exp2.f32(float %conv)
+ %arrayidx = getelementptr inbounds float, ptr %varray, i64 %iv
+ store float %call, ptr %arrayidx, align 4
+ %iv.next = add nuw nsw i64 %iv, 1
+ %exitcond = icmp eq i64 %iv.next, 1000
+ br i1 %exitcond, label %for.end, label %for.body
+
+for.end:
+ ret void
+}
+
+attributes #0 = { nounwind readnone }
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/sin-sqrt.ll b/llvm/test/Transforms/SLPVectorizer/X86/sin-sqrt.ll
index c02b031..9810d50 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/sin-sqrt.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/sin-sqrt.ll
@@ -1,6 +1,7 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -mtriple=x86_64-unknown-linux -mcpu=skylake-avx512 -passes=slp-vectorizer -S | FileCheck %s
; RUN: opt < %s -mtriple=x86_64-unknown-linux -mcpu=skylake-avx512 -passes=inject-tli-mappings,slp-vectorizer -vector-library=SVML -S | FileCheck %s --check-prefix=VECLIB
+; RUN: opt < %s -mtriple=x86_64-unknown-linux -mcpu=skylake-avx512 -passes=inject-tli-mappings,slp-vectorizer -vector-library=AMDLIBM -S | FileCheck %s --check-prefix=AMDLIBM
@src = common global [8 x double] zeroinitializer, align 64
@dst = common global [8 x double] zeroinitializer, align 64
@@ -63,7 +64,33 @@ define void @test() {
; VECLIB-NEXT: store <2 x double> [[TMP15]], ptr @dst, align 8
; VECLIB-NEXT: ret void
;
-
+; AMDLIBM-LABEL: @test(
+; AMDLIBM-NEXT: [[A0:%.*]] = load double, ptr @src, align 8
+; AMDLIBM-NEXT: [[A1:%.*]] = load double, ptr getelementptr inbounds ([8 x double], ptr @src, i32 0, i64 1), align 8
+; AMDLIBM-NEXT: [[A2:%.*]] = load double, ptr getelementptr inbounds ([8 x double], ptr @src, i32 0, i64 2), align 8
+; AMDLIBM-NEXT: [[A3:%.*]] = load double, ptr getelementptr inbounds ([8 x double], ptr @src, i32 0, i64 3), align 8
+; AMDLIBM-NEXT: [[A4:%.*]] = load double, ptr getelementptr inbounds ([8 x double], ptr @src, i32 0, i64 4), align 8
+; AMDLIBM-NEXT: [[A5:%.*]] = load double, ptr getelementptr inbounds ([8 x double], ptr @src, i32 0, i64 5), align 8
+; AMDLIBM-NEXT: [[A6:%.*]] = load double, ptr getelementptr inbounds ([8 x double], ptr @src, i32 0, i64 6), align 8
+; AMDLIBM-NEXT: [[A7:%.*]] = load double, ptr getelementptr inbounds ([8 x double], ptr @src, i32 0, i64 7), align 8
+; AMDLIBM-NEXT: [[TMP1:%.*]] = insertelement <2 x double> poison, double [[A2]], i32 0
+; AMDLIBM-NEXT: [[TMP2:%.*]] = insertelement <2 x double> [[TMP1]], double [[A6]], i32 1
+; AMDLIBM-NEXT: [[TMP3:%.*]] = call fast <2 x double> @amd_vrd2_sin(<2 x double> [[TMP2]])
+; AMDLIBM-NEXT: [[TMP4:%.*]] = insertelement <2 x double> poison, double [[A3]], i32 0
+; AMDLIBM-NEXT: [[TMP5:%.*]] = insertelement <2 x double> [[TMP4]], double [[A7]], i32 1
+; AMDLIBM-NEXT: [[TMP6:%.*]] = call fast <2 x double> @amd_vrd2_sin(<2 x double> [[TMP5]])
+; AMDLIBM-NEXT: [[TMP7:%.*]] = insertelement <2 x double> poison, double [[A0]], i32 0
+; AMDLIBM-NEXT: [[TMP8:%.*]] = insertelement <2 x double> [[TMP7]], double [[A4]], i32 1
+; AMDLIBM-NEXT: [[TMP9:%.*]] = call fast <2 x double> @llvm.sqrt.v2f64(<2 x double> [[TMP8]])
+; AMDLIBM-NEXT: [[TMP10:%.*]] = insertelement <2 x double> poison, double [[A1]], i32 0
+; AMDLIBM-NEXT: [[TMP11:%.*]] = insertelement <2 x double> [[TMP10]], double [[A5]], i32 1
+; AMDLIBM-NEXT: [[TMP12:%.*]] = call fast <2 x double> @llvm.sqrt.v2f64(<2 x double> [[TMP11]])
+; AMDLIBM-NEXT: [[TMP13:%.*]] = fadd fast <2 x double> [[TMP9]], [[TMP6]]
+; AMDLIBM-NEXT: [[TMP14:%.*]] = fadd fast <2 x double> [[TMP3]], [[TMP12]]
+; AMDLIBM-NEXT: [[TMP15:%.*]] = fadd fast <2 x double> [[TMP13]], [[TMP14]]
+; AMDLIBM-NEXT: store <2 x double> [[TMP15]], ptr @dst, align 8
+; AMDLIBM-NEXT: ret void
+;
%a0 = load double, ptr @src, align 8
%a1 = load double, ptr getelementptr inbounds ([8 x double], ptr @src, i32 0, i64 1), align 8
%a2 = load double, ptr getelementptr inbounds ([8 x double], ptr @src, i32 0, i64 2), align 8
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/vec3-base.ll b/llvm/test/Transforms/SLPVectorizer/X86/vec3-base.ll
new file mode 100644
index 0000000..6560fc6
--- /dev/null
+++ b/llvm/test/Transforms/SLPVectorizer/X86/vec3-base.ll
@@ -0,0 +1,317 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt -passes=slp-vectorizer -mtriple=x86_64-apple-macosx -S %s | FileCheck %s
+
+define void @v3_load_i32_mul_by_constant_store(ptr %src, ptr %dst) {
+; CHECK-LABEL: @v3_load_i32_mul_by_constant_store(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[GEP_SRC_0:%.*]] = getelementptr inbounds i32, ptr [[SRC:%.*]], i32 0
+; CHECK-NEXT: [[L_SRC_0:%.*]] = load i32, ptr [[GEP_SRC_0]], align 4
+; CHECK-NEXT: [[MUL_0:%.*]] = mul nsw i32 [[L_SRC_0]], 10
+; CHECK-NEXT: [[GEP_SRC_1:%.*]] = getelementptr inbounds i32, ptr [[SRC]], i32 1
+; CHECK-NEXT: [[L_SRC_1:%.*]] = load i32, ptr [[GEP_SRC_1]], align 4
+; CHECK-NEXT: [[MUL_1:%.*]] = mul nsw i32 [[L_SRC_1]], 10
+; CHECK-NEXT: [[GEP_SRC_2:%.*]] = getelementptr inbounds i32, ptr [[SRC]], i32 2
+; CHECK-NEXT: [[L_SRC_2:%.*]] = load i32, ptr [[GEP_SRC_2]], align 4
+; CHECK-NEXT: [[MUL_2:%.*]] = mul nsw i32 [[L_SRC_2]], 10
+; CHECK-NEXT: store i32 [[MUL_0]], ptr [[DST:%.*]], align 4
+; CHECK-NEXT: [[DST_1:%.*]] = getelementptr i32, ptr [[DST]], i32 1
+; CHECK-NEXT: store i32 [[MUL_1]], ptr [[DST_1]], align 4
+; CHECK-NEXT: [[DST_2:%.*]] = getelementptr i32, ptr [[DST]], i32 2
+; CHECK-NEXT: store i32 [[MUL_2]], ptr [[DST_2]], align 4
+; CHECK-NEXT: ret void
+;
+entry:
+ %gep.src.0 = getelementptr inbounds i32, ptr %src, i32 0
+ %l.src.0 = load i32, ptr %gep.src.0, align 4
+ %mul.0 = mul nsw i32 %l.src.0, 10
+
+ %gep.src.1 = getelementptr inbounds i32, ptr %src, i32 1
+ %l.src.1 = load i32, ptr %gep.src.1, align 4
+ %mul.1 = mul nsw i32 %l.src.1, 10
+
+ %gep.src.2 = getelementptr inbounds i32, ptr %src, i32 2
+ %l.src.2 = load i32, ptr %gep.src.2, align 4
+ %mul.2 = mul nsw i32 %l.src.2, 10
+
+ store i32 %mul.0, ptr %dst
+
+ %dst.1 = getelementptr i32, ptr %dst, i32 1
+ store i32 %mul.1, ptr %dst.1
+
+ %dst.2 = getelementptr i32, ptr %dst, i32 2
+ store i32 %mul.2, ptr %dst.2
+
+ ret void
+}
+
+define void @v3_load_i32_mul_store(ptr %src.1, ptr %src.2, ptr %dst) {
+; CHECK-LABEL: @v3_load_i32_mul_store(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[GEP_SRC_1_0:%.*]] = getelementptr inbounds i32, ptr [[SRC_1:%.*]], i32 0
+; CHECK-NEXT: [[L_SRC_1_0:%.*]] = load i32, ptr [[GEP_SRC_1_0]], align 4
+; CHECK-NEXT: [[GEP_SRC_2_0:%.*]] = getelementptr inbounds i32, ptr [[SRC_2:%.*]], i32 0
+; CHECK-NEXT: [[L_SRC_2_0:%.*]] = load i32, ptr [[GEP_SRC_2_0]], align 4
+; CHECK-NEXT: [[MUL_0:%.*]] = mul nsw i32 [[L_SRC_1_0]], [[L_SRC_2_0]]
+; CHECK-NEXT: [[GEP_SRC_1_1:%.*]] = getelementptr inbounds i32, ptr [[SRC_1]], i32 1
+; CHECK-NEXT: [[L_SRC_1_1:%.*]] = load i32, ptr [[GEP_SRC_1_1]], align 4
+; CHECK-NEXT: [[GEP_SRC_2_1:%.*]] = getelementptr inbounds i32, ptr [[SRC_2]], i32 1
+; CHECK-NEXT: [[L_SRC_2_1:%.*]] = load i32, ptr [[GEP_SRC_2_1]], align 4
+; CHECK-NEXT: [[MUL_1:%.*]] = mul nsw i32 [[L_SRC_1_1]], [[L_SRC_2_1]]
+; CHECK-NEXT: [[GEP_SRC_1_2:%.*]] = getelementptr inbounds i32, ptr [[SRC_1]], i32 2
+; CHECK-NEXT: [[L_SRC_1_2:%.*]] = load i32, ptr [[GEP_SRC_1_2]], align 4
+; CHECK-NEXT: [[GEP_SRC_2_2:%.*]] = getelementptr inbounds i32, ptr [[SRC_2]], i32 2
+; CHECK-NEXT: [[L_SRC_2_2:%.*]] = load i32, ptr [[GEP_SRC_2_2]], align 4
+; CHECK-NEXT: [[MUL_2:%.*]] = mul nsw i32 [[L_SRC_1_2]], [[L_SRC_2_2]]
+; CHECK-NEXT: store i32 [[MUL_0]], ptr [[DST:%.*]], align 4
+; CHECK-NEXT: [[DST_1:%.*]] = getelementptr i32, ptr [[DST]], i32 1
+; CHECK-NEXT: store i32 [[MUL_1]], ptr [[DST_1]], align 4
+; CHECK-NEXT: [[DST_2:%.*]] = getelementptr i32, ptr [[DST]], i32 2
+; CHECK-NEXT: store i32 [[MUL_2]], ptr [[DST_2]], align 4
+; CHECK-NEXT: ret void
+;
+entry:
+ %gep.src.1.0 = getelementptr inbounds i32, ptr %src.1, i32 0
+ %l.src.1.0 = load i32, ptr %gep.src.1.0, align 4
+ %gep.src.2.0 = getelementptr inbounds i32, ptr %src.2, i32 0
+ %l.src.2.0 = load i32, ptr %gep.src.2.0, align 4
+ %mul.0 = mul nsw i32 %l.src.1.0, %l.src.2.0
+
+ %gep.src.1.1 = getelementptr inbounds i32, ptr %src.1, i32 1
+ %l.src.1.1 = load i32, ptr %gep.src.1.1, align 4
+ %gep.src.2.1 = getelementptr inbounds i32, ptr %src.2, i32 1
+ %l.src.2.1 = load i32, ptr %gep.src.2.1, align 4
+ %mul.1 = mul nsw i32 %l.src.1.1, %l.src.2.1
+
+ %gep.src.1.2 = getelementptr inbounds i32, ptr %src.1, i32 2
+ %l.src.1.2 = load i32, ptr %gep.src.1.2, align 4
+ %gep.src.2.2 = getelementptr inbounds i32, ptr %src.2, i32 2
+ %l.src.2.2 = load i32, ptr %gep.src.2.2, align 4
+ %mul.2 = mul nsw i32 %l.src.1.2, %l.src.2.2
+
+ store i32 %mul.0, ptr %dst
+
+ %dst.1 = getelementptr i32, ptr %dst, i32 1
+ store i32 %mul.1, ptr %dst.1
+
+ %dst.2 = getelementptr i32, ptr %dst, i32 2
+ store i32 %mul.2, ptr %dst.2
+
+ ret void
+}
+
+define void @v3_load_i32_mul_add_const_store(ptr %src.1, ptr %src.2, ptr %dst) {
+; CHECK-LABEL: @v3_load_i32_mul_add_const_store(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[GEP_SRC_1_0:%.*]] = getelementptr inbounds i32, ptr [[SRC_1:%.*]], i32 0
+; CHECK-NEXT: [[L_SRC_1_0:%.*]] = load i32, ptr [[GEP_SRC_1_0]], align 4
+; CHECK-NEXT: [[GEP_SRC_2_0:%.*]] = getelementptr inbounds i32, ptr [[SRC_2:%.*]], i32 0
+; CHECK-NEXT: [[L_SRC_2_0:%.*]] = load i32, ptr [[GEP_SRC_2_0]], align 4
+; CHECK-NEXT: [[MUL_0:%.*]] = mul nsw i32 [[L_SRC_1_0]], [[L_SRC_2_0]]
+; CHECK-NEXT: [[ADD_0:%.*]] = add i32 [[MUL_0]], 9
+; CHECK-NEXT: [[GEP_SRC_1_1:%.*]] = getelementptr inbounds i32, ptr [[SRC_1]], i32 1
+; CHECK-NEXT: [[L_SRC_1_1:%.*]] = load i32, ptr [[GEP_SRC_1_1]], align 4
+; CHECK-NEXT: [[GEP_SRC_2_1:%.*]] = getelementptr inbounds i32, ptr [[SRC_2]], i32 1
+; CHECK-NEXT: [[L_SRC_2_1:%.*]] = load i32, ptr [[GEP_SRC_2_1]], align 4
+; CHECK-NEXT: [[MUL_1:%.*]] = mul nsw i32 [[L_SRC_1_1]], [[L_SRC_2_1]]
+; CHECK-NEXT: [[ADD_1:%.*]] = add i32 [[MUL_1]], 9
+; CHECK-NEXT: [[GEP_SRC_1_2:%.*]] = getelementptr inbounds i32, ptr [[SRC_1]], i32 2
+; CHECK-NEXT: [[L_SRC_1_2:%.*]] = load i32, ptr [[GEP_SRC_1_2]], align 4
+; CHECK-NEXT: [[GEP_SRC_2_2:%.*]] = getelementptr inbounds i32, ptr [[SRC_2]], i32 2
+; CHECK-NEXT: [[L_SRC_2_2:%.*]] = load i32, ptr [[GEP_SRC_2_2]], align 4
+; CHECK-NEXT: [[MUL_2:%.*]] = mul nsw i32 [[L_SRC_1_2]], [[L_SRC_2_2]]
+; CHECK-NEXT: [[ADD_2:%.*]] = add i32 [[MUL_2]], 9
+; CHECK-NEXT: store i32 [[ADD_0]], ptr [[DST:%.*]], align 4
+; CHECK-NEXT: [[DST_1:%.*]] = getelementptr i32, ptr [[DST]], i32 1
+; CHECK-NEXT: store i32 [[ADD_1]], ptr [[DST_1]], align 4
+; CHECK-NEXT: [[DST_2:%.*]] = getelementptr i32, ptr [[DST]], i32 2
+; CHECK-NEXT: store i32 [[ADD_2]], ptr [[DST_2]], align 4
+; CHECK-NEXT: ret void
+;
+entry:
+ %gep.src.1.0 = getelementptr inbounds i32, ptr %src.1, i32 0
+ %l.src.1.0 = load i32, ptr %gep.src.1.0, align 4
+ %gep.src.2.0 = getelementptr inbounds i32, ptr %src.2, i32 0
+ %l.src.2.0 = load i32, ptr %gep.src.2.0, align 4
+ %mul.0 = mul nsw i32 %l.src.1.0, %l.src.2.0
+ %add.0 = add i32 %mul.0, 9
+
+ %gep.src.1.1 = getelementptr inbounds i32, ptr %src.1, i32 1
+ %l.src.1.1 = load i32, ptr %gep.src.1.1, align 4
+ %gep.src.2.1 = getelementptr inbounds i32, ptr %src.2, i32 1
+ %l.src.2.1 = load i32, ptr %gep.src.2.1, align 4
+ %mul.1 = mul nsw i32 %l.src.1.1, %l.src.2.1
+ %add.1 = add i32 %mul.1, 9
+
+ %gep.src.1.2 = getelementptr inbounds i32, ptr %src.1, i32 2
+ %l.src.1.2 = load i32, ptr %gep.src.1.2, align 4
+ %gep.src.2.2 = getelementptr inbounds i32, ptr %src.2, i32 2
+ %l.src.2.2 = load i32, ptr %gep.src.2.2, align 4
+ %mul.2 = mul nsw i32 %l.src.1.2, %l.src.2.2
+ %add.2 = add i32 %mul.2, 9
+
+ store i32 %add.0, ptr %dst
+
+ %dst.1 = getelementptr i32, ptr %dst, i32 1
+ store i32 %add.1, ptr %dst.1
+
+ %dst.2 = getelementptr i32, ptr %dst, i32 2
+ store i32 %add.2, ptr %dst.2
+
+ ret void
+}
+
+define void @v3_load_f32_fadd_fadd_by_constant_store(ptr %src, ptr %dst) {
+; CHECK-LABEL: @v3_load_f32_fadd_fadd_by_constant_store(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[GEP_SRC_0:%.*]] = getelementptr inbounds float, ptr [[SRC:%.*]], i32 0
+; CHECK-NEXT: [[GEP_SRC_2:%.*]] = getelementptr inbounds float, ptr [[SRC]], i32 2
+; CHECK-NEXT: [[L_SRC_2:%.*]] = load float, ptr [[GEP_SRC_2]], align 4
+; CHECK-NEXT: [[FADD_2:%.*]] = fadd float [[L_SRC_2]], 1.000000e+01
+; CHECK-NEXT: [[TMP0:%.*]] = load <2 x float>, ptr [[GEP_SRC_0]], align 4
+; CHECK-NEXT: [[TMP1:%.*]] = fadd <2 x float> [[TMP0]], <float 1.000000e+01, float 1.000000e+01>
+; CHECK-NEXT: store <2 x float> [[TMP1]], ptr [[DST:%.*]], align 4
+; CHECK-NEXT: [[DST_2:%.*]] = getelementptr float, ptr [[DST]], i32 2
+; CHECK-NEXT: store float [[FADD_2]], ptr [[DST_2]], align 4
+; CHECK-NEXT: ret void
+;
+entry:
+ %gep.src.0 = getelementptr inbounds float, ptr %src, i32 0
+ %l.src.0 = load float , ptr %gep.src.0, align 4
+ %fadd.0 = fadd float %l.src.0, 10.0
+
+ %gep.src.1 = getelementptr inbounds float , ptr %src, i32 1
+ %l.src.1 = load float, ptr %gep.src.1, align 4
+ %fadd.1 = fadd float %l.src.1, 10.0
+
+ %gep.src.2 = getelementptr inbounds float, ptr %src, i32 2
+ %l.src.2 = load float, ptr %gep.src.2, align 4
+ %fadd.2 = fadd float %l.src.2, 10.0
+
+ store float %fadd.0, ptr %dst
+
+ %dst.1 = getelementptr float, ptr %dst, i32 1
+ store float %fadd.1, ptr %dst.1
+
+ %dst.2 = getelementptr float, ptr %dst, i32 2
+ store float %fadd.2, ptr %dst.2
+
+ ret void
+}
+
+define void @phi_store3(ptr %dst) {
+; CHECK-LABEL: @phi_store3(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: br label [[EXIT:%.*]]
+; CHECK: invoke.cont8.loopexit:
+; CHECK-NEXT: br label [[EXIT]]
+; CHECK: exit:
+; CHECK-NEXT: [[P_2:%.*]] = phi i32 [ 3, [[ENTRY:%.*]] ], [ 0, [[INVOKE_CONT8_LOOPEXIT:%.*]] ]
+; CHECK-NEXT: [[TMP0:%.*]] = phi <2 x i32> [ <i32 1, i32 2>, [[ENTRY]] ], [ poison, [[INVOKE_CONT8_LOOPEXIT]] ]
+; CHECK-NEXT: [[DST_2:%.*]] = getelementptr i32, ptr [[DST:%.*]], i32 2
+; CHECK-NEXT: store <2 x i32> [[TMP0]], ptr [[DST]], align 4
+; CHECK-NEXT: store i32 [[P_2]], ptr [[DST_2]], align 4
+; CHECK-NEXT: ret void
+;
+entry:
+ br label %exit
+
+invoke.cont8.loopexit: ; No predecessors!
+ br label %exit
+
+exit:
+ %p.0 = phi i32 [ 1, %entry ], [ 0, %invoke.cont8.loopexit ]
+ %p.1 = phi i32 [ 2, %entry ], [ 0, %invoke.cont8.loopexit ]
+ %p.2 = phi i32 [ 3, %entry ], [ 0, %invoke.cont8.loopexit ]
+
+ %dst.1 = getelementptr i32, ptr %dst, i32 1
+ %dst.2 = getelementptr i32, ptr %dst, i32 2
+
+ store i32 %p.0, ptr %dst, align 4
+ store i32 %p.1, ptr %dst.1, align 4
+ store i32 %p.2, ptr %dst.2, align 4
+ ret void
+}
+
+define void @store_try_reorder(ptr %dst) {
+; CHECK-LABEL: @store_try_reorder(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[ADD:%.*]] = add i32 0, 0
+; CHECK-NEXT: store i32 [[ADD]], ptr [[DST:%.*]], align 4
+; CHECK-NEXT: [[ARRAYIDX_I1887:%.*]] = getelementptr i32, ptr [[DST]], i64 1
+; CHECK-NEXT: store <2 x i32> zeroinitializer, ptr [[ARRAYIDX_I1887]], align 4
+; CHECK-NEXT: ret void
+;
+entry:
+ %add = add i32 0, 0
+ store i32 %add, ptr %dst, align 4
+ %add207 = sub i32 0, 0
+ %arrayidx.i1887 = getelementptr i32, ptr %dst, i64 1
+ store i32 %add207, ptr %arrayidx.i1887, align 4
+ %add216 = sub i32 0, 0
+ %arrayidx.i1891 = getelementptr i32, ptr %dst, i64 2
+ store i32 %add216, ptr %arrayidx.i1891, align 4
+ ret void
+}
+
+define void @vec3_fpext_cost(ptr %Colour, float %0) {
+; CHECK-LABEL: @vec3_fpext_cost(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[ARRAYIDX80:%.*]] = getelementptr float, ptr [[COLOUR:%.*]], i64 2
+; CHECK-NEXT: [[TMP1:%.*]] = insertelement <2 x float> poison, float [[TMP0:%.*]], i32 0
+; CHECK-NEXT: [[TMP2:%.*]] = shufflevector <2 x float> [[TMP1]], <2 x float> poison, <2 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP3:%.*]] = fpext <2 x float> [[TMP2]] to <2 x double>
+; CHECK-NEXT: [[TMP4:%.*]] = call <2 x double> @llvm.fmuladd.v2f64(<2 x double> [[TMP3]], <2 x double> zeroinitializer, <2 x double> zeroinitializer)
+; CHECK-NEXT: [[TMP5:%.*]] = fptrunc <2 x double> [[TMP4]] to <2 x float>
+; CHECK-NEXT: store <2 x float> [[TMP5]], ptr [[COLOUR]], align 4
+; CHECK-NEXT: [[CONV78:%.*]] = fpext float [[TMP0]] to double
+; CHECK-NEXT: [[TMP6:%.*]] = call double @llvm.fmuladd.f64(double [[CONV78]], double 0.000000e+00, double 0.000000e+00)
+; CHECK-NEXT: [[CONV82:%.*]] = fptrunc double [[TMP6]] to float
+; CHECK-NEXT: store float [[CONV82]], ptr [[ARRAYIDX80]], align 4
+; CHECK-NEXT: ret void
+;
+entry:
+ %arrayidx72 = getelementptr float, ptr %Colour, i64 1
+ %arrayidx80 = getelementptr float, ptr %Colour, i64 2
+ %conv62 = fpext float %0 to double
+ %1 = call double @llvm.fmuladd.f64(double %conv62, double 0.000000e+00, double 0.000000e+00)
+ %conv66 = fptrunc double %1 to float
+ store float %conv66, ptr %Colour, align 4
+ %conv70 = fpext float %0 to double
+ %2 = call double @llvm.fmuladd.f64(double %conv70, double 0.000000e+00, double 0.000000e+00)
+ %conv74 = fptrunc double %2 to float
+ store float %conv74, ptr %arrayidx72, align 4
+ %conv78 = fpext float %0 to double
+ %3 = call double @llvm.fmuladd.f64(double %conv78, double 0.000000e+00, double 0.000000e+00)
+ %conv82 = fptrunc double %3 to float
+ store float %conv82, ptr %arrayidx80, align 4
+ ret void
+}
+
+define void @fpext_gather(ptr %dst, double %conv) {
+; CHECK-LABEL: @fpext_gather(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[TMP0:%.*]] = insertelement <2 x double> poison, double [[CONV:%.*]], i32 0
+; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <2 x double> [[TMP0]], <2 x double> poison, <2 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP2:%.*]] = fptrunc <2 x double> [[TMP1]] to <2 x float>
+; CHECK-NEXT: [[LENGTHS:%.*]] = getelementptr float, ptr [[DST:%.*]], i64 0
+; CHECK-NEXT: [[TMP3:%.*]] = extractelement <2 x float> [[TMP2]], i32 0
+; CHECK-NEXT: store float [[TMP3]], ptr [[LENGTHS]], align 4
+; CHECK-NEXT: [[ARRAYIDX32:%.*]] = getelementptr float, ptr [[DST]], i64 1
+; CHECK-NEXT: store <2 x float> [[TMP2]], ptr [[ARRAYIDX32]], align 4
+; CHECK-NEXT: ret void
+;
+entry:
+ %conv25 = fptrunc double %conv to float
+ %Lengths = getelementptr float, ptr %dst, i64 0
+ store float %conv25, ptr %Lengths, align 4
+ %arrayidx32 = getelementptr float, ptr %dst, i64 1
+ store float %conv25, ptr %arrayidx32, align 4
+ %conv34 = fptrunc double %conv to float
+ %arrayidx37 = getelementptr float, ptr %dst, i64 2
+ store float %conv34, ptr %arrayidx37, align 4
+ ret void
+}
+
+declare float @llvm.fmuladd.f32(float, float, float)
+
+declare double @llvm.fmuladd.f64(double, double, double)
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/vec3-calls.ll b/llvm/test/Transforms/SLPVectorizer/X86/vec3-calls.ll
new file mode 100644
index 0000000..71b9315
--- /dev/null
+++ b/llvm/test/Transforms/SLPVectorizer/X86/vec3-calls.ll
@@ -0,0 +1,60 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt -passes=slp-vectorizer -mtriple=x86_64-apple-macosx -S %s | FileCheck %s
+
+define void @vec3_vectorize_call(ptr %Colour, float %0) {
+; CHECK-LABEL: @vec3_vectorize_call(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[TMP1:%.*]] = load <2 x float>, ptr [[COLOUR:%.*]], align 4
+; CHECK-NEXT: [[TMP2:%.*]] = call <2 x float> @llvm.fmuladd.v2f32(<2 x float> [[TMP1]], <2 x float> zeroinitializer, <2 x float> zeroinitializer)
+; CHECK-NEXT: store <2 x float> [[TMP2]], ptr [[COLOUR]], align 4
+; CHECK-NEXT: [[ARRAYIDX99_I1:%.*]] = getelementptr float, ptr [[COLOUR]], i64 2
+; CHECK-NEXT: [[TMP3:%.*]] = call float @llvm.fmuladd.f32(float [[TMP0:%.*]], float 0.000000e+00, float 0.000000e+00)
+; CHECK-NEXT: store float [[TMP3]], ptr [[ARRAYIDX99_I1]], align 4
+; CHECK-NEXT: ret void
+;
+entry:
+ %1 = load float, ptr %Colour, align 4
+ %2 = call float @llvm.fmuladd.f32(float %1, float 0.000000e+00, float 0.000000e+00)
+ store float %2, ptr %Colour, align 4
+ %arrayidx91.i = getelementptr float, ptr %Colour, i64 1
+ %3 = load float, ptr %arrayidx91.i, align 4
+ %4 = call float @llvm.fmuladd.f32(float %3, float 0.000000e+00, float 0.000000e+00)
+ store float %4, ptr %arrayidx91.i, align 4
+ %arrayidx99.i1 = getelementptr float, ptr %Colour, i64 2
+ %5 = call float @llvm.fmuladd.f32(float %0, float 0.000000e+00, float 0.000000e+00)
+ store float %5, ptr %arrayidx99.i1, align 4
+ ret void
+}
+
+define void @vec3_fmuladd_64(ptr %Colour, double %0) {
+; CHECK-LABEL: @vec3_fmuladd_64(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[ARRAYIDX80:%.*]] = getelementptr float, ptr [[COLOUR:%.*]], i64 2
+; CHECK-NEXT: [[TMP1:%.*]] = insertelement <2 x double> poison, double [[TMP0:%.*]], i32 0
+; CHECK-NEXT: [[TMP2:%.*]] = shufflevector <2 x double> [[TMP1]], <2 x double> poison, <2 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP3:%.*]] = call <2 x double> @llvm.fmuladd.v2f64(<2 x double> [[TMP2]], <2 x double> zeroinitializer, <2 x double> zeroinitializer)
+; CHECK-NEXT: [[TMP4:%.*]] = fptrunc <2 x double> [[TMP3]] to <2 x float>
+; CHECK-NEXT: store <2 x float> [[TMP4]], ptr [[COLOUR]], align 4
+; CHECK-NEXT: [[TMP5:%.*]] = call double @llvm.fmuladd.f64(double [[TMP0]], double 0.000000e+00, double 0.000000e+00)
+; CHECK-NEXT: [[CONV82:%.*]] = fptrunc double [[TMP5]] to float
+; CHECK-NEXT: store float [[CONV82]], ptr [[ARRAYIDX80]], align 4
+; CHECK-NEXT: ret void
+;
+entry:
+ %arrayidx72 = getelementptr float, ptr %Colour, i64 1
+ %arrayidx80 = getelementptr float, ptr %Colour, i64 2
+ %1 = call double @llvm.fmuladd.f64(double %0, double 0.000000e+00, double 0.000000e+00)
+ %conv66 = fptrunc double %1 to float
+ store float %conv66, ptr %Colour, align 4
+ %2 = call double @llvm.fmuladd.f64(double %0, double 0.000000e+00, double 0.000000e+00)
+ %conv74 = fptrunc double %2 to float
+ store float %conv74, ptr %arrayidx72, align 4
+ %3 = call double @llvm.fmuladd.f64(double %0, double 0.000000e+00, double 0.000000e+00)
+ %conv82 = fptrunc double %3 to float
+ store float %conv82, ptr %arrayidx80, align 4
+ ret void
+}
+
+declare float @llvm.fmuladd.f32(float, float, float)
+
+declare double @llvm.fmuladd.f64(double, double, double)
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/vec3-gather-some-loads.ll b/llvm/test/Transforms/SLPVectorizer/X86/vec3-gather-some-loads.ll
new file mode 100644
index 0000000..1411f94
--- /dev/null
+++ b/llvm/test/Transforms/SLPVectorizer/X86/vec3-gather-some-loads.ll
@@ -0,0 +1,66 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
+; RUN: opt -passes=slp-vectorizer -mtriple=x86_64-apple-macosx -S %s | FileCheck %s
+
+target datalayout = "e-m:o-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128"
+
+define void @test_insert_loads(ptr %A, ptr noalias %B, float %0) #0 {
+; CHECK-LABEL: define void @test_insert_loads(
+; CHECK-SAME: ptr [[A:%.*]], ptr noalias [[B:%.*]], float [[TMP0:%.*]]) #[[ATTR0:[0-9]+]] {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[MULADD_0:%.*]] = tail call float @llvm.fmuladd.f32(float [[TMP0]], float 1.000000e+00, float 1.000000e+00)
+; CHECK-NEXT: [[TMP1:%.*]] = insertelement <2 x float> poison, float [[TMP0]], i32 0
+; CHECK-NEXT: [[TMP2:%.*]] = shufflevector <2 x float> [[TMP1]], <2 x float> poison, <2 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP3:%.*]] = call <2 x float> @llvm.fmuladd.v2f32(<2 x float> [[TMP2]], <2 x float> <float 3.000000e+00, float 2.000000e+00>, <2 x float> <float 3.000000e+00, float 2.000000e+00>)
+; CHECK-NEXT: [[A_28:%.*]] = getelementptr i8, ptr [[A]], i64 28
+; CHECK-NEXT: [[L_A_28:%.*]] = load float, ptr [[A_28]], align 4
+; CHECK-NEXT: [[A_12:%.*]] = getelementptr i8, ptr [[A]], i64 12
+; CHECK-NEXT: [[L_A_12:%.*]] = load float, ptr [[A_12]], align 4
+; CHECK-NEXT: [[GEP_4:%.*]] = getelementptr i8, ptr [[B]], i64 4
+; CHECK-NEXT: [[L_B_0:%.*]] = load float, ptr [[B]], align 4
+; CHECK-NEXT: [[GEP_28:%.*]] = getelementptr i8, ptr [[B]], i64 28
+; CHECK-NEXT: [[GEP_20:%.*]] = getelementptr i8, ptr [[B]], i64 20
+; CHECK-NEXT: [[TMP4:%.*]] = insertelement <4 x float> poison, float [[TMP0]], i32 0
+; CHECK-NEXT: [[TMP5:%.*]] = shufflevector <4 x float> [[TMP4]], <4 x float> poison, <4 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP6:%.*]] = insertelement <4 x float> <float poison, float poison, float poison, float 4.000000e+00>, float [[L_A_12]], i32 0
+; CHECK-NEXT: [[TMP7:%.*]] = insertelement <4 x float> [[TMP6]], float [[L_A_28]], i32 1
+; CHECK-NEXT: [[TMP8:%.*]] = shufflevector <4 x float> [[TMP7]], <4 x float> poison, <4 x i32> <i32 0, i32 1, i32 1, i32 3>
+; CHECK-NEXT: [[TMP9:%.*]] = insertelement <4 x float> <float poison, float 0.000000e+00, float 0.000000e+00, float 4.000000e+00>, float [[L_B_0]], i32 0
+; CHECK-NEXT: [[TMP10:%.*]] = call <4 x float> @llvm.fmuladd.v4f32(<4 x float> [[TMP5]], <4 x float> [[TMP8]], <4 x float> [[TMP9]])
+; CHECK-NEXT: store <4 x float> [[TMP10]], ptr [[GEP_4]], align 4
+; CHECK-NEXT: store <2 x float> [[TMP3]], ptr [[GEP_20]], align 4
+; CHECK-NEXT: store float [[MULADD_0]], ptr [[GEP_28]], align 4
+; CHECK-NEXT: ret void
+;
+entry:
+ %muladd.0 = tail call float @llvm.fmuladd.f32(float %0, float 1.000000e+00, float 1.000000e+00)
+ %muladd.1 = tail call float @llvm.fmuladd.f32(float %0, float 2.000000e+00, float 2.000000e+00)
+ %muladd.2 = tail call float @llvm.fmuladd.f32(float %0, float 3.000000e+00, float 3.000000e+00)
+ %muladd.3 = tail call float @llvm.fmuladd.f32(float %0, float 4.000000e+00, float 4.000000e+00)
+ %A.28 = getelementptr i8, ptr %A, i64 28
+ %l.A.28 = load float, ptr %A.28, align 4
+ %muladd.4 = tail call float @llvm.fmuladd.f32(float %0, float %l.A.28, float 0.000000e+00)
+ %muladd.5 = tail call float @llvm.fmuladd.f32(float %0, float %l.A.28, float 0.000000e+00)
+ %A.12 = getelementptr i8, ptr %A, i64 12
+ %l.A.12 = load float, ptr %A.12, align 4
+ %gep.4 = getelementptr i8, ptr %B, i64 4
+ %gep.12 = getelementptr i8, ptr %B, i64 12
+ %l.B.0 = load float, ptr %B, align 4
+ %muladd.6 = tail call float @llvm.fmuladd.f32(float %0, float %l.A.12, float %l.B.0)
+ %gep.28 = getelementptr i8, ptr %B, i64 28
+ %gep.24 = getelementptr i8, ptr %B, i64 24
+ %gep.20 = getelementptr i8, ptr %B, i64 20
+ %gep.16 = getelementptr i8, ptr %B, i64 16
+ %gep.8 = getelementptr i8, ptr %B, i64 8
+ store float %muladd.6, ptr %gep.4, align 4
+ store float %muladd.5, ptr %gep.8, align 8
+ store float %muladd.4, ptr %gep.12, align 4
+ store float %muladd.3, ptr %gep.16, align 16
+ store float %muladd.2, ptr %gep.20, align 4
+ store float %muladd.1, ptr %gep.24, align 8
+ store float %muladd.0, ptr %gep.28, align 4
+ ret void
+}
+
+declare float @llvm.fmuladd.f32(float, float, float)
+
+attributes #0 = { "target-cpu"="skylake-avx512" }
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/vec3-reorder-reshuffle.ll b/llvm/test/Transforms/SLPVectorizer/X86/vec3-reorder-reshuffle.ll
new file mode 100644
index 0000000..9584a66
--- /dev/null
+++ b/llvm/test/Transforms/SLPVectorizer/X86/vec3-reorder-reshuffle.ll
@@ -0,0 +1,513 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
+; RUN: opt -passes=slp-vectorizer -mtriple=x86_64-apple-macosx -S %s | FileCheck %s
+
+%struct.zot = type { i32, i32, i32 }
+
+define i1 @reorder_results(ptr %arg, i1 %arg1, ptr %arg2, i64 %arg3, ptr %arg4) {
+; CHECK-LABEL: define i1 @reorder_results(
+; CHECK-SAME: ptr [[ARG:%.*]], i1 [[ARG1:%.*]], ptr [[ARG2:%.*]], i64 [[ARG3:%.*]], ptr [[ARG4:%.*]]) {
+; CHECK-NEXT: bb:
+; CHECK-NEXT: [[LOAD:%.*]] = load ptr, ptr [[ARG4]], align 8
+; CHECK-NEXT: [[LOAD4:%.*]] = load i32, ptr [[LOAD]], align 4
+; CHECK-NEXT: [[GETELEMENTPTR:%.*]] = getelementptr i8, ptr [[LOAD]], i64 4
+; CHECK-NEXT: [[LOAD5:%.*]] = load i32, ptr [[GETELEMENTPTR]], align 4
+; CHECK-NEXT: [[GETELEMENTPTR6:%.*]] = getelementptr i8, ptr [[LOAD]], i64 8
+; CHECK-NEXT: [[LOAD7:%.*]] = load i32, ptr [[GETELEMENTPTR6]], align 4
+; CHECK-NEXT: br i1 [[ARG1]], label [[BB12:%.*]], label [[BB9:%.*]]
+; CHECK: bb8:
+; CHECK-NEXT: ret i1 false
+; CHECK: bb9:
+; CHECK-NEXT: [[FREEZE:%.*]] = freeze ptr [[ARG]]
+; CHECK-NEXT: store i32 [[LOAD4]], ptr [[FREEZE]], align 4
+; CHECK-NEXT: [[GETELEMENTPTR10:%.*]] = getelementptr i8, ptr [[FREEZE]], i64 4
+; CHECK-NEXT: store i32 [[LOAD7]], ptr [[GETELEMENTPTR10]], align 4
+; CHECK-NEXT: [[GETELEMENTPTR11:%.*]] = getelementptr i8, ptr [[FREEZE]], i64 8
+; CHECK-NEXT: store i32 [[LOAD5]], ptr [[GETELEMENTPTR11]], align 4
+; CHECK-NEXT: br label [[BB8:%.*]]
+; CHECK: bb12:
+; CHECK-NEXT: [[GETELEMENTPTR13:%.*]] = getelementptr [[STRUCT_ZOT:%.*]], ptr [[ARG2]], i64 [[ARG3]]
+; CHECK-NEXT: store i32 [[LOAD4]], ptr [[GETELEMENTPTR13]], align 4
+; CHECK-NEXT: [[GETELEMENTPTR14:%.*]] = getelementptr i8, ptr [[GETELEMENTPTR13]], i64 4
+; CHECK-NEXT: store i32 [[LOAD7]], ptr [[GETELEMENTPTR14]], align 4
+; CHECK-NEXT: [[GETELEMENTPTR15:%.*]] = getelementptr i8, ptr [[GETELEMENTPTR13]], i64 8
+; CHECK-NEXT: store i32 [[LOAD5]], ptr [[GETELEMENTPTR15]], align 4
+; CHECK-NEXT: br label [[BB8]]
+;
+bb:
+ %load = load ptr, ptr %arg4, align 8
+ %load4 = load i32, ptr %load, align 4
+ %getelementptr = getelementptr i8, ptr %load, i64 4
+ %load5 = load i32, ptr %getelementptr, align 4
+ %getelementptr6 = getelementptr i8, ptr %load, i64 8
+ %load7 = load i32, ptr %getelementptr6, align 4
+ br i1 %arg1, label %bb12, label %bb9
+
+bb8: ; preds = %bb12, %bb9
+ ret i1 false
+
+bb9: ; preds = %bb
+ %freeze = freeze ptr %arg
+ store i32 %load4, ptr %freeze, align 4
+ %getelementptr10 = getelementptr i8, ptr %freeze, i64 4
+ store i32 %load7, ptr %getelementptr10, align 4
+ %getelementptr11 = getelementptr i8, ptr %freeze, i64 8
+ store i32 %load5, ptr %getelementptr11, align 4
+ br label %bb8
+
+bb12: ; preds = %bb
+ %getelementptr13 = getelementptr %struct.zot, ptr %arg2, i64 %arg3
+ store i32 %load4, ptr %getelementptr13, align 4
+ %getelementptr14 = getelementptr i8, ptr %getelementptr13, i64 4
+ store i32 %load7, ptr %getelementptr14, align 4
+ %getelementptr15 = getelementptr i8, ptr %getelementptr13, i64 8
+ store i32 %load5, ptr %getelementptr15, align 4
+ br label %bb8
+}
+
+define void @extract_mask(ptr %object, double %conv503, double %conv520) {
+; CHECK-LABEL: define void @extract_mask(
+; CHECK-SAME: ptr [[OBJECT:%.*]], double [[CONV503:%.*]], double [[CONV520:%.*]]) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[TMP0:%.*]] = load ptr, ptr [[OBJECT]], align 8
+; CHECK-NEXT: [[BBOX483:%.*]] = getelementptr float, ptr [[TMP0]]
+; CHECK-NEXT: [[TMP1:%.*]] = load <2 x float>, ptr [[BBOX483]], align 8
+; CHECK-NEXT: [[TMP2:%.*]] = fpext <2 x float> [[TMP1]] to <2 x double>
+; CHECK-NEXT: [[TMP3:%.*]] = shufflevector <2 x double> [[TMP2]], <2 x double> poison, <2 x i32> <i32 1, i32 0>
+; CHECK-NEXT: [[TMP4:%.*]] = insertelement <2 x double> [[TMP3]], double [[CONV503]], i32 0
+; CHECK-NEXT: [[TMP5:%.*]] = fcmp ogt <2 x double> [[TMP4]], <double 0.000000e+00, double -2.000000e+10>
+; CHECK-NEXT: [[TMP6:%.*]] = select <2 x i1> [[TMP5]], <2 x double> [[TMP3]], <2 x double> <double 0.000000e+00, double -2.000000e+10>
+; CHECK-NEXT: [[TMP7:%.*]] = fsub <2 x double> zeroinitializer, [[TMP6]]
+; CHECK-NEXT: [[TMP8:%.*]] = fptrunc <2 x double> [[TMP7]] to <2 x float>
+; CHECK-NEXT: [[TMP9:%.*]] = extractelement <2 x float> [[TMP8]], i32 0
+; CHECK-NEXT: [[TMP10:%.*]] = extractelement <2 x float> [[TMP8]], i32 1
+; CHECK-NEXT: [[MUL646:%.*]] = fmul float [[TMP9]], [[TMP10]]
+; CHECK-NEXT: [[CMP663:%.*]] = fcmp olt float [[MUL646]], 0.000000e+00
+; CHECK-NEXT: br i1 [[CMP663]], label [[IF_THEN665:%.*]], label [[IF_END668:%.*]]
+; CHECK: if.then665:
+; CHECK-NEXT: [[ARRAYIDX656:%.*]] = getelementptr float, ptr [[OBJECT]], i64 10
+; CHECK-NEXT: [[BBOX651:%.*]] = getelementptr float, ptr [[OBJECT]]
+; CHECK-NEXT: [[CONV613:%.*]] = fptrunc double [[CONV503]] to float
+; CHECK-NEXT: store float [[CONV613]], ptr [[BBOX651]], align 8
+; CHECK-NEXT: [[BBOX_SROA_6_0_BBOX666_SROA_IDX:%.*]] = getelementptr float, ptr [[OBJECT]], i64 1
+; CHECK-NEXT: [[TMP11:%.*]] = insertelement <2 x double> [[TMP6]], double [[CONV520]], i32 1
+; CHECK-NEXT: [[TMP12:%.*]] = fptrunc <2 x double> [[TMP11]] to <2 x float>
+; CHECK-NEXT: store <2 x float> [[TMP12]], ptr [[BBOX_SROA_6_0_BBOX666_SROA_IDX]], align 4
+; CHECK-NEXT: store <2 x float> [[TMP8]], ptr [[ARRAYIDX656]], align 8
+; CHECK-NEXT: br label [[IF_END668]]
+; CHECK: if.end668:
+; CHECK-NEXT: ret void
+;
+entry:
+ %0 = load ptr, ptr %object, align 8
+ %bbox483 = getelementptr float, ptr %0
+ %1 = load float, ptr %bbox483, align 8
+ %conv486 = fpext float %1 to double
+ %cmp487 = fcmp ogt double %conv486, -2.000000e+10
+ %conv486.2 = select i1 %cmp487, double %conv486, double -2.000000e+10
+ %arrayidx502 = getelementptr float, ptr %0, i64 1
+ %2 = load float, ptr %arrayidx502, align 4
+ %conv5033 = fpext float %2 to double
+ %cmp504 = fcmp ogt double %conv503, 0.000000e+00
+ %cond514 = select i1 %cmp504, double %conv5033, double 0.000000e+00
+ %sub626 = fsub double 0.000000e+00, %conv486.2
+ %conv627 = fptrunc double %sub626 to float
+ %sub632 = fsub double 0.000000e+00, %cond514
+ %conv633 = fptrunc double %sub632 to float
+ %mul646 = fmul float %conv633, %conv627
+ %cmp663 = fcmp olt float %mul646, 0.000000e+00
+ br i1 %cmp663, label %if.then665, label %if.end668
+
+if.then665: ; preds = %entry
+ %arrayidx656 = getelementptr float, ptr %object, i64 10
+ %lengths652 = getelementptr float, ptr %object, i64 11
+ %bbox651 = getelementptr float, ptr %object
+ %conv621 = fptrunc double %conv520 to float
+ %conv617 = fptrunc double %cond514 to float
+ %conv613 = fptrunc double %conv503 to float
+ store float %conv613, ptr %bbox651, align 8
+ %bbox.sroa.6.0.bbox666.sroa_idx = getelementptr float, ptr %object, i64 1
+ store float %conv617, ptr %bbox.sroa.6.0.bbox666.sroa_idx, align 4
+ %bbox.sroa.8.0.bbox666.sroa_idx = getelementptr float, ptr %object, i64 2
+ store float %conv621, ptr %bbox.sroa.8.0.bbox666.sroa_idx, align 8
+ store float %conv627, ptr %lengths652, align 4
+ store float %conv633, ptr %arrayidx656, align 8
+ br label %if.end668
+
+if.end668: ; preds = %if.then665, %entry
+ ret void
+}
+
+define void @gather_2(ptr %mat1, float %0, float %1) {
+; CHECK-LABEL: define void @gather_2(
+; CHECK-SAME: ptr [[MAT1:%.*]], float [[TMP0:%.*]], float [[TMP1:%.*]]) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[TMP2:%.*]] = insertelement <2 x float> poison, float [[TMP0]], i32 0
+; CHECK-NEXT: [[TMP3:%.*]] = insertelement <2 x float> [[TMP2]], float [[TMP1]], i32 1
+; CHECK-NEXT: [[TMP4:%.*]] = shufflevector <2 x float> [[TMP3]], <2 x float> <float 0.000000e+00, float poison>, <2 x i32> <i32 2, i32 0>
+; CHECK-NEXT: [[TMP5:%.*]] = call <2 x float> @llvm.fmuladd.v2f32(<2 x float> [[TMP3]], <2 x float> [[TMP4]], <2 x float> zeroinitializer)
+; CHECK-NEXT: [[TMP6:%.*]] = call float @llvm.fmuladd.f32(float [[TMP0]], float [[TMP1]], float 0.000000e+00)
+; CHECK-NEXT: [[TMP7:%.*]] = fmul float [[TMP6]], 0.000000e+00
+; CHECK-NEXT: [[ARRAYIDX163:%.*]] = getelementptr [4 x [4 x float]], ptr [[MAT1]], i64 0, i64 1
+; CHECK-NEXT: [[ARRAYIDX5_I_I_I280:%.*]] = getelementptr [4 x [4 x float]], ptr [[MAT1]], i64 0, i64 1, i64 2
+; CHECK-NEXT: [[TMP8:%.*]] = fmul <2 x float> [[TMP5]], zeroinitializer
+; CHECK-NEXT: store <2 x float> [[TMP8]], ptr [[ARRAYIDX163]], align 4
+; CHECK-NEXT: store float [[TMP7]], ptr [[ARRAYIDX5_I_I_I280]], align 4
+; CHECK-NEXT: ret void
+;
+entry:
+ %2 = call float @llvm.fmuladd.f32(float %0, float 0.000000e+00, float 0.000000e+00)
+ %3 = call float @llvm.fmuladd.f32(float %1, float %0, float 0.000000e+00)
+ %4 = call float @llvm.fmuladd.f32(float %0, float %1, float 0.000000e+00)
+ %5 = fmul float %2, 0.000000e+00
+ %6 = fmul float %3, 0.000000e+00
+ %7 = fmul float %4, 0.000000e+00
+ %arrayidx163 = getelementptr [4 x [4 x float]], ptr %mat1, i64 0, i64 1
+ %arrayidx2.i.i.i278 = getelementptr [4 x [4 x float]], ptr %mat1, i64 0, i64 1, i64 1
+ %arrayidx5.i.i.i280 = getelementptr [4 x [4 x float]], ptr %mat1, i64 0, i64 1, i64 2
+ store float %5, ptr %arrayidx163, align 4
+ store float %6, ptr %arrayidx2.i.i.i278, align 4
+ store float %7, ptr %arrayidx5.i.i.i280, align 4
+ ret void
+}
+
+define i32 @reorder_indices_1(float %0) {
+; CHECK-LABEL: define i32 @reorder_indices_1(
+; CHECK-SAME: float [[TMP0:%.*]]) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[NOR1:%.*]] = alloca [0 x [3 x float]], i32 0, align 4
+; CHECK-NEXT: [[ARRAYIDX2_I265:%.*]] = getelementptr float, ptr [[NOR1]], i64 2
+; CHECK-NEXT: [[TMP1:%.*]] = load float, ptr [[ARRAYIDX2_I265]], align 4
+; CHECK-NEXT: [[TMP2:%.*]] = load <2 x float>, ptr [[NOR1]], align 4
+; CHECK-NEXT: [[TMP3:%.*]] = extractelement <2 x float> [[TMP2]], i32 0
+; CHECK-NEXT: [[TMP4:%.*]] = fneg float [[TMP3]]
+; CHECK-NEXT: [[NEG11_I:%.*]] = fmul float [[TMP4]], [[TMP0]]
+; CHECK-NEXT: [[TMP5:%.*]] = call float @llvm.fmuladd.f32(float [[TMP1]], float 0.000000e+00, float [[NEG11_I]])
+; CHECK-NEXT: [[TMP6:%.*]] = insertelement <2 x float> poison, float [[TMP1]], i32 0
+; CHECK-NEXT: [[TMP7:%.*]] = shufflevector <2 x float> [[TMP6]], <2 x float> [[TMP2]], <2 x i32> <i32 0, i32 3>
+; CHECK-NEXT: [[TMP8:%.*]] = fneg <2 x float> [[TMP7]]
+; CHECK-NEXT: [[TMP9:%.*]] = insertelement <2 x float> poison, float [[TMP0]], i32 0
+; CHECK-NEXT: [[TMP10:%.*]] = shufflevector <2 x float> [[TMP9]], <2 x float> poison, <2 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP11:%.*]] = fmul <2 x float> [[TMP8]], [[TMP10]]
+; CHECK-NEXT: [[TMP12:%.*]] = shufflevector <2 x float> [[TMP11]], <2 x float> poison, <2 x i32> <i32 1, i32 0>
+; CHECK-NEXT: [[TMP13:%.*]] = call <2 x float> @llvm.fmuladd.v2f32(<2 x float> [[TMP2]], <2 x float> zeroinitializer, <2 x float> [[TMP12]])
+; CHECK-NEXT: [[TMP14:%.*]] = call <2 x float> @llvm.fmuladd.v2f32(<2 x float> [[TMP10]], <2 x float> [[TMP13]], <2 x float> zeroinitializer)
+; CHECK-NEXT: [[TMP15:%.*]] = call float @llvm.fmuladd.f32(float [[TMP0]], float [[TMP5]], float 0.000000e+00)
+; CHECK-NEXT: [[TMP16:%.*]] = fmul <2 x float> [[TMP14]], zeroinitializer
+; CHECK-NEXT: [[MUL6_I_I_I:%.*]] = fmul float [[TMP15]], 0.000000e+00
+; CHECK-NEXT: store <2 x float> [[TMP16]], ptr [[NOR1]], align 4
+; CHECK-NEXT: store float [[MUL6_I_I_I]], ptr [[ARRAYIDX2_I265]], align 4
+; CHECK-NEXT: ret i32 0
+;
+entry:
+ %nor1 = alloca [0 x [3 x float]], i32 0, align 4
+ %arrayidx.i = getelementptr float, ptr %nor1, i64 1
+ %1 = load float, ptr %arrayidx.i, align 4
+ %arrayidx2.i265 = getelementptr float, ptr %nor1, i64 2
+ %2 = load float, ptr %arrayidx2.i265, align 4
+ %3 = fneg float %2
+ %neg.i267 = fmul float %3, %0
+ %4 = call float @llvm.fmuladd.f32(float %1, float 0.000000e+00, float %neg.i267)
+ %5 = load float, ptr %nor1, align 4
+ %6 = fneg float %5
+ %neg11.i = fmul float %6, %0
+ %7 = call float @llvm.fmuladd.f32(float %2, float 0.000000e+00, float %neg11.i)
+ %8 = fneg float %1
+ %neg18.i = fmul float %8, %0
+ %9 = call float @llvm.fmuladd.f32(float %5, float 0.000000e+00, float %neg18.i)
+ %10 = call float @llvm.fmuladd.f32(float %0, float %9, float 0.000000e+00)
+ %11 = call float @llvm.fmuladd.f32(float %0, float %4, float 0.000000e+00)
+ %12 = call float @llvm.fmuladd.f32(float %0, float %7, float 0.000000e+00)
+ %mul.i.i.i = fmul float %10, 0.000000e+00
+ %mul3.i.i.i = fmul float %11, 0.000000e+00
+ %mul6.i.i.i = fmul float %12, 0.000000e+00
+ store float %mul.i.i.i, ptr %nor1, align 4
+ store float %mul3.i.i.i, ptr %arrayidx.i, align 4
+ store float %mul6.i.i.i, ptr %arrayidx2.i265, align 4
+ ret i32 0
+}
+
+define void @reorder_indices_2(ptr %spoint) {
+; CHECK-LABEL: define void @reorder_indices_2(
+; CHECK-SAME: ptr [[SPOINT:%.*]]) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[TMP0:%.*]] = extractelement <3 x float> zeroinitializer, i64 0
+; CHECK-NEXT: [[TMP1:%.*]] = tail call float @llvm.fmuladd.f32(float [[TMP0]], float 0.000000e+00, float 0.000000e+00)
+; CHECK-NEXT: [[MUL4_I461:%.*]] = fmul float [[TMP1]], 0.000000e+00
+; CHECK-NEXT: [[DSCO:%.*]] = getelementptr float, ptr [[SPOINT]], i64 0
+; CHECK-NEXT: [[TMP2:%.*]] = call <2 x float> @llvm.fmuladd.v2f32(<2 x float> zeroinitializer, <2 x float> zeroinitializer, <2 x float> zeroinitializer)
+; CHECK-NEXT: [[TMP3:%.*]] = fmul <2 x float> [[TMP2]], zeroinitializer
+; CHECK-NEXT: store <2 x float> [[TMP3]], ptr [[DSCO]], align 4
+; CHECK-NEXT: [[ARRAYIDX5_I476:%.*]] = getelementptr float, ptr [[SPOINT]], i64 2
+; CHECK-NEXT: store float [[MUL4_I461]], ptr [[ARRAYIDX5_I476]], align 4
+; CHECK-NEXT: ret void
+;
+entry:
+ %0 = extractelement <3 x float> zeroinitializer, i64 1
+ %1 = extractelement <3 x float> zeroinitializer, i64 2
+ %2 = extractelement <3 x float> zeroinitializer, i64 0
+ %3 = tail call float @llvm.fmuladd.f32(float %0, float 0.000000e+00, float 0.000000e+00)
+ %4 = tail call float @llvm.fmuladd.f32(float %1, float 0.000000e+00, float 0.000000e+00)
+ %5 = tail call float @llvm.fmuladd.f32(float %2, float 0.000000e+00, float 0.000000e+00)
+ %mul.i457 = fmul float %3, 0.000000e+00
+ %mul2.i459 = fmul float %4, 0.000000e+00
+ %mul4.i461 = fmul float %5, 0.000000e+00
+ %dsco = getelementptr float, ptr %spoint, i64 0
+ store float %mul.i457, ptr %dsco, align 4
+ %arrayidx3.i474 = getelementptr float, ptr %spoint, i64 1
+ store float %mul2.i459, ptr %arrayidx3.i474, align 4
+ %arrayidx5.i476 = getelementptr float, ptr %spoint, i64 2
+ store float %mul4.i461, ptr %arrayidx5.i476, align 4
+ ret void
+}
+
+define void @reorder_indices_2x_load(ptr %png_ptr, ptr %info_ptr) {
+; CHECK-LABEL: define void @reorder_indices_2x_load(
+; CHECK-SAME: ptr [[PNG_PTR:%.*]], ptr [[INFO_PTR:%.*]]) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[BIT_DEPTH:%.*]] = getelementptr i8, ptr [[INFO_PTR]], i64 0
+; CHECK-NEXT: [[TMP0:%.*]] = load i8, ptr [[BIT_DEPTH]], align 4
+; CHECK-NEXT: [[COLOR_TYPE:%.*]] = getelementptr i8, ptr [[INFO_PTR]], i64 1
+; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr [[COLOR_TYPE]], align 1
+; CHECK-NEXT: [[BIT_DEPTH37_I:%.*]] = getelementptr i8, ptr [[PNG_PTR]], i64 11
+; CHECK-NEXT: store i8 [[TMP0]], ptr [[BIT_DEPTH37_I]], align 1
+; CHECK-NEXT: [[COLOR_TYPE39_I:%.*]] = getelementptr i8, ptr [[PNG_PTR]], i64 10
+; CHECK-NEXT: store i8 [[TMP1]], ptr [[COLOR_TYPE39_I]], align 2
+; CHECK-NEXT: [[USR_BIT_DEPTH_I:%.*]] = getelementptr i8, ptr [[PNG_PTR]], i64 12
+; CHECK-NEXT: store i8 [[TMP0]], ptr [[USR_BIT_DEPTH_I]], align 8
+; CHECK-NEXT: ret void
+;
+entry:
+ %bit_depth = getelementptr i8, ptr %info_ptr, i64 0
+ %0 = load i8, ptr %bit_depth, align 4
+ %color_type = getelementptr i8, ptr %info_ptr, i64 1
+ %1 = load i8, ptr %color_type, align 1
+ %bit_depth37.i = getelementptr i8, ptr %png_ptr, i64 11
+ store i8 %0, ptr %bit_depth37.i, align 1
+ %color_type39.i = getelementptr i8, ptr %png_ptr, i64 10
+ store i8 %1, ptr %color_type39.i, align 2
+ %usr_bit_depth.i = getelementptr i8, ptr %png_ptr, i64 12
+ store i8 %0, ptr %usr_bit_depth.i, align 8
+ ret void
+}
+
+define void @reuse_shuffle_indidces_1(ptr %col, float %0, float %1) {
+; CHECK-LABEL: define void @reuse_shuffle_indidces_1(
+; CHECK-SAME: ptr [[COL:%.*]], float [[TMP0:%.*]], float [[TMP1:%.*]]) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[TMP2:%.*]] = insertelement <2 x float> poison, float [[TMP1]], i32 0
+; CHECK-NEXT: [[TMP3:%.*]] = insertelement <2 x float> [[TMP2]], float [[TMP0]], i32 1
+; CHECK-NEXT: [[TMP4:%.*]] = fmul <2 x float> [[TMP3]], zeroinitializer
+; CHECK-NEXT: [[TMP5:%.*]] = fadd <2 x float> [[TMP4]], zeroinitializer
+; CHECK-NEXT: store <2 x float> [[TMP5]], ptr [[COL]], align 4
+; CHECK-NEXT: [[ARRAYIDX33:%.*]] = getelementptr float, ptr [[COL]], i64 2
+; CHECK-NEXT: [[MUL38:%.*]] = fmul float [[TMP0]], 0.000000e+00
+; CHECK-NEXT: [[TMP6:%.*]] = fadd float [[MUL38]], 0.000000e+00
+; CHECK-NEXT: store float [[TMP6]], ptr [[ARRAYIDX33]], align 4
+; CHECK-NEXT: ret void
+;
+entry:
+ %mul24 = fmul float %1, 0.000000e+00
+ %2 = fadd float %mul24, 0.000000e+00
+ store float %2, ptr %col, align 4
+ %arrayidx26 = getelementptr float, ptr %col, i64 1
+ %mul31 = fmul float %0, 0.000000e+00
+ %3 = fadd float %mul31, 0.000000e+00
+ store float %3, ptr %arrayidx26, align 4
+ %arrayidx33 = getelementptr float, ptr %col, i64 2
+ %mul38 = fmul float %0, 0.000000e+00
+ %4 = fadd float %mul38, 0.000000e+00
+ store float %4, ptr %arrayidx33, align 4
+ ret void
+}
+
+define void @reuse_shuffle_indices_2(ptr %inertia, double %0) {
+; CHECK-LABEL: define void @reuse_shuffle_indices_2(
+; CHECK-SAME: ptr [[INERTIA:%.*]], double [[TMP0:%.*]]) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[TMP1:%.*]] = insertelement <2 x double> poison, double [[TMP0]], i32 0
+; CHECK-NEXT: [[TMP2:%.*]] = shufflevector <2 x double> [[TMP1]], <2 x double> poison, <2 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP3:%.*]] = fptrunc <2 x double> [[TMP2]] to <2 x float>
+; CHECK-NEXT: [[TMP4:%.*]] = fmul <2 x float> [[TMP3]], zeroinitializer
+; CHECK-NEXT: [[TMP5:%.*]] = shufflevector <2 x float> [[TMP4]], <2 x float> poison, <4 x i32> <i32 0, i32 1, i32 1, i32 poison>
+; CHECK-NEXT: [[TMP6:%.*]] = fadd <4 x float> [[TMP5]], <float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float undef>
+; CHECK-NEXT: [[TMP7:%.*]] = fmul <4 x float> [[TMP6]], <float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float undef>
+; CHECK-NEXT: [[TMP8:%.*]] = fadd <4 x float> [[TMP7]], <float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float undef>
+; CHECK-NEXT: [[TMP9:%.*]] = shufflevector <4 x float> [[TMP8]], <4 x float> poison, <3 x i32> <i32 0, i32 1, i32 2>
+; CHECK-NEXT: store <3 x float> [[TMP9]], ptr [[INERTIA]], align 4
+; CHECK-NEXT: ret void
+;
+entry:
+ %1 = insertelement <2 x double> poison, double %0, i32 0
+ %2 = shufflevector <2 x double> %1, <2 x double> poison, <2 x i32> zeroinitializer
+ %3 = fptrunc <2 x double> %2 to <2 x float>
+ %4 = fmul <2 x float> %3, zeroinitializer
+ %5 = shufflevector <2 x float> %4, <2 x float> poison, <4 x i32> <i32 0, i32 1, i32 1, i32 poison>
+ %6 = fadd <4 x float> %5, <float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float undef>
+ %7 = fmul <4 x float> %6, <float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float undef>
+ %8 = fadd <4 x float> %7, <float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float undef>
+ %9 = shufflevector <4 x float> %8, <4 x float> poison, <3 x i32> <i32 0, i32 1, i32 2>
+ store <3 x float> %9, ptr %inertia, align 4
+ ret void
+}
+
+define void @reuse_shuffle_indices_cost_crash_2(ptr %bezt, float %0) {
+; CHECK-LABEL: define void @reuse_shuffle_indices_cost_crash_2(
+; CHECK-SAME: ptr [[BEZT:%.*]], float [[TMP0:%.*]]) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[FNEG:%.*]] = fmul float [[TMP0]], 0.000000e+00
+; CHECK-NEXT: [[TMP1:%.*]] = insertelement <2 x float> poison, float [[TMP0]], i32 0
+; CHECK-NEXT: [[TMP2:%.*]] = shufflevector <2 x float> [[TMP1]], <2 x float> poison, <2 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP3:%.*]] = insertelement <2 x float> poison, float [[FNEG]], i32 0
+; CHECK-NEXT: [[TMP4:%.*]] = shufflevector <2 x float> [[TMP3]], <2 x float> poison, <2 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP5:%.*]] = call <2 x float> @llvm.fmuladd.v2f32(<2 x float> [[TMP2]], <2 x float> [[TMP4]], <2 x float> zeroinitializer)
+; CHECK-NEXT: store <2 x float> [[TMP5]], ptr [[BEZT]], align 4
+; CHECK-NEXT: [[TMP6:%.*]] = tail call float @llvm.fmuladd.f32(float [[FNEG]], float 0.000000e+00, float 0.000000e+00)
+; CHECK-NEXT: [[ARRAYIDX8_I831:%.*]] = getelementptr float, ptr [[BEZT]], i64 2
+; CHECK-NEXT: store float [[TMP6]], ptr [[ARRAYIDX8_I831]], align 4
+; CHECK-NEXT: ret void
+;
+entry:
+ %fneg = fmul float %0, 0.000000e+00
+ %1 = tail call float @llvm.fmuladd.f32(float %0, float %fneg, float 0.000000e+00)
+ store float %1, ptr %bezt, align 4
+ %2 = tail call float @llvm.fmuladd.f32(float %0, float %fneg, float 0.000000e+00)
+ %arrayidx5.i = getelementptr float, ptr %bezt, i64 1
+ store float %2, ptr %arrayidx5.i, align 4
+ %3 = tail call float @llvm.fmuladd.f32(float %fneg, float 0.000000e+00, float 0.000000e+00)
+ %arrayidx8.i831 = getelementptr float, ptr %bezt, i64 2
+ store float %3, ptr %arrayidx8.i831, align 4
+ ret void
+}
+
+define void @reuse_shuffle_indices_cost_crash_3(ptr %m, double %conv, double %conv2) {
+; CHECK-LABEL: define void @reuse_shuffle_indices_cost_crash_3(
+; CHECK-SAME: ptr [[M:%.*]], double [[CONV:%.*]], double [[CONV2:%.*]]) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[SUB19:%.*]] = fsub double 0.000000e+00, [[CONV2]]
+; CHECK-NEXT: [[CONV20:%.*]] = fptrunc double [[SUB19]] to float
+; CHECK-NEXT: store float [[CONV20]], ptr [[M]], align 4
+; CHECK-NEXT: [[ADD:%.*]] = fadd double [[CONV]], 0.000000e+00
+; CHECK-NEXT: [[CONV239:%.*]] = fptrunc double [[ADD]] to float
+; CHECK-NEXT: [[ARRAYIDX25:%.*]] = getelementptr [4 x float], ptr [[M]], i64 0, i64 1
+; CHECK-NEXT: store float [[CONV239]], ptr [[ARRAYIDX25]], align 4
+; CHECK-NEXT: [[ADD26:%.*]] = fsub double [[CONV]], [[CONV]]
+; CHECK-NEXT: [[CONV27:%.*]] = fptrunc double [[ADD26]] to float
+; CHECK-NEXT: [[ARRAYIDX29:%.*]] = getelementptr [4 x float], ptr [[M]], i64 0, i64 2
+; CHECK-NEXT: store float [[CONV27]], ptr [[ARRAYIDX29]], align 4
+; CHECK-NEXT: ret void
+;
+entry:
+ %sub19 = fsub double 0.000000e+00, %conv2
+ %conv20 = fptrunc double %sub19 to float
+ store float %conv20, ptr %m, align 4
+ %add = fadd double %conv, 0.000000e+00
+ %conv239 = fptrunc double %add to float
+ %arrayidx25 = getelementptr [4 x float], ptr %m, i64 0, i64 1
+ store float %conv239, ptr %arrayidx25, align 4
+ %add26 = fsub double %conv, %conv
+ %conv27 = fptrunc double %add26 to float
+ %arrayidx29 = getelementptr [4 x float], ptr %m, i64 0, i64 2
+ store float %conv27, ptr %arrayidx29, align 4
+ ret void
+}
+
+define void @reuse_shuffle_indices_cost_crash_4(double %conv7.i) {
+; CHECK-LABEL: define void @reuse_shuffle_indices_cost_crash_4(
+; CHECK-SAME: double [[CONV7_I:%.*]]) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[DATA_I111:%.*]] = alloca [0 x [0 x [0 x [3 x float]]]], i32 0, align 4
+; CHECK-NEXT: [[ARRAYIDX_2_I:%.*]] = getelementptr [3 x float], ptr [[DATA_I111]], i64 0, i64 2
+; CHECK-NEXT: [[MUL17_I_US:%.*]] = fmul double [[CONV7_I]], 0.000000e+00
+; CHECK-NEXT: [[MUL_2_I_I_US:%.*]] = fmul double [[MUL17_I_US]], 0.000000e+00
+; CHECK-NEXT: [[TMP0:%.*]] = insertelement <2 x double> poison, double [[CONV7_I]], i32 0
+; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <2 x double> [[TMP0]], <2 x double> poison, <2 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP2:%.*]] = fadd <2 x double> [[TMP1]], zeroinitializer
+; CHECK-NEXT: [[ADD_2_I_I_US:%.*]] = fadd double [[MUL_2_I_I_US]], 0.000000e+00
+; CHECK-NEXT: [[TMP3:%.*]] = fmul <2 x double> [[TMP2]], [[TMP1]]
+; CHECK-NEXT: [[TMP4:%.*]] = fadd <2 x double> [[TMP3]], zeroinitializer
+; CHECK-NEXT: [[TMP5:%.*]] = fptrunc <2 x double> [[TMP4]] to <2 x float>
+; CHECK-NEXT: store <2 x float> [[TMP5]], ptr [[DATA_I111]], align 4
+; CHECK-NEXT: [[CONV_2_I46_US:%.*]] = fptrunc double [[ADD_2_I_I_US]] to float
+; CHECK-NEXT: store float [[CONV_2_I46_US]], ptr [[ARRAYIDX_2_I]], align 4
+; CHECK-NEXT: [[CALL2_I_US:%.*]] = load volatile ptr, ptr [[DATA_I111]], align 8
+; CHECK-NEXT: ret void
+;
+entry:
+ %data.i111 = alloca [0 x [0 x [0 x [3 x float]]]], i32 0, align 4
+ %arrayidx.1.i = getelementptr [3 x float], ptr %data.i111, i64 0, i64 1
+ %arrayidx.2.i = getelementptr [3 x float], ptr %data.i111, i64 0, i64 2
+ %mul17.i.us = fmul double %conv7.i, 0.000000e+00
+ %mul.2.i.i.us = fmul double %mul17.i.us, 0.000000e+00
+ %add.i.i82.i.us = fadd double %conv7.i, 0.000000e+00
+ %add.1.i.i84.i.us = fadd double %conv7.i, 0.000000e+00
+ %mul.i.i91.i.us = fmul double %add.i.i82.i.us, %conv7.i
+ %mul.1.i.i92.i.us = fmul double %add.1.i.i84.i.us, %conv7.i
+ %add.i96.i.us = fadd double %mul.i.i91.i.us, 0.000000e+00
+ %add.1.i.i.us = fadd double %mul.1.i.i92.i.us, 0.000000e+00
+ %add.2.i.i.us = fadd double %mul.2.i.i.us, 0.000000e+00
+ %conv.i42.us = fptrunc double %add.i96.i.us to float
+ store float %conv.i42.us, ptr %data.i111, align 4
+ %conv.1.i44.us = fptrunc double %add.1.i.i.us to float
+ store float %conv.1.i44.us, ptr %arrayidx.1.i, align 4
+ %conv.2.i46.us = fptrunc double %add.2.i.i.us to float
+ store float %conv.2.i46.us, ptr %arrayidx.2.i, align 4
+ %call2.i.us = load volatile ptr, ptr %data.i111, align 8
+ ret void
+}
+
+define void @common_mask(ptr %m, double %conv, double %conv2) {
+; CHECK-LABEL: define void @common_mask(
+; CHECK-SAME: ptr [[M:%.*]], double [[CONV:%.*]], double [[CONV2:%.*]]) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[SUB19:%.*]] = fsub double [[CONV]], [[CONV]]
+; CHECK-NEXT: [[CONV20:%.*]] = fptrunc double [[SUB19]] to float
+; CHECK-NEXT: store float [[CONV20]], ptr [[M]], align 4
+; CHECK-NEXT: [[ADD:%.*]] = fadd double [[CONV2]], 0.000000e+00
+; CHECK-NEXT: [[CONV239:%.*]] = fptrunc double [[ADD]] to float
+; CHECK-NEXT: [[ARRAYIDX25:%.*]] = getelementptr [4 x float], ptr [[M]], i64 0, i64 1
+; CHECK-NEXT: store float [[CONV239]], ptr [[ARRAYIDX25]], align 4
+; CHECK-NEXT: [[ADD26:%.*]] = fsub double 0.000000e+00, [[CONV]]
+; CHECK-NEXT: [[CONV27:%.*]] = fptrunc double [[ADD26]] to float
+; CHECK-NEXT: [[ARRAYIDX29:%.*]] = getelementptr [4 x float], ptr [[M]], i64 0, i64 2
+; CHECK-NEXT: store float [[CONV27]], ptr [[ARRAYIDX29]], align 4
+; CHECK-NEXT: ret void
+;
+entry:
+ %sub19 = fsub double %conv, %conv
+ %conv20 = fptrunc double %sub19 to float
+ store float %conv20, ptr %m, align 4
+ %add = fadd double %conv2, 0.000000e+00
+ %conv239 = fptrunc double %add to float
+ %arrayidx25 = getelementptr [4 x float], ptr %m, i64 0, i64 1
+ store float %conv239, ptr %arrayidx25, align 4
+ %add26 = fsub double 0.000000e+00, %conv
+ %conv27 = fptrunc double %add26 to float
+ %arrayidx29 = getelementptr [4 x float], ptr %m, i64 0, i64 2
+ store float %conv27, ptr %arrayidx29, align 4
+ ret void
+}
+
+define void @vec3_extract(<3 x i16> %pixel.sroa.0.4.vec.insert606, ptr %call3.i536) {
+; CHECK-LABEL: define void @vec3_extract(
+; CHECK-SAME: <3 x i16> [[PIXEL_SROA_0_4_VEC_INSERT606:%.*]], ptr [[CALL3_I536:%.*]]) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[PIXEL_SROA_0_4_VEC_EXTRACT:%.*]] = extractelement <3 x i16> [[PIXEL_SROA_0_4_VEC_INSERT606]], i64 2
+; CHECK-NEXT: [[RED668:%.*]] = getelementptr i16, ptr [[CALL3_I536]], i64 2
+; CHECK-NEXT: store i16 [[PIXEL_SROA_0_4_VEC_EXTRACT]], ptr [[RED668]], align 2
+; CHECK-NEXT: [[TMP0:%.*]] = shufflevector <3 x i16> [[PIXEL_SROA_0_4_VEC_INSERT606]], <3 x i16> poison, <2 x i32> <i32 0, i32 1>
+; CHECK-NEXT: store <2 x i16> [[TMP0]], ptr [[CALL3_I536]], align 2
+; CHECK-NEXT: ret void
+;
+entry:
+ %pixel.sroa.0.4.vec.extract = extractelement <3 x i16> %pixel.sroa.0.4.vec.insert606, i64 2
+ %red668 = getelementptr i16, ptr %call3.i536, i64 2
+ store i16 %pixel.sroa.0.4.vec.extract, ptr %red668, align 2
+ %pixel.sroa.0.2.vec.extract = extractelement <3 x i16> %pixel.sroa.0.4.vec.insert606, i64 1
+ %green670 = getelementptr i16, ptr %call3.i536, i64 1
+ store i16 %pixel.sroa.0.2.vec.extract, ptr %green670, align 2
+ %pixel.sroa.0.0.vec.extract = extractelement <3 x i16> %pixel.sroa.0.4.vec.insert606, i64 0
+ store i16 %pixel.sroa.0.0.vec.extract, ptr %call3.i536, align 2
+ ret void
+}
+
+declare float @llvm.fmuladd.f32(float, float, float)
diff --git a/llvm/test/Transforms/SeparateConstOffsetFromGEP/AMDGPU/reorder-gep-inbounds.ll b/llvm/test/Transforms/SeparateConstOffsetFromGEP/AMDGPU/reorder-gep-inbounds.ll
new file mode 100644
index 0000000..c24bbd5
--- /dev/null
+++ b/llvm/test/Transforms/SeparateConstOffsetFromGEP/AMDGPU/reorder-gep-inbounds.ll
@@ -0,0 +1,51 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 3
+; RUN: opt -mtriple=amdgcn-amd-amdhsa -mcpu=gfx90a -S -passes=separate-const-offset-from-gep < %s | FileCheck %s
+
+define void @inboundsPossiblyNegative(ptr %in.ptr, i32 %in.idx1) {
+; CHECK-LABEL: define void @inboundsPossiblyNegative(
+; CHECK-SAME: ptr [[IN_PTR:%.*]], i32 [[IN_IDX1:%.*]]) #[[ATTR0:[0-9]+]] {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[IDXPROM:%.*]] = sext i32 [[IN_IDX1]] to i64
+; CHECK-NEXT: [[TMP0:%.*]] = getelementptr <2 x i8>, ptr [[IN_PTR]], i64 [[IDXPROM]]
+; CHECK-NEXT: [[TMP1:%.*]] = getelementptr <2 x i8>, ptr [[TMP0]], i32 1
+; CHECK-NEXT: ret void
+;
+entry:
+ %const1 = getelementptr inbounds <2 x i8>, ptr %in.ptr, i32 1
+ %idx1 = getelementptr inbounds <2 x i8>, ptr %const1, i32 %in.idx1
+ ret void
+}
+
+define void @inboundsNonNegative(ptr %in.ptr, i32 %in.idx1) {
+; CHECK-LABEL: define void @inboundsNonNegative(
+; CHECK-SAME: ptr [[IN_PTR:%.*]], i32 [[IN_IDX1:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[IN_IDX1_NNEG:%.*]] = and i32 [[IN_IDX1]], 2147483647
+; CHECK-NEXT: [[IDXPROM:%.*]] = sext i32 [[IN_IDX1_NNEG]] to i64
+; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds <2 x i8>, ptr [[IN_PTR]], i64 [[IDXPROM]]
+; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds <2 x i8>, ptr [[TMP0]], i32 1
+; CHECK-NEXT: ret void
+;
+entry:
+ %in.idx1.nneg = and i32 %in.idx1, 2147483647
+ %const1 = getelementptr inbounds <2 x i8>, ptr %in.ptr, i32 1
+ %idx1 = getelementptr inbounds <2 x i8>, ptr %const1, i32 %in.idx1.nneg
+ ret void
+}
+
+define void @inboundsNonchained(ptr %in.ptr, i32 %in.idx1) {
+; CHECK-LABEL: define void @inboundsNonchained(
+; CHECK-SAME: ptr [[IN_PTR:%.*]], i32 [[IN_IDX1:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[IN_IDX1_NNEG:%.*]] = and i32 [[IN_IDX1]], 2147483647
+; CHECK-NEXT: [[IDXPROM:%.*]] = sext i32 [[IN_IDX1_NNEG]] to i64
+; CHECK-NEXT: [[TMP0:%.*]] = getelementptr <2 x i8>, ptr [[IN_PTR]], i64 [[IDXPROM]]
+; CHECK-NEXT: [[TMP1:%.*]] = getelementptr <2 x i8>, ptr [[TMP0]], i32 1
+; CHECK-NEXT: ret void
+;
+entry:
+ %in.idx1.nneg = and i32 %in.idx1, 2147483647
+ %const1 = getelementptr inbounds <2 x i8>, ptr %in.ptr, i32 1
+ %idx1 = getelementptr <2 x i8>, ptr %const1, i32 %in.idx1.nneg
+ ret void
+}
diff --git a/llvm/test/Transforms/SeparateConstOffsetFromGEP/AMDGPU/reorder-gep.ll b/llvm/test/Transforms/SeparateConstOffsetFromGEP/AMDGPU/reorder-gep.ll
new file mode 100644
index 0000000..7137f0f
--- /dev/null
+++ b/llvm/test/Transforms/SeparateConstOffsetFromGEP/AMDGPU/reorder-gep.ll
@@ -0,0 +1,175 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 3
+; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx90a --start-before=separate-const-offset-from-gep < %s | FileCheck %s
+
+define protected amdgpu_kernel void @sink_addr(ptr addrspace(3) %in.ptr, i32 %in.idx0, i32 %in.idx1) {
+; CHECK-LABEL: sink_addr:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_load_dwordx4 s[0:3], s[6:7], 0x0
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_lshl_b32 s3, s1, 1
+; CHECK-NEXT: s_add_i32 s0, s0, s3
+; CHECK-NEXT: s_lshl_b32 s2, s2, 1
+; CHECK-NEXT: s_add_i32 s0, s0, s2
+; CHECK-NEXT: s_cmp_lg_u32 s1, 0
+; CHECK-NEXT: s_cbranch_scc1 .LBB0_2
+; CHECK-NEXT: ; %bb.1: ; %bb.1
+; CHECK-NEXT: v_mov_b32_e32 v12, s0
+; CHECK-NEXT: ds_read_b128 v[0:3], v12
+; CHECK-NEXT: ds_read_b128 v[4:7], v12 offset:512
+; CHECK-NEXT: ds_read_b128 v[8:11], v12 offset:1024
+; CHECK-NEXT: ds_read_b128 v[12:15], v12 offset:1536
+; CHECK-NEXT: s_waitcnt lgkmcnt(3)
+; CHECK-NEXT: ;;#ASMSTART
+; CHECK-NEXT: ; use v[0:3]
+; CHECK-NEXT: ;;#ASMEND
+; CHECK-NEXT: s_waitcnt lgkmcnt(2)
+; CHECK-NEXT: ;;#ASMSTART
+; CHECK-NEXT: ; use v[4:7]
+; CHECK-NEXT: ;;#ASMEND
+; CHECK-NEXT: s_waitcnt lgkmcnt(1)
+; CHECK-NEXT: ;;#ASMSTART
+; CHECK-NEXT: ; use v[8:11]
+; CHECK-NEXT: ;;#ASMEND
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: ;;#ASMSTART
+; CHECK-NEXT: ; use v[12:15]
+; CHECK-NEXT: ;;#ASMEND
+; CHECK-NEXT: .LBB0_2: ; %end
+; CHECK-NEXT: s_add_i32 s1, s0, 0x200
+; CHECK-NEXT: v_mov_b32_e32 v0, s0
+; CHECK-NEXT: s_add_i32 s2, s0, 0x400
+; CHECK-NEXT: ;;#ASMSTART
+; CHECK-NEXT: ; use v0
+; CHECK-NEXT: ;;#ASMEND
+; CHECK-NEXT: v_mov_b32_e32 v0, s1
+; CHECK-NEXT: s_add_i32 s3, s0, 0x600
+; CHECK-NEXT: ;;#ASMSTART
+; CHECK-NEXT: ; use v0
+; CHECK-NEXT: ;;#ASMEND
+; CHECK-NEXT: v_mov_b32_e32 v0, s2
+; CHECK-NEXT: ;;#ASMSTART
+; CHECK-NEXT: ; use v0
+; CHECK-NEXT: ;;#ASMEND
+; CHECK-NEXT: v_mov_b32_e32 v0, s3
+; CHECK-NEXT: ;;#ASMSTART
+; CHECK-NEXT: ; use v0
+; CHECK-NEXT: ;;#ASMEND
+; CHECK-NEXT: s_endpgm
+entry:
+ %base = getelementptr half, ptr addrspace(3) %in.ptr, i32 %in.idx0
+ %idx0 = getelementptr half, ptr addrspace(3) %base, i32 %in.idx1
+ %const1 = getelementptr half, ptr addrspace(3) %base, i32 256
+ %idx1 = getelementptr half, ptr addrspace(3) %const1, i32 %in.idx1
+ %const2 = getelementptr half, ptr addrspace(3) %base, i32 512
+ %idx2 = getelementptr half, ptr addrspace(3) %const2, i32 %in.idx1
+ %const3 = getelementptr half, ptr addrspace(3) %base, i32 768
+ %idx3 = getelementptr half, ptr addrspace(3) %const3, i32 %in.idx1
+ %cmp0 = icmp eq i32 %in.idx0, 0
+ br i1 %cmp0, label %bb.1, label %end
+
+bb.1:
+ %val0 = load <8 x half>, ptr addrspace(3) %idx0, align 16
+ %val1 = load <8 x half>, ptr addrspace(3) %idx1, align 16
+ %val2 = load <8 x half>, ptr addrspace(3) %idx2, align 16
+ %val3 = load <8 x half>, ptr addrspace(3) %idx3, align 16
+ call void asm sideeffect "; use $0", "v"(<8 x half> %val0)
+ call void asm sideeffect "; use $0", "v"(<8 x half> %val1)
+ call void asm sideeffect "; use $0", "v"(<8 x half> %val2)
+ call void asm sideeffect "; use $0", "v"(<8 x half> %val3)
+ br label %end
+
+end:
+ call void asm sideeffect "; use $0", "v"(ptr addrspace(3) %idx0)
+ call void asm sideeffect "; use $0", "v"(ptr addrspace(3) %idx1)
+ call void asm sideeffect "; use $0", "v"(ptr addrspace(3) %idx2)
+ call void asm sideeffect "; use $0", "v"(ptr addrspace(3) %idx3)
+ ret void
+}
+
+define protected amdgpu_kernel void @illegal_addr_mode(ptr addrspace(3) %in.ptr, i32 %in.idx0, i32 %in.idx1) {
+; CHECK-LABEL: illegal_addr_mode:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_load_dwordx4 s[4:7], s[6:7], 0x0
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_lshl_b32 s0, s5, 1
+; CHECK-NEXT: s_lshl_b32 s1, s6, 1
+; CHECK-NEXT: s_add_i32 s3, s4, s0
+; CHECK-NEXT: s_add_i32 s3, s3, s1
+; CHECK-NEXT: s_add_i32 s2, s3, 0x12a60
+; CHECK-NEXT: s_add_i32 s1, s3, 0x12c60
+; CHECK-NEXT: s_add_i32 s0, s3, 0x12ed8
+; CHECK-NEXT: s_cmp_lg_u32 s5, 0
+; CHECK-NEXT: s_cbranch_scc1 .LBB1_2
+; CHECK-NEXT: ; %bb.1: ; %bb.1
+; CHECK-NEXT: v_mov_b32_e32 v0, s3
+; CHECK-NEXT: v_mov_b32_e32 v4, s2
+; CHECK-NEXT: v_mov_b32_e32 v8, s1
+; CHECK-NEXT: v_mov_b32_e32 v12, s0
+; CHECK-NEXT: ds_read_b128 v[0:3], v0
+; CHECK-NEXT: ds_read_b128 v[4:7], v4
+; CHECK-NEXT: ds_read_b128 v[8:11], v8
+; CHECK-NEXT: ds_read_b128 v[12:15], v12
+; CHECK-NEXT: s_waitcnt lgkmcnt(3)
+; CHECK-NEXT: ;;#ASMSTART
+; CHECK-NEXT: ; use v[0:3]
+; CHECK-NEXT: ;;#ASMEND
+; CHECK-NEXT: s_waitcnt lgkmcnt(2)
+; CHECK-NEXT: ;;#ASMSTART
+; CHECK-NEXT: ; use v[4:7]
+; CHECK-NEXT: ;;#ASMEND
+; CHECK-NEXT: s_waitcnt lgkmcnt(1)
+; CHECK-NEXT: ;;#ASMSTART
+; CHECK-NEXT: ; use v[8:11]
+; CHECK-NEXT: ;;#ASMEND
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: ;;#ASMSTART
+; CHECK-NEXT: ; use v[12:15]
+; CHECK-NEXT: ;;#ASMEND
+; CHECK-NEXT: .LBB1_2: ; %end
+; CHECK-NEXT: v_mov_b32_e32 v0, s3
+; CHECK-NEXT: ;;#ASMSTART
+; CHECK-NEXT: ; use v0
+; CHECK-NEXT: ;;#ASMEND
+; CHECK-NEXT: v_mov_b32_e32 v0, s2
+; CHECK-NEXT: ;;#ASMSTART
+; CHECK-NEXT: ; use v0
+; CHECK-NEXT: ;;#ASMEND
+; CHECK-NEXT: v_mov_b32_e32 v0, s1
+; CHECK-NEXT: ;;#ASMSTART
+; CHECK-NEXT: ; use v0
+; CHECK-NEXT: ;;#ASMEND
+; CHECK-NEXT: v_mov_b32_e32 v0, s0
+; CHECK-NEXT: ;;#ASMSTART
+; CHECK-NEXT: ; use v0
+; CHECK-NEXT: ;;#ASMEND
+; CHECK-NEXT: s_endpgm
+entry:
+ %base = getelementptr half, ptr addrspace(3) %in.ptr, i32 %in.idx0
+ %idx0 = getelementptr half, ptr addrspace(3) %base, i32 %in.idx1
+ %const1 = getelementptr half, ptr addrspace(3) %base, i32 38192
+ %idx1 = getelementptr half, ptr addrspace(3) %const1, i32 %in.idx1
+ %const2 = getelementptr half, ptr addrspace(3) %base, i32 38448
+ %idx2 = getelementptr half, ptr addrspace(3) %const2, i32 %in.idx1
+ %const3 = getelementptr half, ptr addrspace(3) %base, i32 38764
+ %idx3 = getelementptr half, ptr addrspace(3) %const3, i32 %in.idx1
+ %cmp0 = icmp eq i32 %in.idx0, 0
+ br i1 %cmp0, label %bb.1, label %end
+
+bb.1:
+ %val0 = load <8 x half>, ptr addrspace(3) %idx0, align 16
+ %val1 = load <8 x half>, ptr addrspace(3) %idx1, align 16
+ %val2 = load <8 x half>, ptr addrspace(3) %idx2, align 16
+ %val3 = load <8 x half>, ptr addrspace(3) %idx3, align 16
+ call void asm sideeffect "; use $0", "v"(<8 x half> %val0)
+ call void asm sideeffect "; use $0", "v"(<8 x half> %val1)
+ call void asm sideeffect "; use $0", "v"(<8 x half> %val2)
+ call void asm sideeffect "; use $0", "v"(<8 x half> %val3)
+ br label %end
+
+end:
+ call void asm sideeffect "; use $0", "v"(ptr addrspace(3) %idx0)
+ call void asm sideeffect "; use $0", "v"(ptr addrspace(3) %idx1)
+ call void asm sideeffect "; use $0", "v"(ptr addrspace(3) %idx2)
+ call void asm sideeffect "; use $0", "v"(ptr addrspace(3) %idx3)
+ ret void
+}
diff --git a/llvm/test/Transforms/SeparateConstOffsetFromGEP/NVPTX/lower-gep-reorder.ll b/llvm/test/Transforms/SeparateConstOffsetFromGEP/NVPTX/lower-gep-reorder.ll
new file mode 100644
index 0000000..a91c817
--- /dev/null
+++ b/llvm/test/Transforms/SeparateConstOffsetFromGEP/NVPTX/lower-gep-reorder.ll
@@ -0,0 +1,65 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 3
+; RUN: opt -mtriple=nvptx64-nvidia-cuda -S -passes=separate-const-offset-from-gep < %s | FileCheck %s
+
+define protected amdgpu_kernel void @sink_addr(ptr %in.ptr, i64 %in.idx0, i64 %in.idx1) {
+; CHECK-LABEL: define protected amdgpu_kernel void @sink_addr(
+; CHECK-SAME: ptr [[IN_PTR:%.*]], i64 [[IN_IDX0:%.*]], i64 [[IN_IDX1:%.*]]) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[IDX0:%.*]] = getelementptr [8192 x i64], ptr [[IN_PTR]], i64 [[IN_IDX0]], i64 [[IN_IDX1]]
+; CHECK-NEXT: [[TMP0:%.*]] = getelementptr [8192 x i64], ptr [[IN_PTR]], i64 [[IN_IDX0]], i64 0
+; CHECK-NEXT: [[CONST11:%.*]] = getelementptr i8, ptr [[TMP0]], i64 2048
+; CHECK-NEXT: [[IDX1:%.*]] = getelementptr i64, ptr [[CONST11]], i64 [[IN_IDX1]]
+; CHECK-NEXT: [[TMP1:%.*]] = getelementptr [8192 x i64], ptr [[IN_PTR]], i64 [[IN_IDX0]], i64 0
+; CHECK-NEXT: [[CONST22:%.*]] = getelementptr i8, ptr [[TMP1]], i64 4096
+; CHECK-NEXT: [[IDX2:%.*]] = getelementptr i64, ptr [[CONST22]], i64 [[IN_IDX1]]
+; CHECK-NEXT: [[TMP2:%.*]] = getelementptr [8192 x i64], ptr [[IN_PTR]], i64 [[IN_IDX0]], i64 0
+; CHECK-NEXT: [[CONST33:%.*]] = getelementptr i8, ptr [[TMP2]], i64 6144
+; CHECK-NEXT: [[IDX3:%.*]] = getelementptr i64, ptr [[CONST33]], i64 [[IN_IDX1]]
+; CHECK-NEXT: [[CMP0:%.*]] = icmp eq i64 [[IN_IDX0]], 0
+; CHECK-NEXT: br i1 [[CMP0]], label [[BB_1:%.*]], label [[END:%.*]]
+; CHECK: bb.1:
+; CHECK-NEXT: [[VAL0:%.*]] = load <8 x i64>, ptr [[IDX0]], align 16
+; CHECK-NEXT: [[VAL1:%.*]] = load <8 x i64>, ptr [[IDX1]], align 16
+; CHECK-NEXT: [[VAL2:%.*]] = load <8 x i64>, ptr [[IDX2]], align 16
+; CHECK-NEXT: [[VAL3:%.*]] = load <8 x i64>, ptr [[IDX3]], align 16
+; CHECK-NEXT: call void asm sideeffect "
+; CHECK-NEXT: call void asm sideeffect "
+; CHECK-NEXT: call void asm sideeffect "
+; CHECK-NEXT: call void asm sideeffect "
+; CHECK-NEXT: br label [[END]]
+; CHECK: end:
+; CHECK-NEXT: call void asm sideeffect "
+; CHECK-NEXT: call void asm sideeffect "
+; CHECK-NEXT: call void asm sideeffect "
+; CHECK-NEXT: call void asm sideeffect "
+; CHECK-NEXT: ret void
+;
+entry:
+ %idx0 = getelementptr [8192 x i64], ptr %in.ptr, i64 %in.idx0, i64 %in.idx1
+ %const1 = getelementptr [8192 x i64], ptr %in.ptr, i64 %in.idx0, i64 256
+ %idx1 = getelementptr i64, ptr %const1, i64 %in.idx1
+ %const2 = getelementptr [8192 x i64], ptr %in.ptr, i64 %in.idx0, i64 512
+ %idx2 = getelementptr i64, ptr %const2, i64 %in.idx1
+ %const3 = getelementptr [8192 x i64], ptr %in.ptr, i64 %in.idx0, i64 768
+ %idx3 = getelementptr i64, ptr %const3, i64 %in.idx1
+ %cmp0 = icmp eq i64 %in.idx0, 0
+ br i1 %cmp0, label %bb.1, label %end
+
+bb.1:
+ %val0 = load <8 x i64>, ptr %idx0, align 16
+ %val1 = load <8 x i64>, ptr %idx1, align 16
+ %val2 = load <8 x i64>, ptr %idx2, align 16
+ %val3 = load <8 x i64>, ptr %idx3, align 16
+ call void asm sideeffect "; use $0", "v"(<8 x i64> %val0)
+ call void asm sideeffect "; use $0", "v"(<8 x i64> %val1)
+ call void asm sideeffect "; use $0", "v"(<8 x i64> %val2)
+ call void asm sideeffect "; use $0", "v"(<8 x i64> %val3)
+ br label %end
+
+end:
+ call void asm sideeffect "; use $0", "v"(ptr %idx0)
+ call void asm sideeffect "; use $0", "v"(ptr %idx1)
+ call void asm sideeffect "; use $0", "v"(ptr %idx2)
+ call void asm sideeffect "; use $0", "v"(ptr %idx3)
+ ret void
+}
diff --git a/llvm/test/Transforms/SeparateConstOffsetFromGEP/reorder-gep.ll b/llvm/test/Transforms/SeparateConstOffsetFromGEP/reorder-gep.ll
new file mode 100644
index 0000000..a15f11a
--- /dev/null
+++ b/llvm/test/Transforms/SeparateConstOffsetFromGEP/reorder-gep.ll
@@ -0,0 +1,188 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 3
+; RUN: opt -S -passes=separate-const-offset-from-gep < %s | FileCheck %s
+
+define void @illegal_addr_mode(ptr %in.ptr, i64 %in.idx0, i64 %in.idx1) {
+; CHECK-LABEL: define void @illegal_addr_mode(
+; CHECK-SAME: ptr [[IN_PTR:%.*]], i64 [[IN_IDX0:%.*]], i64 [[IN_IDX1:%.*]]) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[BASE:%.*]] = getelementptr i64, ptr [[IN_PTR]], i64 [[IN_IDX0]]
+; CHECK-NEXT: [[IDX0:%.*]] = getelementptr i64, ptr [[BASE]], i64 [[IN_IDX1]]
+; CHECK-NEXT: [[CONST1:%.*]] = getelementptr i64, ptr [[BASE]], i64 256
+; CHECK-NEXT: [[IDX1:%.*]] = getelementptr i64, ptr [[CONST1]], i64 [[IN_IDX1]]
+; CHECK-NEXT: [[CONST2:%.*]] = getelementptr i64, ptr [[BASE]], i64 512
+; CHECK-NEXT: [[IDX2:%.*]] = getelementptr i64, ptr [[CONST2]], i64 [[IN_IDX1]]
+; CHECK-NEXT: [[CONST3:%.*]] = getelementptr i64, ptr [[BASE]], i64 768
+; CHECK-NEXT: [[IDX3:%.*]] = getelementptr i64, ptr [[CONST3]], i64 [[IN_IDX1]]
+; CHECK-NEXT: [[CMP0:%.*]] = icmp eq i64 [[IN_IDX0]], 0
+; CHECK-NEXT: br i1 [[CMP0]], label [[BB_1:%.*]], label [[END:%.*]]
+; CHECK: bb.1:
+; CHECK-NEXT: [[VAL0:%.*]] = load <8 x i64>, ptr [[IDX0]], align 16
+; CHECK-NEXT: [[VAL1:%.*]] = load <8 x i64>, ptr [[IDX1]], align 16
+; CHECK-NEXT: [[VAL2:%.*]] = load <8 x i64>, ptr [[IDX2]], align 16
+; CHECK-NEXT: [[VAL3:%.*]] = load <8 x i64>, ptr [[IDX3]], align 16
+; CHECK-NEXT: call void asm sideeffect "
+; CHECK-NEXT: call void asm sideeffect "
+; CHECK-NEXT: call void asm sideeffect "
+; CHECK-NEXT: call void asm sideeffect "
+; CHECK-NEXT: br label [[END]]
+; CHECK: end:
+; CHECK-NEXT: call void asm sideeffect "
+; CHECK-NEXT: call void asm sideeffect "
+; CHECK-NEXT: call void asm sideeffect "
+; CHECK-NEXT: call void asm sideeffect "
+; CHECK-NEXT: ret void
+;
+entry:
+ %base = getelementptr i64, ptr %in.ptr, i64 %in.idx0
+ %idx0 = getelementptr i64, ptr %base, i64 %in.idx1
+ %const1 = getelementptr i64, ptr %base, i64 256
+ %idx1 = getelementptr i64, ptr %const1, i64 %in.idx1
+ %const2 = getelementptr i64, ptr %base, i64 512
+ %idx2 = getelementptr i64, ptr %const2, i64 %in.idx1
+ %const3 = getelementptr i64, ptr %base, i64 768
+ %idx3 = getelementptr i64, ptr %const3, i64 %in.idx1
+ %cmp0 = icmp eq i64 %in.idx0, 0
+ br i1 %cmp0, label %bb.1, label %end
+
+bb.1:
+ %val0 = load <8 x i64>, ptr %idx0, align 16
+ %val1 = load <8 x i64>, ptr %idx1, align 16
+ %val2 = load <8 x i64>, ptr %idx2, align 16
+ %val3 = load <8 x i64>, ptr %idx3, align 16
+ call void asm sideeffect "; use $0", "v"(<8 x i64> %val0)
+ call void asm sideeffect "; use $0", "v"(<8 x i64> %val1)
+ call void asm sideeffect "; use $0", "v"(<8 x i64> %val2)
+ call void asm sideeffect "; use $0", "v"(<8 x i64> %val3)
+ br label %end
+
+end:
+ call void asm sideeffect "; use $0", "v"(ptr %idx0)
+ call void asm sideeffect "; use $0", "v"(ptr %idx1)
+ call void asm sideeffect "; use $0", "v"(ptr %idx2)
+ call void asm sideeffect "; use $0", "v"(ptr %idx3)
+ ret void
+}
+
+
+define void @multi_index_reorder(ptr %in.ptr, i64 %in.idx0, i64 %in.idx1) {
+; CHECK-LABEL: define void @multi_index_reorder(
+; CHECK-SAME: ptr [[IN_PTR:%.*]], i64 [[IN_IDX0:%.*]], i64 [[IN_IDX1:%.*]]) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[IDX0:%.*]] = getelementptr [8192 x i64], ptr [[IN_PTR]], i64 0, i64 [[IN_IDX1]]
+; CHECK-NEXT: [[CONST1:%.*]] = getelementptr [8192 x i64], ptr [[IN_PTR]], i64 0, i64 256
+; CHECK-NEXT: [[IDX1:%.*]] = getelementptr i64, ptr [[CONST1]], i64 [[IN_IDX1]]
+; CHECK-NEXT: [[CONST2:%.*]] = getelementptr [8192 x i64], ptr [[IN_PTR]], i64 0, i64 512
+; CHECK-NEXT: [[IDX2:%.*]] = getelementptr i64, ptr [[CONST2]], i64 [[IN_IDX1]]
+; CHECK-NEXT: [[CONST3:%.*]] = getelementptr [8192 x i64], ptr [[IN_PTR]], i64 0, i64 768
+; CHECK-NEXT: [[IDX3:%.*]] = getelementptr i64, ptr [[CONST3]], i64 [[IN_IDX1]]
+; CHECK-NEXT: [[CMP0:%.*]] = icmp eq i64 [[IN_IDX0]], 0
+; CHECK-NEXT: br i1 [[CMP0]], label [[BB_1:%.*]], label [[END:%.*]]
+; CHECK: bb.1:
+; CHECK-NEXT: [[VAL0:%.*]] = load <8 x i64>, ptr [[IDX0]], align 16
+; CHECK-NEXT: [[VAL1:%.*]] = load <8 x i64>, ptr [[IDX1]], align 16
+; CHECK-NEXT: [[VAL2:%.*]] = load <8 x i64>, ptr [[IDX2]], align 16
+; CHECK-NEXT: [[VAL3:%.*]] = load <8 x i64>, ptr [[IDX3]], align 16
+; CHECK-NEXT: call void asm sideeffect "
+; CHECK-NEXT: call void asm sideeffect "
+; CHECK-NEXT: call void asm sideeffect "
+; CHECK-NEXT: call void asm sideeffect "
+; CHECK-NEXT: br label [[END]]
+; CHECK: end:
+; CHECK-NEXT: call void asm sideeffect "
+; CHECK-NEXT: call void asm sideeffect "
+; CHECK-NEXT: call void asm sideeffect "
+; CHECK-NEXT: call void asm sideeffect "
+; CHECK-NEXT: ret void
+;
+entry:
+ %idx0 = getelementptr [8192 x i64], ptr %in.ptr, i64 0, i64 %in.idx1
+ %const1 = getelementptr [8192 x i64], ptr %in.ptr, i64 0, i64 256
+ %idx1 = getelementptr i64, ptr %const1, i64 %in.idx1
+ %const2 = getelementptr [8192 x i64], ptr %in.ptr, i64 0, i64 512
+ %idx2 = getelementptr i64, ptr %const2, i64 %in.idx1
+ %const3 = getelementptr [8192 x i64], ptr %in.ptr, i64 0, i64 768
+ %idx3 = getelementptr i64, ptr %const3, i64 %in.idx1
+ %cmp0 = icmp eq i64 %in.idx0, 0
+ br i1 %cmp0, label %bb.1, label %end
+
+bb.1:
+ %val0 = load <8 x i64>, ptr %idx0, align 16
+ %val1 = load <8 x i64>, ptr %idx1, align 16
+ %val2 = load <8 x i64>, ptr %idx2, align 16
+ %val3 = load <8 x i64>, ptr %idx3, align 16
+ call void asm sideeffect "; use $0", "v"(<8 x i64> %val0)
+ call void asm sideeffect "; use $0", "v"(<8 x i64> %val1)
+ call void asm sideeffect "; use $0", "v"(<8 x i64> %val2)
+ call void asm sideeffect "; use $0", "v"(<8 x i64> %val3)
+ br label %end
+
+end:
+ call void asm sideeffect "; use $0", "v"(ptr %idx0)
+ call void asm sideeffect "; use $0", "v"(ptr %idx1)
+ call void asm sideeffect "; use $0", "v"(ptr %idx2)
+ call void asm sideeffect "; use $0", "v"(ptr %idx3)
+ ret void
+}
+
+
+define void @different_type_reorder(ptr %in.ptr, i64 %in.idx0, i64 %in.idx1) {
+; CHECK-LABEL: define void @different_type_reorder(
+; CHECK-SAME: ptr [[IN_PTR:%.*]], i64 [[IN_IDX0:%.*]], i64 [[IN_IDX1:%.*]]) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[BASE:%.*]] = getelementptr i64, ptr [[IN_PTR]], i64 [[IN_IDX0]]
+; CHECK-NEXT: [[IDX0:%.*]] = getelementptr i64, ptr [[BASE]], i64 [[IN_IDX1]]
+; CHECK-NEXT: [[CONST1:%.*]] = getelementptr i8, ptr [[BASE]], i64 256
+; CHECK-NEXT: [[IDX1:%.*]] = getelementptr i64, ptr [[CONST1]], i64 [[IN_IDX1]]
+; CHECK-NEXT: [[CONST2:%.*]] = getelementptr i8, ptr [[BASE]], i64 512
+; CHECK-NEXT: [[IDX2:%.*]] = getelementptr i64, ptr [[CONST2]], i64 [[IN_IDX1]]
+; CHECK-NEXT: [[CONST3:%.*]] = getelementptr i8, ptr [[BASE]], i64 768
+; CHECK-NEXT: [[IDX3:%.*]] = getelementptr i64, ptr [[CONST3]], i64 [[IN_IDX1]]
+; CHECK-NEXT: [[CMP0:%.*]] = icmp eq i64 [[IN_IDX0]], 0
+; CHECK-NEXT: br i1 [[CMP0]], label [[BB_1:%.*]], label [[END:%.*]]
+; CHECK: bb.1:
+; CHECK-NEXT: [[VAL0:%.*]] = load <8 x i64>, ptr [[IDX0]], align 16
+; CHECK-NEXT: [[VAL1:%.*]] = load <8 x i64>, ptr [[IDX1]], align 16
+; CHECK-NEXT: [[VAL2:%.*]] = load <8 x i64>, ptr [[IDX2]], align 16
+; CHECK-NEXT: [[VAL3:%.*]] = load <8 x i64>, ptr [[IDX3]], align 16
+; CHECK-NEXT: call void asm sideeffect "
+; CHECK-NEXT: call void asm sideeffect "
+; CHECK-NEXT: call void asm sideeffect "
+; CHECK-NEXT: call void asm sideeffect "
+; CHECK-NEXT: br label [[END]]
+; CHECK: end:
+; CHECK-NEXT: call void asm sideeffect "
+; CHECK-NEXT: call void asm sideeffect "
+; CHECK-NEXT: call void asm sideeffect "
+; CHECK-NEXT: call void asm sideeffect "
+; CHECK-NEXT: ret void
+;
+entry:
+ %base = getelementptr i64, ptr %in.ptr, i64 %in.idx0
+ %idx0 = getelementptr i64, ptr %base, i64 %in.idx1
+ %const1 = getelementptr i8, ptr %base, i64 256
+ %idx1 = getelementptr i64, ptr %const1, i64 %in.idx1
+ %const2 = getelementptr i8, ptr %base, i64 512
+ %idx2 = getelementptr i64, ptr %const2, i64 %in.idx1
+ %const3 = getelementptr i8, ptr %base, i64 768
+ %idx3 = getelementptr i64, ptr %const3, i64 %in.idx1
+ %cmp0 = icmp eq i64 %in.idx0, 0
+ br i1 %cmp0, label %bb.1, label %end
+
+bb.1:
+ %val0 = load <8 x i64>, ptr %idx0, align 16
+ %val1 = load <8 x i64>, ptr %idx1, align 16
+ %val2 = load <8 x i64>, ptr %idx2, align 16
+ %val3 = load <8 x i64>, ptr %idx3, align 16
+ call void asm sideeffect "; use $0", "v"(<8 x i64> %val0)
+ call void asm sideeffect "; use $0", "v"(<8 x i64> %val1)
+ call void asm sideeffect "; use $0", "v"(<8 x i64> %val2)
+ call void asm sideeffect "; use $0", "v"(<8 x i64> %val3)
+ br label %end
+
+end:
+ call void asm sideeffect "; use $0", "v"(ptr %idx0)
+ call void asm sideeffect "; use $0", "v"(ptr %idx1)
+ call void asm sideeffect "; use $0", "v"(ptr %idx2)
+ call void asm sideeffect "; use $0", "v"(ptr %idx3)
+ ret void
+}
diff --git a/llvm/test/Transforms/Util/add-TLI-mappings.ll b/llvm/test/Transforms/Util/add-TLI-mappings.ll
index 7b12de9..d86e44f 100644
--- a/llvm/test/Transforms/Util/add-TLI-mappings.ll
+++ b/llvm/test/Transforms/Util/add-TLI-mappings.ll
@@ -1,4 +1,5 @@
; RUN: opt -mtriple=x86_64-unknown-linux-gnu -vector-library=SVML -passes=inject-tli-mappings -S < %s | FileCheck %s --check-prefixes=COMMON,SVML
+; RUN: opt -mtriple=x86_64-unknown-linux-gnu -vector-library=AMDLIBM -passes=inject-tli-mappings -S < %s | FileCheck %s --check-prefixes=COMMON,AMDLIBM
; RUN: opt -mtriple=powerpc64-unknown-linux-gnu -vector-library=MASSV -passes=inject-tli-mappings -S < %s | FileCheck %s --check-prefixes=COMMON,MASSV
; RUN: opt -mtriple=x86_64-unknown-linux-gnu -vector-library=LIBMVEC-X86 -passes=inject-tli-mappings -S < %s | FileCheck %s --check-prefixes=COMMON,LIBMVEC-X86
; RUN: opt -mtriple=x86_64-unknown-linux-gnu -vector-library=Accelerate -passes=inject-tli-mappings -S < %s | FileCheck %s --check-prefixes=COMMON,ACCELERATE
@@ -13,6 +14,13 @@
; SVML-SAME: ptr @__svml_log10f4,
; SVML-SAME: ptr @__svml_log10f8,
; SVML-SAME: ptr @__svml_log10f16
+; AMDLIBM-SAME: [6 x ptr] [
+; AMDLIBM-SAME: ptr @amd_vrd2_sin,
+; AMDLIBM-SAME: ptr @amd_vrd4_sin,
+; AMDLIBM-SAME: ptr @amd_vrd8_sin,
+; AMDLIBM-SAME: ptr @amd_vrs4_log10f,
+; AMDLIBM-SAME: ptr @amd_vrs8_log10f,
+; AMDLIBM-SAME: ptr @amd_vrs16_log10f
; MASSV-SAME: [2 x ptr] [
; MASSV-SAME: ptr @__sind2,
; MASSV-SAME: ptr @__log10f4
@@ -74,6 +82,7 @@ declare float @modff(float, ptr) #0
define double @sin_f64(double %in) {
; COMMON-LABEL: @sin_f64(
; SVML: call double @sin(double %{{.*}}) #[[SIN:[0-9]+]]
+; AMDLIBM: call double @sin(double %{{.*}}) #[[SIN:[0-9]+]]
; MASSV: call double @sin(double %{{.*}}) #[[SIN:[0-9]+]]
; ACCELERATE: call double @sin(double %{{.*}})
; LIBMVEC-X86: call double @sin(double %{{.*}}) #[[SIN:[0-9]+]]
@@ -130,6 +139,7 @@ declare void @sincospif(float, ptr, ptr) #0
define float @call_llvm.log10.f32(float %in) {
; COMMON-LABEL: @call_llvm.log10.f32(
; SVML: call float @llvm.log10.f32(float %{{.*}})
+; AMDLIBM: call float @llvm.log10.f32(float %{{.*}})
; LIBMVEC-X86: call float @llvm.log10.f32(float %{{.*}})
; MASSV: call float @llvm.log10.f32(float %{{.*}}) #[[LOG10:[0-9]+]]
; ACCELERATE: call float @llvm.log10.f32(float %{{.*}}) #[[LOG10:[0-9]+]]
@@ -137,6 +147,7 @@ define float @call_llvm.log10.f32(float %in) {
; ARMPL: call float @llvm.log10.f32(float %{{.*}}) #[[LOG10:[0-9]+]]
; No mapping of "llvm.log10.f32" to a vector function for SVML.
; SVML-NOT: _ZGV_LLVM_{{.*}}_llvm.log10.f32({{.*}})
+; AMDLIBM-NOT: _ZGV_LLVM_{{.*}}_llvm.log10.f32({{.*}})
; LIBMVEC-X86-NOT: _ZGV_LLVM_{{.*}}_llvm.log10.f32({{.*}})
%call = tail call float @llvm.log10.f32(float %in)
ret float %call
@@ -151,6 +162,13 @@ declare float @llvm.log10.f32(float) #0
; SVML: declare <8 x float> @__svml_log10f8(<8 x float>)
; SVML: declare <16 x float> @__svml_log10f16(<16 x float>)
+; AMDLIBM: declare <2 x double> @amd_vrd2_sin(<2 x double>)
+; AMDLIBM: declare <4 x double> @amd_vrd4_sin(<4 x double>)
+; AMDLIBM: declare <8 x double> @amd_vrd8_sin(<8 x double>)
+; AMDLIBM: declare <4 x float> @amd_vrs4_log10f(<4 x float>)
+; AMDLIBM: declare <8 x float> @amd_vrs8_log10f(<8 x float>)
+; AMDLIBM: declare <16 x float> @amd_vrs16_log10f(<16 x float>)
+
; MASSV: declare <2 x double> @__sind2(<2 x double>)
; MASSV: declare <4 x float> @__log10f4(<4 x float>)
@@ -194,6 +212,11 @@ attributes #0 = { nounwind readnone }
; SVML-SAME: _ZGV_LLVM_N4v_sin(__svml_sin4),
; SVML-SAME: _ZGV_LLVM_N8v_sin(__svml_sin8)" }
+; AMDLIBM: attributes #[[SIN]] = { "vector-function-abi-variant"=
+; AMDLIBM-SAME: "_ZGV_LLVM_N2v_sin(amd_vrd2_sin),
+; AMDLIBM-SAME: _ZGV_LLVM_N4v_sin(amd_vrd4_sin),
+; AMDLIBM-SAME: _ZGV_LLVM_N8v_sin(amd_vrd8_sin)" }
+
; MASSV: attributes #[[SIN]] = { "vector-function-abi-variant"=
; MASSV-SAME: "_ZGV_LLVM_N2v_sin(__sind2)" }
; MASSV: attributes #[[LOG10]] = { "vector-function-abi-variant"=
diff --git a/llvm/test/lit.cfg.py b/llvm/test/lit.cfg.py
index c6f9ee8..8ecae5d 100644
--- a/llvm/test/lit.cfg.py
+++ b/llvm/test/lit.cfg.py
@@ -190,6 +190,7 @@ tools.extend(
"llvm-dis",
"llvm-dwarfdump",
"llvm-dwarfutil",
+ "llvm-dwp",
"llvm-dlltool",
"llvm-exegesis",
"llvm-extract",
diff --git a/llvm/test/tools/UpdateTestChecks/update_llc_test_checks/Inputs/lanai_isel.ll.expected b/llvm/test/tools/UpdateTestChecks/update_llc_test_checks/Inputs/lanai_isel.ll.expected
index 7d152d9..80145c5 100644
--- a/llvm/test/tools/UpdateTestChecks/update_llc_test_checks/Inputs/lanai_isel.ll.expected
+++ b/llvm/test/tools/UpdateTestChecks/update_llc_test_checks/Inputs/lanai_isel.ll.expected
@@ -53,7 +53,7 @@ define i64 @i16_test(i16 %i) nounwind readnone {
; CHECK-NEXT: t14: ch,glue = CopyToReg t0, Register:i32 $rv, t33
; CHECK-NEXT: t1: i32 = ADD_I_LO TargetFrameIndex:i32<-1>, TargetConstant:i32<0>
; CHECK-NEXT: t21: i32 = OR_I_LO t1, TargetConstant:i32<2>
-; CHECK-NEXT: t23: i32,ch = LDHz_RI<Mem:(load (s16) from %fixed-stack.0 + 2)> t21, TargetConstant:i32<0>, TargetConstant:i32<0>, t0
+; CHECK-NEXT: t23: i32,ch = LDHz_RI<Mem:(load (s16) from %fixed-stack.0 + 2, basealign 4)> t21, TargetConstant:i32<0>, TargetConstant:i32<0>, t0
; CHECK-NEXT: t22: i32,ch = LDHz_RI<Mem:(dereferenceable load (s16) from %ir.loc)> TargetFrameIndex:i32<0>, TargetConstant:i32<0>, TargetConstant:i32<0>, t0
; CHECK-NEXT: t24: i32 = ADD_R t23, t22, TargetConstant:i32<0>
; CHECK-NEXT: t27: i32 = AND_I_HI t24, TargetConstant:i32<0>
@@ -76,7 +76,7 @@ define i64 @i8_test(i8 %i) nounwind readnone {
; CHECK-NEXT: t14: ch,glue = CopyToReg t0, Register:i32 $rv, t33
; CHECK-NEXT: t1: i32 = ADD_I_LO TargetFrameIndex:i32<-1>, TargetConstant:i32<0>
; CHECK-NEXT: t21: i32 = OR_I_LO t1, TargetConstant:i32<3>
-; CHECK-NEXT: t23: i32,ch = LDBz_RI<Mem:(load (s8) from %fixed-stack.0 + 3)> t21, TargetConstant:i32<0>, TargetConstant:i32<0>, t0
+; CHECK-NEXT: t23: i32,ch = LDBz_RI<Mem:(load (s8) from %fixed-stack.0 + 3, basealign 4)> t21, TargetConstant:i32<0>, TargetConstant:i32<0>, t0
; CHECK-NEXT: t22: i32,ch = LDBz_RI<Mem:(dereferenceable load (s8) from %ir.loc)> TargetFrameIndex:i32<0>, TargetConstant:i32<0>, TargetConstant:i32<0>, t0
; CHECK-NEXT: t24: i32 = ADD_R t23, t22, TargetConstant:i32<0>
; CHECK-NEXT: t26: i32 = SLI TargetConstant:i32<255>
diff --git a/llvm/test/tools/dsymutil/ARM/dwarf5-dwarf4-combination-macho.test b/llvm/test/tools/dsymutil/ARM/dwarf5-dwarf4-combination-macho.test
index 5a37b42..fb15f46 100644
--- a/llvm/test/tools/dsymutil/ARM/dwarf5-dwarf4-combination-macho.test
+++ b/llvm/test/tools/dsymutil/ARM/dwarf5-dwarf4-combination-macho.test
@@ -221,8 +221,8 @@ CHECK-NEXT: Name Index @ 0x0 {
CHECK-NEXT: Header {
; FIXME: when the parallel dwarf linker is able to generate DW_IDX_parent,
; these headers should be the same.
-WITH-PARENTS-NEXT: Length: 0xC8
-NO-PARENTS-NEXT: Length: 0xC4
+WITH-PARENTS-NEXT: Length: 0xC0
+NO-PARENTS-NEXT: Length: 0xBC
CHECK-NEXT: Format: DWARF32
CHECK-NEXT: Version: 5
CHECK-NEXT: CU count: 2
@@ -230,7 +230,7 @@ CHECK-NEXT: Local TU count: 0
CHECK-NEXT: Foreign TU count: 0
CHECK-NEXT: Bucket count: 5
CHECK-NEXT: Name count: 5
-WITH-PARENTS-NEXT: Abbreviations table size: 0x17
-NO-PARENTS-NEXT: Abbreviations table size: 0x13
+WITH-PARENTS-NEXT: Abbreviations table size: 0x15
+NO-PARENTS-NEXT: Abbreviations table size: 0x11
CHECK-NEXT: Augmentation: 'LLVM0700'
CHECK-NEXT: }
diff --git a/llvm/test/tools/llvm-dwarfdump/X86/verify_file_encoding.yaml b/llvm/test/tools/llvm-dwarfdump/X86/verify_file_encoding.yaml
index fe31436..4afb775 100644
--- a/llvm/test/tools/llvm-dwarfdump/X86/verify_file_encoding.yaml
+++ b/llvm/test/tools/llvm-dwarfdump/X86/verify_file_encoding.yaml
@@ -51,6 +51,8 @@
# CHECK-NEXT: DW_AT_call_file [DW_FORM_sdata] (4)
# CHECK-NEXT: DW_AT_call_line [DW_FORM_sdata] (5){{[[:space:]]}}
# CHECK-NEXT: Verifying dwo Units...
+# CHECK-NEXT: Verifying .debug_line...
+# CHECK-NEXT: Verifying .debug_str_offsets...
# CHECK-NEXT: error: Aggregated error counts:
# CHECK-NEXT: error: Invalid encoding in DW_AT_decl_file occurred 4 time(s).
# CHECK-NEXT: error: Invalid file index in DW_AT_call_line occurred 1 time(s).
diff --git a/llvm/test/tools/llvm-dwarfutil/ELF/X86/dwarf5-macro.test b/llvm/test/tools/llvm-dwarfutil/ELF/X86/dwarf5-macro.test
index 518244a..0c5cbe4 100644
--- a/llvm/test/tools/llvm-dwarfutil/ELF/X86/dwarf5-macro.test
+++ b/llvm/test/tools/llvm-dwarfutil/ELF/X86/dwarf5-macro.test
@@ -45,12 +45,15 @@
## Check that macro table preserved during simple copying.
#
+# FIXME: the input of this test is itself invalid w.r.t. debug_str_offsets,
+# which also causes the next two calls to --verify to fail, so we only verify
+# debug_info on those.
#RUN: llvm-dwarfutil --no-garbage-collection %p/Inputs/dwarf5-macro.out %t1
-#RUN: llvm-dwarfdump -verify %t1 | FileCheck %s
+#RUN: llvm-dwarfdump -verify --debug-info %t1 | FileCheck %s
#RUN: llvm-dwarfdump -a %t1 | FileCheck %s --check-prefix=MACRO
#RUN: llvm-dwarfutil --linker parallel --no-garbage-collection %p/Inputs/dwarf5-macro.out %t1
-#RUN: llvm-dwarfdump -verify %t1 | FileCheck %s
+#RUN: llvm-dwarfdump -verify %t1 --debug-info | FileCheck %s
#RUN: llvm-dwarfdump -a %t1 | FileCheck %s --check-prefix=MACRO
## Check that macro table preserved during updating accelerator tables.
diff --git a/llvm/test/tools/llvm-mca/AArch64/Ampere/Ampere1B/basic-instructions.s b/llvm/test/tools/llvm-mca/AArch64/Ampere/Ampere1B/basic-instructions.s
new file mode 100644
index 0000000..7dd05eb
--- /dev/null
+++ b/llvm/test/tools/llvm-mca/AArch64/Ampere/Ampere1B/basic-instructions.s
@@ -0,0 +1,3724 @@
+# NOTE: Assertions have been autogenerated by utils/update_mca_test_checks.py
+# RUN: llvm-mca -mtriple=aarch64 -mcpu=ampere1b -instruction-tables < %s | FileCheck %s
+
+#------------------------------------------------------------------------------
+# Add/sub (immediate)
+#------------------------------------------------------------------------------
+
+add w2, w3, #4095
+add w30, w29, #1, lsl #12
+add w13, w5, #4095, lsl #12
+add x5, x7, #1638
+add w20, wsp, #801
+add wsp, wsp, #1104
+add wsp, w30, #4084
+add x0, x24, #291
+add x3, x24, #4095, lsl #12
+add x8, sp, #1074
+add sp, x29, #3816
+sub w0, wsp, #4077
+sub w4, w20, #546, lsl #12
+sub sp, sp, #288
+sub wsp, w19, #16
+adds w13, w23, #291, lsl #12
+cmn w2, #4095
+adds w20, wsp, #0
+cmn x3, #1, lsl #12
+cmp sp, #20, lsl #12
+cmp x30, #4095
+subs x4, sp, #3822
+cmn w3, #291, lsl #12
+cmn wsp, #1365
+cmn sp, #1092, lsl #12
+mov sp, x30
+mov wsp, w20
+mov x11, sp
+mov w24, wsp
+
+#------------------------------------------------------------------------------
+# Add-subtract (shifted register)
+#------------------------------------------------------------------------------
+
+add w3, w5, w7
+add wzr, w3, w5
+add w20, wzr, w4
+add w4, w6, wzr
+add w11, w13, w15
+add w9, w3, wzr, lsl #10
+add w17, w29, w20, lsl #31
+add w21, w22, w23, lsr #0
+add w24, w25, w26, lsr #18
+add w27, w28, w29, lsr #31
+add w2, w3, w4, asr #0
+add w5, w6, w7, asr #21
+add w8, w9, w10, asr #31
+add x3, x5, x7
+add xzr, x3, x5
+add x20, xzr, x4
+add x4, x6, xzr
+add x11, x13, x15
+add x9, x3, xzr, lsl #10
+add x17, x29, x20, lsl #63
+add x21, x22, x23, lsr #0
+add x24, x25, x26, lsr #18
+add x27, x28, x29, lsr #63
+add x2, x3, x4, asr #0
+add x5, x6, x7, asr #21
+add x8, x9, x10, asr #63
+adds w3, w5, w7
+cmn w3, w5
+adds w20, wzr, w4
+adds w4, w6, wzr
+adds w11, w13, w15
+adds w9, w3, wzr, lsl #10
+adds w17, w29, w20, lsl #31
+adds w21, w22, w23, lsr #0
+adds w24, w25, w26, lsr #18
+adds w27, w28, w29, lsr #31
+adds w2, w3, w4, asr #0
+adds w5, w6, w7, asr #21
+adds w8, w9, w10, asr #31
+adds x3, x5, x7
+cmn x3, x5
+adds x20, xzr, x4
+adds x4, x6, xzr
+adds x11, x13, x15
+adds x9, x3, xzr, lsl #10
+adds x17, x29, x20, lsl #63
+adds x21, x22, x23, lsr #0
+adds x24, x25, x26, lsr #18
+adds x27, x28, x29, lsr #63
+adds x2, x3, x4, asr #0
+adds x5, x6, x7, asr #21
+adds x8, x9, x10, asr #63
+sub w3, w5, w7
+sub wzr, w3, w5
+sub w4, w6, wzr
+sub w11, w13, w15
+sub w9, w3, wzr, lsl #10
+sub w17, w29, w20, lsl #31
+sub w21, w22, w23, lsr #0
+sub w24, w25, w26, lsr #18
+sub w27, w28, w29, lsr #31
+sub w2, w3, w4, asr #0
+sub w5, w6, w7, asr #21
+sub w8, w9, w10, asr #31
+sub x3, x5, x7
+sub xzr, x3, x5
+sub x4, x6, xzr
+sub x11, x13, x15
+sub x9, x3, xzr, lsl #10
+sub x17, x29, x20, lsl #63
+sub x21, x22, x23, lsr #0
+sub x24, x25, x26, lsr #18
+sub x27, x28, x29, lsr #63
+sub x2, x3, x4, asr #0
+sub x5, x6, x7, asr #21
+sub x8, x9, x10, asr #63
+subs w3, w5, w7
+cmp w3, w5
+subs w4, w6, wzr
+subs w11, w13, w15
+subs w9, w3, wzr, lsl #10
+subs w17, w29, w20, lsl #31
+subs w21, w22, w23, lsr #0
+subs w24, w25, w26, lsr #18
+subs w27, w28, w29, lsr #31
+subs w2, w3, w4, asr #0
+subs w5, w6, w7, asr #21
+subs w8, w9, w10, asr #31
+subs x3, x5, x7
+cmp x3, x5
+subs x4, x6, xzr
+subs x11, x13, x15
+subs x9, x3, xzr, lsl #10
+subs x17, x29, x20, lsl #63
+subs x21, x22, x23, lsr #0
+subs x24, x25, x26, lsr #18
+subs x27, x28, x29, lsr #63
+subs x2, x3, x4, asr #0
+subs x5, x6, x7, asr #21
+subs x8, x9, x10, asr #63
+cmn wzr, w4
+cmn w5, wzr
+cmn w6, w7
+cmn w8, w9, lsl #15
+cmn w10, w11, lsl #31
+cmn w12, w13, lsr #0
+cmn w14, w15, lsr #21
+cmn w16, w17, lsr #31
+cmn w18, w19, asr #0
+cmn w20, w21, asr #22
+cmn w22, w23, asr #31
+cmn x0, x3
+cmn xzr, x4
+cmn x5, xzr
+cmn x6, x7
+cmn x8, x9, lsl #15
+cmn x10, x11, lsl #63
+cmn x12, x13, lsr #0
+cmn x14, x15, lsr #41
+cmn x16, x17, lsr #63
+cmn x18, x19, asr #0
+cmn x20, x21, asr #55
+cmn x22, x23, asr #63
+cmp w0, w3
+cmp wzr, w4
+cmp w5, wzr
+cmp w6, w7
+cmp w8, w9, lsl #15
+cmp w10, w11, lsl #31
+cmp w12, w13, lsr #0
+cmp w14, w15, lsr #21
+cmp w18, w19, asr #0
+cmp w20, w21, asr #22
+cmp w22, w23, asr #31
+cmp x0, x3
+cmp xzr, x4
+cmp x5, xzr
+cmp x6, x7
+cmp x8, x9, lsl #15
+cmp x10, x11, lsl #63
+cmp x12, x13, lsr #0
+cmp x14, x15, lsr #41
+cmp x16, x17, lsr #63
+cmp x18, x19, asr #0
+cmp x20, x21, asr #55
+cmp x22, x23, asr #63
+cmp wzr, w0
+cmp xzr, x0
+
+#------------------------------------------------------------------------------
+# Add-subtract (shifted register)
+#------------------------------------------------------------------------------
+
+adc w29, w27, w25
+adc wzr, w3, w4
+adc w9, wzr, w10
+adc w20, w0, wzr
+adc x29, x27, x25
+adc xzr, x3, x4
+adc x9, xzr, x10
+adc x20, x0, xzr
+adcs w29, w27, w25
+adcs wzr, w3, w4
+adcs w9, wzr, w10
+adcs w20, w0, wzr
+adcs x29, x27, x25
+adcs xzr, x3, x4
+adcs x9, xzr, x10
+adcs x20, x0, xzr
+sbc w29, w27, w25
+sbc wzr, w3, w4
+ngc w9, w10
+sbc w20, w0, wzr
+sbc x29, x27, x25
+sbc xzr, x3, x4
+ngc x9, x10
+sbc x20, x0, xzr
+sbcs w29, w27, w25
+sbcs wzr, w3, w4
+ngcs w9, w10
+sbcs w20, w0, wzr
+sbcs x29, x27, x25
+sbcs xzr, x3, x4
+ngcs x9, x10
+sbcs x20, x0, xzr
+ngc w3, w12
+ngc wzr, w9
+ngc w23, wzr
+ngc x29, x30
+ngc xzr, x0
+ngc x0, xzr
+ngcs w3, w12
+ngcs wzr, w9
+ngcs w23, wzr
+ngcs x29, x30
+ngcs xzr, x0
+ngcs x0, xzr
+
+#------------------------------------------------------------------------------
+# Compare and branch (immediate)
+#------------------------------------------------------------------------------
+
+sbfx x1, x2, #3, #2
+asr x3, x4, #63
+asr wzr, wzr, #31
+sbfx w12, w9, #0, #1
+ubfiz x4, x5, #52, #11
+ubfx xzr, x4, #0, #1
+ubfiz x4, xzr, #1, #6
+lsr x5, x6, #12
+bfi x4, x5, #52, #11
+bfxil xzr, x4, #0, #1
+bfi x4, xzr, #1, #6
+bfxil x5, x6, #12, #52
+sxtb w1, w2
+sxtb xzr, w3
+sxth w9, w10
+sxth x0, w1
+sxtw x3, w30
+uxtb w1, w2
+uxth w9, w10
+ubfx x3, x30, #0, #32
+asr w3, w2, #0
+asr w9, w10, #31
+asr x20, x21, #63
+asr w1, wzr, #3
+lsr w3, w2, #0
+lsr w9, w10, #31
+lsr x20, x21, #63
+lsr wzr, wzr, #3
+lsr w3, w2, #0
+lsl w9, w10, #31
+lsl x20, x21, #63
+lsl w1, wzr, #3
+sbfx w9, w10, #0, #1
+sbfiz x2, x3, #63, #1
+asr x19, x20, #0
+sbfiz x9, x10, #5, #59
+asr w9, w10, #0
+sbfiz w11, w12, #31, #1
+sbfiz w13, w14, #29, #3
+sbfiz xzr, xzr, #10, #11
+sbfx w9, w10, #0, #1
+asr x2, x3, #63
+asr x19, x20, #0
+asr x9, x10, #5
+asr w9, w10, #0
+asr w11, w12, #31
+asr w13, w14, #29
+sbfx xzr, xzr, #10, #11
+bfxil w9, w10, #0, #1
+bfi x2, x3, #63, #1
+bfxil x19, x20, #0, #64
+bfi x9, x10, #5, #59
+bfxil w9, w10, #0, #32
+bfi w11, w12, #31, #1
+bfi w13, w14, #29, #3
+bfi xzr, xzr, #10, #11
+bfxil w9, w10, #0, #1
+bfxil x2, x3, #63, #1
+bfxil x19, x20, #0, #64
+bfxil x9, x10, #5, #59
+bfxil w9, w10, #0, #32
+bfxil w11, w12, #31, #1
+bfxil w13, w14, #29, #3
+bfxil xzr, xzr, #10, #11
+ubfx w9, w10, #0, #1
+lsl x2, x3, #63
+lsr x19, x20, #0
+lsl x9, x10, #5
+lsr w9, w10, #0
+lsl w11, w12, #31
+lsl w13, w14, #29
+ubfiz xzr, xzr, #10, #11
+ubfx w9, w10, #0, #1
+lsr x2, x3, #63
+lsr x19, x20, #0
+lsr x9, x10, #5
+lsr w9, w10, #0
+lsr w11, w12, #31
+lsr w13, w14, #29
+ubfx xzr, xzr, #10, #11
+
+#------------------------------------------------------------------------------
+# Compare and branch (immediate)
+#------------------------------------------------------------------------------
+
+cbz w5, #4
+cbz x5, #0
+cbnz x2, #-4
+cbnz x26, #1048572
+cbz wzr, #0
+cbnz xzr, #0
+
+#------------------------------------------------------------------------------
+# Conditional branch (immediate)
+#------------------------------------------------------------------------------
+
+b.ne #4
+b.ge #1048572
+b.ge #-4
+
+#------------------------------------------------------------------------------
+# Conditional compare (immediate)
+#------------------------------------------------------------------------------
+
+ccmp w1, #31, #0, eq
+ccmp w3, #0, #15, hs
+ccmp wzr, #15, #13, hs
+ccmp x9, #31, #0, le
+ccmp x3, #0, #15, gt
+ccmp xzr, #5, #7, ne
+ccmn w1, #31, #0, eq
+ccmn w3, #0, #15, hs
+ccmn wzr, #15, #13, hs
+ccmn x9, #31, #0, le
+ccmn x3, #0, #15, gt
+ccmn xzr, #5, #7, ne
+
+#------------------------------------------------------------------------------
+# Conditional compare (register)
+#------------------------------------------------------------------------------
+
+ccmp w1, wzr, #0, eq
+ccmp w3, w0, #15, hs
+ccmp wzr, w15, #13, hs
+ccmp x9, xzr, #0, le
+ccmp x3, x0, #15, gt
+ccmp xzr, x5, #7, ne
+ccmn w1, wzr, #0, eq
+ccmn w3, w0, #15, hs
+ccmn wzr, w15, #13, hs
+ccmn x9, xzr, #0, le
+ccmn x3, x0, #15, gt
+ccmn xzr, x5, #7, ne
+
+#------------------------------------------------------------------------------
+# Conditional branch (immediate)
+#------------------------------------------------------------------------------
+
+csel w1, w0, w19, ne
+csel wzr, w5, w9, eq
+csel w9, wzr, w30, gt
+csel w1, w28, wzr, mi
+csel x19, x23, x29, lt
+csel xzr, x3, x4, ge
+csel x5, xzr, x6, hs
+csel x7, x8, xzr, lo
+csinc w1, w0, w19, ne
+csinc wzr, w5, w9, eq
+csinc w9, wzr, w30, gt
+csinc w1, w28, wzr, mi
+csinc x19, x23, x29, lt
+csinc xzr, x3, x4, ge
+csinc x5, xzr, x6, hs
+csinc x7, x8, xzr, lo
+csinv w1, w0, w19, ne
+csinv wzr, w5, w9, eq
+csinv w9, wzr, w30, gt
+csinv w1, w28, wzr, mi
+csinv x19, x23, x29, lt
+csinv xzr, x3, x4, ge
+csinv x5, xzr, x6, hs
+csinv x7, x8, xzr, lo
+csneg w1, w0, w19, ne
+csneg wzr, w5, w9, eq
+csneg w9, wzr, w30, gt
+csneg w1, w28, wzr, mi
+csneg x19, x23, x29, lt
+csneg xzr, x3, x4, ge
+csneg x5, xzr, x6, hs
+csneg x7, x8, xzr, lo
+cset w3, eq
+cset x9, pl
+csetm w20, ne
+csetm x30, ge
+csinc w2, wzr, wzr, al
+csinv x3, xzr, xzr, nv
+cinc w3, w5, gt
+cinc wzr, w4, le
+cset w9, lt
+cinc x3, x5, gt
+cinc xzr, x4, le
+cset x9, lt
+csinc w5, w6, w6, nv
+csinc x1, x2, x2, al
+cinv w3, w5, gt
+cinv wzr, w4, le
+csetm w9, lt
+cinv x3, x5, gt
+cinv xzr, x4, le
+csetm x9, lt
+csinv x1, x0, x0, al
+csinv w9, w8, w8, nv
+cneg w3, w5, gt
+cneg wzr, w4, le
+cneg w9, wzr, lt
+cneg x3, x5, gt
+cneg xzr, x4, le
+cneg x9, xzr, lt
+csneg x4, x8, x8, al
+csinv w9, w8, w8, nv
+
+#------------------------------------------------------------------------------
+# Data-processing (1 source)
+#------------------------------------------------------------------------------
+
+rbit w0, w7
+rbit x18, x3
+rev16 w17, w1
+rev16 x5, x2
+rev w18, w0
+rev32 x20, x1
+rev x22, x2
+clz w24, w3
+clz x26, x4
+cls w3, w5
+cls x20, x5
+
+#------------------------------------------------------------------------------
+# Data-processing (2 source)
+#------------------------------------------------------------------------------
+
+udiv w0, w7, w10
+udiv x9, x22, x4
+sdiv w12, w21, w0
+sdiv x13, x2, x1
+lsl w11, w12, w13
+lsl x14, x15, x16
+lsr w17, w18, w19
+lsr x20, x21, x22
+asr w23, w24, w25
+asr x26, x27, x28
+ror w0, w1, w2
+ror x3, x4, x5
+lsl w6, w7, w8
+lsl x9, x10, x11
+lsr w12, w13, w14
+lsr x15, x16, x17
+asr w18, w19, w20
+asr x21, x22, x23
+ror w24, w25, w26
+ror x27, x28, x29
+
+#------------------------------------------------------------------------------
+# Data-processing (3 sources)
+#------------------------------------------------------------------------------
+
+smulh x30, x29, x28
+smulh xzr, x27, x26
+umulh x30, x29, x28
+umulh x23, x30, xzr
+madd w1, w3, w7, w4
+madd wzr, w0, w9, w11
+madd w13, wzr, w4, w4
+madd w19, w30, wzr, w29
+mul w4, w5, w6
+madd x1, x3, x7, x4
+madd xzr, x0, x9, x11
+madd x13, xzr, x4, x4
+madd x19, x30, xzr, x29
+mul x4, x5, x6
+msub w1, w3, w7, w4
+msub wzr, w0, w9, w11
+msub w13, wzr, w4, w4
+msub w19, w30, wzr, w29
+mneg w4, w5, w6
+msub x1, x3, x7, x4
+msub xzr, x0, x9, x11
+msub x13, xzr, x4, x4
+msub x19, x30, xzr, x29
+mneg x4, x5, x6
+smaddl x3, w5, w2, x9
+smaddl xzr, w10, w11, x12
+smaddl x13, wzr, w14, x15
+smaddl x16, w17, wzr, x18
+smull x19, w20, w21
+smsubl x3, w5, w2, x9
+smsubl xzr, w10, w11, x12
+smsubl x13, wzr, w14, x15
+smsubl x16, w17, wzr, x18
+smnegl x19, w20, w21
+umaddl x3, w5, w2, x9
+umaddl xzr, w10, w11, x12
+umaddl x13, wzr, w14, x15
+umaddl x16, w17, wzr, x18
+umull x19, w20, w21
+umsubl x3, w5, w2, x9
+umsubl x16, w17, wzr, x18
+umnegl x19, w20, w21
+smulh x30, x29, x28
+smulh x23, x22, xzr
+umulh x23, x22, xzr
+mul x19, x20, xzr
+mneg w21, w22, w23
+smull x11, w13, w17
+umull x11, w13, w17
+smnegl x11, w13, w17
+umnegl x11, w13, w17
+
+#------------------------------------------------------------------------------
+# Extract (immediate)
+#------------------------------------------------------------------------------
+
+extr w3, w5, w7, #0
+extr w11, w13, w17, #31
+extr x3, x5, x7, #15
+extr x11, x13, x17, #63
+ror x19, x23, #24
+ror x29, xzr, #63
+ror w9, w13, #31
+
+#------------------------------------------------------------------------------
+# Floating-point compare
+#------------------------------------------------------------------------------
+
+fcmp s3, s5
+fcmp s31, #0.0
+fcmp s31, #0.0
+fcmpe s29, s30
+fcmpe s15, #0.0
+fcmpe s15, #0.0
+fcmp d4, d12
+fcmp d23, #0.0
+fcmp d23, #0.0
+fcmpe d26, d22
+fcmpe d29, #0.0
+fcmpe d29, #0.0
+
+#------------------------------------------------------------------------------
+# Floating-point conditional compare
+#------------------------------------------------------------------------------
+
+fccmp s1, s31, #0, eq
+fccmp s3, s0, #15, hs
+fccmp s31, s15, #13, hs
+fccmp d9, d31, #0, le
+fccmp d3, d0, #15, gt
+fccmp d31, d5, #7, ne
+fccmpe s1, s31, #0, eq
+fccmpe s3, s0, #15, hs
+fccmpe s31, s15, #13, hs
+fccmpe d9, d31, #0, le
+fccmpe d3, d0, #15, gt
+fccmpe d31, d5, #7, ne
+
+#-------------------------------------------------------------------------------
+# Floating-point conditional compare
+#-------------------------------------------------------------------------------
+
+fcsel s3, s20, s9, pl
+fcsel d9, d10, d11, mi
+
+#------------------------------------------------------------------------------
+# Floating-point data-processing (1 source)
+#------------------------------------------------------------------------------
+
+fmov s0, s1
+fabs s2, s3
+fneg s4, s5
+fsqrt s6, s7
+fcvt d8, s9
+fcvt h10, s11
+frintn s12, s13
+frintp s14, s15
+frintm s16, s17
+frintz s18, s19
+frinta s20, s21
+frintx s22, s23
+frinti s24, s25
+fmov d0, d1
+fabs d2, d3
+fneg d4, d5
+fsqrt d6, d7
+fcvt s8, d9
+fcvt h10, d11
+frintn d12, d13
+frintp d14, d15
+frintm d16, d17
+frintz d18, d19
+frinta d20, d21
+frintx d22, d23
+frinti d24, d25
+fcvt s26, h27
+fcvt d28, h29
+
+#------------------------------------------------------------------------------
+# Floating-point data-processing (2 sources)
+#------------------------------------------------------------------------------
+
+fmul s20, s19, s17
+fdiv s1, s2, s3
+fadd s4, s5, s6
+fsub s7, s8, s9
+fmax s10, s11, s12
+fmin s13, s14, s15
+fmaxnm s16, s17, s18
+fminnm s19, s20, s21
+fnmul s22, s23, s2
+fmul d20, d19, d17
+fdiv d1, d2, d3
+fadd d4, d5, d6
+fsub d7, d8, d9
+fmax d10, d11, d12
+fmin d13, d14, d15
+fmaxnm d16, d17, d18
+fminnm d19, d20, d21
+fnmul d22, d23, d24
+
+#------------------------------------------------------------------------------
+# Floating-point data-processing (1 source)
+#------------------------------------------------------------------------------
+
+fmadd s3, s5, s6, s31
+fmadd d3, d13, d0, d23
+fmsub s3, s5, s6, s31
+fmsub d3, d13, d0, d23
+fnmadd s3, s5, s6, s31
+fnmadd d3, d13, d0, d23
+fnmsub s3, s5, s6, s31
+fnmsub d3, d13, d0, d23
+
+#------------------------------------------------------------------------------
+# Floating-point <-> fixed-point conversion
+#------------------------------------------------------------------------------
+
+fcvtzs w3, h5, #1
+fcvtzs wzr, h20, #13
+fcvtzs w19, h0, #32
+fcvtzs x3, h5, #1
+fcvtzs x12, h30, #45
+fcvtzs x19, h0, #64
+fcvtzs w3, s5, #1
+fcvtzs wzr, s20, #13
+fcvtzs w19, s0, #32
+fcvtzs x3, s5, #1
+fcvtzs x12, s30, #45
+fcvtzs x19, s0, #64
+fcvtzs w3, d5, #1
+fcvtzs wzr, d20, #13
+fcvtzs w19, d0, #32
+fcvtzs x3, d5, #1
+fcvtzs x12, d30, #45
+fcvtzs x19, d0, #64
+fcvtzu w3, h5, #1
+fcvtzu wzr, h20, #13
+fcvtzu w19, h0, #32
+fcvtzu x3, h5, #1
+fcvtzu x12, h30, #45
+fcvtzu x19, h0, #64
+fcvtzu w3, s5, #1
+fcvtzu wzr, s20, #13
+fcvtzu w19, s0, #32
+fcvtzu x3, s5, #1
+fcvtzu x12, s30, #45
+fcvtzu x19, s0, #64
+fcvtzu w3, d5, #1
+fcvtzu wzr, d20, #13
+fcvtzu w19, d0, #32
+fcvtzu x3, d5, #1
+fcvtzu x12, d30, #45
+fcvtzu x19, d0, #64
+scvtf h23, w19, #1
+scvtf h31, wzr, #20
+scvtf h14, w0, #32
+scvtf h23, x19, #1
+scvtf h31, xzr, #20
+scvtf h14, x0, #64
+scvtf s23, w19, #1
+scvtf s31, wzr, #20
+scvtf s14, w0, #32
+scvtf s23, x19, #1
+scvtf s31, xzr, #20
+scvtf s14, x0, #64
+scvtf d23, w19, #1
+scvtf d31, wzr, #20
+scvtf d14, w0, #32
+scvtf d23, x19, #1
+scvtf d31, xzr, #20
+scvtf d14, x0, #64
+ucvtf h23, w19, #1
+ucvtf h31, wzr, #20
+ucvtf h14, w0, #32
+ucvtf h23, x19, #1
+ucvtf h31, xzr, #20
+ucvtf h14, x0, #64
+ucvtf s23, w19, #1
+ucvtf s31, wzr, #20
+ucvtf s14, w0, #32
+ucvtf s23, x19, #1
+ucvtf s31, xzr, #20
+ucvtf s14, x0, #64
+ucvtf d23, w19, #1
+ucvtf d31, wzr, #20
+ucvtf d14, w0, #32
+ucvtf d23, x19, #1
+ucvtf d31, xzr, #20
+ucvtf d14, x0, #64
+
+#------------------------------------------------------------------------------
+# Floating-point <-> integer conversion
+#------------------------------------------------------------------------------
+
+fcvtns w3, h31
+fcvtns xzr, h12
+fcvtnu wzr, h12
+fcvtnu x0, h0
+fcvtps wzr, h9
+fcvtps x12, h20
+fcvtpu w30, h23
+fcvtpu x29, h3
+fcvtms w2, h3
+fcvtms x4, h5
+fcvtmu w6, h7
+fcvtmu x8, h9
+fcvtzs w10, h11
+fcvtzs x12, h13
+fcvtzu w14, h15
+fcvtzu x15, h16
+scvtf h17, w18
+scvtf h19, x20
+ucvtf h21, w22
+scvtf h23, x24
+fcvtas w25, h26
+fcvtas x27, h28
+fcvtau w29, h30
+fcvtau xzr, h0
+fcvtns w3, s31
+fcvtns xzr, s12
+fcvtnu wzr, s12
+fcvtnu x0, s0
+fcvtps wzr, s9
+fcvtps x12, s20
+fcvtpu w30, s23
+fcvtpu x29, s3
+fcvtms w2, s3
+fcvtms x4, s5
+fcvtmu w6, s7
+fcvtmu x8, s9
+fcvtzs w10, s11
+fcvtzs x12, s13
+fcvtzu w14, s15
+fcvtzu x15, s16
+scvtf s17, w18
+scvtf s19, x20
+ucvtf s21, w22
+scvtf s23, x24
+fcvtas w25, s26
+fcvtas x27, s28
+fcvtau w29, s30
+fcvtau xzr, s0
+fcvtns w3, d31
+fcvtns xzr, d12
+fcvtnu wzr, d12
+fcvtnu x0, d0
+fcvtps wzr, d9
+fcvtps x12, d20
+fcvtpu w30, d23
+fcvtpu x29, d3
+fcvtms w2, d3
+fcvtms x4, d5
+fcvtmu w6, d7
+fcvtmu x8, d9
+fcvtzs w10, d11
+fcvtzs x12, d13
+fcvtzu w14, d15
+fcvtzu x15, d16
+scvtf d17, w18
+scvtf d19, x20
+ucvtf d21, w22
+ucvtf d23, x24
+fcvtas w25, d26
+fcvtas x27, d28
+fcvtau w29, d30
+fcvtau xzr, d0
+fmov w3, s9
+fmov s9, w3
+fmov x20, d31
+fmov d1, x15
+fmov x3, v12.d[1]
+fmov v1.d[1], x19
+
+#------------------------------------------------------------------------------
+# Floating-point immediate
+#------------------------------------------------------------------------------
+
+fmov s2, #0.12500000
+fmov s3, #1.00000000
+fmov d30, #16.00000000
+fmov s4, #1.06250000
+fmov d10, #1.93750000
+fmov s12, #-1.00000000
+fmov d16, #8.50000000
+
+#------------------------------------------------------------------------------
+# Load-register (literal)
+#------------------------------------------------------------------------------
+
+ldr w3, #0
+ldr x29, #4
+ldrsw xzr, #-4
+ldr s0, #8
+ldr d0, #1048572
+ldr q0, #-1048576
+prfm pldl1strm, #0
+prfm #22, #0
+
+#------------------------------------------------------------------------------
+# Load/store exclusive
+#------------------------------------------------------------------------------
+
+stxrb w18, w8, [sp]
+stxrh w24, w15, [x16]
+stxr w5, w6, [x17]
+stxr w1, x10, [x21]
+ldxrb w30, [x0]
+ldxrh w17, [x4]
+ldxr w22, [sp]
+ldxr x11, [x29]
+ldxr x11, [x29]
+ldxr x11, [x29]
+stxp w12, w11, w10, [sp]
+stxp wzr, x27, x9, [x12]
+ldxp w0, wzr, [sp]
+ldxp x17, x0, [x18]
+ldxp x17, x0, [x18]
+stlxrb w12, w22, [x0]
+stlxrh w10, w1, [x1]
+stlxr w9, w2, [x2]
+stlxr w9, x3, [sp]
+ldaxrb w8, [x4]
+ldaxrh w7, [x5]
+ldaxr w6, [sp]
+ldaxr x5, [x6]
+ldaxr x5, [x6]
+ldaxr x5, [x6]
+stlxp w4, w5, w6, [sp]
+stlxp wzr, x6, x7, [x1]
+ldaxp w5, w18, [sp]
+ldaxp x6, x19, [x22]
+ldaxp x6, x19, [x22]
+stlrb w24, [sp]
+stlrh w25, [x30]
+stlr w26, [x29]
+stlr x27, [x28]
+stlr x27, [x28]
+stlr x27, [x28]
+ldarb w23, [sp]
+ldarh w22, [x30]
+ldar wzr, [x29]
+ldar x21, [x28]
+ldar x21, [x28]
+ldar x21, [x28]
+
+#------------------------------------------------------------------------------
+# Load/store (unscaled immediate)
+#------------------------------------------------------------------------------
+
+sturb w9, [sp]
+sturh wzr, [x12, #255]
+stur w16, [x0, #-256]
+stur x28, [x14, #1]
+ldurb w1, [x20, #255]
+ldurh w20, [x1, #255]
+ldur w12, [sp, #255]
+ldur xzr, [x12, #255]
+ldursb x9, [x7, #-256]
+ldursh x17, [x19, #-256]
+ldursw x20, [x15, #-256]
+prfum pldl2keep, [sp, #-256]
+ldursb w19, [x1, #-256]
+ldursh w15, [x21, #-256]
+stur b0, [sp, #1]
+stur h12, [x12, #-1]
+stur s15, [x0, #255]
+stur d31, [x5, #25]
+stur q9, [x5]
+ldur b3, [sp]
+ldur h5, [x4, #-256]
+ldur s7, [x12, #-1]
+ldur d11, [x19, #4]
+ldur q13, [x1, #2]
+
+#------------------------------------------------------------------------------
+# Load/store (immediate post-indexed)
+#------------------------------------------------------------------------------
+
+strb w9, [x2], #255
+strb w10, [x3], #1
+strb w10, [x3], #-256
+strh w9, [x2], #255
+strh w9, [x2], #1
+strh w10, [x3], #-256
+str w19, [sp], #255
+str w20, [x30], #1
+str w21, [x12], #-256
+str xzr, [x9], #255
+str x2, [x3], #1
+str x19, [x12], #-256
+ldrb w9, [x2], #255
+ldrb w10, [x3], #1
+ldrb w10, [x3], #-256
+ldrh w9, [x2], #255
+ldrh w9, [x2], #1
+ldrh w10, [x3], #-256
+ldr w19, [sp], #255
+ldr w20, [x30], #1
+ldr w21, [x12], #-256
+ldr xzr, [x9], #255
+ldr x2, [x3], #1
+ldr x19, [x12], #-256
+ldrsb xzr, [x9], #255
+ldrsb x2, [x3], #1
+ldrsb x19, [x12], #-256
+ldrsh xzr, [x9], #255
+ldrsh x2, [x3], #1
+ldrsh x19, [x12], #-256
+ldrsw xzr, [x9], #255
+ldrsw x2, [x3], #1
+ldrsw x19, [x12], #-256
+ldrsb wzr, [x9], #255
+ldrsb w2, [x3], #1
+ldrsb w19, [x12], #-256
+ldrsh wzr, [x9], #255
+ldrsh w2, [x3], #1
+ldrsh w19, [x12], #-256
+str b0, [x0], #255
+str b3, [x3], #1
+str b5, [sp], #-256
+str h10, [x10], #255
+str h13, [x23], #1
+str h15, [sp], #-256
+str s20, [x20], #255
+str s23, [x23], #1
+str s25, [x0], #-256
+str d20, [x20], #255
+str d23, [x23], #1
+str d25, [x0], #-256
+ldr b0, [x0], #255
+ldr b3, [x3], #1
+ldr b5, [sp], #-256
+ldr h10, [x10], #255
+ldr h13, [x23], #1
+ldr h15, [sp], #-256
+ldr s20, [x20], #255
+ldr s23, [x23], #1
+ldr s25, [x0], #-256
+ldr d20, [x20], #255
+ldr d23, [x23], #1
+ldr d25, [x0], #-256
+ldr q20, [x1], #255
+ldr q23, [x9], #1
+ldr q25, [x20], #-256
+str q10, [x1], #255
+str q22, [sp], #1
+str q21, [x20], #-256
+
+#-------------------------------------------------------------------------------
+# Load-store register (immediate pre-indexed)
+#-------------------------------------------------------------------------------
+
+ldr x3, [x4, #0]!
+strb w9, [x2, #255]!
+strb w10, [x3, #1]!
+strb w10, [x3, #-256]!
+strh w9, [x2, #255]!
+strh w9, [x2, #1]!
+strh w10, [x3, #-256]!
+str w19, [sp, #255]!
+str w20, [x30, #1]!
+str w21, [x12, #-256]!
+str xzr, [x9, #255]!
+str x2, [x3, #1]!
+str x19, [x12, #-256]!
+ldrb w9, [x2, #255]!
+ldrb w10, [x3, #1]!
+ldrb w10, [x3, #-256]!
+ldrh w9, [x2, #255]!
+ldrh w9, [x2, #1]!
+ldrh w10, [x3, #-256]!
+ldr w19, [sp, #255]!
+ldr w20, [x30, #1]!
+ldr w21, [x12, #-256]!
+ldr xzr, [x9, #255]!
+ldr x2, [x3, #1]!
+ldr x19, [x12, #-256]!
+ldrsb xzr, [x9, #255]!
+ldrsb x2, [x3, #1]!
+ldrsb x19, [x12, #-256]!
+ldrsh xzr, [x9, #255]!
+ldrsh x2, [x3, #1]!
+ldrsh x19, [x12, #-256]!
+ldrsw xzr, [x9, #255]!
+ldrsw x2, [x3, #1]!
+ldrsw x19, [x12, #-256]!
+ldrsb wzr, [x9, #255]!
+ldrsb w2, [x3, #1]!
+ldrsb w19, [x12, #-256]!
+ldrsh wzr, [x9, #255]!
+ldrsh w2, [x3, #1]!
+ldrsh w19, [x12, #-256]!
+str b0, [x0, #255]!
+str b3, [x3, #1]!
+str b5, [sp, #-256]!
+str h10, [x10, #255]!
+str h13, [x23, #1]!
+str h15, [sp, #-256]!
+str s20, [x20, #255]!
+str s23, [x23, #1]!
+str s25, [x0, #-256]!
+str d20, [x20, #255]!
+str d23, [x23, #1]!
+str d25, [x0, #-256]!
+ldr b0, [x0, #255]!
+ldr b3, [x3, #1]!
+ldr b5, [sp, #-256]!
+ldr h10, [x10, #255]!
+ldr h13, [x23, #1]!
+ldr h15, [sp, #-256]!
+ldr s20, [x20, #255]!
+ldr s23, [x23, #1]!
+ldr s25, [x0, #-256]!
+ldr d20, [x20, #255]!
+ldr d23, [x23, #1]!
+ldr d25, [x0, #-256]!
+ldr q20, [x1, #255]!
+ldr q23, [x9, #1]!
+ldr q25, [x20, #-256]!
+str q10, [x1, #255]!
+str q22, [sp, #1]!
+str q21, [x20, #-256]!
+
+#------------------------------------------------------------------------------
+# Load/store (unprivileged)
+#------------------------------------------------------------------------------
+
+sttrb w9, [sp]
+sttrh wzr, [x12, #255]
+sttr w16, [x0, #-256]
+sttr x28, [x14, #1]
+ldtrb w1, [x20, #255]
+ldtrh w20, [x1, #255]
+ldtr w12, [sp, #255]
+ldtr xzr, [x12, #255]
+ldtrsb x9, [x7, #-256]
+ldtrsh x17, [x19, #-256]
+ldtrsw x20, [x15, #-256]
+ldtrsb w19, [x1, #-256]
+ldtrsh w15, [x21, #-256]
+
+#------------------------------------------------------------------------------
+# Load/store (unsigned immediate)
+#------------------------------------------------------------------------------
+
+ldr x4, [x29]
+ldr x30, [x12, #32760]
+ldr x20, [sp, #8]
+ldr xzr, [sp]
+ldr w2, [sp]
+ldr w17, [sp, #16380]
+ldr w13, [x2, #4]
+ldrsw x2, [x5, #4]
+ldrsw x23, [sp, #16380]
+ldrh w2, [x4]
+ldrsh w23, [x6, #8190]
+ldrsh wzr, [sp, #2]
+ldrsh x29, [x2, #2]
+ldrb w26, [x3, #121]
+ldrb w12, [x2]
+ldrsb w27, [sp, #4095]
+ldrsb xzr, [x15]
+str x30, [sp]
+str w20, [x4, #16380]
+strh w17, [sp, #8190]
+strb w23, [x3, #4095]
+strb wzr, [x2]
+ldr b31, [sp, #4095]
+ldr h20, [x2, #8190]
+ldr s10, [x19, #16380]
+ldr d3, [x10, #32760]
+str q12, [sp, #65520]
+
+#------------------------------------------------------------------------------
+# Load/store (register offset)
+#------------------------------------------------------------------------------
+
+ldrb w3, [sp, x5]
+ldrb w9, [x27, x6]
+ldrsb w10, [x30, x7]
+ldrb w11, [x29, x3, sxtx]
+strb w12, [x28, xzr, sxtx]
+ldrb w14, [x26, w6, uxtw]
+ldrsb w15, [x25, w7, uxtw]
+ldrb w17, [x23, w9, sxtw]
+ldrsb x18, [x22, w10, sxtw]
+ldrsh w3, [sp, x5]
+ldrsh w9, [x27, x6]
+ldrh w10, [x30, x7, lsl #1]
+strh w11, [x29, x3, sxtx]
+ldrh w12, [x28, xzr, sxtx]
+ldrsh x13, [x27, x5, sxtx #1]
+ldrh w14, [x26, w6, uxtw]
+ldrh w15, [x25, w7, uxtw]
+ldrsh w16, [x24, w8, uxtw #1]
+ldrh w17, [x23, w9, sxtw]
+ldrh w18, [x22, w10, sxtw]
+strh w19, [x21, wzr, sxtw #1]
+ldr w3, [sp, x5]
+ldr s9, [x27, x6]
+ldr w10, [x30, x7, lsl #2]
+ldr w11, [x29, x3, sxtx]
+str s12, [x28, xzr, sxtx]
+str w13, [x27, x5, sxtx #2]
+str w14, [x26, w6, uxtw]
+ldr w15, [x25, w7, uxtw]
+ldr w16, [x24, w8, uxtw #2]
+ldrsw x17, [x23, w9, sxtw]
+ldr w18, [x22, w10, sxtw]
+ldrsw x19, [x21, wzr, sxtw #2]
+ldr x3, [sp, x5]
+str x9, [x27, x6]
+ldr d10, [x30, x7, lsl #3]
+str x11, [x29, x3, sxtx]
+ldr x12, [x28, xzr, sxtx]
+ldr x13, [x27, x5, sxtx #3]
+prfm pldl1keep, [x26, w6, uxtw]
+ldr x15, [x25, w7, uxtw]
+ldr x16, [x24, w8, uxtw #3]
+ldr x17, [x23, w9, sxtw]
+ldr x18, [x22, w10, sxtw]
+str d19, [x21, wzr, sxtw #3]
+ldr q3, [sp, x5]
+ldr q9, [x27, x6]
+ldr q10, [x30, x7, lsl #4]
+str q11, [x29, x3, sxtx]
+str q12, [x28, xzr, sxtx]
+str q13, [x27, x5, sxtx #4]
+ldr q14, [x26, w6, uxtw]
+ldr q15, [x25, w7, uxtw]
+ldr q16, [x24, w8, uxtw #4]
+ldr q17, [x23, w9, sxtw]
+str q18, [x22, w10, sxtw]
+ldr q19, [x21, wzr, sxtw #4]
+
+#------------------------------------------------------------------------------
+# Load/store register pair (offset)
+#------------------------------------------------------------------------------
+
+ldp w3, w5, [sp]
+stp wzr, w9, [sp, #252]
+ldp w2, wzr, [sp, #-256]
+ldp w9, w10, [sp, #4]
+ldpsw x9, x10, [sp, #4]
+ldpsw x9, x10, [x2, #-256]
+ldpsw x20, x30, [sp, #252]
+ldp x21, x29, [x2, #504]
+ldp x22, x23, [x3, #-512]
+ldp x24, x25, [x4, #8]
+ldp s29, s28, [sp, #252]
+stp s27, s26, [sp, #-256]
+ldp s1, s2, [x3, #44]
+stp d3, d5, [x9, #504]
+stp d7, d11, [x10, #-512]
+ldp d2, d3, [x30, #-8]
+stp q3, q5, [sp]
+stp q17, q19, [sp, #1008]
+ldp q23, q29, [x1, #-1024]
+
+#------------------------------------------------------------------------------
+# Load/store register pair (post-indexed)
+#------------------------------------------------------------------------------
+
+ldp w3, w5, [sp], #0
+stp wzr, w9, [sp], #252
+ldp w2, wzr, [sp], #-256
+ldp w9, w10, [sp], #4
+ldpsw x9, x10, [sp], #4
+ldpsw x9, x10, [x2], #-256
+ldpsw x20, x30, [sp], #252
+ldp x21, x29, [x2], #504
+ldp x22, x23, [x3], #-512
+ldp x24, x25, [x4], #8
+ldp s29, s28, [sp], #252
+stp s27, s26, [sp], #-256
+ldp s1, s2, [x3], #44
+stp d3, d5, [x9], #504
+stp d7, d11, [x10], #-512
+ldp d2, d3, [x30], #-8
+stp q3, q5, [sp], #0
+stp q17, q19, [sp], #1008
+ldp q23, q29, [x1], #-1024
+
+#------------------------------------------------------------------------------
+# Load/store register pair (pre-indexed)
+#------------------------------------------------------------------------------
+
+ldp w3, w5, [sp, #0]!
+stp wzr, w9, [sp, #252]!
+ldp w2, wzr, [sp, #-256]!
+ldp w9, w10, [sp, #4]!
+ldpsw x9, x10, [sp, #4]!
+ldpsw x9, x10, [x2, #-256]!
+ldpsw x20, x30, [sp, #252]!
+ldp x21, x29, [x2, #504]!
+ldp x22, x23, [x3, #-512]!
+ldp x24, x25, [x4, #8]!
+ldp s29, s28, [sp, #252]!
+stp s27, s26, [sp, #-256]!
+ldp s1, s2, [x3, #44]!
+stp d3, d5, [x9, #504]!
+stp d7, d11, [x10, #-512]!
+ldp d2, d3, [x30, #-8]!
+stp q3, q5, [sp, #0]!
+stp q17, q19, [sp, #1008]!
+ldp q23, q29, [x1, #-1024]!
+
+#------------------------------------------------------------------------------
+# Load/store register pair (offset)
+#------------------------------------------------------------------------------
+
+ldnp w3, w5, [sp]
+stnp wzr, w9, [sp, #252]
+ldnp w2, wzr, [sp, #-256]
+ldnp w9, w10, [sp, #4]
+ldnp x21, x29, [x2, #504]
+ldnp x22, x23, [x3, #-512]
+ldnp x24, x25, [x4, #8]
+ldnp s29, s28, [sp, #252]
+stnp s27, s26, [sp, #-256]
+ldnp s1, s2, [x3, #44]
+stnp d3, d5, [x9, #504]
+stnp d7, d11, [x10, #-512]
+ldnp d2, d3, [x30, #-8]
+stnp q3, q5, [sp]
+stnp q17, q19, [sp, #1008]
+ldnp q23, q29, [x1, #-1024]
+
+#------------------------------------------------------------------------------
+# Logical (immediate)
+#------------------------------------------------------------------------------
+
+mov w3, #983055
+mov x10, #-6148914691236517206
+
+#------------------------------------------------------------------------------
+# Logical (shifted register)
+#------------------------------------------------------------------------------
+
+and w12, w23, w21
+and w16, w15, w1, lsl #1
+and w9, w4, w10, lsl #31
+and w3, w30, w11
+and x3, x5, x7, lsl #63
+and x5, x14, x19, asr #4
+and w3, w17, w19, ror #31
+and w0, w2, wzr, lsr #17
+and w3, w30, w11, asr #2
+and xzr, x4, x26
+and w3, wzr, w20, ror #2
+and x7, x20, xzr, asr #63
+bic x13, x20, x14, lsl #47
+bic w2, w7, w9
+orr w2, w7, w0, asr #31
+orr x8, x9, x10, lsl #12
+orn x3, x5, x7, asr #2
+orn w2, w5, w29
+ands w7, wzr, w9, lsl #1
+ands x3, x5, x20, ror #63
+bics w3, w5, w7
+bics x3, xzr, x3, lsl #1
+tst w3, w7, lsl #31
+tst x2, x20, asr #2
+mov x3, x6
+mov x3, xzr
+mov wzr, w2
+mov w3, w5
+
+#------------------------------------------------------------------------------
+# Move wide (immediate)
+#------------------------------------------------------------------------------
+
+movz w2, #0, lsl #16
+mov w2, #-1235
+mov x2, #5299989643264
+mov x2, #0
+movk w3, #0
+movz x4, #0, lsl #16
+movk w5, #0, lsl #16
+movz x6, #0, lsl #32
+movk x7, #0, lsl #32
+movz x8, #0, lsl #48
+movk x9, #0, lsl #48
+
+#------------------------------------------------------------------------------
+# PC-relative addressing
+#------------------------------------------------------------------------------
+
+adr x2, #1600
+adrp x21, #6553600
+adr x0, #262144
+
+#------------------------------------------------------------------------------
+# Test and branch (immediate)
+#------------------------------------------------------------------------------
+
+tbz x12, #62, #0
+tbz x12, #62, #4
+tbz x12, #62, #-32768
+tbnz x12, #60, #32764
+
+#------------------------------------------------------------------------------
+# Unconditional branch (immediate)
+#------------------------------------------------------------------------------
+
+b #4
+b #-4
+b #134217724
+
+#------------------------------------------------------------------------------
+# Unconditional branch (register)
+#------------------------------------------------------------------------------
+
+br x20
+blr xzr
+ret x10
+ret
+eret
+drps
+
+# CHECK: Instruction Info:
+# CHECK-NEXT: [1]: #uOps
+# CHECK-NEXT: [2]: Latency
+# CHECK-NEXT: [3]: RThroughput
+# CHECK-NEXT: [4]: MayLoad
+# CHECK-NEXT: [5]: MayStore
+# CHECK-NEXT: [6]: HasSideEffects (U)
+
+# CHECK: [1] [2] [3] [4] [5] [6] Instructions:
+# CHECK-NEXT: 1 1 0.25 add w2, w3, #4095
+# CHECK-NEXT: 1 1 0.25 add w30, w29, #1, lsl #12
+# CHECK-NEXT: 1 1 0.25 add w13, w5, #4095, lsl #12
+# CHECK-NEXT: 1 1 0.25 add x5, x7, #1638
+# CHECK-NEXT: 1 1 0.25 add w20, wsp, #801
+# CHECK-NEXT: 1 1 0.25 add wsp, wsp, #1104
+# CHECK-NEXT: 1 1 0.25 add wsp, w30, #4084
+# CHECK-NEXT: 1 1 0.25 add x0, x24, #291
+# CHECK-NEXT: 1 1 0.25 add x3, x24, #4095, lsl #12
+# CHECK-NEXT: 1 1 0.25 add x8, sp, #1074
+# CHECK-NEXT: 1 1 0.25 add sp, x29, #3816
+# CHECK-NEXT: 1 1 0.25 sub w0, wsp, #4077
+# CHECK-NEXT: 1 1 0.25 sub w4, w20, #546, lsl #12
+# CHECK-NEXT: 1 1 0.25 sub sp, sp, #288
+# CHECK-NEXT: 1 1 0.25 sub wsp, w19, #16
+# CHECK-NEXT: 1 1 0.50 adds w13, w23, #291, lsl #12
+# CHECK-NEXT: 1 1 0.50 cmn w2, #4095
+# CHECK-NEXT: 1 1 0.50 adds w20, wsp, #0
+# CHECK-NEXT: 1 1 0.50 cmn x3, #1, lsl #12
+# CHECK-NEXT: 1 1 0.50 cmp sp, #20, lsl #12
+# CHECK-NEXT: 1 1 0.50 cmp x30, #4095
+# CHECK-NEXT: 1 1 0.50 subs x4, sp, #3822
+# CHECK-NEXT: 1 1 0.50 cmn w3, #291, lsl #12
+# CHECK-NEXT: 1 1 0.50 cmn wsp, #1365
+# CHECK-NEXT: 1 1 0.50 cmn sp, #1092, lsl #12
+# CHECK-NEXT: 1 1 0.25 mov sp, x30
+# CHECK-NEXT: 1 1 0.25 mov wsp, w20
+# CHECK-NEXT: 1 1 0.25 mov x11, sp
+# CHECK-NEXT: 1 1 0.25 mov w24, wsp
+# CHECK-NEXT: 1 1 0.25 add w3, w5, w7
+# CHECK-NEXT: 1 1 0.25 add wzr, w3, w5
+# CHECK-NEXT: 1 1 0.25 add w20, wzr, w4
+# CHECK-NEXT: 1 1 0.25 add w4, w6, wzr
+# CHECK-NEXT: 1 1 0.25 add w11, w13, w15
+# CHECK-NEXT: 2 2 0.50 add w9, w3, wzr, lsl #10
+# CHECK-NEXT: 2 2 0.50 add w17, w29, w20, lsl #31
+# CHECK-NEXT: 2 2 0.50 add w21, w22, w23, lsr #0
+# CHECK-NEXT: 2 2 0.50 add w24, w25, w26, lsr #18
+# CHECK-NEXT: 2 2 0.50 add w27, w28, w29, lsr #31
+# CHECK-NEXT: 2 2 0.50 add w2, w3, w4, asr #0
+# CHECK-NEXT: 2 2 0.50 add w5, w6, w7, asr #21
+# CHECK-NEXT: 2 2 0.50 add w8, w9, w10, asr #31
+# CHECK-NEXT: 1 1 0.25 add x3, x5, x7
+# CHECK-NEXT: 1 1 0.25 add xzr, x3, x5
+# CHECK-NEXT: 1 1 0.25 add x20, xzr, x4
+# CHECK-NEXT: 1 1 0.25 add x4, x6, xzr
+# CHECK-NEXT: 1 1 0.25 add x11, x13, x15
+# CHECK-NEXT: 2 2 0.50 add x9, x3, xzr, lsl #10
+# CHECK-NEXT: 2 2 0.50 add x17, x29, x20, lsl #63
+# CHECK-NEXT: 2 2 0.50 add x21, x22, x23, lsr #0
+# CHECK-NEXT: 2 2 0.50 add x24, x25, x26, lsr #18
+# CHECK-NEXT: 2 2 0.50 add x27, x28, x29, lsr #63
+# CHECK-NEXT: 2 2 0.50 add x2, x3, x4, asr #0
+# CHECK-NEXT: 2 2 0.50 add x5, x6, x7, asr #21
+# CHECK-NEXT: 2 2 0.50 add x8, x9, x10, asr #63
+# CHECK-NEXT: 1 1 0.25 adds w3, w5, w7
+# CHECK-NEXT: 1 1 0.25 cmn w3, w5
+# CHECK-NEXT: 1 1 0.25 adds w20, wzr, w4
+# CHECK-NEXT: 1 1 0.25 adds w4, w6, wzr
+# CHECK-NEXT: 1 1 0.25 adds w11, w13, w15
+# CHECK-NEXT: 2 2 0.50 adds w9, w3, wzr, lsl #10
+# CHECK-NEXT: 2 2 0.50 adds w17, w29, w20, lsl #31
+# CHECK-NEXT: 2 2 0.50 adds w21, w22, w23, lsr #0
+# CHECK-NEXT: 2 2 0.50 adds w24, w25, w26, lsr #18
+# CHECK-NEXT: 2 2 0.50 adds w27, w28, w29, lsr #31
+# CHECK-NEXT: 2 2 0.50 adds w2, w3, w4, asr #0
+# CHECK-NEXT: 2 2 0.50 adds w5, w6, w7, asr #21
+# CHECK-NEXT: 2 2 0.50 adds w8, w9, w10, asr #31
+# CHECK-NEXT: 1 1 0.25 adds x3, x5, x7
+# CHECK-NEXT: 1 1 0.25 cmn x3, x5
+# CHECK-NEXT: 1 1 0.25 adds x20, xzr, x4
+# CHECK-NEXT: 1 1 0.25 adds x4, x6, xzr
+# CHECK-NEXT: 1 1 0.25 adds x11, x13, x15
+# CHECK-NEXT: 2 2 0.50 adds x9, x3, xzr, lsl #10
+# CHECK-NEXT: 2 2 0.50 adds x17, x29, x20, lsl #63
+# CHECK-NEXT: 2 2 0.50 adds x21, x22, x23, lsr #0
+# CHECK-NEXT: 2 2 0.50 adds x24, x25, x26, lsr #18
+# CHECK-NEXT: 2 2 0.50 adds x27, x28, x29, lsr #63
+# CHECK-NEXT: 2 2 0.50 adds x2, x3, x4, asr #0
+# CHECK-NEXT: 2 2 0.50 adds x5, x6, x7, asr #21
+# CHECK-NEXT: 2 2 0.50 adds x8, x9, x10, asr #63
+# CHECK-NEXT: 1 1 0.25 sub w3, w5, w7
+# CHECK-NEXT: 1 1 0.25 sub wzr, w3, w5
+# CHECK-NEXT: 1 1 0.25 sub w4, w6, wzr
+# CHECK-NEXT: 1 1 0.25 sub w11, w13, w15
+# CHECK-NEXT: 2 2 0.50 sub w9, w3, wzr, lsl #10
+# CHECK-NEXT: 2 2 0.50 sub w17, w29, w20, lsl #31
+# CHECK-NEXT: 2 2 0.50 sub w21, w22, w23, lsr #0
+# CHECK-NEXT: 2 2 0.50 sub w24, w25, w26, lsr #18
+# CHECK-NEXT: 2 2 0.50 sub w27, w28, w29, lsr #31
+# CHECK-NEXT: 2 2 0.50 sub w2, w3, w4, asr #0
+# CHECK-NEXT: 2 2 0.50 sub w5, w6, w7, asr #21
+# CHECK-NEXT: 2 2 0.50 sub w8, w9, w10, asr #31
+# CHECK-NEXT: 1 1 0.25 sub x3, x5, x7
+# CHECK-NEXT: 1 1 0.25 sub xzr, x3, x5
+# CHECK-NEXT: 1 1 0.25 sub x4, x6, xzr
+# CHECK-NEXT: 1 1 0.25 sub x11, x13, x15
+# CHECK-NEXT: 2 2 0.50 sub x9, x3, xzr, lsl #10
+# CHECK-NEXT: 2 2 0.50 sub x17, x29, x20, lsl #63
+# CHECK-NEXT: 2 2 0.50 sub x21, x22, x23, lsr #0
+# CHECK-NEXT: 2 2 0.50 sub x24, x25, x26, lsr #18
+# CHECK-NEXT: 2 2 0.50 sub x27, x28, x29, lsr #63
+# CHECK-NEXT: 2 2 0.50 sub x2, x3, x4, asr #0
+# CHECK-NEXT: 2 2 0.50 sub x5, x6, x7, asr #21
+# CHECK-NEXT: 2 2 0.50 sub x8, x9, x10, asr #63
+# CHECK-NEXT: 1 1 0.25 subs w3, w5, w7
+# CHECK-NEXT: 1 1 0.25 cmp w3, w5
+# CHECK-NEXT: 1 1 0.25 subs w4, w6, wzr
+# CHECK-NEXT: 1 1 0.25 subs w11, w13, w15
+# CHECK-NEXT: 2 2 0.50 subs w9, w3, wzr, lsl #10
+# CHECK-NEXT: 2 2 0.50 subs w17, w29, w20, lsl #31
+# CHECK-NEXT: 2 2 0.50 subs w21, w22, w23, lsr #0
+# CHECK-NEXT: 2 2 0.50 subs w24, w25, w26, lsr #18
+# CHECK-NEXT: 2 2 0.50 subs w27, w28, w29, lsr #31
+# CHECK-NEXT: 2 2 0.50 subs w2, w3, w4, asr #0
+# CHECK-NEXT: 2 2 0.50 subs w5, w6, w7, asr #21
+# CHECK-NEXT: 2 2 0.50 subs w8, w9, w10, asr #31
+# CHECK-NEXT: 1 1 0.25 subs x3, x5, x7
+# CHECK-NEXT: 1 1 0.25 cmp x3, x5
+# CHECK-NEXT: 1 1 0.25 subs x4, x6, xzr
+# CHECK-NEXT: 1 1 0.25 subs x11, x13, x15
+# CHECK-NEXT: 2 2 0.50 subs x9, x3, xzr, lsl #10
+# CHECK-NEXT: 2 2 0.50 subs x17, x29, x20, lsl #63
+# CHECK-NEXT: 2 2 0.50 subs x21, x22, x23, lsr #0
+# CHECK-NEXT: 2 2 0.50 subs x24, x25, x26, lsr #18
+# CHECK-NEXT: 2 2 0.50 subs x27, x28, x29, lsr #63
+# CHECK-NEXT: 2 2 0.50 subs x2, x3, x4, asr #0
+# CHECK-NEXT: 2 2 0.50 subs x5, x6, x7, asr #21
+# CHECK-NEXT: 2 2 0.50 subs x8, x9, x10, asr #63
+# CHECK-NEXT: 1 1 0.25 cmn wzr, w4
+# CHECK-NEXT: 1 1 0.25 cmn w5, wzr
+# CHECK-NEXT: 1 1 0.25 cmn w6, w7
+# CHECK-NEXT: 2 2 0.50 cmn w8, w9, lsl #15
+# CHECK-NEXT: 2 2 0.50 cmn w10, w11, lsl #31
+# CHECK-NEXT: 2 2 0.50 cmn w12, w13, lsr #0
+# CHECK-NEXT: 2 2 0.50 cmn w14, w15, lsr #21
+# CHECK-NEXT: 2 2 0.50 cmn w16, w17, lsr #31
+# CHECK-NEXT: 2 2 0.50 cmn w18, w19, asr #0
+# CHECK-NEXT: 2 2 0.50 cmn w20, w21, asr #22
+# CHECK-NEXT: 2 2 0.50 cmn w22, w23, asr #31
+# CHECK-NEXT: 1 1 0.25 cmn x0, x3
+# CHECK-NEXT: 1 1 0.25 cmn xzr, x4
+# CHECK-NEXT: 1 1 0.25 cmn x5, xzr
+# CHECK-NEXT: 1 1 0.25 cmn x6, x7
+# CHECK-NEXT: 2 2 0.50 cmn x8, x9, lsl #15
+# CHECK-NEXT: 2 2 0.50 cmn x10, x11, lsl #63
+# CHECK-NEXT: 2 2 0.50 cmn x12, x13, lsr #0
+# CHECK-NEXT: 2 2 0.50 cmn x14, x15, lsr #41
+# CHECK-NEXT: 2 2 0.50 cmn x16, x17, lsr #63
+# CHECK-NEXT: 2 2 0.50 cmn x18, x19, asr #0
+# CHECK-NEXT: 2 2 0.50 cmn x20, x21, asr #55
+# CHECK-NEXT: 2 2 0.50 cmn x22, x23, asr #63
+# CHECK-NEXT: 1 1 0.25 cmp w0, w3
+# CHECK-NEXT: 1 1 0.25 cmp wzr, w4
+# CHECK-NEXT: 1 1 0.25 cmp w5, wzr
+# CHECK-NEXT: 1 1 0.25 cmp w6, w7
+# CHECK-NEXT: 2 2 0.50 cmp w8, w9, lsl #15
+# CHECK-NEXT: 2 2 0.50 cmp w10, w11, lsl #31
+# CHECK-NEXT: 2 2 0.50 cmp w12, w13, lsr #0
+# CHECK-NEXT: 2 2 0.50 cmp w14, w15, lsr #21
+# CHECK-NEXT: 2 2 0.50 cmp w18, w19, asr #0
+# CHECK-NEXT: 2 2 0.50 cmp w20, w21, asr #22
+# CHECK-NEXT: 2 2 0.50 cmp w22, w23, asr #31
+# CHECK-NEXT: 1 1 0.25 cmp x0, x3
+# CHECK-NEXT: 1 1 0.25 cmp xzr, x4
+# CHECK-NEXT: 1 1 0.25 cmp x5, xzr
+# CHECK-NEXT: 1 1 0.25 cmp x6, x7
+# CHECK-NEXT: 2 2 0.50 cmp x8, x9, lsl #15
+# CHECK-NEXT: 2 2 0.50 cmp x10, x11, lsl #63
+# CHECK-NEXT: 2 2 0.50 cmp x12, x13, lsr #0
+# CHECK-NEXT: 2 2 0.50 cmp x14, x15, lsr #41
+# CHECK-NEXT: 2 2 0.50 cmp x16, x17, lsr #63
+# CHECK-NEXT: 2 2 0.50 cmp x18, x19, asr #0
+# CHECK-NEXT: 2 2 0.50 cmp x20, x21, asr #55
+# CHECK-NEXT: 2 2 0.50 cmp x22, x23, asr #63
+# CHECK-NEXT: 1 1 0.25 cmp wzr, w0
+# CHECK-NEXT: 1 1 0.25 cmp xzr, x0
+# CHECK-NEXT: 1 1 0.50 adc w29, w27, w25
+# CHECK-NEXT: 1 1 0.50 adc wzr, w3, w4
+# CHECK-NEXT: 1 1 0.50 adc w9, wzr, w10
+# CHECK-NEXT: 1 1 0.50 adc w20, w0, wzr
+# CHECK-NEXT: 1 1 0.50 adc x29, x27, x25
+# CHECK-NEXT: 1 1 0.50 adc xzr, x3, x4
+# CHECK-NEXT: 1 1 0.50 adc x9, xzr, x10
+# CHECK-NEXT: 1 1 0.50 adc x20, x0, xzr
+# CHECK-NEXT: 1 1 0.50 adcs w29, w27, w25
+# CHECK-NEXT: 1 1 0.50 adcs wzr, w3, w4
+# CHECK-NEXT: 1 1 0.50 adcs w9, wzr, w10
+# CHECK-NEXT: 1 1 0.50 adcs w20, w0, wzr
+# CHECK-NEXT: 1 1 0.50 adcs x29, x27, x25
+# CHECK-NEXT: 1 1 0.50 adcs xzr, x3, x4
+# CHECK-NEXT: 1 1 0.50 adcs x9, xzr, x10
+# CHECK-NEXT: 1 1 0.50 adcs x20, x0, xzr
+# CHECK-NEXT: 1 1 0.50 sbc w29, w27, w25
+# CHECK-NEXT: 1 1 0.50 sbc wzr, w3, w4
+# CHECK-NEXT: 1 1 0.50 ngc w9, w10
+# CHECK-NEXT: 1 1 0.50 sbc w20, w0, wzr
+# CHECK-NEXT: 1 1 0.50 sbc x29, x27, x25
+# CHECK-NEXT: 1 1 0.50 sbc xzr, x3, x4
+# CHECK-NEXT: 1 1 0.50 ngc x9, x10
+# CHECK-NEXT: 1 1 0.50 sbc x20, x0, xzr
+# CHECK-NEXT: 1 1 0.50 sbcs w29, w27, w25
+# CHECK-NEXT: 1 1 0.50 sbcs wzr, w3, w4
+# CHECK-NEXT: 1 1 0.50 ngcs w9, w10
+# CHECK-NEXT: 1 1 0.50 sbcs w20, w0, wzr
+# CHECK-NEXT: 1 1 0.50 sbcs x29, x27, x25
+# CHECK-NEXT: 1 1 0.50 sbcs xzr, x3, x4
+# CHECK-NEXT: 1 1 0.50 ngcs x9, x10
+# CHECK-NEXT: 1 1 0.50 sbcs x20, x0, xzr
+# CHECK-NEXT: 1 1 0.50 ngc w3, w12
+# CHECK-NEXT: 1 1 0.50 ngc wzr, w9
+# CHECK-NEXT: 1 1 0.50 ngc w23, wzr
+# CHECK-NEXT: 1 1 0.50 ngc x29, x30
+# CHECK-NEXT: 1 1 0.50 ngc xzr, x0
+# CHECK-NEXT: 1 1 0.50 ngc x0, xzr
+# CHECK-NEXT: 1 1 0.50 ngcs w3, w12
+# CHECK-NEXT: 1 1 0.50 ngcs wzr, w9
+# CHECK-NEXT: 1 1 0.50 ngcs w23, wzr
+# CHECK-NEXT: 1 1 0.50 ngcs x29, x30
+# CHECK-NEXT: 1 1 0.50 ngcs xzr, x0
+# CHECK-NEXT: 1 1 0.50 ngcs x0, xzr
+# CHECK-NEXT: 1 1 0.50 sbfx x1, x2, #3, #2
+# CHECK-NEXT: 1 1 0.50 asr x3, x4, #63
+# CHECK-NEXT: 1 1 0.50 asr wzr, wzr, #31
+# CHECK-NEXT: 1 1 0.50 sbfx w12, w9, #0, #1
+# CHECK-NEXT: 1 1 0.50 ubfiz x4, x5, #52, #11
+# CHECK-NEXT: 1 1 0.50 ubfx xzr, x4, #0, #1
+# CHECK-NEXT: 1 1 0.50 ubfiz x4, xzr, #1, #6
+# CHECK-NEXT: 1 1 0.50 lsr x5, x6, #12
+# CHECK-NEXT: 1 1 0.50 bfi x4, x5, #52, #11
+# CHECK-NEXT: 1 1 0.50 bfxil xzr, x4, #0, #1
+# CHECK-NEXT: 1 1 0.50 bfc x4, #1, #6
+# CHECK-NEXT: 1 1 0.50 bfxil x5, x6, #12, #52
+# CHECK-NEXT: 1 1 0.50 sxtb w1, w2
+# CHECK-NEXT: 1 1 0.50 sxtb xzr, w3
+# CHECK-NEXT: 1 1 0.50 sxth w9, w10
+# CHECK-NEXT: 1 1 0.50 sxth x0, w1
+# CHECK-NEXT: 1 1 0.50 sxtw x3, w30
+# CHECK-NEXT: 1 1 0.50 uxtb w1, w2
+# CHECK-NEXT: 1 1 0.50 uxth w9, w10
+# CHECK-NEXT: 1 1 0.50 ubfx x3, x30, #0, #32
+# CHECK-NEXT: 1 1 0.50 asr w3, w2, #0
+# CHECK-NEXT: 1 1 0.50 asr w9, w10, #31
+# CHECK-NEXT: 1 1 0.50 asr x20, x21, #63
+# CHECK-NEXT: 1 1 0.50 asr w1, wzr, #3
+# CHECK-NEXT: 1 1 0.50 lsr w3, w2, #0
+# CHECK-NEXT: 1 1 0.50 lsr w9, w10, #31
+# CHECK-NEXT: 1 1 0.50 lsr x20, x21, #63
+# CHECK-NEXT: 1 1 0.50 lsr wzr, wzr, #3
+# CHECK-NEXT: 1 1 0.50 lsr w3, w2, #0
+# CHECK-NEXT: 1 1 0.50 lsl w9, w10, #31
+# CHECK-NEXT: 1 1 0.50 lsl x20, x21, #63
+# CHECK-NEXT: 1 1 0.50 lsl w1, wzr, #3
+# CHECK-NEXT: 1 1 0.50 sbfx w9, w10, #0, #1
+# CHECK-NEXT: 1 1 0.50 sbfiz x2, x3, #63, #1
+# CHECK-NEXT: 1 1 0.50 asr x19, x20, #0
+# CHECK-NEXT: 1 1 0.50 sbfiz x9, x10, #5, #59
+# CHECK-NEXT: 1 1 0.50 asr w9, w10, #0
+# CHECK-NEXT: 1 1 0.50 sbfiz w11, w12, #31, #1
+# CHECK-NEXT: 1 1 0.50 sbfiz w13, w14, #29, #3
+# CHECK-NEXT: 1 1 0.50 sbfiz xzr, xzr, #10, #11
+# CHECK-NEXT: 1 1 0.50 sbfx w9, w10, #0, #1
+# CHECK-NEXT: 1 1 0.50 asr x2, x3, #63
+# CHECK-NEXT: 1 1 0.50 asr x19, x20, #0
+# CHECK-NEXT: 1 1 0.50 asr x9, x10, #5
+# CHECK-NEXT: 1 1 0.50 asr w9, w10, #0
+# CHECK-NEXT: 1 1 0.50 asr w11, w12, #31
+# CHECK-NEXT: 1 1 0.50 asr w13, w14, #29
+# CHECK-NEXT: 1 1 0.50 sbfx xzr, xzr, #10, #11
+# CHECK-NEXT: 1 1 0.50 bfxil w9, w10, #0, #1
+# CHECK-NEXT: 1 1 0.50 bfi x2, x3, #63, #1
+# CHECK-NEXT: 1 1 0.50 bfxil x19, x20, #0, #64
+# CHECK-NEXT: 1 1 0.50 bfi x9, x10, #5, #59
+# CHECK-NEXT: 1 1 0.50 bfxil w9, w10, #0, #32
+# CHECK-NEXT: 1 1 0.50 bfi w11, w12, #31, #1
+# CHECK-NEXT: 1 1 0.50 bfi w13, w14, #29, #3
+# CHECK-NEXT: 1 1 0.50 bfc xzr, #10, #11
+# CHECK-NEXT: 1 1 0.50 bfxil w9, w10, #0, #1
+# CHECK-NEXT: 1 1 0.50 bfxil x2, x3, #63, #1
+# CHECK-NEXT: 1 1 0.50 bfxil x19, x20, #0, #64
+# CHECK-NEXT: 1 1 0.50 bfxil x9, x10, #5, #59
+# CHECK-NEXT: 1 1 0.50 bfxil w9, w10, #0, #32
+# CHECK-NEXT: 1 1 0.50 bfxil w11, w12, #31, #1
+# CHECK-NEXT: 1 1 0.50 bfxil w13, w14, #29, #3
+# CHECK-NEXT: 1 1 0.50 bfxil xzr, xzr, #10, #11
+# CHECK-NEXT: 1 1 0.50 ubfx w9, w10, #0, #1
+# CHECK-NEXT: 1 1 0.50 lsl x2, x3, #63
+# CHECK-NEXT: 1 1 0.50 lsr x19, x20, #0
+# CHECK-NEXT: 1 1 0.50 lsl x9, x10, #5
+# CHECK-NEXT: 1 1 0.50 lsr w9, w10, #0
+# CHECK-NEXT: 1 1 0.50 lsl w11, w12, #31
+# CHECK-NEXT: 1 1 0.50 lsl w13, w14, #29
+# CHECK-NEXT: 1 1 0.50 ubfiz xzr, xzr, #10, #11
+# CHECK-NEXT: 1 1 0.50 ubfx w9, w10, #0, #1
+# CHECK-NEXT: 1 1 0.50 lsr x2, x3, #63
+# CHECK-NEXT: 1 1 0.50 lsr x19, x20, #0
+# CHECK-NEXT: 1 1 0.50 lsr x9, x10, #5
+# CHECK-NEXT: 1 1 0.50 lsr w9, w10, #0
+# CHECK-NEXT: 1 1 0.50 lsr w11, w12, #31
+# CHECK-NEXT: 1 1 0.50 lsr w13, w14, #29
+# CHECK-NEXT: 1 1 0.50 ubfx xzr, xzr, #10, #11
+# CHECK-NEXT: 1 1 0.50 cbz w5, #4
+# CHECK-NEXT: 1 1 0.50 cbz x5, #0
+# CHECK-NEXT: 1 1 0.50 cbnz x2, #-4
+# CHECK-NEXT: 1 1 0.50 cbnz x26, #1048572
+# CHECK-NEXT: 1 1 0.50 cbz wzr, #0
+# CHECK-NEXT: 1 1 0.50 cbnz xzr, #0
+# CHECK-NEXT: 1 1 0.50 b.ne #4
+# CHECK-NEXT: 1 1 0.50 b.ge #1048572
+# CHECK-NEXT: 1 1 0.50 b.ge #-4
+# CHECK-NEXT: 1 1 0.50 ccmp w1, #31, #0, eq
+# CHECK-NEXT: 1 1 0.50 ccmp w3, #0, #15, hs
+# CHECK-NEXT: 1 1 0.50 ccmp wzr, #15, #13, hs
+# CHECK-NEXT: 1 1 0.50 ccmp x9, #31, #0, le
+# CHECK-NEXT: 1 1 0.50 ccmp x3, #0, #15, gt
+# CHECK-NEXT: 1 1 0.50 ccmp xzr, #5, #7, ne
+# CHECK-NEXT: 1 1 0.50 ccmn w1, #31, #0, eq
+# CHECK-NEXT: 1 1 0.50 ccmn w3, #0, #15, hs
+# CHECK-NEXT: 1 1 0.50 ccmn wzr, #15, #13, hs
+# CHECK-NEXT: 1 1 0.50 ccmn x9, #31, #0, le
+# CHECK-NEXT: 1 1 0.50 ccmn x3, #0, #15, gt
+# CHECK-NEXT: 1 1 0.50 ccmn xzr, #5, #7, ne
+# CHECK-NEXT: 1 1 0.50 ccmp w1, wzr, #0, eq
+# CHECK-NEXT: 1 1 0.50 ccmp w3, w0, #15, hs
+# CHECK-NEXT: 1 1 0.50 ccmp wzr, w15, #13, hs
+# CHECK-NEXT: 1 1 0.50 ccmp x9, xzr, #0, le
+# CHECK-NEXT: 1 1 0.50 ccmp x3, x0, #15, gt
+# CHECK-NEXT: 1 1 0.50 ccmp xzr, x5, #7, ne
+# CHECK-NEXT: 1 1 0.50 ccmn w1, wzr, #0, eq
+# CHECK-NEXT: 1 1 0.50 ccmn w3, w0, #15, hs
+# CHECK-NEXT: 1 1 0.50 ccmn wzr, w15, #13, hs
+# CHECK-NEXT: 1 1 0.50 ccmn x9, xzr, #0, le
+# CHECK-NEXT: 1 1 0.50 ccmn x3, x0, #15, gt
+# CHECK-NEXT: 1 1 0.50 ccmn xzr, x5, #7, ne
+# CHECK-NEXT: 1 1 0.50 csel w1, w0, w19, ne
+# CHECK-NEXT: 1 1 0.50 csel wzr, w5, w9, eq
+# CHECK-NEXT: 1 1 0.50 csel w9, wzr, w30, gt
+# CHECK-NEXT: 1 1 0.50 csel w1, w28, wzr, mi
+# CHECK-NEXT: 1 1 0.50 csel x19, x23, x29, lt
+# CHECK-NEXT: 1 1 0.50 csel xzr, x3, x4, ge
+# CHECK-NEXT: 1 1 0.50 csel x5, xzr, x6, hs
+# CHECK-NEXT: 1 1 0.50 csel x7, x8, xzr, lo
+# CHECK-NEXT: 1 1 0.50 csinc w1, w0, w19, ne
+# CHECK-NEXT: 1 1 0.50 csinc wzr, w5, w9, eq
+# CHECK-NEXT: 1 1 0.50 csinc w9, wzr, w30, gt
+# CHECK-NEXT: 1 1 0.50 csinc w1, w28, wzr, mi
+# CHECK-NEXT: 1 1 0.50 csinc x19, x23, x29, lt
+# CHECK-NEXT: 1 1 0.50 csinc xzr, x3, x4, ge
+# CHECK-NEXT: 1 1 0.50 csinc x5, xzr, x6, hs
+# CHECK-NEXT: 1 1 0.50 csinc x7, x8, xzr, lo
+# CHECK-NEXT: 1 1 0.50 csinv w1, w0, w19, ne
+# CHECK-NEXT: 1 1 0.50 csinv wzr, w5, w9, eq
+# CHECK-NEXT: 1 1 0.50 csinv w9, wzr, w30, gt
+# CHECK-NEXT: 1 1 0.50 csinv w1, w28, wzr, mi
+# CHECK-NEXT: 1 1 0.50 csinv x19, x23, x29, lt
+# CHECK-NEXT: 1 1 0.50 csinv xzr, x3, x4, ge
+# CHECK-NEXT: 1 1 0.50 csinv x5, xzr, x6, hs
+# CHECK-NEXT: 1 1 0.50 csinv x7, x8, xzr, lo
+# CHECK-NEXT: 1 1 0.50 csneg w1, w0, w19, ne
+# CHECK-NEXT: 1 1 0.50 csneg wzr, w5, w9, eq
+# CHECK-NEXT: 1 1 0.50 csneg w9, wzr, w30, gt
+# CHECK-NEXT: 1 1 0.50 csneg w1, w28, wzr, mi
+# CHECK-NEXT: 1 1 0.50 csneg x19, x23, x29, lt
+# CHECK-NEXT: 1 1 0.50 csneg xzr, x3, x4, ge
+# CHECK-NEXT: 1 1 0.50 csneg x5, xzr, x6, hs
+# CHECK-NEXT: 1 1 0.50 csneg x7, x8, xzr, lo
+# CHECK-NEXT: 1 1 0.50 cset w3, eq
+# CHECK-NEXT: 1 1 0.50 cset x9, pl
+# CHECK-NEXT: 1 1 0.50 csetm w20, ne
+# CHECK-NEXT: 1 1 0.50 csetm x30, ge
+# CHECK-NEXT: 1 1 0.50 csinc w2, wzr, wzr, al
+# CHECK-NEXT: 1 1 0.50 csinv x3, xzr, xzr, nv
+# CHECK-NEXT: 1 1 0.50 cinc w3, w5, gt
+# CHECK-NEXT: 1 1 0.50 cinc wzr, w4, le
+# CHECK-NEXT: 1 1 0.50 cset w9, lt
+# CHECK-NEXT: 1 1 0.50 cinc x3, x5, gt
+# CHECK-NEXT: 1 1 0.50 cinc xzr, x4, le
+# CHECK-NEXT: 1 1 0.50 cset x9, lt
+# CHECK-NEXT: 1 1 0.50 csinc w5, w6, w6, nv
+# CHECK-NEXT: 1 1 0.50 csinc x1, x2, x2, al
+# CHECK-NEXT: 1 1 0.50 cinv w3, w5, gt
+# CHECK-NEXT: 1 1 0.50 cinv wzr, w4, le
+# CHECK-NEXT: 1 1 0.50 csetm w9, lt
+# CHECK-NEXT: 1 1 0.50 cinv x3, x5, gt
+# CHECK-NEXT: 1 1 0.50 cinv xzr, x4, le
+# CHECK-NEXT: 1 1 0.50 csetm x9, lt
+# CHECK-NEXT: 1 1 0.50 csinv x1, x0, x0, al
+# CHECK-NEXT: 1 1 0.50 csinv w9, w8, w8, nv
+# CHECK-NEXT: 1 1 0.50 cneg w3, w5, gt
+# CHECK-NEXT: 1 1 0.50 cneg wzr, w4, le
+# CHECK-NEXT: 1 1 0.50 cneg w9, wzr, lt
+# CHECK-NEXT: 1 1 0.50 cneg x3, x5, gt
+# CHECK-NEXT: 1 1 0.50 cneg xzr, x4, le
+# CHECK-NEXT: 1 1 0.50 cneg x9, xzr, lt
+# CHECK-NEXT: 1 1 0.50 csneg x4, x8, x8, al
+# CHECK-NEXT: 1 1 0.50 csinv w9, w8, w8, nv
+# CHECK-NEXT: 1 1 0.50 rbit w0, w7
+# CHECK-NEXT: 1 1 0.50 rbit x18, x3
+# CHECK-NEXT: 1 1 0.50 rev16 w17, w1
+# CHECK-NEXT: 1 1 0.50 rev16 x5, x2
+# CHECK-NEXT: 1 1 0.50 rev w18, w0
+# CHECK-NEXT: 1 1 0.50 rev32 x20, x1
+# CHECK-NEXT: 1 1 0.50 rev x22, x2
+# CHECK-NEXT: 1 1 0.25 clz w24, w3
+# CHECK-NEXT: 1 1 0.25 clz x26, x4
+# CHECK-NEXT: 1 1 0.50 cls w3, w5
+# CHECK-NEXT: 1 1 0.50 cls x20, x5
+# CHECK-NEXT: 2 13 1.00 udiv w0, w7, w10
+# CHECK-NEXT: 3 13 2.00 udiv x9, x22, x4
+# CHECK-NEXT: 2 13 1.00 sdiv w12, w21, w0
+# CHECK-NEXT: 3 13 2.00 sdiv x13, x2, x1
+# CHECK-NEXT: 1 1 0.50 lsl w11, w12, w13
+# CHECK-NEXT: 1 1 0.50 lsl x14, x15, x16
+# CHECK-NEXT: 1 1 0.50 lsr w17, w18, w19
+# CHECK-NEXT: 1 1 0.50 lsr x20, x21, x22
+# CHECK-NEXT: 1 1 0.50 asr w23, w24, w25
+# CHECK-NEXT: 1 1 0.50 asr x26, x27, x28
+# CHECK-NEXT: 1 1 0.50 ror w0, w1, w2
+# CHECK-NEXT: 1 1 0.50 ror x3, x4, x5
+# CHECK-NEXT: 1 1 0.50 lsl w6, w7, w8
+# CHECK-NEXT: 1 1 0.50 lsl x9, x10, x11
+# CHECK-NEXT: 1 1 0.50 lsr w12, w13, w14
+# CHECK-NEXT: 1 1 0.50 lsr x15, x16, x17
+# CHECK-NEXT: 1 1 0.50 asr w18, w19, w20
+# CHECK-NEXT: 1 1 0.50 asr x21, x22, x23
+# CHECK-NEXT: 1 1 0.50 ror w24, w25, w26
+# CHECK-NEXT: 1 1 0.50 ror x27, x28, x29
+# CHECK-NEXT: 1 3 1.00 smulh x30, x29, x28
+# CHECK-NEXT: 1 3 1.00 smulh xzr, x27, x26
+# CHECK-NEXT: 1 3 1.00 umulh x30, x29, x28
+# CHECK-NEXT: 1 3 1.00 umulh x23, x30, xzr
+# CHECK-NEXT: 1 3 1.00 madd w1, w3, w7, w4
+# CHECK-NEXT: 1 3 1.00 madd wzr, w0, w9, w11
+# CHECK-NEXT: 1 3 1.00 madd w13, wzr, w4, w4
+# CHECK-NEXT: 1 3 1.00 madd w19, w30, wzr, w29
+# CHECK-NEXT: 1 3 1.00 mul w4, w5, w6
+# CHECK-NEXT: 1 3 1.00 madd x1, x3, x7, x4
+# CHECK-NEXT: 1 3 1.00 madd xzr, x0, x9, x11
+# CHECK-NEXT: 1 3 1.00 madd x13, xzr, x4, x4
+# CHECK-NEXT: 1 3 1.00 madd x19, x30, xzr, x29
+# CHECK-NEXT: 1 3 1.00 mul x4, x5, x6
+# CHECK-NEXT: 1 3 1.00 msub w1, w3, w7, w4
+# CHECK-NEXT: 1 3 1.00 msub wzr, w0, w9, w11
+# CHECK-NEXT: 1 3 1.00 msub w13, wzr, w4, w4
+# CHECK-NEXT: 1 3 1.00 msub w19, w30, wzr, w29
+# CHECK-NEXT: 1 3 1.00 mneg w4, w5, w6
+# CHECK-NEXT: 1 3 1.00 msub x1, x3, x7, x4
+# CHECK-NEXT: 1 3 1.00 msub xzr, x0, x9, x11
+# CHECK-NEXT: 1 3 1.00 msub x13, xzr, x4, x4
+# CHECK-NEXT: 1 3 1.00 msub x19, x30, xzr, x29
+# CHECK-NEXT: 1 3 1.00 mneg x4, x5, x6
+# CHECK-NEXT: 2 4 1.00 smaddl x3, w5, w2, x9
+# CHECK-NEXT: 2 4 1.00 smaddl xzr, w10, w11, x12
+# CHECK-NEXT: 2 4 1.00 smaddl x13, wzr, w14, x15
+# CHECK-NEXT: 2 4 1.00 smaddl x16, w17, wzr, x18
+# CHECK-NEXT: 2 4 1.00 smull x19, w20, w21
+# CHECK-NEXT: 2 4 1.00 smsubl x3, w5, w2, x9
+# CHECK-NEXT: 2 4 1.00 smsubl xzr, w10, w11, x12
+# CHECK-NEXT: 2 4 1.00 smsubl x13, wzr, w14, x15
+# CHECK-NEXT: 2 4 1.00 smsubl x16, w17, wzr, x18
+# CHECK-NEXT: 2 4 1.00 smnegl x19, w20, w21
+# CHECK-NEXT: 2 4 1.00 umaddl x3, w5, w2, x9
+# CHECK-NEXT: 2 4 1.00 umaddl xzr, w10, w11, x12
+# CHECK-NEXT: 2 4 1.00 umaddl x13, wzr, w14, x15
+# CHECK-NEXT: 2 4 1.00 umaddl x16, w17, wzr, x18
+# CHECK-NEXT: 2 4 1.00 umull x19, w20, w21
+# CHECK-NEXT: 2 4 1.00 umsubl x3, w5, w2, x9
+# CHECK-NEXT: 2 4 1.00 umsubl x16, w17, wzr, x18
+# CHECK-NEXT: 2 4 1.00 umnegl x19, w20, w21
+# CHECK-NEXT: 1 3 1.00 smulh x30, x29, x28
+# CHECK-NEXT: 1 3 1.00 smulh x23, x22, xzr
+# CHECK-NEXT: 1 3 1.00 umulh x23, x22, xzr
+# CHECK-NEXT: 1 3 1.00 mul x19, x20, xzr
+# CHECK-NEXT: 1 3 1.00 mneg w21, w22, w23
+# CHECK-NEXT: 2 4 1.00 smull x11, w13, w17
+# CHECK-NEXT: 2 4 1.00 umull x11, w13, w17
+# CHECK-NEXT: 2 4 1.00 smnegl x11, w13, w17
+# CHECK-NEXT: 2 4 1.00 umnegl x11, w13, w17
+# CHECK-NEXT: 1 1 0.50 extr w3, w5, w7, #0
+# CHECK-NEXT: 1 1 0.50 extr w11, w13, w17, #31
+# CHECK-NEXT: 1 1 0.50 extr x3, x5, x7, #15
+# CHECK-NEXT: 1 1 0.50 extr x11, x13, x17, #63
+# CHECK-NEXT: 1 1 0.50 ror x19, x23, #24
+# CHECK-NEXT: 1 1 0.50 ror x29, xzr, #63
+# CHECK-NEXT: 1 1 0.50 ror w9, w13, #31
+# CHECK-NEXT: 1 3 1.00 fcmp s3, s5
+# CHECK-NEXT: 1 3 1.00 fcmp s31, #0.0
+# CHECK-NEXT: 1 3 1.00 fcmp s31, #0.0
+# CHECK-NEXT: 1 3 1.00 fcmpe s29, s30
+# CHECK-NEXT: 1 3 1.00 fcmpe s15, #0.0
+# CHECK-NEXT: 1 3 1.00 fcmpe s15, #0.0
+# CHECK-NEXT: 1 3 1.00 fcmp d4, d12
+# CHECK-NEXT: 1 3 1.00 fcmp d23, #0.0
+# CHECK-NEXT: 1 3 1.00 fcmp d23, #0.0
+# CHECK-NEXT: 1 3 1.00 fcmpe d26, d22
+# CHECK-NEXT: 1 3 1.00 fcmpe d29, #0.0
+# CHECK-NEXT: 1 3 1.00 fcmpe d29, #0.0
+# CHECK-NEXT: 3 9 1.00 fccmp s1, s31, #0, eq
+# CHECK-NEXT: 3 9 1.00 fccmp s3, s0, #15, hs
+# CHECK-NEXT: 3 9 1.00 fccmp s31, s15, #13, hs
+# CHECK-NEXT: 3 9 1.00 fccmp d9, d31, #0, le
+# CHECK-NEXT: 3 9 1.00 fccmp d3, d0, #15, gt
+# CHECK-NEXT: 3 9 1.00 fccmp d31, d5, #7, ne
+# CHECK-NEXT: 3 9 1.00 fccmpe s1, s31, #0, eq
+# CHECK-NEXT: 3 9 1.00 fccmpe s3, s0, #15, hs
+# CHECK-NEXT: 3 9 1.00 fccmpe s31, s15, #13, hs
+# CHECK-NEXT: 3 9 1.00 fccmpe d9, d31, #0, le
+# CHECK-NEXT: 3 9 1.00 fccmpe d3, d0, #15, gt
+# CHECK-NEXT: 3 9 1.00 fccmpe d31, d5, #7, ne
+# CHECK-NEXT: 3 9 1.00 fcsel s3, s20, s9, pl
+# CHECK-NEXT: 3 9 1.00 fcsel d9, d10, d11, mi
+# CHECK-NEXT: 1 2 0.50 fmov s0, s1
+# CHECK-NEXT: 1 2 0.50 fabs s2, s3
+# CHECK-NEXT: 1 2 0.50 fneg s4, s5
+# CHECK-NEXT: 1 33 1.00 fsqrt s6, s7
+# CHECK-NEXT: 1 3 0.50 fcvt d8, s9
+# CHECK-NEXT: 1 3 0.50 fcvt h10, s11
+# CHECK-NEXT: 1 2 0.50 frintn s12, s13
+# CHECK-NEXT: 1 2 0.50 frintp s14, s15
+# CHECK-NEXT: 1 2 0.50 frintm s16, s17
+# CHECK-NEXT: 1 2 0.50 frintz s18, s19
+# CHECK-NEXT: 1 2 0.50 frinta s20, s21
+# CHECK-NEXT: 1 2 0.50 frintx s22, s23
+# CHECK-NEXT: 1 2 0.50 frinti s24, s25
+# CHECK-NEXT: 1 2 0.50 fmov d0, d1
+# CHECK-NEXT: 1 2 0.50 fabs d2, d3
+# CHECK-NEXT: 1 2 0.50 fneg d4, d5
+# CHECK-NEXT: 1 63 1.00 fsqrt d6, d7
+# CHECK-NEXT: 1 3 0.50 fcvt s8, d9
+# CHECK-NEXT: 1 3 0.50 fcvt h10, d11
+# CHECK-NEXT: 1 2 0.50 frintn d12, d13
+# CHECK-NEXT: 1 2 0.50 frintp d14, d15
+# CHECK-NEXT: 1 2 0.50 frintm d16, d17
+# CHECK-NEXT: 1 2 0.50 frintz d18, d19
+# CHECK-NEXT: 1 2 0.50 frinta d20, d21
+# CHECK-NEXT: 1 2 0.50 frintx d22, d23
+# CHECK-NEXT: 1 2 0.50 frinti d24, d25
+# CHECK-NEXT: 1 3 0.50 fcvt s26, h27
+# CHECK-NEXT: 1 3 0.50 fcvt d28, h29
+# CHECK-NEXT: 1 4 0.50 fmul s20, s19, s17
+# CHECK-NEXT: 1 12 1.00 fdiv s1, s2, s3
+# CHECK-NEXT: 1 2 0.50 fadd s4, s5, s6
+# CHECK-NEXT: 1 2 0.50 fsub s7, s8, s9
+# CHECK-NEXT: 1 2 0.50 fmax s10, s11, s12
+# CHECK-NEXT: 1 2 0.50 fmin s13, s14, s15
+# CHECK-NEXT: 1 2 0.50 fmaxnm s16, s17, s18
+# CHECK-NEXT: 1 2 0.50 fminnm s19, s20, s21
+# CHECK-NEXT: 1 4 0.50 fnmul s22, s23, s2
+# CHECK-NEXT: 1 4 0.50 fmul d20, d19, d17
+# CHECK-NEXT: 1 19 1.00 fdiv d1, d2, d3
+# CHECK-NEXT: 1 2 0.50 fadd d4, d5, d6
+# CHECK-NEXT: 1 2 0.50 fsub d7, d8, d9
+# CHECK-NEXT: 1 2 0.50 fmax d10, d11, d12
+# CHECK-NEXT: 1 2 0.50 fmin d13, d14, d15
+# CHECK-NEXT: 1 2 0.50 fmaxnm d16, d17, d18
+# CHECK-NEXT: 1 2 0.50 fminnm d19, d20, d21
+# CHECK-NEXT: 1 4 0.50 fnmul d22, d23, d24
+# CHECK-NEXT: 1 4 0.50 fmadd s3, s5, s6, s31
+# CHECK-NEXT: 1 4 0.50 fmadd d3, d13, d0, d23
+# CHECK-NEXT: 1 4 0.50 fmsub s3, s5, s6, s31
+# CHECK-NEXT: 1 4 0.50 fmsub d3, d13, d0, d23
+# CHECK-NEXT: 1 4 0.50 fnmadd s3, s5, s6, s31
+# CHECK-NEXT: 1 4 0.50 fnmadd d3, d13, d0, d23
+# CHECK-NEXT: 1 4 0.50 fnmsub s3, s5, s6, s31
+# CHECK-NEXT: 1 4 0.50 fnmsub d3, d13, d0, d23
+# CHECK-NEXT: 2 7 1.00 fcvtzs w3, h5, #1
+# CHECK-NEXT: 2 7 1.00 fcvtzs wzr, h20, #13
+# CHECK-NEXT: 2 7 1.00 fcvtzs w19, h0, #32
+# CHECK-NEXT: 2 7 1.00 fcvtzs x3, h5, #1
+# CHECK-NEXT: 2 7 1.00 fcvtzs x12, h30, #45
+# CHECK-NEXT: 2 7 1.00 fcvtzs x19, h0, #64
+# CHECK-NEXT: 2 7 1.00 fcvtzs w3, s5, #1
+# CHECK-NEXT: 2 7 1.00 fcvtzs wzr, s20, #13
+# CHECK-NEXT: 2 7 1.00 fcvtzs w19, s0, #32
+# CHECK-NEXT: 2 7 1.00 fcvtzs x3, s5, #1
+# CHECK-NEXT: 2 7 1.00 fcvtzs x12, s30, #45
+# CHECK-NEXT: 2 7 1.00 fcvtzs x19, s0, #64
+# CHECK-NEXT: 2 7 1.00 fcvtzs w3, d5, #1
+# CHECK-NEXT: 2 7 1.00 fcvtzs wzr, d20, #13
+# CHECK-NEXT: 2 7 1.00 fcvtzs w19, d0, #32
+# CHECK-NEXT: 2 7 1.00 fcvtzs x3, d5, #1
+# CHECK-NEXT: 2 7 1.00 fcvtzs x12, d30, #45
+# CHECK-NEXT: 2 7 1.00 fcvtzs x19, d0, #64
+# CHECK-NEXT: 2 7 1.00 fcvtzu w3, h5, #1
+# CHECK-NEXT: 2 7 1.00 fcvtzu wzr, h20, #13
+# CHECK-NEXT: 2 7 1.00 fcvtzu w19, h0, #32
+# CHECK-NEXT: 2 7 1.00 fcvtzu x3, h5, #1
+# CHECK-NEXT: 2 7 1.00 fcvtzu x12, h30, #45
+# CHECK-NEXT: 2 7 1.00 fcvtzu x19, h0, #64
+# CHECK-NEXT: 2 7 1.00 fcvtzu w3, s5, #1
+# CHECK-NEXT: 2 7 1.00 fcvtzu wzr, s20, #13
+# CHECK-NEXT: 2 7 1.00 fcvtzu w19, s0, #32
+# CHECK-NEXT: 2 7 1.00 fcvtzu x3, s5, #1
+# CHECK-NEXT: 2 7 1.00 fcvtzu x12, s30, #45
+# CHECK-NEXT: 2 7 1.00 fcvtzu x19, s0, #64
+# CHECK-NEXT: 2 7 1.00 fcvtzu w3, d5, #1
+# CHECK-NEXT: 2 7 1.00 fcvtzu wzr, d20, #13
+# CHECK-NEXT: 2 7 1.00 fcvtzu w19, d0, #32
+# CHECK-NEXT: 2 7 1.00 fcvtzu x3, d5, #1
+# CHECK-NEXT: 2 7 1.00 fcvtzu x12, d30, #45
+# CHECK-NEXT: 2 7 1.00 fcvtzu x19, d0, #64
+# CHECK-NEXT: 3 11 1.00 scvtf h23, w19, #1
+# CHECK-NEXT: 3 11 1.00 scvtf h31, wzr, #20
+# CHECK-NEXT: 3 11 1.00 scvtf h14, w0, #32
+# CHECK-NEXT: 3 11 1.00 scvtf h23, x19, #1
+# CHECK-NEXT: 3 11 1.00 scvtf h31, xzr, #20
+# CHECK-NEXT: 3 11 1.00 scvtf h14, x0, #64
+# CHECK-NEXT: 3 11 1.00 scvtf s23, w19, #1
+# CHECK-NEXT: 3 11 1.00 scvtf s31, wzr, #20
+# CHECK-NEXT: 3 11 1.00 scvtf s14, w0, #32
+# CHECK-NEXT: 3 11 1.00 scvtf s23, x19, #1
+# CHECK-NEXT: 3 11 1.00 scvtf s31, xzr, #20
+# CHECK-NEXT: 3 11 1.00 scvtf s14, x0, #64
+# CHECK-NEXT: 3 11 1.00 scvtf d23, w19, #1
+# CHECK-NEXT: 3 11 1.00 scvtf d31, wzr, #20
+# CHECK-NEXT: 3 11 1.00 scvtf d14, w0, #32
+# CHECK-NEXT: 3 11 1.00 scvtf d23, x19, #1
+# CHECK-NEXT: 3 11 1.00 scvtf d31, xzr, #20
+# CHECK-NEXT: 3 11 1.00 scvtf d14, x0, #64
+# CHECK-NEXT: 3 11 1.00 ucvtf h23, w19, #1
+# CHECK-NEXT: 3 11 1.00 ucvtf h31, wzr, #20
+# CHECK-NEXT: 3 11 1.00 ucvtf h14, w0, #32
+# CHECK-NEXT: 3 11 1.00 ucvtf h23, x19, #1
+# CHECK-NEXT: 3 11 1.00 ucvtf h31, xzr, #20
+# CHECK-NEXT: 3 11 1.00 ucvtf h14, x0, #64
+# CHECK-NEXT: 3 11 1.00 ucvtf s23, w19, #1
+# CHECK-NEXT: 3 11 1.00 ucvtf s31, wzr, #20
+# CHECK-NEXT: 3 11 1.00 ucvtf s14, w0, #32
+# CHECK-NEXT: 3 11 1.00 ucvtf s23, x19, #1
+# CHECK-NEXT: 3 11 1.00 ucvtf s31, xzr, #20
+# CHECK-NEXT: 3 11 1.00 ucvtf s14, x0, #64
+# CHECK-NEXT: 3 11 1.00 ucvtf d23, w19, #1
+# CHECK-NEXT: 3 11 1.00 ucvtf d31, wzr, #20
+# CHECK-NEXT: 3 11 1.00 ucvtf d14, w0, #32
+# CHECK-NEXT: 3 11 1.00 ucvtf d23, x19, #1
+# CHECK-NEXT: 3 11 1.00 ucvtf d31, xzr, #20
+# CHECK-NEXT: 3 11 1.00 ucvtf d14, x0, #64
+# CHECK-NEXT: 2 7 1.00 fcvtns w3, h31
+# CHECK-NEXT: 2 7 1.00 fcvtns xzr, h12
+# CHECK-NEXT: 2 7 1.00 fcvtnu wzr, h12
+# CHECK-NEXT: 2 7 1.00 fcvtnu x0, h0
+# CHECK-NEXT: 2 7 1.00 fcvtps wzr, h9
+# CHECK-NEXT: 2 7 1.00 fcvtps x12, h20
+# CHECK-NEXT: 2 7 1.00 fcvtpu w30, h23
+# CHECK-NEXT: 2 7 1.00 fcvtpu x29, h3
+# CHECK-NEXT: 2 7 1.00 fcvtms w2, h3
+# CHECK-NEXT: 2 7 1.00 fcvtms x4, h5
+# CHECK-NEXT: 2 7 1.00 fcvtmu w6, h7
+# CHECK-NEXT: 2 7 1.00 fcvtmu x8, h9
+# CHECK-NEXT: 2 7 1.00 fcvtzs w10, h11
+# CHECK-NEXT: 2 7 1.00 fcvtzs x12, h13
+# CHECK-NEXT: 2 7 1.00 fcvtzu w14, h15
+# CHECK-NEXT: 2 7 1.00 fcvtzu x15, h16
+# CHECK-NEXT: 3 11 1.00 scvtf h17, w18
+# CHECK-NEXT: 3 11 1.00 scvtf h19, x20
+# CHECK-NEXT: 3 11 1.00 ucvtf h21, w22
+# CHECK-NEXT: 3 11 1.00 scvtf h23, x24
+# CHECK-NEXT: 2 7 1.00 fcvtas w25, h26
+# CHECK-NEXT: 2 7 1.00 fcvtas x27, h28
+# CHECK-NEXT: 2 7 1.00 fcvtau w29, h30
+# CHECK-NEXT: 2 7 1.00 fcvtau xzr, h0
+# CHECK-NEXT: 2 7 1.00 fcvtns w3, s31
+# CHECK-NEXT: 2 7 1.00 fcvtns xzr, s12
+# CHECK-NEXT: 2 7 1.00 fcvtnu wzr, s12
+# CHECK-NEXT: 2 7 1.00 fcvtnu x0, s0
+# CHECK-NEXT: 2 7 1.00 fcvtps wzr, s9
+# CHECK-NEXT: 2 7 1.00 fcvtps x12, s20
+# CHECK-NEXT: 2 7 1.00 fcvtpu w30, s23
+# CHECK-NEXT: 2 7 1.00 fcvtpu x29, s3
+# CHECK-NEXT: 2 7 1.00 fcvtms w2, s3
+# CHECK-NEXT: 2 7 1.00 fcvtms x4, s5
+# CHECK-NEXT: 2 7 1.00 fcvtmu w6, s7
+# CHECK-NEXT: 2 7 1.00 fcvtmu x8, s9
+# CHECK-NEXT: 2 7 1.00 fcvtzs w10, s11
+# CHECK-NEXT: 2 7 1.00 fcvtzs x12, s13
+# CHECK-NEXT: 2 7 1.00 fcvtzu w14, s15
+# CHECK-NEXT: 2 7 1.00 fcvtzu x15, s16
+# CHECK-NEXT: 3 11 1.00 scvtf s17, w18
+# CHECK-NEXT: 3 11 1.00 scvtf s19, x20
+# CHECK-NEXT: 3 11 1.00 ucvtf s21, w22
+# CHECK-NEXT: 3 11 1.00 scvtf s23, x24
+# CHECK-NEXT: 2 7 1.00 fcvtas w25, s26
+# CHECK-NEXT: 2 7 1.00 fcvtas x27, s28
+# CHECK-NEXT: 2 7 1.00 fcvtau w29, s30
+# CHECK-NEXT: 2 7 1.00 fcvtau xzr, s0
+# CHECK-NEXT: 2 7 1.00 fcvtns w3, d31
+# CHECK-NEXT: 2 7 1.00 fcvtns xzr, d12
+# CHECK-NEXT: 2 7 1.00 fcvtnu wzr, d12
+# CHECK-NEXT: 2 7 1.00 fcvtnu x0, d0
+# CHECK-NEXT: 2 7 1.00 fcvtps wzr, d9
+# CHECK-NEXT: 2 7 1.00 fcvtps x12, d20
+# CHECK-NEXT: 2 7 1.00 fcvtpu w30, d23
+# CHECK-NEXT: 2 7 1.00 fcvtpu x29, d3
+# CHECK-NEXT: 2 7 1.00 fcvtms w2, d3
+# CHECK-NEXT: 2 7 1.00 fcvtms x4, d5
+# CHECK-NEXT: 2 7 1.00 fcvtmu w6, d7
+# CHECK-NEXT: 2 7 1.00 fcvtmu x8, d9
+# CHECK-NEXT: 2 7 1.00 fcvtzs w10, d11
+# CHECK-NEXT: 2 7 1.00 fcvtzs x12, d13
+# CHECK-NEXT: 2 7 1.00 fcvtzu w14, d15
+# CHECK-NEXT: 2 7 1.00 fcvtzu x15, d16
+# CHECK-NEXT: 3 11 1.00 scvtf d17, w18
+# CHECK-NEXT: 3 11 1.00 scvtf d19, x20
+# CHECK-NEXT: 3 11 1.00 ucvtf d21, w22
+# CHECK-NEXT: 3 11 1.00 ucvtf d23, x24
+# CHECK-NEXT: 2 7 1.00 fcvtas w25, d26
+# CHECK-NEXT: 2 7 1.00 fcvtas x27, d28
+# CHECK-NEXT: 2 7 1.00 fcvtau w29, d30
+# CHECK-NEXT: 2 7 1.00 fcvtau xzr, d0
+# CHECK-NEXT: 1 5 1.00 fmov w3, s9
+# CHECK-NEXT: 1 3 1.00 fmov s9, w3
+# CHECK-NEXT: 1 5 1.00 fmov x20, d31
+# CHECK-NEXT: 1 3 1.00 fmov d1, x15
+# CHECK-NEXT: 2 7 1.00 fmov x3, v12.d[1]
+# CHECK-NEXT: 1 5 1.00 fmov v1.d[1], x19
+# CHECK-NEXT: 1 2 0.50 fmov s2, #0.12500000
+# CHECK-NEXT: 1 2 0.50 fmov s3, #1.00000000
+# CHECK-NEXT: 1 2 0.50 fmov d30, #16.00000000
+# CHECK-NEXT: 1 2 0.50 fmov s4, #1.06250000
+# CHECK-NEXT: 1 2 0.50 fmov d10, #1.93750000
+# CHECK-NEXT: 1 2 0.50 fmov s12, #-1.00000000
+# CHECK-NEXT: 1 2 0.50 fmov d16, #8.50000000
+# CHECK-NEXT: 1 3 0.50 * ldr w3, #0
+# CHECK-NEXT: 1 3 0.50 * ldr x29, #4
+# CHECK-NEXT: 1 3 0.50 * ldrsw xzr, #-4
+# CHECK-NEXT: 1 3 0.50 * ldr s0, #8
+# CHECK-NEXT: 1 3 0.50 * ldr d0, #1048572
+# CHECK-NEXT: 1 3 0.50 * ldr q0, #-1048576
+# CHECK-NEXT: 1 1 0.50 U prfm pldl1strm, #0
+# CHECK-NEXT: 1 1 0.50 U prfm #22, #0
+# CHECK-NEXT: 2 4 0.50 * * U stxrb w18, w8, [sp]
+# CHECK-NEXT: 2 4 0.50 * * U stxrh w24, w15, [x16]
+# CHECK-NEXT: 2 4 0.50 * * U stxr w5, w6, [x17]
+# CHECK-NEXT: 2 4 0.50 * * U stxr w1, x10, [x21]
+# CHECK-NEXT: 1 3 0.50 * * U ldxrb w30, [x0]
+# CHECK-NEXT: 1 3 0.50 * * U ldxrh w17, [x4]
+# CHECK-NEXT: 1 3 0.50 * * U ldxr w22, [sp]
+# CHECK-NEXT: 1 3 0.50 * * U ldxr x11, [x29]
+# CHECK-NEXT: 1 3 0.50 * * U ldxr x11, [x29]
+# CHECK-NEXT: 1 3 0.50 * * U ldxr x11, [x29]
+# CHECK-NEXT: 2 4 0.50 * * U stxp w12, w11, w10, [sp]
+# CHECK-NEXT: 2 4 0.50 * * U stxp wzr, x27, x9, [x12]
+# CHECK-NEXT: 2 3 0.50 * * U ldxp w0, wzr, [sp]
+# CHECK-NEXT: 2 3 0.50 * * U ldxp x17, x0, [x18]
+# CHECK-NEXT: 2 3 0.50 * * U ldxp x17, x0, [x18]
+# CHECK-NEXT: 2 4 0.50 * * U stlxrb w12, w22, [x0]
+# CHECK-NEXT: 2 4 0.50 * * U stlxrh w10, w1, [x1]
+# CHECK-NEXT: 2 4 0.50 * * U stlxr w9, w2, [x2]
+# CHECK-NEXT: 2 4 0.50 * * U stlxr w9, x3, [sp]
+# CHECK-NEXT: 1 3 0.50 * * U ldaxrb w8, [x4]
+# CHECK-NEXT: 1 3 0.50 * * U ldaxrh w7, [x5]
+# CHECK-NEXT: 1 3 0.50 * * U ldaxr w6, [sp]
+# CHECK-NEXT: 1 3 0.50 * * U ldaxr x5, [x6]
+# CHECK-NEXT: 1 3 0.50 * * U ldaxr x5, [x6]
+# CHECK-NEXT: 1 3 0.50 * * U ldaxr x5, [x6]
+# CHECK-NEXT: 2 4 0.50 * * U stlxp w4, w5, w6, [sp]
+# CHECK-NEXT: 2 4 0.50 * * U stlxp wzr, x6, x7, [x1]
+# CHECK-NEXT: 2 3 0.50 * * U ldaxp w5, w18, [sp]
+# CHECK-NEXT: 2 3 0.50 * * U ldaxp x6, x19, [x22]
+# CHECK-NEXT: 2 3 0.50 * * U ldaxp x6, x19, [x22]
+# CHECK-NEXT: 1 1 0.50 * U stlrb w24, [sp]
+# CHECK-NEXT: 1 1 0.50 * U stlrh w25, [x30]
+# CHECK-NEXT: 1 1 0.50 * U stlr w26, [x29]
+# CHECK-NEXT: 1 1 0.50 * U stlr x27, [x28]
+# CHECK-NEXT: 1 1 0.50 * U stlr x27, [x28]
+# CHECK-NEXT: 1 1 0.50 * U stlr x27, [x28]
+# CHECK-NEXT: 1 3 0.50 * U ldarb w23, [sp]
+# CHECK-NEXT: 1 3 0.50 * U ldarh w22, [x30]
+# CHECK-NEXT: 1 3 0.50 * U ldar wzr, [x29]
+# CHECK-NEXT: 1 3 0.50 * U ldar x21, [x28]
+# CHECK-NEXT: 1 3 0.50 * U ldar x21, [x28]
+# CHECK-NEXT: 1 3 0.50 * U ldar x21, [x28]
+# CHECK-NEXT: 1 1 0.50 * sturb w9, [sp]
+# CHECK-NEXT: 1 1 0.50 * sturh wzr, [x12, #255]
+# CHECK-NEXT: 1 1 0.50 * stur w16, [x0, #-256]
+# CHECK-NEXT: 1 1 0.50 * stur x28, [x14, #1]
+# CHECK-NEXT: 1 3 0.50 * ldurb w1, [x20, #255]
+# CHECK-NEXT: 1 3 0.50 * ldurh w20, [x1, #255]
+# CHECK-NEXT: 1 3 0.50 * ldur w12, [sp, #255]
+# CHECK-NEXT: 1 3 0.50 * ldur xzr, [x12, #255]
+# CHECK-NEXT: 1 3 0.50 * ldursb x9, [x7, #-256]
+# CHECK-NEXT: 1 3 0.50 * ldursh x17, [x19, #-256]
+# CHECK-NEXT: 1 3 0.50 * ldursw x20, [x15, #-256]
+# CHECK-NEXT: 1 1 0.50 U prfum pldl2keep, [sp, #-256]
+# CHECK-NEXT: 1 3 0.50 * ldursb w19, [x1, #-256]
+# CHECK-NEXT: 1 3 0.50 * ldursh w15, [x21, #-256]
+# CHECK-NEXT: 2 2 1.00 * stur b0, [sp, #1]
+# CHECK-NEXT: 2 2 1.00 * stur h12, [x12, #-1]
+# CHECK-NEXT: 2 2 1.00 * stur s15, [x0, #255]
+# CHECK-NEXT: 2 2 1.00 * stur d31, [x5, #25]
+# CHECK-NEXT: 2 2 1.00 * stur q9, [x5]
+# CHECK-NEXT: 1 4 0.50 * ldur b3, [sp]
+# CHECK-NEXT: 1 4 0.50 * ldur h5, [x4, #-256]
+# CHECK-NEXT: 1 4 0.50 * ldur s7, [x12, #-1]
+# CHECK-NEXT: 1 4 0.50 * ldur d11, [x19, #4]
+# CHECK-NEXT: 1 4 0.50 * ldur q13, [x1, #2]
+# CHECK-NEXT: 2 1 0.50 * strb w9, [x2], #255
+# CHECK-NEXT: 2 1 0.50 * strb w10, [x3], #1
+# CHECK-NEXT: 2 1 0.50 * strb w10, [x3], #-256
+# CHECK-NEXT: 2 1 0.50 * strh w9, [x2], #255
+# CHECK-NEXT: 2 1 0.50 * strh w9, [x2], #1
+# CHECK-NEXT: 2 1 0.50 * strh w10, [x3], #-256
+# CHECK-NEXT: 2 1 0.50 * str w19, [sp], #255
+# CHECK-NEXT: 2 1 0.50 * str w20, [x30], #1
+# CHECK-NEXT: 2 1 0.50 * str w21, [x12], #-256
+# CHECK-NEXT: 2 1 0.50 * str xzr, [x9], #255
+# CHECK-NEXT: 2 1 0.50 * str x2, [x3], #1
+# CHECK-NEXT: 2 1 0.50 * str x19, [x12], #-256
+# CHECK-NEXT: 2 3 0.50 * ldrb w9, [x2], #255
+# CHECK-NEXT: 2 3 0.50 * ldrb w10, [x3], #1
+# CHECK-NEXT: 2 3 0.50 * ldrb w10, [x3], #-256
+# CHECK-NEXT: 2 3 0.50 * ldrh w9, [x2], #255
+# CHECK-NEXT: 2 3 0.50 * ldrh w9, [x2], #1
+# CHECK-NEXT: 2 3 0.50 * ldrh w10, [x3], #-256
+# CHECK-NEXT: 2 3 0.50 * ldr w19, [sp], #255
+# CHECK-NEXT: 2 3 0.50 * ldr w20, [x30], #1
+# CHECK-NEXT: 2 3 0.50 * ldr w21, [x12], #-256
+# CHECK-NEXT: 2 3 0.50 * ldr xzr, [x9], #255
+# CHECK-NEXT: 2 3 0.50 * ldr x2, [x3], #1
+# CHECK-NEXT: 2 3 0.50 * ldr x19, [x12], #-256
+# CHECK-NEXT: 2 3 0.50 * ldrsb xzr, [x9], #255
+# CHECK-NEXT: 2 3 0.50 * ldrsb x2, [x3], #1
+# CHECK-NEXT: 2 3 0.50 * ldrsb x19, [x12], #-256
+# CHECK-NEXT: 2 3 0.50 * ldrsh xzr, [x9], #255
+# CHECK-NEXT: 2 3 0.50 * ldrsh x2, [x3], #1
+# CHECK-NEXT: 2 3 0.50 * ldrsh x19, [x12], #-256
+# CHECK-NEXT: 2 3 0.50 * ldrsw xzr, [x9], #255
+# CHECK-NEXT: 2 3 0.50 * ldrsw x2, [x3], #1
+# CHECK-NEXT: 2 3 0.50 * ldrsw x19, [x12], #-256
+# CHECK-NEXT: 2 3 0.50 * ldrsb wzr, [x9], #255
+# CHECK-NEXT: 2 3 0.50 * ldrsb w2, [x3], #1
+# CHECK-NEXT: 2 3 0.50 * ldrsb w19, [x12], #-256
+# CHECK-NEXT: 2 3 0.50 * ldrsh wzr, [x9], #255
+# CHECK-NEXT: 2 3 0.50 * ldrsh w2, [x3], #1
+# CHECK-NEXT: 2 3 0.50 * ldrsh w19, [x12], #-256
+# CHECK-NEXT: 2 1 0.50 * str b0, [x0], #255
+# CHECK-NEXT: 2 1 0.50 * str b3, [x3], #1
+# CHECK-NEXT: 2 1 0.50 * str b5, [sp], #-256
+# CHECK-NEXT: 2 1 0.50 * str h10, [x10], #255
+# CHECK-NEXT: 2 1 0.50 * str h13, [x23], #1
+# CHECK-NEXT: 2 1 0.50 * str h15, [sp], #-256
+# CHECK-NEXT: 2 1 0.50 * str s20, [x20], #255
+# CHECK-NEXT: 2 1 0.50 * str s23, [x23], #1
+# CHECK-NEXT: 2 1 0.50 * str s25, [x0], #-256
+# CHECK-NEXT: 2 1 0.50 * str d20, [x20], #255
+# CHECK-NEXT: 2 1 0.50 * str d23, [x23], #1
+# CHECK-NEXT: 2 1 0.50 * str d25, [x0], #-256
+# CHECK-NEXT: 2 3 0.50 * ldr b0, [x0], #255
+# CHECK-NEXT: 2 3 0.50 * ldr b3, [x3], #1
+# CHECK-NEXT: 2 3 0.50 * ldr b5, [sp], #-256
+# CHECK-NEXT: 2 3 0.50 * ldr h10, [x10], #255
+# CHECK-NEXT: 2 3 0.50 * ldr h13, [x23], #1
+# CHECK-NEXT: 2 3 0.50 * ldr h15, [sp], #-256
+# CHECK-NEXT: 2 3 0.50 * ldr s20, [x20], #255
+# CHECK-NEXT: 2 3 0.50 * ldr s23, [x23], #1
+# CHECK-NEXT: 2 3 0.50 * ldr s25, [x0], #-256
+# CHECK-NEXT: 2 3 0.50 * ldr d20, [x20], #255
+# CHECK-NEXT: 2 3 0.50 * ldr d23, [x23], #1
+# CHECK-NEXT: 2 3 0.50 * ldr d25, [x0], #-256
+# CHECK-NEXT: 2 3 0.50 * ldr q20, [x1], #255
+# CHECK-NEXT: 2 3 0.50 * ldr q23, [x9], #1
+# CHECK-NEXT: 2 3 0.50 * ldr q25, [x20], #-256
+# CHECK-NEXT: 2 1 0.50 * str q10, [x1], #255
+# CHECK-NEXT: 2 1 0.50 * str q22, [sp], #1
+# CHECK-NEXT: 2 1 0.50 * str q21, [x20], #-256
+# CHECK-NEXT: 2 3 0.50 * ldr x3, [x4, #0]!
+# CHECK-NEXT: 2 1 0.50 * strb w9, [x2, #255]!
+# CHECK-NEXT: 2 1 0.50 * strb w10, [x3, #1]!
+# CHECK-NEXT: 2 1 0.50 * strb w10, [x3, #-256]!
+# CHECK-NEXT: 2 1 0.50 * strh w9, [x2, #255]!
+# CHECK-NEXT: 2 1 0.50 * strh w9, [x2, #1]!
+# CHECK-NEXT: 2 1 0.50 * strh w10, [x3, #-256]!
+# CHECK-NEXT: 2 1 0.50 * str w19, [sp, #255]!
+# CHECK-NEXT: 2 1 0.50 * str w20, [x30, #1]!
+# CHECK-NEXT: 2 1 0.50 * str w21, [x12, #-256]!
+# CHECK-NEXT: 2 1 0.50 * str xzr, [x9, #255]!
+# CHECK-NEXT: 2 1 0.50 * str x2, [x3, #1]!
+# CHECK-NEXT: 2 1 0.50 * str x19, [x12, #-256]!
+# CHECK-NEXT: 2 3 0.50 * ldrb w9, [x2, #255]!
+# CHECK-NEXT: 2 3 0.50 * ldrb w10, [x3, #1]!
+# CHECK-NEXT: 2 3 0.50 * ldrb w10, [x3, #-256]!
+# CHECK-NEXT: 2 3 0.50 * ldrh w9, [x2, #255]!
+# CHECK-NEXT: 2 3 0.50 * ldrh w9, [x2, #1]!
+# CHECK-NEXT: 2 3 0.50 * ldrh w10, [x3, #-256]!
+# CHECK-NEXT: 2 3 0.50 * ldr w19, [sp, #255]!
+# CHECK-NEXT: 2 3 0.50 * ldr w20, [x30, #1]!
+# CHECK-NEXT: 2 3 0.50 * ldr w21, [x12, #-256]!
+# CHECK-NEXT: 2 3 0.50 * ldr xzr, [x9, #255]!
+# CHECK-NEXT: 2 3 0.50 * ldr x2, [x3, #1]!
+# CHECK-NEXT: 2 3 0.50 * ldr x19, [x12, #-256]!
+# CHECK-NEXT: 2 3 0.50 * ldrsb xzr, [x9, #255]!
+# CHECK-NEXT: 2 3 0.50 * ldrsb x2, [x3, #1]!
+# CHECK-NEXT: 2 3 0.50 * ldrsb x19, [x12, #-256]!
+# CHECK-NEXT: 2 3 0.50 * ldrsh xzr, [x9, #255]!
+# CHECK-NEXT: 2 3 0.50 * ldrsh x2, [x3, #1]!
+# CHECK-NEXT: 2 3 0.50 * ldrsh x19, [x12, #-256]!
+# CHECK-NEXT: 2 3 0.50 * ldrsw xzr, [x9, #255]!
+# CHECK-NEXT: 2 3 0.50 * ldrsw x2, [x3, #1]!
+# CHECK-NEXT: 2 3 0.50 * ldrsw x19, [x12, #-256]!
+# CHECK-NEXT: 2 3 0.50 * ldrsb wzr, [x9, #255]!
+# CHECK-NEXT: 2 3 0.50 * ldrsb w2, [x3, #1]!
+# CHECK-NEXT: 2 3 0.50 * ldrsb w19, [x12, #-256]!
+# CHECK-NEXT: 2 3 0.50 * ldrsh wzr, [x9, #255]!
+# CHECK-NEXT: 2 3 0.50 * ldrsh w2, [x3, #1]!
+# CHECK-NEXT: 2 3 0.50 * ldrsh w19, [x12, #-256]!
+# CHECK-NEXT: 2 1 0.50 * str b0, [x0, #255]!
+# CHECK-NEXT: 2 1 0.50 * str b3, [x3, #1]!
+# CHECK-NEXT: 2 1 0.50 * str b5, [sp, #-256]!
+# CHECK-NEXT: 2 1 0.50 * str h10, [x10, #255]!
+# CHECK-NEXT: 2 1 0.50 * str h13, [x23, #1]!
+# CHECK-NEXT: 2 1 0.50 * str h15, [sp, #-256]!
+# CHECK-NEXT: 2 1 0.50 * str s20, [x20, #255]!
+# CHECK-NEXT: 2 1 0.50 * str s23, [x23, #1]!
+# CHECK-NEXT: 2 1 0.50 * str s25, [x0, #-256]!
+# CHECK-NEXT: 2 1 0.50 * str d20, [x20, #255]!
+# CHECK-NEXT: 2 1 0.50 * str d23, [x23, #1]!
+# CHECK-NEXT: 2 1 0.50 * str d25, [x0, #-256]!
+# CHECK-NEXT: 2 3 0.50 * ldr b0, [x0, #255]!
+# CHECK-NEXT: 2 3 0.50 * ldr b3, [x3, #1]!
+# CHECK-NEXT: 2 3 0.50 * ldr b5, [sp, #-256]!
+# CHECK-NEXT: 2 3 0.50 * ldr h10, [x10, #255]!
+# CHECK-NEXT: 2 3 0.50 * ldr h13, [x23, #1]!
+# CHECK-NEXT: 2 3 0.50 * ldr h15, [sp, #-256]!
+# CHECK-NEXT: 2 3 0.50 * ldr s20, [x20, #255]!
+# CHECK-NEXT: 2 3 0.50 * ldr s23, [x23, #1]!
+# CHECK-NEXT: 2 3 0.50 * ldr s25, [x0, #-256]!
+# CHECK-NEXT: 2 3 0.50 * ldr d20, [x20, #255]!
+# CHECK-NEXT: 2 3 0.50 * ldr d23, [x23, #1]!
+# CHECK-NEXT: 2 3 0.50 * ldr d25, [x0, #-256]!
+# CHECK-NEXT: 2 3 0.50 * ldr q20, [x1, #255]!
+# CHECK-NEXT: 2 3 0.50 * ldr q23, [x9, #1]!
+# CHECK-NEXT: 2 3 0.50 * ldr q25, [x20, #-256]!
+# CHECK-NEXT: 2 1 0.50 * str q10, [x1, #255]!
+# CHECK-NEXT: 2 1 0.50 * str q22, [sp, #1]!
+# CHECK-NEXT: 2 1 0.50 * str q21, [x20, #-256]!
+# CHECK-NEXT: 1 1 0.50 * sttrb w9, [sp]
+# CHECK-NEXT: 1 1 0.50 * sttrh wzr, [x12, #255]
+# CHECK-NEXT: 1 1 0.50 * sttr w16, [x0, #-256]
+# CHECK-NEXT: 1 1 0.50 * sttr x28, [x14, #1]
+# CHECK-NEXT: 1 3 0.50 * ldtrb w1, [x20, #255]
+# CHECK-NEXT: 1 3 0.50 * ldtrh w20, [x1, #255]
+# CHECK-NEXT: 1 3 0.50 * ldtr w12, [sp, #255]
+# CHECK-NEXT: 1 3 0.50 * ldtr xzr, [x12, #255]
+# CHECK-NEXT: 1 3 0.50 * ldtrsb x9, [x7, #-256]
+# CHECK-NEXT: 1 3 0.50 * ldtrsh x17, [x19, #-256]
+# CHECK-NEXT: 1 3 0.50 * ldtrsw x20, [x15, #-256]
+# CHECK-NEXT: 1 3 0.50 * ldtrsb w19, [x1, #-256]
+# CHECK-NEXT: 1 3 0.50 * ldtrsh w15, [x21, #-256]
+# CHECK-NEXT: 1 3 0.50 * ldr x4, [x29]
+# CHECK-NEXT: 1 3 0.50 * ldr x30, [x12, #32760]
+# CHECK-NEXT: 1 3 0.50 * ldr x20, [sp, #8]
+# CHECK-NEXT: 1 3 0.50 * ldr xzr, [sp]
+# CHECK-NEXT: 1 3 0.50 * ldr w2, [sp]
+# CHECK-NEXT: 1 3 0.50 * ldr w17, [sp, #16380]
+# CHECK-NEXT: 1 3 0.50 * ldr w13, [x2, #4]
+# CHECK-NEXT: 1 3 0.50 * ldrsw x2, [x5, #4]
+# CHECK-NEXT: 1 3 0.50 * ldrsw x23, [sp, #16380]
+# CHECK-NEXT: 1 3 0.50 * ldrh w2, [x4]
+# CHECK-NEXT: 1 3 0.50 * ldrsh w23, [x6, #8190]
+# CHECK-NEXT: 1 3 0.50 * ldrsh wzr, [sp, #2]
+# CHECK-NEXT: 1 3 0.50 * ldrsh x29, [x2, #2]
+# CHECK-NEXT: 1 3 0.50 * ldrb w26, [x3, #121]
+# CHECK-NEXT: 1 3 0.50 * ldrb w12, [x2]
+# CHECK-NEXT: 1 3 0.50 * ldrsb w27, [sp, #4095]
+# CHECK-NEXT: 1 3 0.50 * ldrsb xzr, [x15]
+# CHECK-NEXT: 1 1 0.50 * str x30, [sp]
+# CHECK-NEXT: 1 1 0.50 * str w20, [x4, #16380]
+# CHECK-NEXT: 1 1 0.50 * strh w17, [sp, #8190]
+# CHECK-NEXT: 1 1 0.50 * strb w23, [x3, #4095]
+# CHECK-NEXT: 1 1 0.50 * strb wzr, [x2]
+# CHECK-NEXT: 1 3 0.50 * ldr b31, [sp, #4095]
+# CHECK-NEXT: 1 3 0.50 * ldr h20, [x2, #8190]
+# CHECK-NEXT: 1 3 0.50 * ldr s10, [x19, #16380]
+# CHECK-NEXT: 1 3 0.50 * ldr d3, [x10, #32760]
+# CHECK-NEXT: 2 2 1.00 * str q12, [sp, #65520]
+# CHECK-NEXT: 1 3 0.50 * ldrb w3, [sp, x5]
+# CHECK-NEXT: 1 3 0.50 * ldrb w9, [x27, x6]
+# CHECK-NEXT: 1 3 0.50 * ldrsb w10, [x30, x7]
+# CHECK-NEXT: 1 3 0.50 * ldrb w11, [x29, x3, sxtx]
+# CHECK-NEXT: 2 1 1.00 * strb w12, [x28, xzr, sxtx]
+# CHECK-NEXT: 1 3 0.50 * ldrb w14, [x26, w6, uxtw]
+# CHECK-NEXT: 1 3 0.50 * ldrsb w15, [x25, w7, uxtw]
+# CHECK-NEXT: 1 3 0.50 * ldrb w17, [x23, w9, sxtw]
+# CHECK-NEXT: 1 3 0.50 * ldrsb x18, [x22, w10, sxtw]
+# CHECK-NEXT: 1 3 0.50 * ldrsh w3, [sp, x5]
+# CHECK-NEXT: 1 3 0.50 * ldrsh w9, [x27, x6]
+# CHECK-NEXT: 1 3 0.50 * ldrh w10, [x30, x7, lsl #1]
+# CHECK-NEXT: 2 1 1.00 * strh w11, [x29, x3, sxtx]
+# CHECK-NEXT: 1 3 0.50 * ldrh w12, [x28, xzr, sxtx]
+# CHECK-NEXT: 1 3 0.50 * ldrsh x13, [x27, x5, sxtx #1]
+# CHECK-NEXT: 1 3 0.50 * ldrh w14, [x26, w6, uxtw]
+# CHECK-NEXT: 1 3 0.50 * ldrh w15, [x25, w7, uxtw]
+# CHECK-NEXT: 1 3 0.50 * ldrsh w16, [x24, w8, uxtw #1]
+# CHECK-NEXT: 1 3 0.50 * ldrh w17, [x23, w9, sxtw]
+# CHECK-NEXT: 1 3 0.50 * ldrh w18, [x22, w10, sxtw]
+# CHECK-NEXT: 2 1 1.00 * strh w19, [x21, wzr, sxtw #1]
+# CHECK-NEXT: 1 3 0.50 * ldr w3, [sp, x5]
+# CHECK-NEXT: 1 4 0.50 * ldr s9, [x27, x6]
+# CHECK-NEXT: 1 3 0.50 * ldr w10, [x30, x7, lsl #2]
+# CHECK-NEXT: 1 3 0.50 * ldr w11, [x29, x3, sxtx]
+# CHECK-NEXT: 2 2 1.00 * str s12, [x28, xzr, sxtx]
+# CHECK-NEXT: 2 1 1.00 * str w13, [x27, x5, sxtx #2]
+# CHECK-NEXT: 2 1 1.00 * str w14, [x26, w6, uxtw]
+# CHECK-NEXT: 1 3 0.50 * ldr w15, [x25, w7, uxtw]
+# CHECK-NEXT: 1 3 0.50 * ldr w16, [x24, w8, uxtw #2]
+# CHECK-NEXT: 1 3 0.50 * ldrsw x17, [x23, w9, sxtw]
+# CHECK-NEXT: 1 3 0.50 * ldr w18, [x22, w10, sxtw]
+# CHECK-NEXT: 1 3 0.50 * ldrsw x19, [x21, wzr, sxtw #2]
+# CHECK-NEXT: 1 3 0.50 * ldr x3, [sp, x5]
+# CHECK-NEXT: 2 1 1.00 * str x9, [x27, x6]
+# CHECK-NEXT: 1 4 0.50 * ldr d10, [x30, x7, lsl #3]
+# CHECK-NEXT: 2 1 1.00 * str x11, [x29, x3, sxtx]
+# CHECK-NEXT: 1 3 0.50 * ldr x12, [x28, xzr, sxtx]
+# CHECK-NEXT: 1 3 0.50 * ldr x13, [x27, x5, sxtx #3]
+# CHECK-NEXT: 1 1 0.50 U prfm pldl1keep, [x26, w6, uxtw]
+# CHECK-NEXT: 1 3 0.50 * ldr x15, [x25, w7, uxtw]
+# CHECK-NEXT: 1 3 0.50 * ldr x16, [x24, w8, uxtw #3]
+# CHECK-NEXT: 1 3 0.50 * ldr x17, [x23, w9, sxtw]
+# CHECK-NEXT: 1 3 0.50 * ldr x18, [x22, w10, sxtw]
+# CHECK-NEXT: 2 2 1.00 * str d19, [x21, wzr, sxtw #3]
+# CHECK-NEXT: 1 4 0.50 * ldr q3, [sp, x5]
+# CHECK-NEXT: 1 4 0.50 * ldr q9, [x27, x6]
+# CHECK-NEXT: 1 4 0.50 * ldr q10, [x30, x7, lsl #4]
+# CHECK-NEXT: 2 2 1.00 * str q11, [x29, x3, sxtx]
+# CHECK-NEXT: 2 2 1.00 * str q12, [x28, xzr, sxtx]
+# CHECK-NEXT: 2 2 1.00 * str q13, [x27, x5, sxtx #4]
+# CHECK-NEXT: 1 4 0.50 * ldr q14, [x26, w6, uxtw]
+# CHECK-NEXT: 1 4 0.50 * ldr q15, [x25, w7, uxtw]
+# CHECK-NEXT: 1 4 0.50 * ldr q16, [x24, w8, uxtw #4]
+# CHECK-NEXT: 1 4 0.50 * ldr q17, [x23, w9, sxtw]
+# CHECK-NEXT: 2 2 1.00 * str q18, [x22, w10, sxtw]
+# CHECK-NEXT: 1 4 0.50 * ldr q19, [x21, wzr, sxtw #4]
+# CHECK-NEXT: 1 3 0.50 * ldp w3, w5, [sp]
+# CHECK-NEXT: 2 2 0.50 * stp wzr, w9, [sp, #252]
+# CHECK-NEXT: 1 3 0.50 * ldp w2, wzr, [sp, #-256]
+# CHECK-NEXT: 1 3 0.50 * ldp w9, w10, [sp, #4]
+# CHECK-NEXT: 2 5 1.00 * ldpsw x9, x10, [sp, #4]
+# CHECK-NEXT: 2 5 1.00 * ldpsw x9, x10, [x2, #-256]
+# CHECK-NEXT: 2 5 1.00 * ldpsw x20, x30, [sp, #252]
+# CHECK-NEXT: 1 3 0.50 * ldp x21, x29, [x2, #504]
+# CHECK-NEXT: 1 3 0.50 * ldp x22, x23, [x3, #-512]
+# CHECK-NEXT: 1 3 0.50 * ldp x24, x25, [x4, #8]
+# CHECK-NEXT: 2 5 1.00 * ldp s29, s28, [sp, #252]
+# CHECK-NEXT: 4 3 2.00 * stp s27, s26, [sp, #-256]
+# CHECK-NEXT: 2 5 1.00 * ldp s1, s2, [x3, #44]
+# CHECK-NEXT: 4 3 2.00 * stp d3, d5, [x9, #504]
+# CHECK-NEXT: 4 3 2.00 * stp d7, d11, [x10, #-512]
+# CHECK-NEXT: 2 5 1.00 * ldp d2, d3, [x30, #-8]
+# CHECK-NEXT: 4 3 2.00 * stp q3, q5, [sp]
+# CHECK-NEXT: 4 3 2.00 * stp q17, q19, [sp, #1008]
+# CHECK-NEXT: 2 4 1.00 * ldp q23, q29, [x1, #-1024]
+# CHECK-NEXT: 1 3 0.50 * ldp w3, w5, [sp], #0
+# CHECK-NEXT: 3 2 0.50 * stp wzr, w9, [sp], #252
+# CHECK-NEXT: 1 3 0.50 * ldp w2, wzr, [sp], #-256
+# CHECK-NEXT: 1 3 0.50 * ldp w9, w10, [sp], #4
+# CHECK-NEXT: 2 5 1.00 * ldpsw x9, x10, [sp], #4
+# CHECK-NEXT: 2 5 1.00 * ldpsw x9, x10, [x2], #-256
+# CHECK-NEXT: 2 5 1.00 * ldpsw x20, x30, [sp], #252
+# CHECK-NEXT: 1 3 0.50 * ldp x21, x29, [x2], #504
+# CHECK-NEXT: 1 3 0.50 * ldp x22, x23, [x3], #-512
+# CHECK-NEXT: 1 3 0.50 * ldp x24, x25, [x4], #8
+# CHECK-NEXT: 2 5 1.00 * ldp s29, s28, [sp], #252
+# CHECK-NEXT: 4 3 2.00 * stp s27, s26, [sp], #-256
+# CHECK-NEXT: 2 5 1.00 * ldp s1, s2, [x3], #44
+# CHECK-NEXT: 4 3 2.00 * stp d3, d5, [x9], #504
+# CHECK-NEXT: 4 3 2.00 * stp d7, d11, [x10], #-512
+# CHECK-NEXT: 2 5 1.00 * ldp d2, d3, [x30], #-8
+# CHECK-NEXT: 4 3 2.00 * stp q3, q5, [sp], #0
+# CHECK-NEXT: 4 3 2.00 * stp q17, q19, [sp], #1008
+# CHECK-NEXT: 2 4 1.00 * ldp q23, q29, [x1], #-1024
+# CHECK-NEXT: 1 3 0.50 * ldp w3, w5, [sp, #0]!
+# CHECK-NEXT: 3 2 0.50 * stp wzr, w9, [sp, #252]!
+# CHECK-NEXT: 1 3 0.50 * ldp w2, wzr, [sp, #-256]!
+# CHECK-NEXT: 1 3 0.50 * ldp w9, w10, [sp, #4]!
+# CHECK-NEXT: 2 5 1.00 * ldpsw x9, x10, [sp, #4]!
+# CHECK-NEXT: 2 5 1.00 * ldpsw x9, x10, [x2, #-256]!
+# CHECK-NEXT: 2 5 1.00 * ldpsw x20, x30, [sp, #252]!
+# CHECK-NEXT: 1 3 0.50 * ldp x21, x29, [x2, #504]!
+# CHECK-NEXT: 1 3 0.50 * ldp x22, x23, [x3, #-512]!
+# CHECK-NEXT: 1 3 0.50 * ldp x24, x25, [x4, #8]!
+# CHECK-NEXT: 2 5 1.00 * ldp s29, s28, [sp, #252]!
+# CHECK-NEXT: 4 3 2.00 * stp s27, s26, [sp, #-256]!
+# CHECK-NEXT: 2 5 1.00 * ldp s1, s2, [x3, #44]!
+# CHECK-NEXT: 4 3 2.00 * stp d3, d5, [x9, #504]!
+# CHECK-NEXT: 4 3 2.00 * stp d7, d11, [x10, #-512]!
+# CHECK-NEXT: 2 5 1.00 * ldp d2, d3, [x30, #-8]!
+# CHECK-NEXT: 4 3 2.00 * stp q3, q5, [sp, #0]!
+# CHECK-NEXT: 4 3 2.00 * stp q17, q19, [sp, #1008]!
+# CHECK-NEXT: 2 4 1.00 * ldp q23, q29, [x1, #-1024]!
+# CHECK-NEXT: 1 3 0.50 * ldnp w3, w5, [sp]
+# CHECK-NEXT: 2 1 1.00 * stnp wzr, w9, [sp, #252]
+# CHECK-NEXT: 1 3 0.50 * ldnp w2, wzr, [sp, #-256]
+# CHECK-NEXT: 1 3 0.50 * ldnp w9, w10, [sp, #4]
+# CHECK-NEXT: 1 3 0.50 * ldnp x21, x29, [x2, #504]
+# CHECK-NEXT: 1 3 0.50 * ldnp x22, x23, [x3, #-512]
+# CHECK-NEXT: 1 3 0.50 * ldnp x24, x25, [x4, #8]
+# CHECK-NEXT: 2 5 1.00 * ldnp s29, s28, [sp, #252]
+# CHECK-NEXT: 4 3 2.00 * stnp s27, s26, [sp, #-256]
+# CHECK-NEXT: 2 5 1.00 * ldnp s1, s2, [x3, #44]
+# CHECK-NEXT: 4 3 2.00 * stnp d3, d5, [x9, #504]
+# CHECK-NEXT: 4 3 2.00 * stnp d7, d11, [x10, #-512]
+# CHECK-NEXT: 2 5 1.00 * ldnp d2, d3, [x30, #-8]
+# CHECK-NEXT: 4 3 2.00 * stnp q3, q5, [sp]
+# CHECK-NEXT: 4 3 2.00 * stnp q17, q19, [sp, #1008]
+# CHECK-NEXT: 2 4 1.00 * ldnp q23, q29, [x1, #-1024]
+# CHECK-NEXT: 1 1 0.25 mov w3, #983055
+# CHECK-NEXT: 1 1 0.25 mov x10, #-6148914691236517206
+# CHECK-NEXT: 1 1 0.25 and w12, w23, w21
+# CHECK-NEXT: 1 1 0.25 and w16, w15, w1, lsl #1
+# CHECK-NEXT: 2 2 0.50 and w9, w4, w10, lsl #31
+# CHECK-NEXT: 1 1 0.25 and w3, w30, w11
+# CHECK-NEXT: 2 2 0.50 and x3, x5, x7, lsl #63
+# CHECK-NEXT: 2 2 0.50 and x5, x14, x19, asr #4
+# CHECK-NEXT: 2 2 0.50 and w3, w17, w19, ror #31
+# CHECK-NEXT: 2 2 0.50 and w0, w2, wzr, lsr #17
+# CHECK-NEXT: 2 2 0.50 and w3, w30, w11, asr #2
+# CHECK-NEXT: 1 1 0.25 and xzr, x4, x26
+# CHECK-NEXT: 2 2 0.50 and w3, wzr, w20, ror #2
+# CHECK-NEXT: 2 2 0.50 and x7, x20, xzr, asr #63
+# CHECK-NEXT: 2 2 0.50 bic x13, x20, x14, lsl #47
+# CHECK-NEXT: 1 1 0.25 bic w2, w7, w9
+# CHECK-NEXT: 2 2 0.50 orr w2, w7, w0, asr #31
+# CHECK-NEXT: 2 2 0.50 orr x8, x9, x10, lsl #12
+# CHECK-NEXT: 2 2 0.50 orn x3, x5, x7, asr #2
+# CHECK-NEXT: 1 1 0.25 orn w2, w5, w29
+# CHECK-NEXT: 1 1 0.25 ands w7, wzr, w9, lsl #1
+# CHECK-NEXT: 2 2 0.50 ands x3, x5, x20, ror #63
+# CHECK-NEXT: 1 1 0.25 bics w3, w5, w7
+# CHECK-NEXT: 1 1 0.25 bics x3, xzr, x3, lsl #1
+# CHECK-NEXT: 2 2 0.50 tst w3, w7, lsl #31
+# CHECK-NEXT: 2 2 0.50 tst x2, x20, asr #2
+# CHECK-NEXT: 1 1 0.25 mov x3, x6
+# CHECK-NEXT: 1 1 0.25 mov x3, xzr
+# CHECK-NEXT: 1 1 0.25 mov wzr, w2
+# CHECK-NEXT: 1 1 0.25 mov w3, w5
+# CHECK-NEXT: 1 1 0.25 movz w2, #0, lsl #16
+# CHECK-NEXT: 1 1 0.25 mov w2, #-1235
+# CHECK-NEXT: 1 1 0.25 mov x2, #5299989643264
+# CHECK-NEXT: 1 1 0.25 mov x2, #0
+# CHECK-NEXT: 1 1 0.25 movk w3, #0
+# CHECK-NEXT: 1 1 0.25 movz x4, #0, lsl #16
+# CHECK-NEXT: 1 1 0.25 movk w5, #0, lsl #16
+# CHECK-NEXT: 1 1 0.25 movz x6, #0, lsl #32
+# CHECK-NEXT: 1 1 0.25 movk x7, #0, lsl #32
+# CHECK-NEXT: 1 1 0.25 movz x8, #0, lsl #48
+# CHECK-NEXT: 1 1 0.25 movk x9, #0, lsl #48
+# CHECK-NEXT: 1 1 0.50 adr x2, #1600
+# CHECK-NEXT: 1 1 0.50 adrp x21, #6553600
+# CHECK-NEXT: 1 1 0.50 adr x0, #262144
+# CHECK-NEXT: 1 1 0.50 tbz x12, #62, #0
+# CHECK-NEXT: 1 1 0.50 tbz x12, #62, #4
+# CHECK-NEXT: 1 1 0.50 tbz x12, #62, #-32768
+# CHECK-NEXT: 1 1 0.50 tbnz x12, #60, #32764
+# CHECK-NEXT: 1 1 0.50 b #4
+# CHECK-NEXT: 1 1 0.50 b #-4
+# CHECK-NEXT: 1 1 0.50 b #134217724
+# CHECK-NEXT: 1 1 1.00 br x20
+# CHECK-NEXT: 2 1 1.00 blr xzr
+# CHECK-NEXT: 1 1 0.50 U ret x10
+# CHECK-NEXT: 1 1 0.50 U ret
+# CHECK-NEXT: 1 1 1.00 U eret
+# CHECK-NEXT: 1 1 1.00 U drps
+
+# CHECK: Resources:
+# CHECK-NEXT: [0.0] - Ampere1BUnitA
+# CHECK-NEXT: [0.1] - Ampere1BUnitA
+# CHECK-NEXT: [1.0] - Ampere1BUnitB
+# CHECK-NEXT: [1.1] - Ampere1BUnitB
+# CHECK-NEXT: [2] - Ampere1BUnitBS
+# CHECK-NEXT: [3.0] - Ampere1BUnitL
+# CHECK-NEXT: [3.1] - Ampere1BUnitL
+# CHECK-NEXT: [4.0] - Ampere1BUnitS
+# CHECK-NEXT: [4.1] - Ampere1BUnitS
+# CHECK-NEXT: [5] - Ampere1BUnitX
+# CHECK-NEXT: [6] - Ampere1BUnitY
+# CHECK-NEXT: [7] - Ampere1BUnitZ
+
+# CHECK: Resource pressure per iteration:
+# CHECK-NEXT: [0.0] [0.1] [1.0] [1.1] [2] [3.0] [3.1] [4.0] [4.1] [5] [6] [7]
+# CHECK-NEXT: 190.00 190.00 211.00 211.00 143.00 130.50 130.50 83.00 83.00 159.00 126.00 150.00
+
+# CHECK: Resource pressure by instruction:
+# CHECK-NEXT: [0.0] [0.1] [1.0] [1.1] [2] [3.0] [3.1] [4.0] [4.1] [5] [6] [7] Instructions:
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - - - - - - - - add w2, w3, #4095
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - - - - - - - - add w30, w29, #1, lsl #12
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - - - - - - - - add w13, w5, #4095, lsl #12
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - - - - - - - - add x5, x7, #1638
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - - - - - - - - add w20, wsp, #801
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - - - - - - - - add wsp, wsp, #1104
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - - - - - - - - add wsp, w30, #4084
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - - - - - - - - add x0, x24, #291
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - - - - - - - - add x3, x24, #4095, lsl #12
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - - - - - - - - add x8, sp, #1074
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - - - - - - - - add sp, x29, #3816
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - - - - - - - - sub w0, wsp, #4077
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - - - - - - - - sub w4, w20, #546, lsl #12
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - - - - - - - - sub sp, sp, #288
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - - - - - - - - sub wsp, w19, #16
+# CHECK-NEXT: 0.50 0.50 - - - - - - - - - - adds w13, w23, #291, lsl #12
+# CHECK-NEXT: 0.50 0.50 - - - - - - - - - - cmn w2, #4095
+# CHECK-NEXT: 0.50 0.50 - - - - - - - - - - adds w20, wsp, #0
+# CHECK-NEXT: 0.50 0.50 - - - - - - - - - - cmn x3, #1, lsl #12
+# CHECK-NEXT: 0.50 0.50 - - - - - - - - - - cmp sp, #20, lsl #12
+# CHECK-NEXT: 0.50 0.50 - - - - - - - - - - cmp x30, #4095
+# CHECK-NEXT: 0.50 0.50 - - - - - - - - - - subs x4, sp, #3822
+# CHECK-NEXT: 0.50 0.50 - - - - - - - - - - cmn w3, #291, lsl #12
+# CHECK-NEXT: 0.50 0.50 - - - - - - - - - - cmn wsp, #1365
+# CHECK-NEXT: 0.50 0.50 - - - - - - - - - - cmn sp, #1092, lsl #12
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - - - - - - - - mov sp, x30
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - - - - - - - - mov wsp, w20
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - - - - - - - - mov x11, sp
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - - - - - - - - mov w24, wsp
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - - - - - - - - add w3, w5, w7
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - - - - - - - - add wzr, w3, w5
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - - - - - - - - add w20, wzr, w4
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - - - - - - - - add w4, w6, wzr
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - - - - - - - - add w11, w13, w15
+# CHECK-NEXT: 0.25 0.25 0.75 0.75 - - - - - - - - add w9, w3, wzr, lsl #10
+# CHECK-NEXT: 0.25 0.25 0.75 0.75 - - - - - - - - add w17, w29, w20, lsl #31
+# CHECK-NEXT: 0.25 0.25 0.75 0.75 - - - - - - - - add w21, w22, w23, lsr #0
+# CHECK-NEXT: 0.25 0.25 0.75 0.75 - - - - - - - - add w24, w25, w26, lsr #18
+# CHECK-NEXT: 0.25 0.25 0.75 0.75 - - - - - - - - add w27, w28, w29, lsr #31
+# CHECK-NEXT: 0.25 0.25 0.75 0.75 - - - - - - - - add w2, w3, w4, asr #0
+# CHECK-NEXT: 0.25 0.25 0.75 0.75 - - - - - - - - add w5, w6, w7, asr #21
+# CHECK-NEXT: 0.25 0.25 0.75 0.75 - - - - - - - - add w8, w9, w10, asr #31
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - - - - - - - - add x3, x5, x7
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - - - - - - - - add xzr, x3, x5
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - - - - - - - - add x20, xzr, x4
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - - - - - - - - add x4, x6, xzr
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - - - - - - - - add x11, x13, x15
+# CHECK-NEXT: 0.25 0.25 0.75 0.75 - - - - - - - - add x9, x3, xzr, lsl #10
+# CHECK-NEXT: 0.25 0.25 0.75 0.75 - - - - - - - - add x17, x29, x20, lsl #63
+# CHECK-NEXT: 0.25 0.25 0.75 0.75 - - - - - - - - add x21, x22, x23, lsr #0
+# CHECK-NEXT: 0.25 0.25 0.75 0.75 - - - - - - - - add x24, x25, x26, lsr #18
+# CHECK-NEXT: 0.25 0.25 0.75 0.75 - - - - - - - - add x27, x28, x29, lsr #63
+# CHECK-NEXT: 0.25 0.25 0.75 0.75 - - - - - - - - add x2, x3, x4, asr #0
+# CHECK-NEXT: 0.25 0.25 0.75 0.75 - - - - - - - - add x5, x6, x7, asr #21
+# CHECK-NEXT: 0.25 0.25 0.75 0.75 - - - - - - - - add x8, x9, x10, asr #63
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - - - - - - - - adds w3, w5, w7
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - - - - - - - - cmn w3, w5
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - - - - - - - - adds w20, wzr, w4
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - - - - - - - - adds w4, w6, wzr
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - - - - - - - - adds w11, w13, w15
+# CHECK-NEXT: 0.25 0.25 0.75 0.75 - - - - - - - - adds w9, w3, wzr, lsl #10
+# CHECK-NEXT: 0.25 0.25 0.75 0.75 - - - - - - - - adds w17, w29, w20, lsl #31
+# CHECK-NEXT: 0.25 0.25 0.75 0.75 - - - - - - - - adds w21, w22, w23, lsr #0
+# CHECK-NEXT: 0.25 0.25 0.75 0.75 - - - - - - - - adds w24, w25, w26, lsr #18
+# CHECK-NEXT: 0.25 0.25 0.75 0.75 - - - - - - - - adds w27, w28, w29, lsr #31
+# CHECK-NEXT: 0.25 0.25 0.75 0.75 - - - - - - - - adds w2, w3, w4, asr #0
+# CHECK-NEXT: 0.25 0.25 0.75 0.75 - - - - - - - - adds w5, w6, w7, asr #21
+# CHECK-NEXT: 0.25 0.25 0.75 0.75 - - - - - - - - adds w8, w9, w10, asr #31
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - - - - - - - - adds x3, x5, x7
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - - - - - - - - cmn x3, x5
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - - - - - - - - adds x20, xzr, x4
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - - - - - - - - adds x4, x6, xzr
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - - - - - - - - adds x11, x13, x15
+# CHECK-NEXT: 0.25 0.25 0.75 0.75 - - - - - - - - adds x9, x3, xzr, lsl #10
+# CHECK-NEXT: 0.25 0.25 0.75 0.75 - - - - - - - - adds x17, x29, x20, lsl #63
+# CHECK-NEXT: 0.25 0.25 0.75 0.75 - - - - - - - - adds x21, x22, x23, lsr #0
+# CHECK-NEXT: 0.25 0.25 0.75 0.75 - - - - - - - - adds x24, x25, x26, lsr #18
+# CHECK-NEXT: 0.25 0.25 0.75 0.75 - - - - - - - - adds x27, x28, x29, lsr #63
+# CHECK-NEXT: 0.25 0.25 0.75 0.75 - - - - - - - - adds x2, x3, x4, asr #0
+# CHECK-NEXT: 0.25 0.25 0.75 0.75 - - - - - - - - adds x5, x6, x7, asr #21
+# CHECK-NEXT: 0.25 0.25 0.75 0.75 - - - - - - - - adds x8, x9, x10, asr #63
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - - - - - - - - sub w3, w5, w7
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - - - - - - - - sub wzr, w3, w5
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - - - - - - - - sub w4, w6, wzr
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - - - - - - - - sub w11, w13, w15
+# CHECK-NEXT: 0.25 0.25 0.75 0.75 - - - - - - - - sub w9, w3, wzr, lsl #10
+# CHECK-NEXT: 0.25 0.25 0.75 0.75 - - - - - - - - sub w17, w29, w20, lsl #31
+# CHECK-NEXT: 0.25 0.25 0.75 0.75 - - - - - - - - sub w21, w22, w23, lsr #0
+# CHECK-NEXT: 0.25 0.25 0.75 0.75 - - - - - - - - sub w24, w25, w26, lsr #18
+# CHECK-NEXT: 0.25 0.25 0.75 0.75 - - - - - - - - sub w27, w28, w29, lsr #31
+# CHECK-NEXT: 0.25 0.25 0.75 0.75 - - - - - - - - sub w2, w3, w4, asr #0
+# CHECK-NEXT: 0.25 0.25 0.75 0.75 - - - - - - - - sub w5, w6, w7, asr #21
+# CHECK-NEXT: 0.25 0.25 0.75 0.75 - - - - - - - - sub w8, w9, w10, asr #31
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - - - - - - - - sub x3, x5, x7
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - - - - - - - - sub xzr, x3, x5
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - - - - - - - - sub x4, x6, xzr
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - - - - - - - - sub x11, x13, x15
+# CHECK-NEXT: 0.25 0.25 0.75 0.75 - - - - - - - - sub x9, x3, xzr, lsl #10
+# CHECK-NEXT: 0.25 0.25 0.75 0.75 - - - - - - - - sub x17, x29, x20, lsl #63
+# CHECK-NEXT: 0.25 0.25 0.75 0.75 - - - - - - - - sub x21, x22, x23, lsr #0
+# CHECK-NEXT: 0.25 0.25 0.75 0.75 - - - - - - - - sub x24, x25, x26, lsr #18
+# CHECK-NEXT: 0.25 0.25 0.75 0.75 - - - - - - - - sub x27, x28, x29, lsr #63
+# CHECK-NEXT: 0.25 0.25 0.75 0.75 - - - - - - - - sub x2, x3, x4, asr #0
+# CHECK-NEXT: 0.25 0.25 0.75 0.75 - - - - - - - - sub x5, x6, x7, asr #21
+# CHECK-NEXT: 0.25 0.25 0.75 0.75 - - - - - - - - sub x8, x9, x10, asr #63
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - - - - - - - - subs w3, w5, w7
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - - - - - - - - cmp w3, w5
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - - - - - - - - subs w4, w6, wzr
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - - - - - - - - subs w11, w13, w15
+# CHECK-NEXT: 0.25 0.25 0.75 0.75 - - - - - - - - subs w9, w3, wzr, lsl #10
+# CHECK-NEXT: 0.25 0.25 0.75 0.75 - - - - - - - - subs w17, w29, w20, lsl #31
+# CHECK-NEXT: 0.25 0.25 0.75 0.75 - - - - - - - - subs w21, w22, w23, lsr #0
+# CHECK-NEXT: 0.25 0.25 0.75 0.75 - - - - - - - - subs w24, w25, w26, lsr #18
+# CHECK-NEXT: 0.25 0.25 0.75 0.75 - - - - - - - - subs w27, w28, w29, lsr #31
+# CHECK-NEXT: 0.25 0.25 0.75 0.75 - - - - - - - - subs w2, w3, w4, asr #0
+# CHECK-NEXT: 0.25 0.25 0.75 0.75 - - - - - - - - subs w5, w6, w7, asr #21
+# CHECK-NEXT: 0.25 0.25 0.75 0.75 - - - - - - - - subs w8, w9, w10, asr #31
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - - - - - - - - subs x3, x5, x7
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - - - - - - - - cmp x3, x5
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - - - - - - - - subs x4, x6, xzr
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - - - - - - - - subs x11, x13, x15
+# CHECK-NEXT: 0.25 0.25 0.75 0.75 - - - - - - - - subs x9, x3, xzr, lsl #10
+# CHECK-NEXT: 0.25 0.25 0.75 0.75 - - - - - - - - subs x17, x29, x20, lsl #63
+# CHECK-NEXT: 0.25 0.25 0.75 0.75 - - - - - - - - subs x21, x22, x23, lsr #0
+# CHECK-NEXT: 0.25 0.25 0.75 0.75 - - - - - - - - subs x24, x25, x26, lsr #18
+# CHECK-NEXT: 0.25 0.25 0.75 0.75 - - - - - - - - subs x27, x28, x29, lsr #63
+# CHECK-NEXT: 0.25 0.25 0.75 0.75 - - - - - - - - subs x2, x3, x4, asr #0
+# CHECK-NEXT: 0.25 0.25 0.75 0.75 - - - - - - - - subs x5, x6, x7, asr #21
+# CHECK-NEXT: 0.25 0.25 0.75 0.75 - - - - - - - - subs x8, x9, x10, asr #63
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - - - - - - - - cmn wzr, w4
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - - - - - - - - cmn w5, wzr
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - - - - - - - - cmn w6, w7
+# CHECK-NEXT: 0.25 0.25 0.75 0.75 - - - - - - - - cmn w8, w9, lsl #15
+# CHECK-NEXT: 0.25 0.25 0.75 0.75 - - - - - - - - cmn w10, w11, lsl #31
+# CHECK-NEXT: 0.25 0.25 0.75 0.75 - - - - - - - - cmn w12, w13, lsr #0
+# CHECK-NEXT: 0.25 0.25 0.75 0.75 - - - - - - - - cmn w14, w15, lsr #21
+# CHECK-NEXT: 0.25 0.25 0.75 0.75 - - - - - - - - cmn w16, w17, lsr #31
+# CHECK-NEXT: 0.25 0.25 0.75 0.75 - - - - - - - - cmn w18, w19, asr #0
+# CHECK-NEXT: 0.25 0.25 0.75 0.75 - - - - - - - - cmn w20, w21, asr #22
+# CHECK-NEXT: 0.25 0.25 0.75 0.75 - - - - - - - - cmn w22, w23, asr #31
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - - - - - - - - cmn x0, x3
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - - - - - - - - cmn xzr, x4
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - - - - - - - - cmn x5, xzr
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - - - - - - - - cmn x6, x7
+# CHECK-NEXT: 0.25 0.25 0.75 0.75 - - - - - - - - cmn x8, x9, lsl #15
+# CHECK-NEXT: 0.25 0.25 0.75 0.75 - - - - - - - - cmn x10, x11, lsl #63
+# CHECK-NEXT: 0.25 0.25 0.75 0.75 - - - - - - - - cmn x12, x13, lsr #0
+# CHECK-NEXT: 0.25 0.25 0.75 0.75 - - - - - - - - cmn x14, x15, lsr #41
+# CHECK-NEXT: 0.25 0.25 0.75 0.75 - - - - - - - - cmn x16, x17, lsr #63
+# CHECK-NEXT: 0.25 0.25 0.75 0.75 - - - - - - - - cmn x18, x19, asr #0
+# CHECK-NEXT: 0.25 0.25 0.75 0.75 - - - - - - - - cmn x20, x21, asr #55
+# CHECK-NEXT: 0.25 0.25 0.75 0.75 - - - - - - - - cmn x22, x23, asr #63
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - - - - - - - - cmp w0, w3
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - - - - - - - - cmp wzr, w4
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - - - - - - - - cmp w5, wzr
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - - - - - - - - cmp w6, w7
+# CHECK-NEXT: 0.25 0.25 0.75 0.75 - - - - - - - - cmp w8, w9, lsl #15
+# CHECK-NEXT: 0.25 0.25 0.75 0.75 - - - - - - - - cmp w10, w11, lsl #31
+# CHECK-NEXT: 0.25 0.25 0.75 0.75 - - - - - - - - cmp w12, w13, lsr #0
+# CHECK-NEXT: 0.25 0.25 0.75 0.75 - - - - - - - - cmp w14, w15, lsr #21
+# CHECK-NEXT: 0.25 0.25 0.75 0.75 - - - - - - - - cmp w18, w19, asr #0
+# CHECK-NEXT: 0.25 0.25 0.75 0.75 - - - - - - - - cmp w20, w21, asr #22
+# CHECK-NEXT: 0.25 0.25 0.75 0.75 - - - - - - - - cmp w22, w23, asr #31
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - - - - - - - - cmp x0, x3
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - - - - - - - - cmp xzr, x4
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - - - - - - - - cmp x5, xzr
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - - - - - - - - cmp x6, x7
+# CHECK-NEXT: 0.25 0.25 0.75 0.75 - - - - - - - - cmp x8, x9, lsl #15
+# CHECK-NEXT: 0.25 0.25 0.75 0.75 - - - - - - - - cmp x10, x11, lsl #63
+# CHECK-NEXT: 0.25 0.25 0.75 0.75 - - - - - - - - cmp x12, x13, lsr #0
+# CHECK-NEXT: 0.25 0.25 0.75 0.75 - - - - - - - - cmp x14, x15, lsr #41
+# CHECK-NEXT: 0.25 0.25 0.75 0.75 - - - - - - - - cmp x16, x17, lsr #63
+# CHECK-NEXT: 0.25 0.25 0.75 0.75 - - - - - - - - cmp x18, x19, asr #0
+# CHECK-NEXT: 0.25 0.25 0.75 0.75 - - - - - - - - cmp x20, x21, asr #55
+# CHECK-NEXT: 0.25 0.25 0.75 0.75 - - - - - - - - cmp x22, x23, asr #63
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - - - - - - - - cmp wzr, w0
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - - - - - - - - cmp xzr, x0
+# CHECK-NEXT: 0.50 0.50 - - - - - - - - - - adc w29, w27, w25
+# CHECK-NEXT: 0.50 0.50 - - - - - - - - - - adc wzr, w3, w4
+# CHECK-NEXT: 0.50 0.50 - - - - - - - - - - adc w9, wzr, w10
+# CHECK-NEXT: 0.50 0.50 - - - - - - - - - - adc w20, w0, wzr
+# CHECK-NEXT: 0.50 0.50 - - - - - - - - - - adc x29, x27, x25
+# CHECK-NEXT: 0.50 0.50 - - - - - - - - - - adc xzr, x3, x4
+# CHECK-NEXT: 0.50 0.50 - - - - - - - - - - adc x9, xzr, x10
+# CHECK-NEXT: 0.50 0.50 - - - - - - - - - - adc x20, x0, xzr
+# CHECK-NEXT: 0.50 0.50 - - - - - - - - - - adcs w29, w27, w25
+# CHECK-NEXT: 0.50 0.50 - - - - - - - - - - adcs wzr, w3, w4
+# CHECK-NEXT: 0.50 0.50 - - - - - - - - - - adcs w9, wzr, w10
+# CHECK-NEXT: 0.50 0.50 - - - - - - - - - - adcs w20, w0, wzr
+# CHECK-NEXT: 0.50 0.50 - - - - - - - - - - adcs x29, x27, x25
+# CHECK-NEXT: 0.50 0.50 - - - - - - - - - - adcs xzr, x3, x4
+# CHECK-NEXT: 0.50 0.50 - - - - - - - - - - adcs x9, xzr, x10
+# CHECK-NEXT: 0.50 0.50 - - - - - - - - - - adcs x20, x0, xzr
+# CHECK-NEXT: 0.50 0.50 - - - - - - - - - - sbc w29, w27, w25
+# CHECK-NEXT: 0.50 0.50 - - - - - - - - - - sbc wzr, w3, w4
+# CHECK-NEXT: 0.50 0.50 - - - - - - - - - - ngc w9, w10
+# CHECK-NEXT: 0.50 0.50 - - - - - - - - - - sbc w20, w0, wzr
+# CHECK-NEXT: 0.50 0.50 - - - - - - - - - - sbc x29, x27, x25
+# CHECK-NEXT: 0.50 0.50 - - - - - - - - - - sbc xzr, x3, x4
+# CHECK-NEXT: 0.50 0.50 - - - - - - - - - - ngc x9, x10
+# CHECK-NEXT: 0.50 0.50 - - - - - - - - - - sbc x20, x0, xzr
+# CHECK-NEXT: 0.50 0.50 - - - - - - - - - - sbcs w29, w27, w25
+# CHECK-NEXT: 0.50 0.50 - - - - - - - - - - sbcs wzr, w3, w4
+# CHECK-NEXT: 0.50 0.50 - - - - - - - - - - ngcs w9, w10
+# CHECK-NEXT: 0.50 0.50 - - - - - - - - - - sbcs w20, w0, wzr
+# CHECK-NEXT: 0.50 0.50 - - - - - - - - - - sbcs x29, x27, x25
+# CHECK-NEXT: 0.50 0.50 - - - - - - - - - - sbcs xzr, x3, x4
+# CHECK-NEXT: 0.50 0.50 - - - - - - - - - - ngcs x9, x10
+# CHECK-NEXT: 0.50 0.50 - - - - - - - - - - sbcs x20, x0, xzr
+# CHECK-NEXT: 0.50 0.50 - - - - - - - - - - ngc w3, w12
+# CHECK-NEXT: 0.50 0.50 - - - - - - - - - - ngc wzr, w9
+# CHECK-NEXT: 0.50 0.50 - - - - - - - - - - ngc w23, wzr
+# CHECK-NEXT: 0.50 0.50 - - - - - - - - - - ngc x29, x30
+# CHECK-NEXT: 0.50 0.50 - - - - - - - - - - ngc xzr, x0
+# CHECK-NEXT: 0.50 0.50 - - - - - - - - - - ngc x0, xzr
+# CHECK-NEXT: 0.50 0.50 - - - - - - - - - - ngcs w3, w12
+# CHECK-NEXT: 0.50 0.50 - - - - - - - - - - ngcs wzr, w9
+# CHECK-NEXT: 0.50 0.50 - - - - - - - - - - ngcs w23, wzr
+# CHECK-NEXT: 0.50 0.50 - - - - - - - - - - ngcs x29, x30
+# CHECK-NEXT: 0.50 0.50 - - - - - - - - - - ngcs xzr, x0
+# CHECK-NEXT: 0.50 0.50 - - - - - - - - - - ngcs x0, xzr
+# CHECK-NEXT: - - 0.50 0.50 - - - - - - - - sbfx x1, x2, #3, #2
+# CHECK-NEXT: - - 0.50 0.50 - - - - - - - - asr x3, x4, #63
+# CHECK-NEXT: - - 0.50 0.50 - - - - - - - - asr wzr, wzr, #31
+# CHECK-NEXT: - - 0.50 0.50 - - - - - - - - sbfx w12, w9, #0, #1
+# CHECK-NEXT: - - 0.50 0.50 - - - - - - - - ubfiz x4, x5, #52, #11
+# CHECK-NEXT: - - 0.50 0.50 - - - - - - - - ubfx xzr, x4, #0, #1
+# CHECK-NEXT: - - 0.50 0.50 - - - - - - - - ubfiz x4, xzr, #1, #6
+# CHECK-NEXT: - - 0.50 0.50 - - - - - - - - lsr x5, x6, #12
+# CHECK-NEXT: - - 0.50 0.50 - - - - - - - - bfi x4, x5, #52, #11
+# CHECK-NEXT: - - 0.50 0.50 - - - - - - - - bfxil xzr, x4, #0, #1
+# CHECK-NEXT: - - 0.50 0.50 - - - - - - - - bfc x4, #1, #6
+# CHECK-NEXT: - - 0.50 0.50 - - - - - - - - bfxil x5, x6, #12, #52
+# CHECK-NEXT: - - 0.50 0.50 - - - - - - - - sxtb w1, w2
+# CHECK-NEXT: - - 0.50 0.50 - - - - - - - - sxtb xzr, w3
+# CHECK-NEXT: - - 0.50 0.50 - - - - - - - - sxth w9, w10
+# CHECK-NEXT: - - 0.50 0.50 - - - - - - - - sxth x0, w1
+# CHECK-NEXT: - - 0.50 0.50 - - - - - - - - sxtw x3, w30
+# CHECK-NEXT: - - 0.50 0.50 - - - - - - - - uxtb w1, w2
+# CHECK-NEXT: - - 0.50 0.50 - - - - - - - - uxth w9, w10
+# CHECK-NEXT: - - 0.50 0.50 - - - - - - - - ubfx x3, x30, #0, #32
+# CHECK-NEXT: - - 0.50 0.50 - - - - - - - - asr w3, w2, #0
+# CHECK-NEXT: - - 0.50 0.50 - - - - - - - - asr w9, w10, #31
+# CHECK-NEXT: - - 0.50 0.50 - - - - - - - - asr x20, x21, #63
+# CHECK-NEXT: - - 0.50 0.50 - - - - - - - - asr w1, wzr, #3
+# CHECK-NEXT: - - 0.50 0.50 - - - - - - - - lsr w3, w2, #0
+# CHECK-NEXT: - - 0.50 0.50 - - - - - - - - lsr w9, w10, #31
+# CHECK-NEXT: - - 0.50 0.50 - - - - - - - - lsr x20, x21, #63
+# CHECK-NEXT: - - 0.50 0.50 - - - - - - - - lsr wzr, wzr, #3
+# CHECK-NEXT: - - 0.50 0.50 - - - - - - - - lsr w3, w2, #0
+# CHECK-NEXT: - - 0.50 0.50 - - - - - - - - lsl w9, w10, #31
+# CHECK-NEXT: - - 0.50 0.50 - - - - - - - - lsl x20, x21, #63
+# CHECK-NEXT: - - 0.50 0.50 - - - - - - - - lsl w1, wzr, #3
+# CHECK-NEXT: - - 0.50 0.50 - - - - - - - - sbfx w9, w10, #0, #1
+# CHECK-NEXT: - - 0.50 0.50 - - - - - - - - sbfiz x2, x3, #63, #1
+# CHECK-NEXT: - - 0.50 0.50 - - - - - - - - asr x19, x20, #0
+# CHECK-NEXT: - - 0.50 0.50 - - - - - - - - sbfiz x9, x10, #5, #59
+# CHECK-NEXT: - - 0.50 0.50 - - - - - - - - asr w9, w10, #0
+# CHECK-NEXT: - - 0.50 0.50 - - - - - - - - sbfiz w11, w12, #31, #1
+# CHECK-NEXT: - - 0.50 0.50 - - - - - - - - sbfiz w13, w14, #29, #3
+# CHECK-NEXT: - - 0.50 0.50 - - - - - - - - sbfiz xzr, xzr, #10, #11
+# CHECK-NEXT: - - 0.50 0.50 - - - - - - - - sbfx w9, w10, #0, #1
+# CHECK-NEXT: - - 0.50 0.50 - - - - - - - - asr x2, x3, #63
+# CHECK-NEXT: - - 0.50 0.50 - - - - - - - - asr x19, x20, #0
+# CHECK-NEXT: - - 0.50 0.50 - - - - - - - - asr x9, x10, #5
+# CHECK-NEXT: - - 0.50 0.50 - - - - - - - - asr w9, w10, #0
+# CHECK-NEXT: - - 0.50 0.50 - - - - - - - - asr w11, w12, #31
+# CHECK-NEXT: - - 0.50 0.50 - - - - - - - - asr w13, w14, #29
+# CHECK-NEXT: - - 0.50 0.50 - - - - - - - - sbfx xzr, xzr, #10, #11
+# CHECK-NEXT: - - 0.50 0.50 - - - - - - - - bfxil w9, w10, #0, #1
+# CHECK-NEXT: - - 0.50 0.50 - - - - - - - - bfi x2, x3, #63, #1
+# CHECK-NEXT: - - 0.50 0.50 - - - - - - - - bfxil x19, x20, #0, #64
+# CHECK-NEXT: - - 0.50 0.50 - - - - - - - - bfi x9, x10, #5, #59
+# CHECK-NEXT: - - 0.50 0.50 - - - - - - - - bfxil w9, w10, #0, #32
+# CHECK-NEXT: - - 0.50 0.50 - - - - - - - - bfi w11, w12, #31, #1
+# CHECK-NEXT: - - 0.50 0.50 - - - - - - - - bfi w13, w14, #29, #3
+# CHECK-NEXT: - - 0.50 0.50 - - - - - - - - bfc xzr, #10, #11
+# CHECK-NEXT: - - 0.50 0.50 - - - - - - - - bfxil w9, w10, #0, #1
+# CHECK-NEXT: - - 0.50 0.50 - - - - - - - - bfxil x2, x3, #63, #1
+# CHECK-NEXT: - - 0.50 0.50 - - - - - - - - bfxil x19, x20, #0, #64
+# CHECK-NEXT: - - 0.50 0.50 - - - - - - - - bfxil x9, x10, #5, #59
+# CHECK-NEXT: - - 0.50 0.50 - - - - - - - - bfxil w9, w10, #0, #32
+# CHECK-NEXT: - - 0.50 0.50 - - - - - - - - bfxil w11, w12, #31, #1
+# CHECK-NEXT: - - 0.50 0.50 - - - - - - - - bfxil w13, w14, #29, #3
+# CHECK-NEXT: - - 0.50 0.50 - - - - - - - - bfxil xzr, xzr, #10, #11
+# CHECK-NEXT: - - 0.50 0.50 - - - - - - - - ubfx w9, w10, #0, #1
+# CHECK-NEXT: - - 0.50 0.50 - - - - - - - - lsl x2, x3, #63
+# CHECK-NEXT: - - 0.50 0.50 - - - - - - - - lsr x19, x20, #0
+# CHECK-NEXT: - - 0.50 0.50 - - - - - - - - lsl x9, x10, #5
+# CHECK-NEXT: - - 0.50 0.50 - - - - - - - - lsr w9, w10, #0
+# CHECK-NEXT: - - 0.50 0.50 - - - - - - - - lsl w11, w12, #31
+# CHECK-NEXT: - - 0.50 0.50 - - - - - - - - lsl w13, w14, #29
+# CHECK-NEXT: - - 0.50 0.50 - - - - - - - - ubfiz xzr, xzr, #10, #11
+# CHECK-NEXT: - - 0.50 0.50 - - - - - - - - ubfx w9, w10, #0, #1
+# CHECK-NEXT: - - 0.50 0.50 - - - - - - - - lsr x2, x3, #63
+# CHECK-NEXT: - - 0.50 0.50 - - - - - - - - lsr x19, x20, #0
+# CHECK-NEXT: - - 0.50 0.50 - - - - - - - - lsr x9, x10, #5
+# CHECK-NEXT: - - 0.50 0.50 - - - - - - - - lsr w9, w10, #0
+# CHECK-NEXT: - - 0.50 0.50 - - - - - - - - lsr w11, w12, #31
+# CHECK-NEXT: - - 0.50 0.50 - - - - - - - - lsr w13, w14, #29
+# CHECK-NEXT: - - 0.50 0.50 - - - - - - - - ubfx xzr, xzr, #10, #11
+# CHECK-NEXT: 0.50 0.50 - - - - - - - - - - cbz w5, #4
+# CHECK-NEXT: 0.50 0.50 - - - - - - - - - - cbz x5, #0
+# CHECK-NEXT: 0.50 0.50 - - - - - - - - - - cbnz x2, #-4
+# CHECK-NEXT: 0.50 0.50 - - - - - - - - - - cbnz x26, #1048572
+# CHECK-NEXT: 0.50 0.50 - - - - - - - - - - cbz wzr, #0
+# CHECK-NEXT: 0.50 0.50 - - - - - - - - - - cbnz xzr, #0
+# CHECK-NEXT: 0.50 0.50 - - - - - - - - - - b.ne #4
+# CHECK-NEXT: 0.50 0.50 - - - - - - - - - - b.ge #1048572
+# CHECK-NEXT: 0.50 0.50 - - - - - - - - - - b.ge #-4
+# CHECK-NEXT: 0.50 0.50 - - - - - - - - - - ccmp w1, #31, #0, eq
+# CHECK-NEXT: 0.50 0.50 - - - - - - - - - - ccmp w3, #0, #15, hs
+# CHECK-NEXT: 0.50 0.50 - - - - - - - - - - ccmp wzr, #15, #13, hs
+# CHECK-NEXT: 0.50 0.50 - - - - - - - - - - ccmp x9, #31, #0, le
+# CHECK-NEXT: 0.50 0.50 - - - - - - - - - - ccmp x3, #0, #15, gt
+# CHECK-NEXT: 0.50 0.50 - - - - - - - - - - ccmp xzr, #5, #7, ne
+# CHECK-NEXT: 0.50 0.50 - - - - - - - - - - ccmn w1, #31, #0, eq
+# CHECK-NEXT: 0.50 0.50 - - - - - - - - - - ccmn w3, #0, #15, hs
+# CHECK-NEXT: 0.50 0.50 - - - - - - - - - - ccmn wzr, #15, #13, hs
+# CHECK-NEXT: 0.50 0.50 - - - - - - - - - - ccmn x9, #31, #0, le
+# CHECK-NEXT: 0.50 0.50 - - - - - - - - - - ccmn x3, #0, #15, gt
+# CHECK-NEXT: 0.50 0.50 - - - - - - - - - - ccmn xzr, #5, #7, ne
+# CHECK-NEXT: 0.50 0.50 - - - - - - - - - - ccmp w1, wzr, #0, eq
+# CHECK-NEXT: 0.50 0.50 - - - - - - - - - - ccmp w3, w0, #15, hs
+# CHECK-NEXT: 0.50 0.50 - - - - - - - - - - ccmp wzr, w15, #13, hs
+# CHECK-NEXT: 0.50 0.50 - - - - - - - - - - ccmp x9, xzr, #0, le
+# CHECK-NEXT: 0.50 0.50 - - - - - - - - - - ccmp x3, x0, #15, gt
+# CHECK-NEXT: 0.50 0.50 - - - - - - - - - - ccmp xzr, x5, #7, ne
+# CHECK-NEXT: 0.50 0.50 - - - - - - - - - - ccmn w1, wzr, #0, eq
+# CHECK-NEXT: 0.50 0.50 - - - - - - - - - - ccmn w3, w0, #15, hs
+# CHECK-NEXT: 0.50 0.50 - - - - - - - - - - ccmn wzr, w15, #13, hs
+# CHECK-NEXT: 0.50 0.50 - - - - - - - - - - ccmn x9, xzr, #0, le
+# CHECK-NEXT: 0.50 0.50 - - - - - - - - - - ccmn x3, x0, #15, gt
+# CHECK-NEXT: 0.50 0.50 - - - - - - - - - - ccmn xzr, x5, #7, ne
+# CHECK-NEXT: 0.50 0.50 - - - - - - - - - - csel w1, w0, w19, ne
+# CHECK-NEXT: 0.50 0.50 - - - - - - - - - - csel wzr, w5, w9, eq
+# CHECK-NEXT: 0.50 0.50 - - - - - - - - - - csel w9, wzr, w30, gt
+# CHECK-NEXT: 0.50 0.50 - - - - - - - - - - csel w1, w28, wzr, mi
+# CHECK-NEXT: 0.50 0.50 - - - - - - - - - - csel x19, x23, x29, lt
+# CHECK-NEXT: 0.50 0.50 - - - - - - - - - - csel xzr, x3, x4, ge
+# CHECK-NEXT: 0.50 0.50 - - - - - - - - - - csel x5, xzr, x6, hs
+# CHECK-NEXT: 0.50 0.50 - - - - - - - - - - csel x7, x8, xzr, lo
+# CHECK-NEXT: 0.50 0.50 - - - - - - - - - - csinc w1, w0, w19, ne
+# CHECK-NEXT: 0.50 0.50 - - - - - - - - - - csinc wzr, w5, w9, eq
+# CHECK-NEXT: 0.50 0.50 - - - - - - - - - - csinc w9, wzr, w30, gt
+# CHECK-NEXT: 0.50 0.50 - - - - - - - - - - csinc w1, w28, wzr, mi
+# CHECK-NEXT: 0.50 0.50 - - - - - - - - - - csinc x19, x23, x29, lt
+# CHECK-NEXT: 0.50 0.50 - - - - - - - - - - csinc xzr, x3, x4, ge
+# CHECK-NEXT: 0.50 0.50 - - - - - - - - - - csinc x5, xzr, x6, hs
+# CHECK-NEXT: 0.50 0.50 - - - - - - - - - - csinc x7, x8, xzr, lo
+# CHECK-NEXT: 0.50 0.50 - - - - - - - - - - csinv w1, w0, w19, ne
+# CHECK-NEXT: 0.50 0.50 - - - - - - - - - - csinv wzr, w5, w9, eq
+# CHECK-NEXT: 0.50 0.50 - - - - - - - - - - csinv w9, wzr, w30, gt
+# CHECK-NEXT: 0.50 0.50 - - - - - - - - - - csinv w1, w28, wzr, mi
+# CHECK-NEXT: 0.50 0.50 - - - - - - - - - - csinv x19, x23, x29, lt
+# CHECK-NEXT: 0.50 0.50 - - - - - - - - - - csinv xzr, x3, x4, ge
+# CHECK-NEXT: 0.50 0.50 - - - - - - - - - - csinv x5, xzr, x6, hs
+# CHECK-NEXT: 0.50 0.50 - - - - - - - - - - csinv x7, x8, xzr, lo
+# CHECK-NEXT: 0.50 0.50 - - - - - - - - - - csneg w1, w0, w19, ne
+# CHECK-NEXT: 0.50 0.50 - - - - - - - - - - csneg wzr, w5, w9, eq
+# CHECK-NEXT: 0.50 0.50 - - - - - - - - - - csneg w9, wzr, w30, gt
+# CHECK-NEXT: 0.50 0.50 - - - - - - - - - - csneg w1, w28, wzr, mi
+# CHECK-NEXT: 0.50 0.50 - - - - - - - - - - csneg x19, x23, x29, lt
+# CHECK-NEXT: 0.50 0.50 - - - - - - - - - - csneg xzr, x3, x4, ge
+# CHECK-NEXT: 0.50 0.50 - - - - - - - - - - csneg x5, xzr, x6, hs
+# CHECK-NEXT: 0.50 0.50 - - - - - - - - - - csneg x7, x8, xzr, lo
+# CHECK-NEXT: 0.50 0.50 - - - - - - - - - - cset w3, eq
+# CHECK-NEXT: 0.50 0.50 - - - - - - - - - - cset x9, pl
+# CHECK-NEXT: 0.50 0.50 - - - - - - - - - - csetm w20, ne
+# CHECK-NEXT: 0.50 0.50 - - - - - - - - - - csetm x30, ge
+# CHECK-NEXT: 0.50 0.50 - - - - - - - - - - csinc w2, wzr, wzr, al
+# CHECK-NEXT: 0.50 0.50 - - - - - - - - - - csinv x3, xzr, xzr, nv
+# CHECK-NEXT: 0.50 0.50 - - - - - - - - - - cinc w3, w5, gt
+# CHECK-NEXT: 0.50 0.50 - - - - - - - - - - cinc wzr, w4, le
+# CHECK-NEXT: 0.50 0.50 - - - - - - - - - - cset w9, lt
+# CHECK-NEXT: 0.50 0.50 - - - - - - - - - - cinc x3, x5, gt
+# CHECK-NEXT: 0.50 0.50 - - - - - - - - - - cinc xzr, x4, le
+# CHECK-NEXT: 0.50 0.50 - - - - - - - - - - cset x9, lt
+# CHECK-NEXT: 0.50 0.50 - - - - - - - - - - csinc w5, w6, w6, nv
+# CHECK-NEXT: 0.50 0.50 - - - - - - - - - - csinc x1, x2, x2, al
+# CHECK-NEXT: 0.50 0.50 - - - - - - - - - - cinv w3, w5, gt
+# CHECK-NEXT: 0.50 0.50 - - - - - - - - - - cinv wzr, w4, le
+# CHECK-NEXT: 0.50 0.50 - - - - - - - - - - csetm w9, lt
+# CHECK-NEXT: 0.50 0.50 - - - - - - - - - - cinv x3, x5, gt
+# CHECK-NEXT: 0.50 0.50 - - - - - - - - - - cinv xzr, x4, le
+# CHECK-NEXT: 0.50 0.50 - - - - - - - - - - csetm x9, lt
+# CHECK-NEXT: 0.50 0.50 - - - - - - - - - - csinv x1, x0, x0, al
+# CHECK-NEXT: 0.50 0.50 - - - - - - - - - - csinv w9, w8, w8, nv
+# CHECK-NEXT: 0.50 0.50 - - - - - - - - - - cneg w3, w5, gt
+# CHECK-NEXT: 0.50 0.50 - - - - - - - - - - cneg wzr, w4, le
+# CHECK-NEXT: 0.50 0.50 - - - - - - - - - - cneg w9, wzr, lt
+# CHECK-NEXT: 0.50 0.50 - - - - - - - - - - cneg x3, x5, gt
+# CHECK-NEXT: 0.50 0.50 - - - - - - - - - - cneg xzr, x4, le
+# CHECK-NEXT: 0.50 0.50 - - - - - - - - - - cneg x9, xzr, lt
+# CHECK-NEXT: 0.50 0.50 - - - - - - - - - - csneg x4, x8, x8, al
+# CHECK-NEXT: 0.50 0.50 - - - - - - - - - - csinv w9, w8, w8, nv
+# CHECK-NEXT: - - 0.50 0.50 - - - - - - - - rbit w0, w7
+# CHECK-NEXT: - - 0.50 0.50 - - - - - - - - rbit x18, x3
+# CHECK-NEXT: - - 0.50 0.50 - - - - - - - - rev16 w17, w1
+# CHECK-NEXT: - - 0.50 0.50 - - - - - - - - rev16 x5, x2
+# CHECK-NEXT: - - 0.50 0.50 - - - - - - - - rev w18, w0
+# CHECK-NEXT: - - 0.50 0.50 - - - - - - - - rev32 x20, x1
+# CHECK-NEXT: - - 0.50 0.50 - - - - - - - - rev x22, x2
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - - - - - - - - clz w24, w3
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - - - - - - - - clz x26, x4
+# CHECK-NEXT: - - 0.50 0.50 - - - - - - - - cls w3, w5
+# CHECK-NEXT: - - 0.50 0.50 - - - - - - - - cls x20, x5
+# CHECK-NEXT: - - - - 1.00 - - - - 1.00 - - udiv w0, w7, w10
+# CHECK-NEXT: - - - - 2.00 - - - - 1.00 - - udiv x9, x22, x4
+# CHECK-NEXT: - - - - 1.00 - - - - 1.00 - - sdiv w12, w21, w0
+# CHECK-NEXT: - - - - 2.00 - - - - 1.00 - - sdiv x13, x2, x1
+# CHECK-NEXT: - - 0.50 0.50 - - - - - - - - lsl w11, w12, w13
+# CHECK-NEXT: - - 0.50 0.50 - - - - - - - - lsl x14, x15, x16
+# CHECK-NEXT: - - 0.50 0.50 - - - - - - - - lsr w17, w18, w19
+# CHECK-NEXT: - - 0.50 0.50 - - - - - - - - lsr x20, x21, x22
+# CHECK-NEXT: - - 0.50 0.50 - - - - - - - - asr w23, w24, w25
+# CHECK-NEXT: - - 0.50 0.50 - - - - - - - - asr x26, x27, x28
+# CHECK-NEXT: - - 0.50 0.50 - - - - - - - - ror w0, w1, w2
+# CHECK-NEXT: - - 0.50 0.50 - - - - - - - - ror x3, x4, x5
+# CHECK-NEXT: - - 0.50 0.50 - - - - - - - - lsl w6, w7, w8
+# CHECK-NEXT: - - 0.50 0.50 - - - - - - - - lsl x9, x10, x11
+# CHECK-NEXT: - - 0.50 0.50 - - - - - - - - lsr w12, w13, w14
+# CHECK-NEXT: - - 0.50 0.50 - - - - - - - - lsr x15, x16, x17
+# CHECK-NEXT: - - 0.50 0.50 - - - - - - - - asr w18, w19, w20
+# CHECK-NEXT: - - 0.50 0.50 - - - - - - - - asr x21, x22, x23
+# CHECK-NEXT: - - 0.50 0.50 - - - - - - - - ror w24, w25, w26
+# CHECK-NEXT: - - 0.50 0.50 - - - - - - - - ror x27, x28, x29
+# CHECK-NEXT: - - - - 1.00 - - - - - - - smulh x30, x29, x28
+# CHECK-NEXT: - - - - 1.00 - - - - - - - smulh xzr, x27, x26
+# CHECK-NEXT: - - - - 1.00 - - - - - - - umulh x30, x29, x28
+# CHECK-NEXT: - - - - 1.00 - - - - - - - umulh x23, x30, xzr
+# CHECK-NEXT: - - - - 1.00 - - - - - - - madd w1, w3, w7, w4
+# CHECK-NEXT: - - - - 1.00 - - - - - - - madd wzr, w0, w9, w11
+# CHECK-NEXT: - - - - 1.00 - - - - - - - madd w13, wzr, w4, w4
+# CHECK-NEXT: - - - - 1.00 - - - - - - - madd w19, w30, wzr, w29
+# CHECK-NEXT: - - - - 1.00 - - - - - - - mul w4, w5, w6
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 1.00 - - - - - - - madd x1, x3, x7, x4
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 1.00 - - - - - - - madd xzr, x0, x9, x11
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 1.00 - - - - - - - madd x13, xzr, x4, x4
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 1.00 - - - - - - - madd x19, x30, xzr, x29
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 1.00 - - - - - - - mul x4, x5, x6
+# CHECK-NEXT: - - - - 1.00 - - - - - - - msub w1, w3, w7, w4
+# CHECK-NEXT: - - - - 1.00 - - - - - - - msub wzr, w0, w9, w11
+# CHECK-NEXT: - - - - 1.00 - - - - - - - msub w13, wzr, w4, w4
+# CHECK-NEXT: - - - - 1.00 - - - - - - - msub w19, w30, wzr, w29
+# CHECK-NEXT: - - - - 1.00 - - - - - - - mneg w4, w5, w6
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 1.00 - - - - - - - msub x1, x3, x7, x4
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 1.00 - - - - - - - msub xzr, x0, x9, x11
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 1.00 - - - - - - - msub x13, xzr, x4, x4
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 1.00 - - - - - - - msub x19, x30, xzr, x29
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 1.00 - - - - - - - mneg x4, x5, x6
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 1.00 - - - - - - - smaddl x3, w5, w2, x9
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 1.00 - - - - - - - smaddl xzr, w10, w11, x12
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 1.00 - - - - - - - smaddl x13, wzr, w14, x15
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 1.00 - - - - - - - smaddl x16, w17, wzr, x18
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 1.00 - - - - - - - smull x19, w20, w21
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 1.00 - - - - - - - smsubl x3, w5, w2, x9
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 1.00 - - - - - - - smsubl xzr, w10, w11, x12
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 1.00 - - - - - - - smsubl x13, wzr, w14, x15
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 1.00 - - - - - - - smsubl x16, w17, wzr, x18
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 1.00 - - - - - - - smnegl x19, w20, w21
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 1.00 - - - - - - - umaddl x3, w5, w2, x9
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 1.00 - - - - - - - umaddl xzr, w10, w11, x12
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 1.00 - - - - - - - umaddl x13, wzr, w14, x15
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 1.00 - - - - - - - umaddl x16, w17, wzr, x18
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 1.00 - - - - - - - umull x19, w20, w21
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 1.00 - - - - - - - umsubl x3, w5, w2, x9
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 1.00 - - - - - - - umsubl x16, w17, wzr, x18
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 1.00 - - - - - - - umnegl x19, w20, w21
+# CHECK-NEXT: - - - - 1.00 - - - - - - - smulh x30, x29, x28
+# CHECK-NEXT: - - - - 1.00 - - - - - - - smulh x23, x22, xzr
+# CHECK-NEXT: - - - - 1.00 - - - - - - - umulh x23, x22, xzr
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 1.00 - - - - - - - mul x19, x20, xzr
+# CHECK-NEXT: - - - - 1.00 - - - - - - - mneg w21, w22, w23
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 1.00 - - - - - - - smull x11, w13, w17
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 1.00 - - - - - - - umull x11, w13, w17
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 1.00 - - - - - - - smnegl x11, w13, w17
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 1.00 - - - - - - - umnegl x11, w13, w17
+# CHECK-NEXT: - - 0.50 0.50 - - - - - - - - extr w3, w5, w7, #0
+# CHECK-NEXT: - - 0.50 0.50 - - - - - - - - extr w11, w13, w17, #31
+# CHECK-NEXT: - - 0.50 0.50 - - - - - - - - extr x3, x5, x7, #15
+# CHECK-NEXT: - - 0.50 0.50 - - - - - - - - extr x11, x13, x17, #63
+# CHECK-NEXT: - - 0.50 0.50 - - - - - - - - ror x19, x23, #24
+# CHECK-NEXT: - - 0.50 0.50 - - - - - - - - ror x29, xzr, #63
+# CHECK-NEXT: - - 0.50 0.50 - - - - - - - - ror w9, w13, #31
+# CHECK-NEXT: - - - - - - - - - 1.00 - - fcmp s3, s5
+# CHECK-NEXT: - - - - - - - - - 1.00 - - fcmp s31, #0.0
+# CHECK-NEXT: - - - - - - - - - 1.00 - - fcmp s31, #0.0
+# CHECK-NEXT: - - - - - - - - - 1.00 - - fcmpe s29, s30
+# CHECK-NEXT: - - - - - - - - - 1.00 - - fcmpe s15, #0.0
+# CHECK-NEXT: - - - - - - - - - 1.00 - - fcmpe s15, #0.0
+# CHECK-NEXT: - - - - - - - - - 1.00 - - fcmp d4, d12
+# CHECK-NEXT: - - - - - - - - - 1.00 - - fcmp d23, #0.0
+# CHECK-NEXT: - - - - - - - - - 1.00 - - fcmp d23, #0.0
+# CHECK-NEXT: - - - - - - - - - 1.00 - - fcmpe d26, d22
+# CHECK-NEXT: - - - - - - - - - 1.00 - - fcmpe d29, #0.0
+# CHECK-NEXT: - - - - - - - - - 1.00 - - fcmpe d29, #0.0
+# CHECK-NEXT: 0.50 0.50 - - 1.00 - - - - 1.00 - - fccmp s1, s31, #0, eq
+# CHECK-NEXT: 0.50 0.50 - - 1.00 - - - - 1.00 - - fccmp s3, s0, #15, hs
+# CHECK-NEXT: 0.50 0.50 - - 1.00 - - - - 1.00 - - fccmp s31, s15, #13, hs
+# CHECK-NEXT: 0.50 0.50 - - 1.00 - - - - 1.00 - - fccmp d9, d31, #0, le
+# CHECK-NEXT: 0.50 0.50 - - 1.00 - - - - 1.00 - - fccmp d3, d0, #15, gt
+# CHECK-NEXT: 0.50 0.50 - - 1.00 - - - - 1.00 - - fccmp d31, d5, #7, ne
+# CHECK-NEXT: 0.50 0.50 - - 1.00 - - - - 1.00 - - fccmpe s1, s31, #0, eq
+# CHECK-NEXT: 0.50 0.50 - - 1.00 - - - - 1.00 - - fccmpe s3, s0, #15, hs
+# CHECK-NEXT: 0.50 0.50 - - 1.00 - - - - 1.00 - - fccmpe s31, s15, #13, hs
+# CHECK-NEXT: 0.50 0.50 - - 1.00 - - - - 1.00 - - fccmpe d9, d31, #0, le
+# CHECK-NEXT: 0.50 0.50 - - 1.00 - - - - 1.00 - - fccmpe d3, d0, #15, gt
+# CHECK-NEXT: 0.50 0.50 - - 1.00 - - - - 1.00 - - fccmpe d31, d5, #7, ne
+# CHECK-NEXT: 0.50 0.50 - - 1.00 - - - - 0.50 0.50 - fcsel s3, s20, s9, pl
+# CHECK-NEXT: 0.50 0.50 - - 1.00 - - - - 0.50 0.50 - fcsel d9, d10, d11, mi
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - fmov s0, s1
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - fabs s2, s3
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - fneg s4, s5
+# CHECK-NEXT: - - - - - - - - - 1.00 - - fsqrt s6, s7
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - fcvt d8, s9
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - fcvt h10, s11
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - frintn s12, s13
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - frintp s14, s15
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - frintm s16, s17
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - frintz s18, s19
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - frinta s20, s21
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - frintx s22, s23
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - frinti s24, s25
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - fmov d0, d1
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - fabs d2, d3
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - fneg d4, d5
+# CHECK-NEXT: - - - - - - - - - 1.00 - - fsqrt d6, d7
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - fcvt s8, d9
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - fcvt h10, d11
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - frintn d12, d13
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - frintp d14, d15
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - frintm d16, d17
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - frintz d18, d19
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - frinta d20, d21
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - frintx d22, d23
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - frinti d24, d25
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - fcvt s26, h27
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - fcvt d28, h29
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - fmul s20, s19, s17
+# CHECK-NEXT: - - - - - - - - - 1.00 - - fdiv s1, s2, s3
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - fadd s4, s5, s6
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - fsub s7, s8, s9
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - fmax s10, s11, s12
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - fmin s13, s14, s15
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - fmaxnm s16, s17, s18
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - fminnm s19, s20, s21
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - fnmul s22, s23, s2
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - fmul d20, d19, d17
+# CHECK-NEXT: - - - - - - - - - 1.00 - - fdiv d1, d2, d3
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - fadd d4, d5, d6
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - fsub d7, d8, d9
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - fmax d10, d11, d12
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - fmin d13, d14, d15
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - fmaxnm d16, d17, d18
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - fminnm d19, d20, d21
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - fnmul d22, d23, d24
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - fmadd s3, s5, s6, s31
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - fmadd d3, d13, d0, d23
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - fmsub s3, s5, s6, s31
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - fmsub d3, d13, d0, d23
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - fnmadd s3, s5, s6, s31
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - fnmadd d3, d13, d0, d23
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - fnmsub s3, s5, s6, s31
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - fnmsub d3, d13, d0, d23
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 1.00 fcvtzs w3, h5, #1
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 1.00 fcvtzs wzr, h20, #13
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 1.00 fcvtzs w19, h0, #32
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 1.00 fcvtzs x3, h5, #1
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 1.00 fcvtzs x12, h30, #45
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 1.00 fcvtzs x19, h0, #64
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 1.00 fcvtzs w3, s5, #1
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 1.00 fcvtzs wzr, s20, #13
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 1.00 fcvtzs w19, s0, #32
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 1.00 fcvtzs x3, s5, #1
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 1.00 fcvtzs x12, s30, #45
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 1.00 fcvtzs x19, s0, #64
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 1.00 fcvtzs w3, d5, #1
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 1.00 fcvtzs wzr, d20, #13
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 1.00 fcvtzs w19, d0, #32
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 1.00 fcvtzs x3, d5, #1
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 1.00 fcvtzs x12, d30, #45
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 1.00 fcvtzs x19, d0, #64
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 1.00 fcvtzu w3, h5, #1
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 1.00 fcvtzu wzr, h20, #13
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 1.00 fcvtzu w19, h0, #32
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 1.00 fcvtzu x3, h5, #1
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 1.00 fcvtzu x12, h30, #45
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 1.00 fcvtzu x19, h0, #64
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 1.00 fcvtzu w3, s5, #1
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 1.00 fcvtzu wzr, s20, #13
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 1.00 fcvtzu w19, s0, #32
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 1.00 fcvtzu x3, s5, #1
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 1.00 fcvtzu x12, s30, #45
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 1.00 fcvtzu x19, s0, #64
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 1.00 fcvtzu w3, d5, #1
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 1.00 fcvtzu wzr, d20, #13
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 1.00 fcvtzu w19, d0, #32
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 1.00 fcvtzu x3, d5, #1
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 1.00 fcvtzu x12, d30, #45
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 1.00 fcvtzu x19, d0, #64
+# CHECK-NEXT: - - - - 1.00 - - - - 1.00 1.00 - scvtf h23, w19, #1
+# CHECK-NEXT: - - - - 1.00 - - - - 1.00 1.00 - scvtf h31, wzr, #20
+# CHECK-NEXT: - - - - 1.00 - - - - 1.00 1.00 - scvtf h14, w0, #32
+# CHECK-NEXT: - - - - 1.00 - - - - 1.00 1.00 - scvtf h23, x19, #1
+# CHECK-NEXT: - - - - 1.00 - - - - 1.00 1.00 - scvtf h31, xzr, #20
+# CHECK-NEXT: - - - - 1.00 - - - - 1.00 1.00 - scvtf h14, x0, #64
+# CHECK-NEXT: - - - - 1.00 - - - - 1.00 1.00 - scvtf s23, w19, #1
+# CHECK-NEXT: - - - - 1.00 - - - - 1.00 1.00 - scvtf s31, wzr, #20
+# CHECK-NEXT: - - - - 1.00 - - - - 1.00 1.00 - scvtf s14, w0, #32
+# CHECK-NEXT: - - - - 1.00 - - - - 1.00 1.00 - scvtf s23, x19, #1
+# CHECK-NEXT: - - - - 1.00 - - - - 1.00 1.00 - scvtf s31, xzr, #20
+# CHECK-NEXT: - - - - 1.00 - - - - 1.00 1.00 - scvtf s14, x0, #64
+# CHECK-NEXT: - - - - 1.00 - - - - 1.00 1.00 - scvtf d23, w19, #1
+# CHECK-NEXT: - - - - 1.00 - - - - 1.00 1.00 - scvtf d31, wzr, #20
+# CHECK-NEXT: - - - - 1.00 - - - - 1.00 1.00 - scvtf d14, w0, #32
+# CHECK-NEXT: - - - - 1.00 - - - - 1.00 1.00 - scvtf d23, x19, #1
+# CHECK-NEXT: - - - - 1.00 - - - - 1.00 1.00 - scvtf d31, xzr, #20
+# CHECK-NEXT: - - - - 1.00 - - - - 1.00 1.00 - scvtf d14, x0, #64
+# CHECK-NEXT: - - - - 1.00 - - - - 1.00 1.00 - ucvtf h23, w19, #1
+# CHECK-NEXT: - - - - 1.00 - - - - 1.00 1.00 - ucvtf h31, wzr, #20
+# CHECK-NEXT: - - - - 1.00 - - - - 1.00 1.00 - ucvtf h14, w0, #32
+# CHECK-NEXT: - - - - 1.00 - - - - 1.00 1.00 - ucvtf h23, x19, #1
+# CHECK-NEXT: - - - - 1.00 - - - - 1.00 1.00 - ucvtf h31, xzr, #20
+# CHECK-NEXT: - - - - 1.00 - - - - 1.00 1.00 - ucvtf h14, x0, #64
+# CHECK-NEXT: - - - - 1.00 - - - - 1.00 1.00 - ucvtf s23, w19, #1
+# CHECK-NEXT: - - - - 1.00 - - - - 1.00 1.00 - ucvtf s31, wzr, #20
+# CHECK-NEXT: - - - - 1.00 - - - - 1.00 1.00 - ucvtf s14, w0, #32
+# CHECK-NEXT: - - - - 1.00 - - - - 1.00 1.00 - ucvtf s23, x19, #1
+# CHECK-NEXT: - - - - 1.00 - - - - 1.00 1.00 - ucvtf s31, xzr, #20
+# CHECK-NEXT: - - - - 1.00 - - - - 1.00 1.00 - ucvtf s14, x0, #64
+# CHECK-NEXT: - - - - 1.00 - - - - 1.00 1.00 - ucvtf d23, w19, #1
+# CHECK-NEXT: - - - - 1.00 - - - - 1.00 1.00 - ucvtf d31, wzr, #20
+# CHECK-NEXT: - - - - 1.00 - - - - 1.00 1.00 - ucvtf d14, w0, #32
+# CHECK-NEXT: - - - - 1.00 - - - - 1.00 1.00 - ucvtf d23, x19, #1
+# CHECK-NEXT: - - - - 1.00 - - - - 1.00 1.00 - ucvtf d31, xzr, #20
+# CHECK-NEXT: - - - - 1.00 - - - - 1.00 1.00 - ucvtf d14, x0, #64
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 1.00 fcvtns w3, h31
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 1.00 fcvtns xzr, h12
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 1.00 fcvtnu wzr, h12
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 1.00 fcvtnu x0, h0
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 1.00 fcvtps wzr, h9
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 1.00 fcvtps x12, h20
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 1.00 fcvtpu w30, h23
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 1.00 fcvtpu x29, h3
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 1.00 fcvtms w2, h3
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 1.00 fcvtms x4, h5
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 1.00 fcvtmu w6, h7
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 1.00 fcvtmu x8, h9
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 1.00 fcvtzs w10, h11
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 1.00 fcvtzs x12, h13
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 1.00 fcvtzu w14, h15
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 1.00 fcvtzu x15, h16
+# CHECK-NEXT: - - - - 1.00 - - - - 1.00 1.00 - scvtf h17, w18
+# CHECK-NEXT: - - - - 1.00 - - - - 1.00 1.00 - scvtf h19, x20
+# CHECK-NEXT: - - - - 1.00 - - - - 1.00 1.00 - ucvtf h21, w22
+# CHECK-NEXT: - - - - 1.00 - - - - 1.00 1.00 - scvtf h23, x24
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 1.00 fcvtas w25, h26
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 1.00 fcvtas x27, h28
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 1.00 fcvtau w29, h30
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 1.00 fcvtau xzr, h0
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 1.00 fcvtns w3, s31
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 1.00 fcvtns xzr, s12
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 1.00 fcvtnu wzr, s12
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 1.00 fcvtnu x0, s0
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 1.00 fcvtps wzr, s9
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 1.00 fcvtps x12, s20
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 1.00 fcvtpu w30, s23
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 1.00 fcvtpu x29, s3
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 1.00 fcvtms w2, s3
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 1.00 fcvtms x4, s5
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 1.00 fcvtmu w6, s7
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 1.00 fcvtmu x8, s9
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 1.00 fcvtzs w10, s11
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 1.00 fcvtzs x12, s13
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 1.00 fcvtzu w14, s15
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 1.00 fcvtzu x15, s16
+# CHECK-NEXT: - - - - 1.00 - - - - 1.00 1.00 - scvtf s17, w18
+# CHECK-NEXT: - - - - 1.00 - - - - 1.00 1.00 - scvtf s19, x20
+# CHECK-NEXT: - - - - 1.00 - - - - 1.00 1.00 - ucvtf s21, w22
+# CHECK-NEXT: - - - - 1.00 - - - - 1.00 1.00 - scvtf s23, x24
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 1.00 fcvtas w25, s26
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 1.00 fcvtas x27, s28
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 1.00 fcvtau w29, s30
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 1.00 fcvtau xzr, s0
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 1.00 fcvtns w3, d31
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 1.00 fcvtns xzr, d12
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 1.00 fcvtnu wzr, d12
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 1.00 fcvtnu x0, d0
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 1.00 fcvtps wzr, d9
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 1.00 fcvtps x12, d20
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 1.00 fcvtpu w30, d23
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 1.00 fcvtpu x29, d3
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 1.00 fcvtms w2, d3
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 1.00 fcvtms x4, d5
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 1.00 fcvtmu w6, d7
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 1.00 fcvtmu x8, d9
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 1.00 fcvtzs w10, d11
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 1.00 fcvtzs x12, d13
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 1.00 fcvtzu w14, d15
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 1.00 fcvtzu x15, d16
+# CHECK-NEXT: - - - - 1.00 - - - - 1.00 1.00 - scvtf d17, w18
+# CHECK-NEXT: - - - - 1.00 - - - - 1.00 1.00 - scvtf d19, x20
+# CHECK-NEXT: - - - - 1.00 - - - - 1.00 1.00 - ucvtf d21, w22
+# CHECK-NEXT: - - - - 1.00 - - - - 1.00 1.00 - ucvtf d23, x24
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 1.00 fcvtas w25, d26
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 1.00 fcvtas x27, d28
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 1.00 fcvtau w29, d30
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 1.00 fcvtau xzr, d0
+# CHECK-NEXT: - - - - 1.00 - - - - - - - fmov w3, s9
+# CHECK-NEXT: - - - - - - - - - - - 1.00 fmov s9, w3
+# CHECK-NEXT: - - - - 1.00 - - - - - - - fmov x20, d31
+# CHECK-NEXT: - - - - - - - - - - - 1.00 fmov d1, x15
+# CHECK-NEXT: - - - - 1.00 - - - - 0.50 0.50 - fmov x3, v12.d[1]
+# CHECK-NEXT: - - - - - - - - - 1.00 - - fmov v1.d[1], x19
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - fmov s2, #0.12500000
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - fmov s3, #1.00000000
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - fmov d30, #16.00000000
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - fmov s4, #1.06250000
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - fmov d10, #1.93750000
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - fmov s12, #-1.00000000
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - fmov d16, #8.50000000
+# CHECK-NEXT: - - - - - 0.50 0.50 - - - - - ldr w3, #0
+# CHECK-NEXT: - - - - - 0.50 0.50 - - - - - ldr x29, #4
+# CHECK-NEXT: - - - - - 0.50 0.50 - - - - - ldrsw xzr, #-4
+# CHECK-NEXT: - - - - - 0.50 0.50 - - - - - ldr s0, #8
+# CHECK-NEXT: - - - - - 0.50 0.50 - - - - - ldr d0, #1048572
+# CHECK-NEXT: - - - - - 0.50 0.50 - - - - - ldr q0, #-1048576
+# CHECK-NEXT: - - - - - 0.50 0.50 - - - - - prfm pldl1strm, #0
+# CHECK-NEXT: - - - - - 0.50 0.50 - - - - - prfm #22, #0
+# CHECK-NEXT: - - - - - 0.50 0.50 0.50 0.50 - - - stxrb w18, w8, [sp]
+# CHECK-NEXT: - - - - - 0.50 0.50 0.50 0.50 - - - stxrh w24, w15, [x16]
+# CHECK-NEXT: - - - - - 0.50 0.50 0.50 0.50 - - - stxr w5, w6, [x17]
+# CHECK-NEXT: - - - - - 0.50 0.50 0.50 0.50 - - - stxr w1, x10, [x21]
+# CHECK-NEXT: - - - - - 0.50 0.50 - - - - - ldxrb w30, [x0]
+# CHECK-NEXT: - - - - - 0.50 0.50 - - - - - ldxrh w17, [x4]
+# CHECK-NEXT: - - - - - 0.50 0.50 - - - - - ldxr w22, [sp]
+# CHECK-NEXT: - - - - - 0.50 0.50 - - - - - ldxr x11, [x29]
+# CHECK-NEXT: - - - - - 0.50 0.50 - - - - - ldxr x11, [x29]
+# CHECK-NEXT: - - - - - 0.50 0.50 - - - - - ldxr x11, [x29]
+# CHECK-NEXT: - - - - - 0.50 0.50 0.50 0.50 - - - stxp w12, w11, w10, [sp]
+# CHECK-NEXT: - - - - - 0.50 0.50 0.50 0.50 - - - stxp wzr, x27, x9, [x12]
+# CHECK-NEXT: - - - - - 0.50 0.50 - - - - - ldxp w0, wzr, [sp]
+# CHECK-NEXT: - - - - - 0.50 0.50 - - - - - ldxp x17, x0, [x18]
+# CHECK-NEXT: - - - - - 0.50 0.50 - - - - - ldxp x17, x0, [x18]
+# CHECK-NEXT: - - - - - 0.50 0.50 0.50 0.50 - - - stlxrb w12, w22, [x0]
+# CHECK-NEXT: - - - - - 0.50 0.50 0.50 0.50 - - - stlxrh w10, w1, [x1]
+# CHECK-NEXT: - - - - - 0.50 0.50 0.50 0.50 - - - stlxr w9, w2, [x2]
+# CHECK-NEXT: - - - - - 0.50 0.50 0.50 0.50 - - - stlxr w9, x3, [sp]
+# CHECK-NEXT: - - - - - 0.50 0.50 - - - - - ldaxrb w8, [x4]
+# CHECK-NEXT: - - - - - 0.50 0.50 - - - - - ldaxrh w7, [x5]
+# CHECK-NEXT: - - - - - 0.50 0.50 - - - - - ldaxr w6, [sp]
+# CHECK-NEXT: - - - - - 0.50 0.50 - - - - - ldaxr x5, [x6]
+# CHECK-NEXT: - - - - - 0.50 0.50 - - - - - ldaxr x5, [x6]
+# CHECK-NEXT: - - - - - 0.50 0.50 - - - - - ldaxr x5, [x6]
+# CHECK-NEXT: - - - - - 0.50 0.50 0.50 0.50 - - - stlxp w4, w5, w6, [sp]
+# CHECK-NEXT: - - - - - 0.50 0.50 0.50 0.50 - - - stlxp wzr, x6, x7, [x1]
+# CHECK-NEXT: - - - - - 0.50 0.50 - - - - - ldaxp w5, w18, [sp]
+# CHECK-NEXT: - - - - - 0.50 0.50 - - - - - ldaxp x6, x19, [x22]
+# CHECK-NEXT: - - - - - 0.50 0.50 - - - - - ldaxp x6, x19, [x22]
+# CHECK-NEXT: - - - - - - - 0.50 0.50 - - - stlrb w24, [sp]
+# CHECK-NEXT: - - - - - - - 0.50 0.50 - - - stlrh w25, [x30]
+# CHECK-NEXT: - - - - - - - 0.50 0.50 - - - stlr w26, [x29]
+# CHECK-NEXT: - - - - - - - 0.50 0.50 - - - stlr x27, [x28]
+# CHECK-NEXT: - - - - - - - 0.50 0.50 - - - stlr x27, [x28]
+# CHECK-NEXT: - - - - - - - 0.50 0.50 - - - stlr x27, [x28]
+# CHECK-NEXT: - - - - - 0.50 0.50 - - - - - ldarb w23, [sp]
+# CHECK-NEXT: - - - - - 0.50 0.50 - - - - - ldarh w22, [x30]
+# CHECK-NEXT: - - - - - 0.50 0.50 - - - - - ldar wzr, [x29]
+# CHECK-NEXT: - - - - - 0.50 0.50 - - - - - ldar x21, [x28]
+# CHECK-NEXT: - - - - - 0.50 0.50 - - - - - ldar x21, [x28]
+# CHECK-NEXT: - - - - - 0.50 0.50 - - - - - ldar x21, [x28]
+# CHECK-NEXT: - - - - - - - 0.50 0.50 - - - sturb w9, [sp]
+# CHECK-NEXT: - - - - - - - 0.50 0.50 - - - sturh wzr, [x12, #255]
+# CHECK-NEXT: - - - - - - - 0.50 0.50 - - - stur w16, [x0, #-256]
+# CHECK-NEXT: - - - - - - - 0.50 0.50 - - - stur x28, [x14, #1]
+# CHECK-NEXT: - - - - - 0.50 0.50 - - - - - ldurb w1, [x20, #255]
+# CHECK-NEXT: - - - - - 0.50 0.50 - - - - - ldurh w20, [x1, #255]
+# CHECK-NEXT: - - - - - 0.50 0.50 - - - - - ldur w12, [sp, #255]
+# CHECK-NEXT: - - - - - 0.50 0.50 - - - - - ldur xzr, [x12, #255]
+# CHECK-NEXT: - - - - - 0.50 0.50 - - - - - ldursb x9, [x7, #-256]
+# CHECK-NEXT: - - - - - 0.50 0.50 - - - - - ldursh x17, [x19, #-256]
+# CHECK-NEXT: - - - - - 0.50 0.50 - - - - - ldursw x20, [x15, #-256]
+# CHECK-NEXT: - - - - - 0.50 0.50 - - - - - prfum pldl2keep, [sp, #-256]
+# CHECK-NEXT: - - - - - 0.50 0.50 - - - - - ldursb w19, [x1, #-256]
+# CHECK-NEXT: - - - - - 0.50 0.50 - - - - - ldursh w15, [x21, #-256]
+# CHECK-NEXT: - - - - - - - 0.50 0.50 - - 1.00 stur b0, [sp, #1]
+# CHECK-NEXT: - - - - - - - 0.50 0.50 - - 1.00 stur h12, [x12, #-1]
+# CHECK-NEXT: - - - - - - - 0.50 0.50 - - 1.00 stur s15, [x0, #255]
+# CHECK-NEXT: - - - - - - - 0.50 0.50 - - 1.00 stur d31, [x5, #25]
+# CHECK-NEXT: - - - - - - - 0.50 0.50 - - 1.00 stur q9, [x5]
+# CHECK-NEXT: - - - - - 0.50 0.50 - - - - - ldur b3, [sp]
+# CHECK-NEXT: - - - - - 0.50 0.50 - - - - - ldur h5, [x4, #-256]
+# CHECK-NEXT: - - - - - 0.50 0.50 - - - - - ldur s7, [x12, #-1]
+# CHECK-NEXT: - - - - - 0.50 0.50 - - - - - ldur d11, [x19, #4]
+# CHECK-NEXT: - - - - - 0.50 0.50 - - - - - ldur q13, [x1, #2]
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - - - 0.50 0.50 - - - strb w9, [x2], #255
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - - - 0.50 0.50 - - - strb w10, [x3], #1
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - - - 0.50 0.50 - - - strb w10, [x3], #-256
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - - - 0.50 0.50 - - - strh w9, [x2], #255
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - - - 0.50 0.50 - - - strh w9, [x2], #1
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - - - 0.50 0.50 - - - strh w10, [x3], #-256
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - - - 0.50 0.50 - - - str w19, [sp], #255
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - - - 0.50 0.50 - - - str w20, [x30], #1
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - - - 0.50 0.50 - - - str w21, [x12], #-256
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - - - 0.50 0.50 - - - str xzr, [x9], #255
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - - - 0.50 0.50 - - - str x2, [x3], #1
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - - - 0.50 0.50 - - - str x19, [x12], #-256
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - 0.50 0.50 - - - - - ldrb w9, [x2], #255
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - 0.50 0.50 - - - - - ldrb w10, [x3], #1
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - 0.50 0.50 - - - - - ldrb w10, [x3], #-256
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - 0.50 0.50 - - - - - ldrh w9, [x2], #255
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - 0.50 0.50 - - - - - ldrh w9, [x2], #1
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - 0.50 0.50 - - - - - ldrh w10, [x3], #-256
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - 0.50 0.50 - - - - - ldr w19, [sp], #255
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - 0.50 0.50 - - - - - ldr w20, [x30], #1
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - 0.50 0.50 - - - - - ldr w21, [x12], #-256
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - 0.50 0.50 - - - - - ldr xzr, [x9], #255
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - 0.50 0.50 - - - - - ldr x2, [x3], #1
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - 0.50 0.50 - - - - - ldr x19, [x12], #-256
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - 0.50 0.50 - - - - - ldrsb xzr, [x9], #255
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - 0.50 0.50 - - - - - ldrsb x2, [x3], #1
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - 0.50 0.50 - - - - - ldrsb x19, [x12], #-256
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - 0.50 0.50 - - - - - ldrsh xzr, [x9], #255
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - 0.50 0.50 - - - - - ldrsh x2, [x3], #1
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - 0.50 0.50 - - - - - ldrsh x19, [x12], #-256
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - 0.50 0.50 - - - - - ldrsw xzr, [x9], #255
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - 0.50 0.50 - - - - - ldrsw x2, [x3], #1
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - 0.50 0.50 - - - - - ldrsw x19, [x12], #-256
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - 0.50 0.50 - - - - - ldrsb wzr, [x9], #255
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - 0.50 0.50 - - - - - ldrsb w2, [x3], #1
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - 0.50 0.50 - - - - - ldrsb w19, [x12], #-256
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - 0.50 0.50 - - - - - ldrsh wzr, [x9], #255
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - 0.50 0.50 - - - - - ldrsh w2, [x3], #1
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - 0.50 0.50 - - - - - ldrsh w19, [x12], #-256
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - - - 0.50 0.50 - - - str b0, [x0], #255
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - - - 0.50 0.50 - - - str b3, [x3], #1
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - - - 0.50 0.50 - - - str b5, [sp], #-256
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - - - 0.50 0.50 - - - str h10, [x10], #255
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - - - 0.50 0.50 - - - str h13, [x23], #1
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - - - 0.50 0.50 - - - str h15, [sp], #-256
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - - - 0.50 0.50 - - - str s20, [x20], #255
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - - - 0.50 0.50 - - - str s23, [x23], #1
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - - - 0.50 0.50 - - - str s25, [x0], #-256
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - - - 0.50 0.50 - - - str d20, [x20], #255
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - - - 0.50 0.50 - - - str d23, [x23], #1
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - - - 0.50 0.50 - - - str d25, [x0], #-256
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - 0.50 0.50 - - - - - ldr b0, [x0], #255
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - 0.50 0.50 - - - - - ldr b3, [x3], #1
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - 0.50 0.50 - - - - - ldr b5, [sp], #-256
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - 0.50 0.50 - - - - - ldr h10, [x10], #255
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - 0.50 0.50 - - - - - ldr h13, [x23], #1
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - 0.50 0.50 - - - - - ldr h15, [sp], #-256
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - 0.50 0.50 - - - - - ldr s20, [x20], #255
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - 0.50 0.50 - - - - - ldr s23, [x23], #1
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - 0.50 0.50 - - - - - ldr s25, [x0], #-256
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - 0.50 0.50 - - - - - ldr d20, [x20], #255
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - 0.50 0.50 - - - - - ldr d23, [x23], #1
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - 0.50 0.50 - - - - - ldr d25, [x0], #-256
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - 0.50 0.50 - - - - - ldr q20, [x1], #255
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - 0.50 0.50 - - - - - ldr q23, [x9], #1
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - 0.50 0.50 - - - - - ldr q25, [x20], #-256
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - - - 0.50 0.50 - - - str q10, [x1], #255
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - - - 0.50 0.50 - - - str q22, [sp], #1
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - - - 0.50 0.50 - - - str q21, [x20], #-256
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - 0.50 0.50 - - - - - ldr x3, [x4, #0]!
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - - - 0.50 0.50 - - - strb w9, [x2, #255]!
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - - - 0.50 0.50 - - - strb w10, [x3, #1]!
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - - - 0.50 0.50 - - - strb w10, [x3, #-256]!
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - - - 0.50 0.50 - - - strh w9, [x2, #255]!
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - - - 0.50 0.50 - - - strh w9, [x2, #1]!
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - - - 0.50 0.50 - - - strh w10, [x3, #-256]!
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - - - 0.50 0.50 - - - str w19, [sp, #255]!
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - - - 0.50 0.50 - - - str w20, [x30, #1]!
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - - - 0.50 0.50 - - - str w21, [x12, #-256]!
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - - - 0.50 0.50 - - - str xzr, [x9, #255]!
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - - - 0.50 0.50 - - - str x2, [x3, #1]!
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - - - 0.50 0.50 - - - str x19, [x12, #-256]!
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - 0.50 0.50 - - - - - ldrb w9, [x2, #255]!
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - 0.50 0.50 - - - - - ldrb w10, [x3, #1]!
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - 0.50 0.50 - - - - - ldrb w10, [x3, #-256]!
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - 0.50 0.50 - - - - - ldrh w9, [x2, #255]!
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - 0.50 0.50 - - - - - ldrh w9, [x2, #1]!
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - 0.50 0.50 - - - - - ldrh w10, [x3, #-256]!
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - 0.50 0.50 - - - - - ldr w19, [sp, #255]!
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - 0.50 0.50 - - - - - ldr w20, [x30, #1]!
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - 0.50 0.50 - - - - - ldr w21, [x12, #-256]!
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - 0.50 0.50 - - - - - ldr xzr, [x9, #255]!
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - 0.50 0.50 - - - - - ldr x2, [x3, #1]!
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - 0.50 0.50 - - - - - ldr x19, [x12, #-256]!
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - 0.50 0.50 - - - - - ldrsb xzr, [x9, #255]!
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - 0.50 0.50 - - - - - ldrsb x2, [x3, #1]!
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - 0.50 0.50 - - - - - ldrsb x19, [x12, #-256]!
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - 0.50 0.50 - - - - - ldrsh xzr, [x9, #255]!
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - 0.50 0.50 - - - - - ldrsh x2, [x3, #1]!
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - 0.50 0.50 - - - - - ldrsh x19, [x12, #-256]!
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - 0.50 0.50 - - - - - ldrsw xzr, [x9, #255]!
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - 0.50 0.50 - - - - - ldrsw x2, [x3, #1]!
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - 0.50 0.50 - - - - - ldrsw x19, [x12, #-256]!
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - 0.50 0.50 - - - - - ldrsb wzr, [x9, #255]!
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - 0.50 0.50 - - - - - ldrsb w2, [x3, #1]!
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - 0.50 0.50 - - - - - ldrsb w19, [x12, #-256]!
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - 0.50 0.50 - - - - - ldrsh wzr, [x9, #255]!
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - 0.50 0.50 - - - - - ldrsh w2, [x3, #1]!
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - 0.50 0.50 - - - - - ldrsh w19, [x12, #-256]!
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - - - 0.50 0.50 - - - str b0, [x0, #255]!
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - - - 0.50 0.50 - - - str b3, [x3, #1]!
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - - - 0.50 0.50 - - - str b5, [sp, #-256]!
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - - - 0.50 0.50 - - - str h10, [x10, #255]!
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - - - 0.50 0.50 - - - str h13, [x23, #1]!
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - - - 0.50 0.50 - - - str h15, [sp, #-256]!
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - - - 0.50 0.50 - - - str s20, [x20, #255]!
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - - - 0.50 0.50 - - - str s23, [x23, #1]!
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - - - 0.50 0.50 - - - str s25, [x0, #-256]!
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - - - 0.50 0.50 - - - str d20, [x20, #255]!
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - - - 0.50 0.50 - - - str d23, [x23, #1]!
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - - - 0.50 0.50 - - - str d25, [x0, #-256]!
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - 0.50 0.50 - - - - - ldr b0, [x0, #255]!
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - 0.50 0.50 - - - - - ldr b3, [x3, #1]!
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - 0.50 0.50 - - - - - ldr b5, [sp, #-256]!
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - 0.50 0.50 - - - - - ldr h10, [x10, #255]!
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - 0.50 0.50 - - - - - ldr h13, [x23, #1]!
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - 0.50 0.50 - - - - - ldr h15, [sp, #-256]!
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - 0.50 0.50 - - - - - ldr s20, [x20, #255]!
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - 0.50 0.50 - - - - - ldr s23, [x23, #1]!
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - 0.50 0.50 - - - - - ldr s25, [x0, #-256]!
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - 0.50 0.50 - - - - - ldr d20, [x20, #255]!
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - 0.50 0.50 - - - - - ldr d23, [x23, #1]!
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - 0.50 0.50 - - - - - ldr d25, [x0, #-256]!
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - 0.50 0.50 - - - - - ldr q20, [x1, #255]!
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - 0.50 0.50 - - - - - ldr q23, [x9, #1]!
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - 0.50 0.50 - - - - - ldr q25, [x20, #-256]!
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - - - 0.50 0.50 - - - str q10, [x1, #255]!
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - - - 0.50 0.50 - - - str q22, [sp, #1]!
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - - - 0.50 0.50 - - - str q21, [x20, #-256]!
+# CHECK-NEXT: - - - - - - - 0.50 0.50 - - - sttrb w9, [sp]
+# CHECK-NEXT: - - - - - - - 0.50 0.50 - - - sttrh wzr, [x12, #255]
+# CHECK-NEXT: - - - - - - - 0.50 0.50 - - - sttr w16, [x0, #-256]
+# CHECK-NEXT: - - - - - - - 0.50 0.50 - - - sttr x28, [x14, #1]
+# CHECK-NEXT: - - - - - 0.50 0.50 - - - - - ldtrb w1, [x20, #255]
+# CHECK-NEXT: - - - - - 0.50 0.50 - - - - - ldtrh w20, [x1, #255]
+# CHECK-NEXT: - - - - - 0.50 0.50 - - - - - ldtr w12, [sp, #255]
+# CHECK-NEXT: - - - - - 0.50 0.50 - - - - - ldtr xzr, [x12, #255]
+# CHECK-NEXT: - - - - - 0.50 0.50 - - - - - ldtrsb x9, [x7, #-256]
+# CHECK-NEXT: - - - - - 0.50 0.50 - - - - - ldtrsh x17, [x19, #-256]
+# CHECK-NEXT: - - - - - 0.50 0.50 - - - - - ldtrsw x20, [x15, #-256]
+# CHECK-NEXT: - - - - - 0.50 0.50 - - - - - ldtrsb w19, [x1, #-256]
+# CHECK-NEXT: - - - - - 0.50 0.50 - - - - - ldtrsh w15, [x21, #-256]
+# CHECK-NEXT: - - - - - 0.50 0.50 - - - - - ldr x4, [x29]
+# CHECK-NEXT: - - - - - 0.50 0.50 - - - - - ldr x30, [x12, #32760]
+# CHECK-NEXT: - - - - - 0.50 0.50 - - - - - ldr x20, [sp, #8]
+# CHECK-NEXT: - - - - - 0.50 0.50 - - - - - ldr xzr, [sp]
+# CHECK-NEXT: - - - - - 0.50 0.50 - - - - - ldr w2, [sp]
+# CHECK-NEXT: - - - - - 0.50 0.50 - - - - - ldr w17, [sp, #16380]
+# CHECK-NEXT: - - - - - 0.50 0.50 - - - - - ldr w13, [x2, #4]
+# CHECK-NEXT: - - - - - 0.50 0.50 - - - - - ldrsw x2, [x5, #4]
+# CHECK-NEXT: - - - - - 0.50 0.50 - - - - - ldrsw x23, [sp, #16380]
+# CHECK-NEXT: - - - - - 0.50 0.50 - - - - - ldrh w2, [x4]
+# CHECK-NEXT: - - - - - 0.50 0.50 - - - - - ldrsh w23, [x6, #8190]
+# CHECK-NEXT: - - - - - 0.50 0.50 - - - - - ldrsh wzr, [sp, #2]
+# CHECK-NEXT: - - - - - 0.50 0.50 - - - - - ldrsh x29, [x2, #2]
+# CHECK-NEXT: - - - - - 0.50 0.50 - - - - - ldrb w26, [x3, #121]
+# CHECK-NEXT: - - - - - 0.50 0.50 - - - - - ldrb w12, [x2]
+# CHECK-NEXT: - - - - - 0.50 0.50 - - - - - ldrsb w27, [sp, #4095]
+# CHECK-NEXT: - - - - - 0.50 0.50 - - - - - ldrsb xzr, [x15]
+# CHECK-NEXT: - - - - - - - 0.50 0.50 - - - str x30, [sp]
+# CHECK-NEXT: - - - - - - - 0.50 0.50 - - - str w20, [x4, #16380]
+# CHECK-NEXT: - - - - - - - 0.50 0.50 - - - strh w17, [sp, #8190]
+# CHECK-NEXT: - - - - - - - 0.50 0.50 - - - strb w23, [x3, #4095]
+# CHECK-NEXT: - - - - - - - 0.50 0.50 - - - strb wzr, [x2]
+# CHECK-NEXT: - - - - - 0.50 0.50 - - - - - ldr b31, [sp, #4095]
+# CHECK-NEXT: - - - - - 0.50 0.50 - - - - - ldr h20, [x2, #8190]
+# CHECK-NEXT: - - - - - 0.50 0.50 - - - - - ldr s10, [x19, #16380]
+# CHECK-NEXT: - - - - - 0.50 0.50 - - - - - ldr d3, [x10, #32760]
+# CHECK-NEXT: - - - - - - - 0.50 0.50 - - 1.00 str q12, [sp, #65520]
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - - - 0.50 0.50 - - - ldrb w3, [sp, x5]
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - - - 0.50 0.50 - - - ldrb w9, [x27, x6]
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - - - 0.50 0.50 - - - ldrsb w10, [x30, x7]
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - - - 0.50 0.50 - - - ldrb w11, [x29, x3, sxtx]
+# CHECK-NEXT: - - - - - - - 1.00 1.00 - - - strb w12, [x28, xzr, sxtx]
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - - - 0.50 0.50 - - - ldrb w14, [x26, w6, uxtw]
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - - - 0.50 0.50 - - - ldrsb w15, [x25, w7, uxtw]
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - - - 0.50 0.50 - - - ldrb w17, [x23, w9, sxtw]
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - - - 0.50 0.50 - - - ldrsb x18, [x22, w10, sxtw]
+# CHECK-NEXT: - - - - - 0.50 0.50 - - - - - ldrsh w3, [sp, x5]
+# CHECK-NEXT: - - - - - 0.50 0.50 - - - - - ldrsh w9, [x27, x6]
+# CHECK-NEXT: - - - - - 0.50 0.50 - - - - - ldrh w10, [x30, x7, lsl #1]
+# CHECK-NEXT: - - - - - - - 1.00 1.00 - - - strh w11, [x29, x3, sxtx]
+# CHECK-NEXT: - - - - - 0.50 0.50 - - - - - ldrh w12, [x28, xzr, sxtx]
+# CHECK-NEXT: - - - - - 0.50 0.50 - - - - - ldrsh x13, [x27, x5, sxtx #1]
+# CHECK-NEXT: - - - - - 0.50 0.50 - - - - - ldrh w14, [x26, w6, uxtw]
+# CHECK-NEXT: - - - - - 0.50 0.50 - - - - - ldrh w15, [x25, w7, uxtw]
+# CHECK-NEXT: - - - - - 0.50 0.50 - - - - - ldrsh w16, [x24, w8, uxtw #1]
+# CHECK-NEXT: - - - - - 0.50 0.50 - - - - - ldrh w17, [x23, w9, sxtw]
+# CHECK-NEXT: - - - - - 0.50 0.50 - - - - - ldrh w18, [x22, w10, sxtw]
+# CHECK-NEXT: - - - - - - - 1.00 1.00 - - - strh w19, [x21, wzr, sxtw #1]
+# CHECK-NEXT: - - - - - 0.50 0.50 - - - - - ldr w3, [sp, x5]
+# CHECK-NEXT: - - - - - 0.50 0.50 - - - - - ldr s9, [x27, x6]
+# CHECK-NEXT: - - - - - 0.50 0.50 - - - - - ldr w10, [x30, x7, lsl #2]
+# CHECK-NEXT: - - - - - 0.50 0.50 - - - - - ldr w11, [x29, x3, sxtx]
+# CHECK-NEXT: - - - - - - - 0.50 0.50 - - 1.00 str s12, [x28, xzr, sxtx]
+# CHECK-NEXT: - - - - - - - 1.00 1.00 - - - str w13, [x27, x5, sxtx #2]
+# CHECK-NEXT: - - - - - - - 1.00 1.00 - - - str w14, [x26, w6, uxtw]
+# CHECK-NEXT: - - - - - 0.50 0.50 - - - - - ldr w15, [x25, w7, uxtw]
+# CHECK-NEXT: - - - - - 0.50 0.50 - - - - - ldr w16, [x24, w8, uxtw #2]
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - - - 0.50 0.50 - - - ldrsw x17, [x23, w9, sxtw]
+# CHECK-NEXT: - - - - - 0.50 0.50 - - - - - ldr w18, [x22, w10, sxtw]
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - - - 0.50 0.50 - - - ldrsw x19, [x21, wzr, sxtw #2]
+# CHECK-NEXT: - - - - - 0.50 0.50 - - - - - ldr x3, [sp, x5]
+# CHECK-NEXT: - - - - - - - 1.00 1.00 - - - str x9, [x27, x6]
+# CHECK-NEXT: - - - - - 0.50 0.50 - - - - - ldr d10, [x30, x7, lsl #3]
+# CHECK-NEXT: - - - - - - - 1.00 1.00 - - - str x11, [x29, x3, sxtx]
+# CHECK-NEXT: - - - - - 0.50 0.50 - - - - - ldr x12, [x28, xzr, sxtx]
+# CHECK-NEXT: - - - - - 0.50 0.50 - - - - - ldr x13, [x27, x5, sxtx #3]
+# CHECK-NEXT: - - - - - 0.50 0.50 - - - - - prfm pldl1keep, [x26, w6, uxtw]
+# CHECK-NEXT: - - - - - 0.50 0.50 - - - - - ldr x15, [x25, w7, uxtw]
+# CHECK-NEXT: - - - - - 0.50 0.50 - - - - - ldr x16, [x24, w8, uxtw #3]
+# CHECK-NEXT: - - - - - 0.50 0.50 - - - - - ldr x17, [x23, w9, sxtw]
+# CHECK-NEXT: - - - - - 0.50 0.50 - - - - - ldr x18, [x22, w10, sxtw]
+# CHECK-NEXT: - - - - - - - 0.50 0.50 - - 1.00 str d19, [x21, wzr, sxtw #3]
+# CHECK-NEXT: - - - - - 0.50 0.50 - - - - - ldr q3, [sp, x5]
+# CHECK-NEXT: - - - - - 0.50 0.50 - - - - - ldr q9, [x27, x6]
+# CHECK-NEXT: - - - - - 0.50 0.50 - - - - - ldr q10, [x30, x7, lsl #4]
+# CHECK-NEXT: - - - - - - - 0.50 0.50 - - 1.00 str q11, [x29, x3, sxtx]
+# CHECK-NEXT: - - - - - - - 0.50 0.50 - - 1.00 str q12, [x28, xzr, sxtx]
+# CHECK-NEXT: - - - - - - - 0.50 0.50 - - 1.00 str q13, [x27, x5, sxtx #4]
+# CHECK-NEXT: - - - - - 0.50 0.50 - - - - - ldr q14, [x26, w6, uxtw]
+# CHECK-NEXT: - - - - - 0.50 0.50 - - - - - ldr q15, [x25, w7, uxtw]
+# CHECK-NEXT: - - - - - 0.50 0.50 - - - - - ldr q16, [x24, w8, uxtw #4]
+# CHECK-NEXT: - - - - - 0.50 0.50 - - - - - ldr q17, [x23, w9, sxtw]
+# CHECK-NEXT: - - - - - - - 0.50 0.50 - - 1.00 str q18, [x22, w10, sxtw]
+# CHECK-NEXT: - - - - - 0.50 0.50 - - - - - ldr q19, [x21, wzr, sxtw #4]
+# CHECK-NEXT: - - - - - 0.50 0.50 - - - - - ldp w3, w5, [sp]
+# CHECK-NEXT: - - 0.50 0.50 - - - 0.50 0.50 - - - stp wzr, w9, [sp, #252]
+# CHECK-NEXT: - - - - - 0.50 0.50 - - - - - ldp w2, wzr, [sp, #-256]
+# CHECK-NEXT: - - - - - 0.50 0.50 - - - - - ldp w9, w10, [sp, #4]
+# CHECK-NEXT: - - - - 1.00 0.50 0.50 - - - - - ldpsw x9, x10, [sp, #4]
+# CHECK-NEXT: - - - - 1.00 0.50 0.50 - - - - - ldpsw x9, x10, [x2, #-256]
+# CHECK-NEXT: - - - - 1.00 0.50 0.50 - - - - - ldpsw x20, x30, [sp, #252]
+# CHECK-NEXT: - - - - - 0.50 0.50 - - - - - ldp x21, x29, [x2, #504]
+# CHECK-NEXT: - - - - - 0.50 0.50 - - - - - ldp x22, x23, [x3, #-512]
+# CHECK-NEXT: - - - - - 0.50 0.50 - - - - - ldp x24, x25, [x4, #8]
+# CHECK-NEXT: - - - - 1.00 0.50 0.50 - - - - - ldp s29, s28, [sp, #252]
+# CHECK-NEXT: - - - - - - - 1.00 1.00 - - 2.00 stp s27, s26, [sp, #-256]
+# CHECK-NEXT: - - - - 1.00 0.50 0.50 - - - - - ldp s1, s2, [x3, #44]
+# CHECK-NEXT: - - - - - - - 1.00 1.00 - - 2.00 stp d3, d5, [x9, #504]
+# CHECK-NEXT: - - - - - - - 1.00 1.00 - - 2.00 stp d7, d11, [x10, #-512]
+# CHECK-NEXT: - - - - 1.00 0.50 0.50 - - - - - ldp d2, d3, [x30, #-8]
+# CHECK-NEXT: - - - - - - - 1.00 1.00 - - 2.00 stp q3, q5, [sp]
+# CHECK-NEXT: - - - - - - - 1.00 1.00 - - 2.00 stp q17, q19, [sp, #1008]
+# CHECK-NEXT: - - - - - 1.00 1.00 - - - - - ldp q23, q29, [x1, #-1024]
+# CHECK-NEXT: - - - - - 0.50 0.50 - - - - - ldp w3, w5, [sp], #0
+# CHECK-NEXT: 0.25 0.25 0.75 0.75 - - - 0.50 0.50 - - - stp wzr, w9, [sp], #252
+# CHECK-NEXT: - - - - - 0.50 0.50 - - - - - ldp w2, wzr, [sp], #-256
+# CHECK-NEXT: - - - - - 0.50 0.50 - - - - - ldp w9, w10, [sp], #4
+# CHECK-NEXT: - - - - 1.00 0.50 0.50 - - - - - ldpsw x9, x10, [sp], #4
+# CHECK-NEXT: - - - - 1.00 0.50 0.50 - - - - - ldpsw x9, x10, [x2], #-256
+# CHECK-NEXT: - - - - 1.00 0.50 0.50 - - - - - ldpsw x20, x30, [sp], #252
+# CHECK-NEXT: - - - - - 0.50 0.50 - - - - - ldp x21, x29, [x2], #504
+# CHECK-NEXT: - - - - - 0.50 0.50 - - - - - ldp x22, x23, [x3], #-512
+# CHECK-NEXT: - - - - - 0.50 0.50 - - - - - ldp x24, x25, [x4], #8
+# CHECK-NEXT: - - - - 1.00 0.50 0.50 - - - - - ldp s29, s28, [sp], #252
+# CHECK-NEXT: - - - - - - - 1.00 1.00 - - 2.00 stp s27, s26, [sp], #-256
+# CHECK-NEXT: - - - - 1.00 0.50 0.50 - - - - - ldp s1, s2, [x3], #44
+# CHECK-NEXT: - - - - - - - 1.00 1.00 - - 2.00 stp d3, d5, [x9], #504
+# CHECK-NEXT: - - - - - - - 1.00 1.00 - - 2.00 stp d7, d11, [x10], #-512
+# CHECK-NEXT: - - - - 1.00 0.50 0.50 - - - - - ldp d2, d3, [x30], #-8
+# CHECK-NEXT: - - - - - - - 1.00 1.00 - - 2.00 stp q3, q5, [sp], #0
+# CHECK-NEXT: - - - - - - - 1.00 1.00 - - 2.00 stp q17, q19, [sp], #1008
+# CHECK-NEXT: - - - - - 1.00 1.00 - - - - - ldp q23, q29, [x1], #-1024
+# CHECK-NEXT: - - - - - 0.50 0.50 - - - - - ldp w3, w5, [sp, #0]!
+# CHECK-NEXT: 0.25 0.25 0.75 0.75 - - - 0.50 0.50 - - - stp wzr, w9, [sp, #252]!
+# CHECK-NEXT: - - - - - 0.50 0.50 - - - - - ldp w2, wzr, [sp, #-256]!
+# CHECK-NEXT: - - - - - 0.50 0.50 - - - - - ldp w9, w10, [sp, #4]!
+# CHECK-NEXT: - - - - 1.00 0.50 0.50 - - - - - ldpsw x9, x10, [sp, #4]!
+# CHECK-NEXT: - - - - 1.00 0.50 0.50 - - - - - ldpsw x9, x10, [x2, #-256]!
+# CHECK-NEXT: - - - - 1.00 0.50 0.50 - - - - - ldpsw x20, x30, [sp, #252]!
+# CHECK-NEXT: - - - - - 0.50 0.50 - - - - - ldp x21, x29, [x2, #504]!
+# CHECK-NEXT: - - - - - 0.50 0.50 - - - - - ldp x22, x23, [x3, #-512]!
+# CHECK-NEXT: - - - - - 0.50 0.50 - - - - - ldp x24, x25, [x4, #8]!
+# CHECK-NEXT: - - - - 1.00 0.50 0.50 - - - - - ldp s29, s28, [sp, #252]!
+# CHECK-NEXT: - - - - - - - 1.00 1.00 - - 2.00 stp s27, s26, [sp, #-256]!
+# CHECK-NEXT: - - - - 1.00 0.50 0.50 - - - - - ldp s1, s2, [x3, #44]!
+# CHECK-NEXT: - - - - - - - 1.00 1.00 - - 2.00 stp d3, d5, [x9, #504]!
+# CHECK-NEXT: - - - - - - - 1.00 1.00 - - 2.00 stp d7, d11, [x10, #-512]!
+# CHECK-NEXT: - - - - 1.00 0.50 0.50 - - - - - ldp d2, d3, [x30, #-8]!
+# CHECK-NEXT: - - - - - - - 1.00 1.00 - - 2.00 stp q3, q5, [sp, #0]!
+# CHECK-NEXT: - - - - - - - 1.00 1.00 - - 2.00 stp q17, q19, [sp, #1008]!
+# CHECK-NEXT: - - - - - 1.00 1.00 - - - - - ldp q23, q29, [x1, #-1024]!
+# CHECK-NEXT: - - - - - 0.50 0.50 - - - - - ldnp w3, w5, [sp]
+# CHECK-NEXT: - - - - - - - 1.00 1.00 - - - stnp wzr, w9, [sp, #252]
+# CHECK-NEXT: - - - - - 0.50 0.50 - - - - - ldnp w2, wzr, [sp, #-256]
+# CHECK-NEXT: - - - - - 0.50 0.50 - - - - - ldnp w9, w10, [sp, #4]
+# CHECK-NEXT: - - - - - 0.50 0.50 - - - - - ldnp x21, x29, [x2, #504]
+# CHECK-NEXT: - - - - - 0.50 0.50 - - - - - ldnp x22, x23, [x3, #-512]
+# CHECK-NEXT: - - - - - 0.50 0.50 - - - - - ldnp x24, x25, [x4, #8]
+# CHECK-NEXT: - - - - 1.00 0.50 0.50 - - - - - ldnp s29, s28, [sp, #252]
+# CHECK-NEXT: - - - - - - - 1.00 1.00 - - 2.00 stnp s27, s26, [sp, #-256]
+# CHECK-NEXT: - - - - 1.00 0.50 0.50 - - - - - ldnp s1, s2, [x3, #44]
+# CHECK-NEXT: - - - - - - - 1.00 1.00 - - 2.00 stnp d3, d5, [x9, #504]
+# CHECK-NEXT: - - - - - - - 1.00 1.00 - - 2.00 stnp d7, d11, [x10, #-512]
+# CHECK-NEXT: - - - - 1.00 0.50 0.50 - - - - - ldnp d2, d3, [x30, #-8]
+# CHECK-NEXT: - - - - - - - 1.00 1.00 - - 2.00 stnp q3, q5, [sp]
+# CHECK-NEXT: - - - - - - - 1.00 1.00 - - 2.00 stnp q17, q19, [sp, #1008]
+# CHECK-NEXT: - - - - - 1.00 1.00 - - - - - ldnp q23, q29, [x1, #-1024]
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - - - - - - - - mov w3, #983055
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - - - - - - - - mov x10, #-6148914691236517206
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - - - - - - - - and w12, w23, w21
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - - - - - - - - and w16, w15, w1, lsl #1
+# CHECK-NEXT: 0.25 0.25 0.75 0.75 - - - - - - - - and w9, w4, w10, lsl #31
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - - - - - - - - and w3, w30, w11
+# CHECK-NEXT: 0.25 0.25 0.75 0.75 - - - - - - - - and x3, x5, x7, lsl #63
+# CHECK-NEXT: 0.25 0.25 0.75 0.75 - - - - - - - - and x5, x14, x19, asr #4
+# CHECK-NEXT: 0.25 0.25 0.75 0.75 - - - - - - - - and w3, w17, w19, ror #31
+# CHECK-NEXT: 0.25 0.25 0.75 0.75 - - - - - - - - and w0, w2, wzr, lsr #17
+# CHECK-NEXT: 0.25 0.25 0.75 0.75 - - - - - - - - and w3, w30, w11, asr #2
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - - - - - - - - and xzr, x4, x26
+# CHECK-NEXT: 0.25 0.25 0.75 0.75 - - - - - - - - and w3, wzr, w20, ror #2
+# CHECK-NEXT: 0.25 0.25 0.75 0.75 - - - - - - - - and x7, x20, xzr, asr #63
+# CHECK-NEXT: 0.25 0.25 0.75 0.75 - - - - - - - - bic x13, x20, x14, lsl #47
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - - - - - - - - bic w2, w7, w9
+# CHECK-NEXT: 0.25 0.25 0.75 0.75 - - - - - - - - orr w2, w7, w0, asr #31
+# CHECK-NEXT: 0.25 0.25 0.75 0.75 - - - - - - - - orr x8, x9, x10, lsl #12
+# CHECK-NEXT: 0.25 0.25 0.75 0.75 - - - - - - - - orn x3, x5, x7, asr #2
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - - - - - - - - orn w2, w5, w29
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - - - - - - - - ands w7, wzr, w9, lsl #1
+# CHECK-NEXT: 0.25 0.25 0.75 0.75 - - - - - - - - ands x3, x5, x20, ror #63
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - - - - - - - - bics w3, w5, w7
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - - - - - - - - bics x3, xzr, x3, lsl #1
+# CHECK-NEXT: 0.25 0.25 0.75 0.75 - - - - - - - - tst w3, w7, lsl #31
+# CHECK-NEXT: 0.25 0.25 0.75 0.75 - - - - - - - - tst x2, x20, asr #2
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - - - - - - - - mov x3, x6
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - - - - - - - - mov x3, xzr
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - - - - - - - - mov wzr, w2
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - - - - - - - - mov w3, w5
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - - - - - - - - movz w2, #0, lsl #16
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - - - - - - - - mov w2, #-1235
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - - - - - - - - mov x2, #5299989643264
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - - - - - - - - mov x2, #0
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - - - - - - - - movk w3, #0
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - - - - - - - - movz x4, #0, lsl #16
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - - - - - - - - movk w5, #0, lsl #16
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - - - - - - - - movz x6, #0, lsl #32
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - - - - - - - - movk x7, #0, lsl #32
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - - - - - - - - movz x8, #0, lsl #48
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - - - - - - - - movk x9, #0, lsl #48
+# CHECK-NEXT: 0.50 0.50 - - - - - - - - - - adr x2, #1600
+# CHECK-NEXT: 0.50 0.50 - - - - - - - - - - adrp x21, #6553600
+# CHECK-NEXT: 0.50 0.50 - - - - - - - - - - adr x0, #262144
+# CHECK-NEXT: 0.50 0.50 - - - - - - - - - - tbz x12, #62, #0
+# CHECK-NEXT: 0.50 0.50 - - - - - - - - - - tbz x12, #62, #4
+# CHECK-NEXT: 0.50 0.50 - - - - - - - - - - tbz x12, #62, #-32768
+# CHECK-NEXT: 0.50 0.50 - - - - - - - - - - tbnz x12, #60, #32764
+# CHECK-NEXT: 0.50 0.50 - - - - - - - - - - b #4
+# CHECK-NEXT: 0.50 0.50 - - - - - - - - - - b #-4
+# CHECK-NEXT: 0.50 0.50 - - - - - - - - - - b #134217724
+# CHECK-NEXT: 1.00 1.00 - - - - - - - - - - br x20
+# CHECK-NEXT: 1.00 1.00 - - - - - - - - - - blr xzr
+# CHECK-NEXT: 0.50 0.50 - - - - - - - - - - ret x10
+# CHECK-NEXT: 0.50 0.50 - - - - - - - - - - ret
+# CHECK-NEXT: 1.00 1.00 - - - - - - - - - - eret
+# CHECK-NEXT: 1.00 1.00 - - - - - - - - - - drps
diff --git a/llvm/test/tools/llvm-mca/AArch64/Ampere/Ampere1B/cssc-instructions.s b/llvm/test/tools/llvm-mca/AArch64/Ampere/Ampere1B/cssc-instructions.s
new file mode 100644
index 0000000..a19a106
--- /dev/null
+++ b/llvm/test/tools/llvm-mca/AArch64/Ampere/Ampere1B/cssc-instructions.s
@@ -0,0 +1,76 @@
+# NOTE: Assertions have been autogenerated by utils/update_mca_test_checks.py
+# RUN: llvm-mca -mtriple=aarch64 -mcpu=ampere1b -instruction-tables < %s | FileCheck %s
+
+abs w1, w2
+abs x2, x3
+cnt w3, w4
+cnt x4, x5
+ctz w5, w6
+ctz x6, x7
+smax w7, w8, w9
+smax x8, x9, x10
+umax w9, w10, w11
+umax x10, x11, x12
+smin w11, w12, w13
+smin w12, w13, w14
+umin w13, w14, w15
+umin x14, x15, x16
+
+# CHECK: Instruction Info:
+# CHECK-NEXT: [1]: #uOps
+# CHECK-NEXT: [2]: Latency
+# CHECK-NEXT: [3]: RThroughput
+# CHECK-NEXT: [4]: MayLoad
+# CHECK-NEXT: [5]: MayStore
+# CHECK-NEXT: [6]: HasSideEffects (U)
+
+# CHECK: [1] [2] [3] [4] [5] [6] Instructions:
+# CHECK-NEXT: 1 1 0.25 abs w1, w2
+# CHECK-NEXT: 1 1 0.25 abs x2, x3
+# CHECK-NEXT: 1 3 1.00 cnt w3, w4
+# CHECK-NEXT: 1 3 1.00 cnt x4, x5
+# CHECK-NEXT: 1 1 0.50 ctz w5, w6
+# CHECK-NEXT: 1 1 0.50 ctz x6, x7
+# CHECK-NEXT: 2 1 0.50 smax w7, w8, w9
+# CHECK-NEXT: 2 1 0.50 smax x8, x9, x10
+# CHECK-NEXT: 2 1 0.50 umax w9, w10, w11
+# CHECK-NEXT: 2 1 0.50 umax x10, x11, x12
+# CHECK-NEXT: 2 1 0.50 smin w11, w12, w13
+# CHECK-NEXT: 2 1 0.50 smin w12, w13, w14
+# CHECK-NEXT: 2 1 0.50 umin w13, w14, w15
+# CHECK-NEXT: 2 1 0.50 umin x14, x15, x16
+
+# CHECK: Resources:
+# CHECK-NEXT: [0.0] - Ampere1BUnitA
+# CHECK-NEXT: [0.1] - Ampere1BUnitA
+# CHECK-NEXT: [1.0] - Ampere1BUnitB
+# CHECK-NEXT: [1.1] - Ampere1BUnitB
+# CHECK-NEXT: [2] - Ampere1BUnitBS
+# CHECK-NEXT: [3.0] - Ampere1BUnitL
+# CHECK-NEXT: [3.1] - Ampere1BUnitL
+# CHECK-NEXT: [4.0] - Ampere1BUnitS
+# CHECK-NEXT: [4.1] - Ampere1BUnitS
+# CHECK-NEXT: [5] - Ampere1BUnitX
+# CHECK-NEXT: [6] - Ampere1BUnitY
+# CHECK-NEXT: [7] - Ampere1BUnitZ
+
+# CHECK: Resource pressure per iteration:
+# CHECK-NEXT: [0.0] [0.1] [1.0] [1.1] [2] [3.0] [3.1] [4.0] [4.1] [5] [6] [7]
+# CHECK-NEXT: 6.50 6.50 3.50 3.50 2.00 - - - - - - -
+
+# CHECK: Resource pressure by instruction:
+# CHECK-NEXT: [0.0] [0.1] [1.0] [1.1] [2] [3.0] [3.1] [4.0] [4.1] [5] [6] [7] Instructions:
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - - - - - - - - abs w1, w2
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - - - - - - - - abs x2, x3
+# CHECK-NEXT: - - - - 1.00 - - - - - - - cnt w3, w4
+# CHECK-NEXT: - - - - 1.00 - - - - - - - cnt x4, x5
+# CHECK-NEXT: - - 0.50 0.50 - - - - - - - - ctz w5, w6
+# CHECK-NEXT: - - 0.50 0.50 - - - - - - - - ctz x6, x7
+# CHECK-NEXT: 0.75 0.75 0.25 0.25 - - - - - - - - smax w7, w8, w9
+# CHECK-NEXT: 0.75 0.75 0.25 0.25 - - - - - - - - smax x8, x9, x10
+# CHECK-NEXT: 0.75 0.75 0.25 0.25 - - - - - - - - umax w9, w10, w11
+# CHECK-NEXT: 0.75 0.75 0.25 0.25 - - - - - - - - umax x10, x11, x12
+# CHECK-NEXT: 0.75 0.75 0.25 0.25 - - - - - - - - smin w11, w12, w13
+# CHECK-NEXT: 0.75 0.75 0.25 0.25 - - - - - - - - smin w12, w13, w14
+# CHECK-NEXT: 0.75 0.75 0.25 0.25 - - - - - - - - umin w13, w14, w15
+# CHECK-NEXT: 0.75 0.75 0.25 0.25 - - - - - - - - umin x14, x15, x16
diff --git a/llvm/test/tools/llvm-mca/AArch64/Ampere/Ampere1B/mte-instructions.s b/llvm/test/tools/llvm-mca/AArch64/Ampere/Ampere1B/mte-instructions.s
new file mode 100644
index 0000000..5148522
--- /dev/null
+++ b/llvm/test/tools/llvm-mca/AArch64/Ampere/Ampere1B/mte-instructions.s
@@ -0,0 +1,349 @@
+# NOTE: Assertions have been autogenerated by utils/update_mca_test_checks.py
+# RUN: llvm-mca -mtriple=aarch64 -mcpu=ampere1b -instruction-tables < %s | FileCheck %s
+
+irg x0, x1
+irg sp, x1
+irg x0, sp
+irg x0, x1, x2
+irg sp, x1, x2
+addg x0, x1, #0, #1
+addg sp, x2, #32, #3
+addg x0, sp, #64, #5
+addg x3, x4, #1008, #6
+addg x5, x6, #112, #15
+subg x0, x1, #0, #1
+subg sp, x2, #32, #3
+subg x0, sp, #64, #5
+subg x3, x4, #1008, #6
+subg x5, x6, #112, #15
+gmi x0, x1, x2
+gmi x3, sp, x4
+gmi xzr, x0, x30
+gmi x30, x0, xzr
+subp x0, x1, x2
+subps x0, x1, x2
+subp x0, sp, sp
+subps x0, sp, sp
+subps xzr, x0, x1
+subps xzr, sp, sp
+stg x0, [x1, #-4096]
+stg x1, [x2, #4080]
+stg x2, [sp, #16]
+stg x3, [x1]
+stg sp, [x1]
+stzg x0, [x1, #-4096]
+stzg x1, [x2, #4080]
+stzg x2, [sp, #16]
+stzg x3, [x1]
+stzg sp, [x1]
+stg x0, [x1, #-4096]!
+stg x1, [x2, #4080]!
+stg x2, [sp, #16]!
+stg sp, [sp, #16]!
+stzg x0, [x1, #-4096]!
+stzg x1, [x2, #4080]!
+stzg x2, [sp, #16]!
+stzg sp, [sp, #16]!
+stg x0, [x1], #-4096
+stg x1, [x2], #4080
+stg x2, [sp], #16
+stg sp, [sp], #16
+stzg x0, [x1], #-4096
+stzg x1, [x2], #4080
+stzg x2, [sp], #16
+stzg sp, [sp], #16
+st2g x0, [x1, #-4096]
+st2g x1, [x2, #4080]
+st2g x2, [sp, #16]
+st2g x3, [x1]
+st2g sp, [x1]
+stz2g x0, [x1, #-4096]
+stz2g x1, [x2, #4080]
+stz2g x2, [sp, #16]
+stz2g x3, [x1]
+stz2g sp, [x1]
+st2g x0, [x1, #-4096]!
+st2g x1, [x2, #4080]!
+st2g x2, [sp, #16]!
+st2g sp, [sp, #16]!
+stz2g x0, [x1, #-4096]!
+stz2g x1, [x2, #4080]!
+stz2g x2, [sp, #16]!
+stz2g sp, [sp, #16]!
+st2g x0, [x1], #-4096
+st2g x1, [x2], #4080
+st2g x2, [sp], #16
+st2g sp, [sp], #16
+stz2g x0, [x1], #-4096
+stz2g x1, [x2], #4080
+stz2g x2, [sp], #16
+stz2g sp, [sp], #16
+stgp x0, x1, [x2, #-1024]
+stgp x0, x1, [x2, #1008]
+stgp x0, x1, [sp, #16]
+stgp xzr, x1, [x2, #16]
+stgp x0, xzr, [x2, #16]
+stgp x0, xzr, [x2]
+stgp x0, x1, [x2, #-1024]!
+stgp x0, x1, [x2, #1008]!
+stgp x0, x1, [sp, #16]!
+stgp xzr, x1, [x2, #16]!
+stgp x0, xzr, [x2, #16]!
+stgp x0, x1, [x2], #-1024
+stgp x0, x1, [x2], #1008
+stgp x0, x1, [sp], #16
+stgp xzr, x1, [x2], #16
+stgp x0, xzr, [x2], #16
+ldg x0, [x1]
+ldg x2, [sp, #-4096]
+ldg x3, [x4, #4080]
+ldgm x0, [x1]
+ldgm x1, [sp]
+ldgm xzr, [x2]
+stgm x0, [x1]
+stgm x1, [sp]
+stgm xzr, [x2]
+stzgm x0, [x1]
+stzgm x1, [sp]
+stzgm xzr, [x2]
+
+# CHECK: Instruction Info:
+# CHECK-NEXT: [1]: #uOps
+# CHECK-NEXT: [2]: Latency
+# CHECK-NEXT: [3]: RThroughput
+# CHECK-NEXT: [4]: MayLoad
+# CHECK-NEXT: [5]: MayStore
+# CHECK-NEXT: [6]: HasSideEffects (U)
+
+# CHECK: [1] [2] [3] [4] [5] [6] Instructions:
+# CHECK-NEXT: 2 1 1.00 U irg x0, x1
+# CHECK-NEXT: 2 1 1.00 U irg sp, x1
+# CHECK-NEXT: 2 1 1.00 U irg x0, sp
+# CHECK-NEXT: 2 1 1.00 U irg x0, x1, x2
+# CHECK-NEXT: 2 1 1.00 U irg sp, x1, x2
+# CHECK-NEXT: 1 1 0.50 addg x0, x1, #0, #1
+# CHECK-NEXT: 1 1 0.50 addg sp, x2, #32, #3
+# CHECK-NEXT: 1 1 0.50 addg x0, sp, #64, #5
+# CHECK-NEXT: 1 1 0.50 addg x3, x4, #1008, #6
+# CHECK-NEXT: 1 1 0.50 addg x5, x6, #112, #15
+# CHECK-NEXT: 1 1 0.50 U subg x0, x1, #0, #1
+# CHECK-NEXT: 1 1 0.50 U subg sp, x2, #32, #3
+# CHECK-NEXT: 1 1 0.50 U subg x0, sp, #64, #5
+# CHECK-NEXT: 1 1 0.50 U subg x3, x4, #1008, #6
+# CHECK-NEXT: 1 1 0.50 U subg x5, x6, #112, #15
+# CHECK-NEXT: 1 1 0.25 gmi x0, x1, x2
+# CHECK-NEXT: 1 1 0.25 gmi x3, sp, x4
+# CHECK-NEXT: 1 1 0.25 gmi xzr, x0, x30
+# CHECK-NEXT: 1 1 0.25 gmi x30, x0, xzr
+# CHECK-NEXT: 1 1 0.25 subp x0, x1, x2
+# CHECK-NEXT: 1 1 0.25 U subps x0, x1, x2
+# CHECK-NEXT: 1 1 0.25 subp x0, sp, sp
+# CHECK-NEXT: 1 1 0.25 U subps x0, sp, sp
+# CHECK-NEXT: 1 1 0.25 U subps xzr, x0, x1
+# CHECK-NEXT: 1 1 0.25 U subps xzr, sp, sp
+# CHECK-NEXT: 1 1 0.50 * stg x0, [x1, #-4096]
+# CHECK-NEXT: 1 1 0.50 * stg x1, [x2, #4080]
+# CHECK-NEXT: 1 1 0.50 * stg x2, [sp, #16]
+# CHECK-NEXT: 1 1 0.50 * stg x3, [x1]
+# CHECK-NEXT: 1 1 0.50 * stg sp, [x1]
+# CHECK-NEXT: 1 1 0.50 * stzg x0, [x1, #-4096]
+# CHECK-NEXT: 1 1 0.50 * stzg x1, [x2, #4080]
+# CHECK-NEXT: 1 1 0.50 * stzg x2, [sp, #16]
+# CHECK-NEXT: 1 1 0.50 * stzg x3, [x1]
+# CHECK-NEXT: 1 1 0.50 * stzg sp, [x1]
+# CHECK-NEXT: 1 1 0.50 * U stg x0, [x1, #-4096]!
+# CHECK-NEXT: 1 1 0.50 * U stg x1, [x2, #4080]!
+# CHECK-NEXT: 1 1 0.50 * U stg x2, [sp, #16]!
+# CHECK-NEXT: 1 1 0.50 * U stg sp, [sp, #16]!
+# CHECK-NEXT: 1 1 0.50 * U stzg x0, [x1, #-4096]!
+# CHECK-NEXT: 1 1 0.50 * U stzg x1, [x2, #4080]!
+# CHECK-NEXT: 1 1 0.50 * U stzg x2, [sp, #16]!
+# CHECK-NEXT: 1 1 0.50 * U stzg sp, [sp, #16]!
+# CHECK-NEXT: 1 1 0.50 * U stg x0, [x1], #-4096
+# CHECK-NEXT: 1 1 0.50 * U stg x1, [x2], #4080
+# CHECK-NEXT: 1 1 0.50 * U stg x2, [sp], #16
+# CHECK-NEXT: 1 1 0.50 * U stg sp, [sp], #16
+# CHECK-NEXT: 1 1 0.50 * U stzg x0, [x1], #-4096
+# CHECK-NEXT: 1 1 0.50 * U stzg x1, [x2], #4080
+# CHECK-NEXT: 1 1 0.50 * U stzg x2, [sp], #16
+# CHECK-NEXT: 1 1 0.50 * U stzg sp, [sp], #16
+# CHECK-NEXT: 2 1 1.00 * st2g x0, [x1, #-4096]
+# CHECK-NEXT: 2 1 1.00 * st2g x1, [x2, #4080]
+# CHECK-NEXT: 2 1 1.00 * st2g x2, [sp, #16]
+# CHECK-NEXT: 2 1 1.00 * st2g x3, [x1]
+# CHECK-NEXT: 2 1 1.00 * st2g sp, [x1]
+# CHECK-NEXT: 2 1 1.00 * stz2g x0, [x1, #-4096]
+# CHECK-NEXT: 2 1 1.00 * stz2g x1, [x2, #4080]
+# CHECK-NEXT: 2 1 1.00 * stz2g x2, [sp, #16]
+# CHECK-NEXT: 2 1 1.00 * stz2g x3, [x1]
+# CHECK-NEXT: 2 1 1.00 * stz2g sp, [x1]
+# CHECK-NEXT: 2 1 1.00 * U st2g x0, [x1, #-4096]!
+# CHECK-NEXT: 2 1 1.00 * U st2g x1, [x2, #4080]!
+# CHECK-NEXT: 2 1 1.00 * U st2g x2, [sp, #16]!
+# CHECK-NEXT: 2 1 1.00 * U st2g sp, [sp, #16]!
+# CHECK-NEXT: 2 1 1.00 * U stz2g x0, [x1, #-4096]!
+# CHECK-NEXT: 2 1 1.00 * U stz2g x1, [x2, #4080]!
+# CHECK-NEXT: 2 1 1.00 * U stz2g x2, [sp, #16]!
+# CHECK-NEXT: 2 1 1.00 * U stz2g sp, [sp, #16]!
+# CHECK-NEXT: 2 1 1.00 * U st2g x0, [x1], #-4096
+# CHECK-NEXT: 2 1 1.00 * U st2g x1, [x2], #4080
+# CHECK-NEXT: 2 1 1.00 * U st2g x2, [sp], #16
+# CHECK-NEXT: 2 1 1.00 * U st2g sp, [sp], #16
+# CHECK-NEXT: 2 1 1.00 * U stz2g x0, [x1], #-4096
+# CHECK-NEXT: 2 1 1.00 * U stz2g x1, [x2], #4080
+# CHECK-NEXT: 2 1 1.00 * U stz2g x2, [sp], #16
+# CHECK-NEXT: 2 1 1.00 * U stz2g sp, [sp], #16
+# CHECK-NEXT: 2 1 1.00 * stgp x0, x1, [x2, #-1024]
+# CHECK-NEXT: 2 1 1.00 * stgp x0, x1, [x2, #1008]
+# CHECK-NEXT: 2 1 1.00 * stgp x0, x1, [sp, #16]
+# CHECK-NEXT: 2 1 1.00 * stgp xzr, x1, [x2, #16]
+# CHECK-NEXT: 2 1 1.00 * stgp x0, xzr, [x2, #16]
+# CHECK-NEXT: 2 1 1.00 * stgp x0, xzr, [x2]
+# CHECK-NEXT: 2 1 1.00 * stgp x0, x1, [x2, #-1024]!
+# CHECK-NEXT: 2 1 1.00 * stgp x0, x1, [x2, #1008]!
+# CHECK-NEXT: 2 1 1.00 * stgp x0, x1, [sp, #16]!
+# CHECK-NEXT: 2 1 1.00 * stgp xzr, x1, [x2, #16]!
+# CHECK-NEXT: 2 1 1.00 * stgp x0, xzr, [x2, #16]!
+# CHECK-NEXT: 2 1 1.00 * stgp x0, x1, [x2], #-1024
+# CHECK-NEXT: 2 1 1.00 * stgp x0, x1, [x2], #1008
+# CHECK-NEXT: 2 1 1.00 * stgp x0, x1, [sp], #16
+# CHECK-NEXT: 2 1 1.00 * stgp xzr, x1, [x2], #16
+# CHECK-NEXT: 2 1 1.00 * stgp x0, xzr, [x2], #16
+# CHECK-NEXT: 2 4 0.50 * ldg x0, [x1]
+# CHECK-NEXT: 2 4 0.50 * ldg x2, [sp, #-4096]
+# CHECK-NEXT: 2 4 0.50 * ldg x3, [x4, #4080]
+# CHECK-NEXT: 2 4 0.50 * U ldgm x0, [x1]
+# CHECK-NEXT: 2 4 0.50 * U ldgm x1, [sp]
+# CHECK-NEXT: 2 4 0.50 * U ldgm xzr, [x2]
+# CHECK-NEXT: 1 1 0.50 U stgm x0, [x1]
+# CHECK-NEXT: 1 1 0.50 U stgm x1, [sp]
+# CHECK-NEXT: 1 1 0.50 U stgm xzr, [x2]
+# CHECK-NEXT: 1 1 0.50 U stzgm x0, [x1]
+# CHECK-NEXT: 1 1 0.50 U stzgm x1, [sp]
+# CHECK-NEXT: 1 1 0.50 U stzgm xzr, [x2]
+
+# CHECK: Resources:
+# CHECK-NEXT: [0.0] - Ampere1BUnitA
+# CHECK-NEXT: [0.1] - Ampere1BUnitA
+# CHECK-NEXT: [1.0] - Ampere1BUnitB
+# CHECK-NEXT: [1.1] - Ampere1BUnitB
+# CHECK-NEXT: [2] - Ampere1BUnitBS
+# CHECK-NEXT: [3.0] - Ampere1BUnitL
+# CHECK-NEXT: [3.1] - Ampere1BUnitL
+# CHECK-NEXT: [4.0] - Ampere1BUnitS
+# CHECK-NEXT: [4.1] - Ampere1BUnitS
+# CHECK-NEXT: [5] - Ampere1BUnitX
+# CHECK-NEXT: [6] - Ampere1BUnitY
+# CHECK-NEXT: [7] - Ampere1BUnitZ
+
+# CHECK: Resource pressure per iteration:
+# CHECK-NEXT: [0.0] [0.1] [1.0] [1.1] [2] [3.0] [3.1] [4.0] [4.1] [5] [6] [7]
+# CHECK-NEXT: 2.50 2.50 13.00 13.00 5.00 3.00 3.00 58.00 58.00 - - -
+
+# CHECK: Resource pressure by instruction:
+# CHECK-NEXT: [0.0] [0.1] [1.0] [1.1] [2] [3.0] [3.1] [4.0] [4.1] [5] [6] [7] Instructions:
+# CHECK-NEXT: - - 0.50 0.50 1.00 - - - - - - - irg x0, x1
+# CHECK-NEXT: - - 0.50 0.50 1.00 - - - - - - - irg sp, x1
+# CHECK-NEXT: - - 0.50 0.50 1.00 - - - - - - - irg x0, sp
+# CHECK-NEXT: - - 0.50 0.50 1.00 - - - - - - - irg x0, x1, x2
+# CHECK-NEXT: - - 0.50 0.50 1.00 - - - - - - - irg sp, x1, x2
+# CHECK-NEXT: - - 0.50 0.50 - - - - - - - - addg x0, x1, #0, #1
+# CHECK-NEXT: - - 0.50 0.50 - - - - - - - - addg sp, x2, #32, #3
+# CHECK-NEXT: - - 0.50 0.50 - - - - - - - - addg x0, sp, #64, #5
+# CHECK-NEXT: - - 0.50 0.50 - - - - - - - - addg x3, x4, #1008, #6
+# CHECK-NEXT: - - 0.50 0.50 - - - - - - - - addg x5, x6, #112, #15
+# CHECK-NEXT: - - 0.50 0.50 - - - - - - - - subg x0, x1, #0, #1
+# CHECK-NEXT: - - 0.50 0.50 - - - - - - - - subg sp, x2, #32, #3
+# CHECK-NEXT: - - 0.50 0.50 - - - - - - - - subg x0, sp, #64, #5
+# CHECK-NEXT: - - 0.50 0.50 - - - - - - - - subg x3, x4, #1008, #6
+# CHECK-NEXT: - - 0.50 0.50 - - - - - - - - subg x5, x6, #112, #15
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - - - - - - - - gmi x0, x1, x2
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - - - - - - - - gmi x3, sp, x4
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - - - - - - - - gmi xzr, x0, x30
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - - - - - - - - gmi x30, x0, xzr
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - - - - - - - - subp x0, x1, x2
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - - - - - - - - subps x0, x1, x2
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - - - - - - - - subp x0, sp, sp
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - - - - - - - - subps x0, sp, sp
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - - - - - - - - subps xzr, x0, x1
+# CHECK-NEXT: 0.25 0.25 0.25 0.25 - - - - - - - - subps xzr, sp, sp
+# CHECK-NEXT: - - - - - - - 0.50 0.50 - - - stg x0, [x1, #-4096]
+# CHECK-NEXT: - - - - - - - 0.50 0.50 - - - stg x1, [x2, #4080]
+# CHECK-NEXT: - - - - - - - 0.50 0.50 - - - stg x2, [sp, #16]
+# CHECK-NEXT: - - - - - - - 0.50 0.50 - - - stg x3, [x1]
+# CHECK-NEXT: - - - - - - - 0.50 0.50 - - - stg sp, [x1]
+# CHECK-NEXT: - - - - - - - 0.50 0.50 - - - stzg x0, [x1, #-4096]
+# CHECK-NEXT: - - - - - - - 0.50 0.50 - - - stzg x1, [x2, #4080]
+# CHECK-NEXT: - - - - - - - 0.50 0.50 - - - stzg x2, [sp, #16]
+# CHECK-NEXT: - - - - - - - 0.50 0.50 - - - stzg x3, [x1]
+# CHECK-NEXT: - - - - - - - 0.50 0.50 - - - stzg sp, [x1]
+# CHECK-NEXT: - - - - - - - 0.50 0.50 - - - stg x0, [x1, #-4096]!
+# CHECK-NEXT: - - - - - - - 0.50 0.50 - - - stg x1, [x2, #4080]!
+# CHECK-NEXT: - - - - - - - 0.50 0.50 - - - stg x2, [sp, #16]!
+# CHECK-NEXT: - - - - - - - 0.50 0.50 - - - stg sp, [sp, #16]!
+# CHECK-NEXT: - - - - - - - 0.50 0.50 - - - stzg x0, [x1, #-4096]!
+# CHECK-NEXT: - - - - - - - 0.50 0.50 - - - stzg x1, [x2, #4080]!
+# CHECK-NEXT: - - - - - - - 0.50 0.50 - - - stzg x2, [sp, #16]!
+# CHECK-NEXT: - - - - - - - 0.50 0.50 - - - stzg sp, [sp, #16]!
+# CHECK-NEXT: - - - - - - - 0.50 0.50 - - - stg x0, [x1], #-4096
+# CHECK-NEXT: - - - - - - - 0.50 0.50 - - - stg x1, [x2], #4080
+# CHECK-NEXT: - - - - - - - 0.50 0.50 - - - stg x2, [sp], #16
+# CHECK-NEXT: - - - - - - - 0.50 0.50 - - - stg sp, [sp], #16
+# CHECK-NEXT: - - - - - - - 0.50 0.50 - - - stzg x0, [x1], #-4096
+# CHECK-NEXT: - - - - - - - 0.50 0.50 - - - stzg x1, [x2], #4080
+# CHECK-NEXT: - - - - - - - 0.50 0.50 - - - stzg x2, [sp], #16
+# CHECK-NEXT: - - - - - - - 0.50 0.50 - - - stzg sp, [sp], #16
+# CHECK-NEXT: - - - - - - - 1.00 1.00 - - - st2g x0, [x1, #-4096]
+# CHECK-NEXT: - - - - - - - 1.00 1.00 - - - st2g x1, [x2, #4080]
+# CHECK-NEXT: - - - - - - - 1.00 1.00 - - - st2g x2, [sp, #16]
+# CHECK-NEXT: - - - - - - - 1.00 1.00 - - - st2g x3, [x1]
+# CHECK-NEXT: - - - - - - - 1.00 1.00 - - - st2g sp, [x1]
+# CHECK-NEXT: - - - - - - - 1.00 1.00 - - - stz2g x0, [x1, #-4096]
+# CHECK-NEXT: - - - - - - - 1.00 1.00 - - - stz2g x1, [x2, #4080]
+# CHECK-NEXT: - - - - - - - 1.00 1.00 - - - stz2g x2, [sp, #16]
+# CHECK-NEXT: - - - - - - - 1.00 1.00 - - - stz2g x3, [x1]
+# CHECK-NEXT: - - - - - - - 1.00 1.00 - - - stz2g sp, [x1]
+# CHECK-NEXT: - - - - - - - 1.00 1.00 - - - st2g x0, [x1, #-4096]!
+# CHECK-NEXT: - - - - - - - 1.00 1.00 - - - st2g x1, [x2, #4080]!
+# CHECK-NEXT: - - - - - - - 1.00 1.00 - - - st2g x2, [sp, #16]!
+# CHECK-NEXT: - - - - - - - 1.00 1.00 - - - st2g sp, [sp, #16]!
+# CHECK-NEXT: - - - - - - - 1.00 1.00 - - - stz2g x0, [x1, #-4096]!
+# CHECK-NEXT: - - - - - - - 1.00 1.00 - - - stz2g x1, [x2, #4080]!
+# CHECK-NEXT: - - - - - - - 1.00 1.00 - - - stz2g x2, [sp, #16]!
+# CHECK-NEXT: - - - - - - - 1.00 1.00 - - - stz2g sp, [sp, #16]!
+# CHECK-NEXT: - - - - - - - 1.00 1.00 - - - st2g x0, [x1], #-4096
+# CHECK-NEXT: - - - - - - - 1.00 1.00 - - - st2g x1, [x2], #4080
+# CHECK-NEXT: - - - - - - - 1.00 1.00 - - - st2g x2, [sp], #16
+# CHECK-NEXT: - - - - - - - 1.00 1.00 - - - st2g sp, [sp], #16
+# CHECK-NEXT: - - - - - - - 1.00 1.00 - - - stz2g x0, [x1], #-4096
+# CHECK-NEXT: - - - - - - - 1.00 1.00 - - - stz2g x1, [x2], #4080
+# CHECK-NEXT: - - - - - - - 1.00 1.00 - - - stz2g x2, [sp], #16
+# CHECK-NEXT: - - - - - - - 1.00 1.00 - - - stz2g sp, [sp], #16
+# CHECK-NEXT: - - - - - - - 1.00 1.00 - - - stgp x0, x1, [x2, #-1024]
+# CHECK-NEXT: - - - - - - - 1.00 1.00 - - - stgp x0, x1, [x2, #1008]
+# CHECK-NEXT: - - - - - - - 1.00 1.00 - - - stgp x0, x1, [sp, #16]
+# CHECK-NEXT: - - - - - - - 1.00 1.00 - - - stgp xzr, x1, [x2, #16]
+# CHECK-NEXT: - - - - - - - 1.00 1.00 - - - stgp x0, xzr, [x2, #16]
+# CHECK-NEXT: - - - - - - - 1.00 1.00 - - - stgp x0, xzr, [x2]
+# CHECK-NEXT: - - - - - - - 1.00 1.00 - - - stgp x0, x1, [x2, #-1024]!
+# CHECK-NEXT: - - - - - - - 1.00 1.00 - - - stgp x0, x1, [x2, #1008]!
+# CHECK-NEXT: - - - - - - - 1.00 1.00 - - - stgp x0, x1, [sp, #16]!
+# CHECK-NEXT: - - - - - - - 1.00 1.00 - - - stgp xzr, x1, [x2, #16]!
+# CHECK-NEXT: - - - - - - - 1.00 1.00 - - - stgp x0, xzr, [x2, #16]!
+# CHECK-NEXT: - - - - - - - 1.00 1.00 - - - stgp x0, x1, [x2], #-1024
+# CHECK-NEXT: - - - - - - - 1.00 1.00 - - - stgp x0, x1, [x2], #1008
+# CHECK-NEXT: - - - - - - - 1.00 1.00 - - - stgp x0, x1, [sp], #16
+# CHECK-NEXT: - - - - - - - 1.00 1.00 - - - stgp xzr, x1, [x2], #16
+# CHECK-NEXT: - - - - - - - 1.00 1.00 - - - stgp x0, xzr, [x2], #16
+# CHECK-NEXT: - - 0.50 0.50 - 0.50 0.50 - - - - - ldg x0, [x1]
+# CHECK-NEXT: - - 0.50 0.50 - 0.50 0.50 - - - - - ldg x2, [sp, #-4096]
+# CHECK-NEXT: - - 0.50 0.50 - 0.50 0.50 - - - - - ldg x3, [x4, #4080]
+# CHECK-NEXT: - - 0.50 0.50 - 0.50 0.50 - - - - - ldgm x0, [x1]
+# CHECK-NEXT: - - 0.50 0.50 - 0.50 0.50 - - - - - ldgm x1, [sp]
+# CHECK-NEXT: - - 0.50 0.50 - 0.50 0.50 - - - - - ldgm xzr, [x2]
+# CHECK-NEXT: - - - - - - - 0.50 0.50 - - - stgm x0, [x1]
+# CHECK-NEXT: - - - - - - - 0.50 0.50 - - - stgm x1, [sp]
+# CHECK-NEXT: - - - - - - - 0.50 0.50 - - - stgm xzr, [x2]
+# CHECK-NEXT: - - - - - - - 0.50 0.50 - - - stzgm x0, [x1]
+# CHECK-NEXT: - - - - - - - 0.50 0.50 - - - stzgm x1, [sp]
+# CHECK-NEXT: - - - - - - - 0.50 0.50 - - - stzgm xzr, [x2]
diff --git a/llvm/test/tools/llvm-mca/AArch64/Ampere/Ampere1B/neon-instructions.s b/llvm/test/tools/llvm-mca/AArch64/Ampere/Ampere1B/neon-instructions.s
new file mode 100644
index 0000000..827c13a
--- /dev/null
+++ b/llvm/test/tools/llvm-mca/AArch64/Ampere/Ampere1B/neon-instructions.s
@@ -0,0 +1,3235 @@
+# NOTE: Assertions have been autogenerated by utils/update_mca_test_checks.py
+# RUN: llvm-mca -mtriple=aarch64 -mcpu=ampere1b -instruction-tables < %s | FileCheck %s
+
+abs d29, d24
+abs v0.16b, v0.16b
+abs v0.2d, v0.2d
+abs v0.2s, v0.2s
+abs v0.4h, v0.4h
+abs v0.4s, v0.4s
+abs v0.8b, v0.8b
+abs v0.8h, v0.8h
+add d17, d31, d29
+add v0.8b, v0.8b, v0.8b
+addhn v0.2s, v0.2d, v0.2d
+addhn v0.4h, v0.4s, v0.4s
+addhn v0.8b, v0.8h, v0.8h
+addhn2 v0.16b, v0.8h, v0.8h
+addhn2 v0.4s, v0.2d, v0.2d
+addhn2 v0.8h, v0.4s, v0.4s
+addp v0.2d, v0.2d, v0.2d
+addp v0.8b, v0.8b, v0.8b
+and v0.8b, v0.8b, v0.8b
+bic v0.4h, #15, lsl #8
+bic v0.8b, v0.8b, v0.8b
+bif v0.16b, v0.16b, v0.16b
+bit v0.16b, v0.16b, v0.16b
+bsl v0.8b, v0.8b, v0.8b
+cls v0.16b, v0.16b
+cls v0.2s, v0.2s
+cls v0.4h, v0.4h
+cls v0.4s, v0.4s
+cls v0.8b, v0.8b
+cls v0.8h, v0.8h
+clz v0.16b, v0.16b
+clz v0.2s, v0.2s
+clz v0.4h, v0.4h
+clz v0.4s, v0.4s
+clz v0.8b, v0.8b
+clz v0.8h, v0.8h
+cmeq d20, d21, 0
+cmeq d20, d21, d22
+cmeq v0.16b, v0.16b, 0
+cmeq v0.16b, v0.16b, v0.16b
+cmge d20, d21, 0
+cmge d20, d21, d22
+cmge v0.4h, v0.4h, v0.4h
+cmge v0.8b, v0.8b, 0
+cmgt d20, d21, 0
+cmgt d20, d21, d22
+cmgt v0.2s, v0.2s, 0
+cmgt v0.4s, v0.4s, v0.4s
+cmhi d20, d21, d22
+cmhi v0.8h, v0.8h, v0.8h
+cmhs d20, d21, d22
+cmhs v0.8b, v0.8b, v0.8b
+cmle d20, d21, 0
+cmle v0.2d, v0.2d, 0
+cmlt d20, d21, 0
+cmlt v0.8h, v0.8h, 0
+cmtst d20, d21, d22
+cmtst v0.2s, v0.2s, v0.2s
+cnt v0.16b, v0.16b
+cnt v0.8b, v0.8b
+dup v0.16b,w28
+dup v0.2d,x28
+dup v0.2s,w28
+dup v0.4h,w28
+dup v0.4s,w28
+dup v0.8b,w28
+dup v0.8h,w28
+eor v0.16b, v0.16b, v0.16b
+ext v0.16b, v0.16b, v0.16b, #3
+ext v0.8b, v0.8b, v0.8b, #3
+fabd d29, d24, d20
+fabd s29, s24, s20
+fabd v0.4s, v0.4s, v0.4s
+fabs v0.2d, v0.2d
+fabs v0.2s, v0.2s
+fabs v0.4h, v0.4h
+fabs v0.4s, v0.4s
+fabs v0.8h, v0.8h
+facge d20, d21, d22
+facge s10, s11, s12
+facge v0.4s, v0.4s, v0.4s
+facgt d20, d21, d22
+facgt s10, s11, s12
+facgt v0.2d, v0.2d, v0.2d
+fadd v0.4s, v0.4s, v0.4s
+faddp v0.2s, v0.2s, v0.2s
+faddp v0.4s, v0.4s, v0.4s
+fcmeq d20, d21, #0.0
+fcmeq d20, d21, d22
+fcmeq s10, s11, #0.0
+fcmeq s10, s11, s12
+fcmeq v0.2s, v0.2s, #0.0
+fcmeq v0.2s, v0.2s, v0.2s
+fcmge d20, d21, #0.0
+fcmge d20, d21, d22
+fcmge s10, s11, #0.0
+fcmge s10, s11, s12
+fcmge v0.2d, v0.2d, #0.0
+fcmge v0.4s, v0.4s, v0.4s
+fcmgt d20, d21, #0.0
+fcmgt d20, d21, d22
+fcmgt s10, s11, #0.0
+fcmgt s10, s11, s12
+fcmgt v0.4s, v0.4s, #0.0
+fcmgt v0.4s, v0.4s, v0.4s
+fcmle d20, d21, #0.0
+fcmle s10, s11, #0.0
+fcmle v0.2d, v0.2d, #0.0
+fcmlt d20, d21, #0.0
+fcmlt s10, s11, #0.0
+fcmlt v0.4s, v0.4s, #0.0
+fcvtas d21, d14
+fcvtas s12, s13
+fcvtas v0.2d, v0.2d
+fcvtas v0.2s, v0.2s
+fcvtas v0.4h, v0.4h
+fcvtas v0.4s, v0.4s
+fcvtas v0.8h, v0.8h
+fcvtau d21, d14
+fcvtau s12, s13
+fcvtau v0.2d, v0.2d
+fcvtau v0.2s, v0.2s
+fcvtau v0.4h, v0.4h
+fcvtau v0.4s, v0.4s
+fcvtau v0.8h, v0.8h
+fcvtl v0.2d, v0.2s
+fcvtl v0.4s, v0.4h
+fcvtl2 v0.2d, v0.4s
+fcvtl2 v0.4s, v0.8h
+fcvtms d21, d14
+fcvtms s22, s13
+fcvtms v0.2d, v0.2d
+fcvtms v0.2s, v0.2s
+fcvtms v0.4h, v0.4h
+fcvtms v0.4s, v0.4s
+fcvtms v0.8h, v0.8h
+fcvtmu d21, d14
+fcvtmu s12, s13
+fcvtmu v0.2d, v0.2d
+fcvtmu v0.2s, v0.2s
+fcvtmu v0.4h, v0.4h
+fcvtmu v0.4s, v0.4s
+fcvtmu v0.8h, v0.8h
+fcvtn v0.2s, v0.2d
+fcvtn v0.4h, v0.4s
+fcvtn2 v0.4s, v0.2d
+fcvtn2 v0.8h, v0.4s
+fcvtns d21, d14
+fcvtns s22, s13
+fcvtns v0.2d, v0.2d
+fcvtns v0.2s, v0.2s
+fcvtns v0.4h, v0.4h
+fcvtns v0.4s, v0.4s
+fcvtns v0.8h, v0.8h
+fcvtnu d21, d14
+fcvtnu s12, s13
+fcvtnu v0.2d, v0.2d
+fcvtnu v0.2s, v0.2s
+fcvtnu v0.4h, v0.4h
+fcvtnu v0.4s, v0.4s
+fcvtnu v0.8h, v0.8h
+fcvtps d21, d14
+fcvtps s22, s13
+fcvtps v0.2d, v0.2d
+fcvtps v0.2s, v0.2s
+fcvtps v0.4h, v0.4h
+fcvtps v0.4s, v0.4s
+fcvtps v0.8h, v0.8h
+fcvtpu d21, d14
+fcvtpu s12, s13
+fcvtpu v0.2d, v0.2d
+fcvtpu v0.2s, v0.2s
+fcvtpu v0.4h, v0.4h
+fcvtpu v0.4s, v0.4s
+fcvtpu v0.8h, v0.8h
+fcvtxn s22, d13
+fcvtxn v0.2s, v0.2d
+fcvtxn2 v0.4s, v0.2d
+fcvtzs d21, d12, #1
+fcvtzs d21, d14
+fcvtzs s12, s13
+fcvtzs s21, s12, #1
+fcvtzs v0.2d, v0.2d
+fcvtzs v0.2d, v0.2d, #3
+fcvtzs v0.2s, v0.2s
+fcvtzs v0.2s, v0.2s, #3
+fcvtzs v0.4h, v0.4h
+fcvtzs v0.4s, v0.4s
+fcvtzs v0.4s, v0.4s, #3
+fcvtzs v0.8h, v0.8h
+fcvtzu d21, d12, #1
+fcvtzu d21, d14
+fcvtzu s12, s13
+fcvtzu s21, s12, #1
+fcvtzu v0.2d, v0.2d
+fcvtzu v0.2d, v0.2d, #3
+fcvtzu v0.2s, v0.2s
+fcvtzu v0.2s, v0.2s, #3
+fcvtzu v0.4h, v0.4h
+fcvtzu v0.4s, v0.4s
+fcvtzu v0.4s, v0.4s, #3
+fcvtzu v0.8h, v0.8h
+fdiv v0.2s, v0.2s, v0.2s
+fmax v0.2d, v0.2d, v0.2d
+fmax v0.2s, v0.2s, v0.2s
+fmax v0.4s, v0.4s, v0.4s
+fmaxnm v0.2d, v0.2d, v0.2d
+fmaxnm v0.2s, v0.2s, v0.2s
+fmaxnm v0.4s, v0.4s, v0.4s
+fmaxnmp v0.2d, v0.2d, v0.2d
+fmaxnmp v0.2s, v0.2s, v0.2s
+fmaxnmp v0.4s, v0.4s, v0.4s
+fmaxp v0.2d, v0.2d, v0.2d
+fmaxp v0.2s, v0.2s, v0.2s
+fmaxp v0.4s, v0.4s, v0.4s
+fmin v0.2d, v0.2d, v0.2d
+fmin v0.2s, v0.2s, v0.2s
+fmin v0.4s, v0.4s, v0.4s
+fminnm v0.2d, v0.2d, v0.2d
+fminnm v0.2s, v0.2s, v0.2s
+fminnm v0.4s, v0.4s, v0.4s
+fminnmp v0.2d, v0.2d, v0.2d
+fminnmp v0.2s, v0.2s, v0.2s
+fminnmp v0.4s, v0.4s, v0.4s
+fminp v0.2d, v0.2d, v0.2d
+fminp v0.2s, v0.2s, v0.2s
+fminp v0.4s, v0.4s, v0.4s
+fmla d0, d1, v0.d[1]
+fmla s0, s1, v0.s[3]
+fmla v0.2s, v0.2s, v0.2s
+fmls d0, d4, v0.d[1]
+fmls s3, s5, v0.s[3]
+fmls v0.2s, v0.2s, v0.2s
+fmov v0.2d, #-1.25
+fmov v0.2s, #13.0
+fmov v0.4s, #1.0
+fmul d0, d1, v0.d[1]
+fmul s0, s1, v0.s[3]
+fmul v0.2s, v0.2s, v0.2s
+fmulx d0, d4, v0.d[1]
+fmulx d23, d11, d1
+fmulx s20, s22, s15
+fmulx s3, s5, v0.s[3]
+fmulx v0.2d, v0.2d, v0.2d
+fmulx v0.2s, v0.2s, v0.2s
+fmulx v0.4s, v0.4s, v0.4s
+fneg v0.2d, v0.2d
+fneg v0.2s, v0.2s
+fneg v0.4h, v0.4h
+fneg v0.4s, v0.4s
+fneg v0.8h, v0.8h
+frecpe d13, d13
+frecpe s19, s14
+frecpe v0.2d, v0.2d
+frecpe v0.2s, v0.2s
+frecpe v0.4h, v0.4h
+frecpe v0.4s, v0.4s
+frecpe v0.8h, v0.8h
+frecps v0.4s, v0.4s, v0.4s
+frecps d22, d30, d21
+frecps s21, s16, s13
+frecpx d16, d19
+frecpx s18, s10
+frinta v0.2d, v0.2d
+frinta v0.2s, v0.2s
+frinta v0.4h, v0.4h
+frinta v0.4s, v0.4s
+frinta v0.8h, v0.8h
+frinti v0.2d, v0.2d
+frinti v0.2s, v0.2s
+frinti v0.4h, v0.4h
+frinti v0.4s, v0.4s
+frinti v0.8h, v0.8h
+frintm v0.2d, v0.2d
+frintm v0.2s, v0.2s
+frintm v0.4h, v0.4h
+frintm v0.4s, v0.4s
+frintm v0.8h, v0.8h
+frintn v0.2d, v0.2d
+frintn v0.2s, v0.2s
+frintn v0.4h, v0.4h
+frintn v0.4s, v0.4s
+frintn v0.8h, v0.8h
+frintp v0.2d, v0.2d
+frintp v0.2s, v0.2s
+frintp v0.4h, v0.4h
+frintp v0.4s, v0.4s
+frintp v0.8h, v0.8h
+frintx v0.2d, v0.2d
+frintx v0.2s, v0.2s
+frintx v0.4h, v0.4h
+frintx v0.4s, v0.4s
+frintx v0.8h, v0.8h
+frintz v0.2d, v0.2d
+frintz v0.2s, v0.2s
+frintz v0.4h, v0.4h
+frintz v0.4s, v0.4s
+frintz v0.8h, v0.8h
+frsqrte d21, d12
+frsqrte s22, s13
+frsqrte v0.2d, v0.2d
+frsqrte v0.2s, v0.2s
+frsqrte v0.4h, v0.4h
+frsqrte v0.4s, v0.4s
+frsqrte v0.8h, v0.8h
+frsqrts d8, d22, d18
+frsqrts s21, s5, s12
+frsqrts v0.2d, v0.2d, v0.2d
+fsqrt v0.2d, v0.2d
+fsqrt v0.2s, v0.2s
+fsqrt v0.4h, v0.4h
+fsqrt v0.4s, v0.4s
+fsqrt v0.8h, v0.8h
+fsub v0.2s, v0.2s, v0.2s
+ld1 { v0.16b }, [x0]
+ld1 { v0.2d, v1.2d, v2.2d }, [x0], #48
+ld1 { v0.2d, v1.2d, v2.2d, v3.2d }, [x0]
+ld1 { v0.4s, v1.4s }, [sp], #32
+ld1 { v0.4s, v1.4s, v2.4s }, [sp]
+ld1 { v0.8b, v1.8b, v2.8b, v3.8b }, [x0], x3
+ld1 { v0.8h }, [x15], x2
+ld1 { v0.8h, v1.8h }, [x15]
+ld1 { v0.b }[9], [x0]
+ld1 { v0.b }[9], [x0], #1
+ld1r { v0.16b }, [x0]
+ld1r { v0.16b }, [x0], #1
+ld1r { v0.8h }, [x15]
+ld1r { v0.8h }, [x15], #2
+ld2 { v0.16b, v1.16b }, [x0], x1
+ld2 { v0.8b, v1.8b }, [x0]
+ld2 { v0.h, v1.h }[7], [x15]
+ld2 { v0.h, v1.h }[7], [x15], #4
+ld2r { v0.2d, v1.2d }, [x0]
+ld2r { v0.2d, v1.2d }, [x0], #16
+ld2r { v0.4s, v1.4s }, [sp]
+ld2r { v0.4s, v1.4s }, [sp], #8
+ld3 { v0.4h, v1.4h, v2.4h }, [x15]
+ld3 { v0.8h, v1.8h, v2.8h }, [x15], x2
+ld3 { v0.s, v1.s, v2.s }[3], [sp]
+ld3 { v0.s, v1.s, v2.s }[3], [sp], x3
+ld3r { v0.4h, v1.4h, v2.4h }, [x15]
+ld3r { v0.4h, v1.4h, v2.4h }, [x15], #6
+ld3r { v0.8b, v1.8b, v2.8b }, [x0]
+ld3r { v0.8b, v1.8b, v2.8b }, [x0], #3
+ld4 { v0.2s, v1.2s, v2.2s, v3.2s }, [sp]
+ld4 { v0.4s, v1.4s, v2.4s, v3.4s }, [sp], #64
+ld4 { v0.d, v1.d, v2.d, v3.d }[1], [x0]
+ld4 { v0.d, v1.d, v2.d, v3.d }[1], [x0], #32
+ld4 { v0.h, v1.h, v2.h, v3.h }[7], [x0], x0
+ld4r { v0.1d, v1.1d, v2.1d, v3.1d }, [sp]
+ld4r { v0.1d, v1.1d, v2.1d, v3.1d }, [sp], x7
+ld4r { v0.2s, v1.2s, v2.2s, v3.2s }, [sp]
+ld4r { v0.2s, v1.2s, v2.2s, v3.2s }, [sp], x30
+mla v0.8b, v0.8b, v0.8b
+mls v0.4h, v0.4h, v0.4h
+mov b0, v0.b[15]
+mov d6, v0.d[1]
+mov h2, v0.h[5]
+mov s17, v0.s[2]
+mov v2.b[0], v0.b[0]
+mov v2.h[1], v0.h[1]
+mov v2.s[2], v0.s[2]
+mov v2.d[1], v0.d[1]
+mov v0.b[0], w8
+mov v0.h[1], w8
+mov v0.s[2], w8
+mov v0.d[1], x8
+mov v0.16b, v0.16b
+mov v0.8b, v0.8b
+movi d15, #0xff00ff00ff00ff
+movi v0.16b, #31
+movi v0.2d, #0xff0000ff0000ffff
+movi v0.2s, #8, msl #8
+movi v0.4s, #255, lsl #24
+movi v0.8b, #255
+mul v0.8b, v0.8b, v0.8b
+mvni v0.2s, 0
+mvni v0.4s, #16, msl #16
+neg d29, d24
+neg v0.16b, v0.16b
+neg v0.2d, v0.2d
+neg v0.2s, v0.2s
+neg v0.4h, v0.4h
+neg v0.4s, v0.4s
+neg v0.8b, v0.8b
+neg v0.8h, v0.8h
+not v0.16b, v0.16b
+not v0.8b, v0.8b
+orn v0.16b, v0.16b, v0.16b
+orr v0.16b, v0.16b, v0.16b
+orr v0.8h, #31
+pmul v0.16b, v0.16b, v0.16b
+pmul v0.8b, v0.8b, v0.8b
+pmull v0.8h, v0.8b, v0.8b
+pmull2 v0.8h, v0.16b, v0.16b
+raddhn v0.2s, v0.2d, v0.2d
+raddhn v0.4h, v0.4s, v0.4s
+raddhn v0.8b, v0.8h, v0.8h
+raddhn2 v0.16b, v0.8h, v0.8h
+raddhn2 v0.4s, v0.2d, v0.2d
+raddhn2 v0.8h, v0.4s, v0.4s
+rbit v0.16b, v0.16b
+rbit v0.8b, v0.8b
+rev16 v21.8b, v1.8b
+rev16 v30.16b, v31.16b
+rev32 v0.4h, v9.4h
+rev32 v21.8b, v1.8b
+rev32 v30.16b, v31.16b
+rev32 v4.8h, v7.8h
+rev64 v0.16b, v31.16b
+rev64 v1.8b, v9.8b
+rev64 v13.4h, v21.4h
+rev64 v2.8h, v4.8h
+rev64 v4.2s, v0.2s
+rev64 v6.4s, v8.4s
+rshrn v0.2s, v0.2d, #3
+rshrn v0.4h, v0.4s, #3
+rshrn v0.8b, v0.8h, #3
+rshrn2 v0.16b, v0.8h, #3
+rshrn2 v0.4s, v0.2d, #3
+rshrn2 v0.8h, v0.4s, #3
+rsubhn v0.2s, v0.2d, v0.2d
+rsubhn v0.4h, v0.4s, v0.4s
+rsubhn v0.8b, v0.8h, v0.8h
+rsubhn2 v0.16b, v0.8h, v0.8h
+rsubhn2 v0.4s, v0.2d, v0.2d
+rsubhn2 v0.8h, v0.4s, v0.4s
+saba v0.16b, v0.16b, v0.16b
+sabal v0.2d, v0.2s, v0.2s
+sabal v0.4s, v0.4h, v0.4h
+sabal v0.8h, v0.8b, v0.8b
+sabal2 v0.2d, v0.4s, v0.4s
+sabal2 v0.4s, v0.8h, v0.8h
+sabal2 v0.8h, v0.16b, v0.16b
+sabd v0.4h, v0.4h, v0.4h
+sabdl v0.2d, v0.2s, v0.2s
+sabdl v0.4s, v0.4h, v0.4h
+sabdl v0.8h, v0.8b, v0.8b
+sabdl2 v0.2d, v0.4s, v0.4s
+sabdl2 v0.4s, v0.8h, v0.8h
+sabdl2 v0.8h, v0.16b, v0.16b
+sadalp v0.1d, v0.2s
+sadalp v0.2d, v0.4s
+sadalp v0.2s, v0.4h
+sadalp v0.4h, v0.8b
+sadalp v0.4s, v0.8h
+sadalp v0.8h, v0.16b
+saddl v0.2d, v0.2s, v0.2s
+saddl v0.4s, v0.4h, v0.4h
+saddl v0.8h, v0.8b, v0.8b
+saddl2 v0.2d, v0.4s, v0.4s
+saddl2 v0.4s, v0.8h, v0.8h
+saddl2 v0.8h, v0.16b, v0.16b
+saddlp v0.1d, v0.2s
+saddlp v0.2d, v0.4s
+saddlp v0.2s, v0.4h
+saddlp v0.4h, v0.8b
+saddlp v0.4s, v0.8h
+saddlp v0.8h, v0.16b
+saddw v0.2d, v0.2d, v0.2s
+saddw v0.4s, v0.4s, v0.4h
+saddw v0.8h, v0.8h, v0.8b
+saddw2 v0.2d, v0.2d, v0.4s
+saddw2 v0.4s, v0.4s, v0.8h
+saddw2 v0.8h, v0.8h, v0.16b
+scvtf d21, d12
+scvtf d21, d12, #64
+scvtf s22, s13
+scvtf s22, s13, #32
+scvtf v0.2d, v0.2d
+scvtf v0.2d, v0.2d, #3
+scvtf v0.2s, v0.2s
+scvtf v0.2s, v0.2s, #3
+scvtf v0.4h, v0.4h
+scvtf v0.4s, v0.4s
+scvtf v0.4s, v0.4s, #3
+scvtf v0.8h, v0.8h
+shadd v0.8b, v0.8b, v0.8b
+shl d7, d10, #12
+shl v0.16b, v0.16b, #3
+shl v0.2d, v0.2d, #3
+shl v0.4h, v0.4h, #3
+shl v0.4s, v0.4s, #3
+shll v0.2d, v0.2s, #32
+shll v0.4s, v0.4h, #16
+shll v0.8h, v0.8b, #8
+shll v0.2d, v0.2s, #32
+shll v0.4s, v0.4h, #16
+shll v0.8h, v0.8b, #8
+shll2 v0.2d, v0.4s, #32
+shll2 v0.4s, v0.8h, #16
+shll2 v0.8h, v0.16b, #8
+shll2 v0.2d, v0.4s, #32
+shll2 v0.4s, v0.8h, #16
+shll2 v0.8h, v0.16b, #8
+shrn v0.2s, v0.2d, #3
+shrn v0.4h, v0.4s, #3
+shrn v0.8b, v0.8h, #3
+shrn2 v0.16b, v0.8h, #3
+shrn2 v0.4s, v0.2d, #3
+shrn2 v0.8h, v0.4s, #3
+shsub v0.2s, v0.2s, v0.2s
+shsub v0.4h, v0.4h, v0.4h
+sli d10, d14, #12
+sli v0.16b, v0.16b, #3
+sli v0.2d, v0.2d, #3
+sli v0.2s, v0.2s, #3
+sli v0.4h, v0.4h, #3
+sli v0.4s, v0.4s, #3
+sli v0.8b, v0.8b, #3
+sli v0.8h, v0.8h, #3
+smax v0.2s, v0.2s, v0.2s
+smax v0.4h, v0.4h, v0.4h
+smax v0.8b, v0.8b, v0.8b
+smaxp v0.2s, v0.2s, v0.2s
+smaxp v0.4h, v0.4h, v0.4h
+smaxp v0.8b, v0.8b, v0.8b
+smin v0.16b, v0.16b, v0.16b
+smin v0.4s, v0.4s, v0.4s
+smin v0.8h, v0.8h, v0.8h
+sminp v0.16b, v0.16b, v0.16b
+sminp v0.4s, v0.4s, v0.4s
+sminp v0.8h, v0.8h, v0.8h
+smlal v0.2d, v0.2s, v0.2s
+smlal v0.4s, v0.4h, v0.4h
+smlal v0.8h, v0.8b, v0.8b
+smlal2 v0.2d, v0.4s, v0.4s
+smlal2 v0.4s, v0.8h, v0.8h
+smlal2 v0.8h, v0.16b, v0.16b
+smlsl v0.2d, v0.2s, v0.2s
+smlsl v0.4s, v0.4h, v0.4h
+smlsl v0.8h, v0.8b, v0.8b
+smlsl2 v0.2d, v0.4s, v0.4s
+smlsl2 v0.4s, v0.8h, v0.8h
+smlsl2 v0.8h, v0.16b, v0.16b
+smull v0.2d, v0.2s, v0.2s
+smull v0.4s, v0.4h, v0.4h
+smull v0.8h, v0.8b, v0.8b
+smull2 v0.2d, v0.4s, v0.4s
+smull2 v0.4s, v0.8h, v0.8h
+smull2 v0.8h, v0.16b, v0.16b
+sqabs b19, b14
+sqabs d18, d12
+sqabs h21, h15
+sqabs s20, s12
+sqabs v0.16b, v0.16b
+sqabs v0.2d, v0.2d
+sqabs v0.2s, v0.2s
+sqabs v0.4h, v0.4h
+sqabs v0.4s, v0.4s
+sqabs v0.8b, v0.8b
+sqabs v0.8h, v0.8h
+sqadd b20, b11, b15
+sqadd v0.16b, v0.16b, v0.16b
+sqadd v0.2s, v0.2s, v0.2s
+sqdmlal d19, s24, s12
+sqdmlal d8, s9, v0.s[1]
+sqdmlal s0, h0, v0.h[3]
+sqdmlal s17, h27, h12
+sqdmlal v0.2d, v0.2s, v0.2s
+sqdmlal v0.4s, v0.4h, v0.4h
+sqdmlal2 v0.2d, v0.4s, v0.4s
+sqdmlal2 v0.4s, v0.8h, v0.8h
+sqdmlsl d12, s23, s13
+sqdmlsl d8, s9, v0.s[1]
+sqdmlsl s0, h0, v0.h[3]
+sqdmlsl s14, h12, h25
+sqdmlsl v0.2d, v0.2s, v0.2s
+sqdmlsl v0.4s, v0.4h, v0.4h
+sqdmlsl2 v0.2d, v0.4s, v0.4s
+sqdmlsl2 v0.4s, v0.8h, v0.8h
+sqdmulh h10, h11, h12
+sqdmulh h7, h15, v0.h[3]
+sqdmulh s15, s14, v0.s[1]
+sqdmulh s20, s21, s2
+sqdmulh v0.2s, v0.2s, v0.2s
+sqdmulh v0.4s, v0.4s, v0.4s
+sqdmull d1, s1, v0.s[1]
+sqdmull d15, s22, s12
+sqdmull s1, h1, v0.h[3]
+sqdmull s12, h22, h12
+sqdmull v0.2d, v0.2s, v0.2s
+sqdmull v0.4s, v0.4h, v0.4h
+sqdmull2 v0.2d, v0.4s, v0.4s
+sqdmull2 v0.4s, v0.8h, v0.8h
+sqneg b19, b14
+sqneg d18, d12
+sqneg h21, h15
+sqneg s20, s12
+sqneg v0.16b, v0.16b
+sqneg v0.2d, v0.2d
+sqneg v0.2s, v0.2s
+sqneg v0.4h, v0.4h
+sqneg v0.4s, v0.4s
+sqneg v0.8b, v0.8b
+sqneg v0.8h, v0.8h
+sqrdmulh h10, h11, h12
+sqrdmulh h7, h15, v0.h[3]
+sqrdmulh s15, s14, v0.s[1]
+sqrdmulh s20, s21, s2
+sqrdmulh v0.4h, v0.4h, v0.4h
+sqrdmulh v0.8h, v0.8h, v0.8h
+sqrshl d31, d31, d31
+sqrshl h3, h4, h15
+sqrshl v0.2s, v0.2s, v0.2s
+sqrshl v0.4h, v0.4h, v0.4h
+sqrshl v0.8b, v0.8b, v0.8b
+sqrshrn b10, h13, #2
+sqrshrn h15, s10, #6
+sqrshrn s15, d12, #9
+sqrshrn v0.2s, v0.2d, #3
+sqrshrn v0.4h, v0.4s, #3
+sqrshrn v0.8b, v0.8h, #3
+sqrshrn2 v0.16b, v0.8h, #3
+sqrshrn2 v0.4s, v0.2d, #3
+sqrshrn2 v0.8h, v0.4s, #3
+sqrshrun b17, h10, #6
+sqrshrun h10, s13, #15
+sqrshrun s22, d16, #31
+sqrshrun v0.2s, v0.2d, #3
+sqrshrun v0.4h, v0.4s, #3
+sqrshrun v0.8b, v0.8h, #3
+sqrshrun2 v0.16b, v0.8h, #3
+sqrshrun2 v0.4s, v0.2d, #3
+sqrshrun2 v0.8h, v0.4s, #3
+sqshl b11, b19, #7
+sqshl d15, d16, #51
+sqshl d31, d31, d31
+sqshl h13, h18, #11
+sqshl h3, h4, h15
+sqshl s14, s17, #22
+sqshl v0.16b, v0.16b, #3
+sqshl v0.2d, v0.2d, #3
+sqshl v0.2s, v0.2s, #3
+sqshl v0.2s, v0.2s, v0.2s
+sqshl v0.4h, v0.4h, #3
+sqshl v0.4h, v0.4h, v0.4h
+sqshl v0.4s, v0.4s, #3
+sqshl v0.8b, v0.8b, #3
+sqshl v0.8b, v0.8b, v0.8b
+sqshl v0.8h, v0.8h, #3
+sqshlu b15, b18, #6
+sqshlu d11, d13, #32
+sqshlu h19, h17, #6
+sqshlu s16, s14, #25
+sqshlu v0.16b, v0.16b, #3
+sqshlu v0.2d, v0.2d, #3
+sqshlu v0.2s, v0.2s, #3
+sqshlu v0.4h, v0.4h, #3
+sqshlu v0.4s, v0.4s, #3
+sqshlu v0.8b, v0.8b, #3
+sqshlu v0.8h, v0.8h, #3
+sqshrn b10, h15, #5
+sqshrn h17, s10, #4
+sqshrn s18, d10, #31
+sqshrn v0.2s, v0.2d, #3
+sqshrn v0.4h, v0.4s, #3
+sqshrn v0.8b, v0.8h, #3
+sqshrn2 v0.16b, v0.8h, #3
+sqshrn2 v0.4s, v0.2d, #3
+sqshrn2 v0.8h, v0.4s, #3
+sqshrun b15, h10, #7
+sqshrun h20, s14, #3
+sqshrun s10, d15, #15
+sqshrun v0.2s, v0.2d, #3
+sqshrun v0.4h, v0.4s, #3
+sqshrun v0.8b, v0.8h, #3
+sqshrun2 v0.16b, v0.8h, #3
+sqshrun2 v0.4s, v0.2d, #3
+sqshrun2 v0.8h, v0.4s, #3
+sqsub s20, s10, s7
+sqsub v0.2d, v0.2d, v0.2d
+sqsub v0.4s, v0.4s, v0.4s
+sqsub v0.8b, v0.8b, v0.8b
+sqxtn b18, h18
+sqxtn h20, s17
+sqxtn s19, d14
+sqxtn v0.2s, v0.2d
+sqxtn v0.4h, v0.4s
+sqxtn v0.8b, v0.8h
+sqxtn2 v0.16b, v0.8h
+sqxtn2 v0.4s, v0.2d
+sqxtn2 v0.8h, v0.4s
+sqxtun b19, h14
+sqxtun h21, s15
+sqxtun s20, d12
+sqxtun v0.2s, v0.2d
+sqxtun v0.4h, v0.4s
+sqxtun v0.8b, v0.8h
+sqxtun2 v0.16b, v0.8h
+sqxtun2 v0.4s, v0.2d
+sqxtun2 v0.8h, v0.4s
+srhadd v0.2s, v0.2s, v0.2s
+srhadd v0.4h, v0.4h, v0.4h
+srhadd v0.8b, v0.8b, v0.8b
+sri d10, d12, #14
+sri v0.16b, v0.16b, #3
+sri v0.2d, v0.2d, #3
+sri v0.2s, v0.2s, #3
+sri v0.4h, v0.4h, #3
+sri v0.4s, v0.4s, #3
+sri v0.8b, v0.8b, #3
+sri v0.8h, v0.8h, #3
+srshl d16, d16, d16
+srshl v0.2s, v0.2s, v0.2s
+srshl v0.4h, v0.4h, v0.4h
+srshl v0.8b, v0.8b, v0.8b
+srshr d19, d18, #7
+srshr v0.16b, v0.16b, #3
+srshr v0.2d, v0.2d, #3
+srshr v0.2s, v0.2s, #3
+srshr v0.4h, v0.4h, #3
+srshr v0.4s, v0.4s, #3
+srshr v0.8b, v0.8b, #3
+srshr v0.8h, v0.8h, #3
+srsra d15, d11, #19
+srsra v0.16b, v0.16b, #3
+srsra v0.2d, v0.2d, #3
+srsra v0.2s, v0.2s, #3
+srsra v0.4h, v0.4h, #3
+srsra v0.4s, v0.4s, #3
+srsra v0.8b, v0.8b, #3
+srsra v0.8h, v0.8h, #3
+sshl d31, d31, d31
+sshl v0.2d, v0.2d, v0.2d
+sshl v0.2s, v0.2s, v0.2s
+sshl v0.4h, v0.4h, v0.4h
+sshl v0.8b, v0.8b, v0.8b
+sshll v0.2d, v0.2s, #3
+sshll2 v0.4s, v0.8h, #3
+sshr d15, d16, #12
+sshr v0.16b, v0.16b, #3
+sshr v0.2d, v0.2d, #3
+sshr v0.2s, v0.2s, #3
+sshr v0.4h, v0.4h, #3
+sshr v0.4s, v0.4s, #3
+sshr v0.8b, v0.8b, #3
+sshr v0.8h, v0.8h, #3
+ssra d18, d12, #21
+ssra v0.16b, v0.16b, #3
+ssra v0.2d, v0.2d, #3
+ssra v0.2s, v0.2s, #3
+ssra v0.4h, v0.4h, #3
+ssra v0.4s, v0.4s, #3
+ssra v0.8b, v0.8b, #3
+ssra v0.8h, v0.8h, #3
+ssubl v0.2d, v0.2s, v0.2s
+ssubl v0.4s, v0.4h, v0.4h
+ssubl v0.8h, v0.8b, v0.8b
+ssubl2 v0.2d, v0.4s, v0.4s
+ssubl2 v0.4s, v0.8h, v0.8h
+ssubl2 v0.8h, v0.16b, v0.16b
+ssubw v0.2d, v0.2d, v0.2s
+ssubw v0.4s, v0.4s, v0.4h
+ssubw v0.8h, v0.8h, v0.8b
+ssubw2 v0.2d, v0.2d, v0.4s
+ssubw2 v0.4s, v0.4s, v0.8h
+ssubw2 v0.8h, v0.8h, v0.16b
+st1 { v0.16b }, [x0]
+st1 { v0.2d, v1.2d, v2.2d }, [x0], #48
+st1 { v0.2d, v1.2d, v2.2d, v3.2d }, [x0]
+st1 { v0.4s, v1.4s }, [sp], #32
+st1 { v0.4s, v1.4s, v2.4s }, [sp]
+st1 { v0.8b, v1.8b, v2.8b, v3.8b }, [x0], x3
+st1 { v0.8h }, [x15], x2
+st1 { v0.8h, v1.8h }, [x15]
+st1 { v0.d }[1], [x0]
+st1 { v0.d }[1], [x0], #8
+st2 { v0.16b, v1.16b }, [x0], x1
+st2 { v0.8b, v1.8b }, [x0]
+st2 { v0.s, v1.s }[3], [sp]
+st2 { v0.s, v1.s }[3], [sp], #8
+st3 { v0.4h, v1.4h, v2.4h }, [x15]
+st3 { v0.8h, v1.8h, v2.8h }, [x15], x2
+st3 { v0.h, v1.h, v2.h }[7], [x15]
+st3 { v0.h, v1.h, v2.h }[7], [x15], #6
+st4 { v0.2s, v1.2s, v2.2s, v3.2s }, [sp]
+st4 { v0.4s, v1.4s, v2.4s, v3.4s }, [sp], #64
+st4 { v0.b, v1.b, v2.b, v3.b }[9], [x0]
+st4 { v0.b, v1.b, v2.b, v3.b }[9], [x0], x5
+sub d15, d5, d16
+sub v0.2d, v0.2d, v0.2d
+suqadd b19, b14
+suqadd d18, d22
+suqadd h20, h15
+suqadd s21, s12
+suqadd v0.16b, v0.16b
+suqadd v0.2d, v0.2d
+suqadd v0.2s, v0.2s
+suqadd v0.4h, v0.4h
+suqadd v0.4s, v0.4s
+suqadd v0.8b, v0.8b
+suqadd v0.8h, v0.8h
+tbl v0.16b, { v0.16b }, v0.16b
+tbl v0.16b, { v0.16b, v1.16b }, v0.16b
+tbl v0.16b, { v0.16b, v1.16b, v2.16b }, v0.16b
+tbl v0.16b, { v0.16b, v1.16b, v2.16b, v3.16b }, v0.16b
+tbl v0.8b, { v0.16b }, v0.8b
+tbl v0.8b, { v0.16b, v1.16b }, v0.8b
+tbl v0.8b, { v0.16b, v1.16b, v2.16b }, v0.8b
+tbl v0.8b, { v0.16b, v1.16b, v2.16b, v3.16b }, v0.8b
+tbx v0.16b, { v0.16b }, v0.16b
+tbx v0.16b, { v0.16b, v1.16b }, v0.16b
+tbx v0.16b, { v0.16b, v1.16b, v2.16b }, v0.16b
+tbx v0.16b, { v0.16b, v1.16b, v2.16b, v3.16b }, v0.16b
+tbx v0.8b, { v0.16b }, v0.8b
+tbx v0.8b, { v0.16b, v1.16b }, v0.8b
+tbx v0.8b, { v0.16b, v1.16b, v2.16b }, v0.8b
+tbx v0.8b, { v0.16b, v1.16b, v2.16b, v3.16b }, v0.8b
+trn1 v0.16b, v0.16b, v0.16b
+trn1 v0.2d, v0.2d, v0.2d
+trn1 v0.2s, v0.2s, v0.2s
+trn1 v0.4h, v0.4h, v0.4h
+trn1 v0.4s, v0.4s, v0.4s
+trn1 v0.8b, v0.8b, v0.8b
+trn1 v0.8h, v0.8h, v0.8h
+trn2 v0.16b, v0.16b, v0.16b
+trn2 v0.2d, v0.2d, v0.2d
+trn2 v0.2s, v0.2s, v0.2s
+trn2 v0.4h, v0.4h, v0.4h
+trn2 v0.4s, v0.4s, v0.4s
+trn2 v0.8b, v0.8b, v0.8b
+trn2 v0.8h, v0.8h, v0.8h
+uaba v0.8b, v0.8b, v0.8b
+uabal v0.2d, v0.2s, v0.2s
+uabal v0.4s, v0.4h, v0.4h
+uabal v0.8h, v0.8b, v0.8b
+uabal2 v0.2d, v0.4s, v0.4s
+uabal2 v0.4s, v0.8h, v0.8h
+uabal2 v0.8h, v0.16b, v0.16b
+uabd v0.4h, v0.4h, v0.4h
+uabdl v0.2d, v0.2s, v0.2s
+uabdl v0.4s, v0.4h, v0.4h
+uabdl v0.8h, v0.8b, v0.8b
+uabdl2 v0.2d, v0.4s, v0.4s
+uabdl2 v0.4s, v0.8h, v0.8h
+uabdl2 v0.8h, v0.16b, v0.16b
+uadalp v0.1d, v0.2s
+uadalp v0.2d, v0.4s
+uadalp v0.2s, v0.4h
+uadalp v0.4h, v0.8b
+uadalp v0.4s, v0.8h
+uadalp v0.8h, v0.16b
+uaddl v0.2d, v0.2s, v0.2s
+uaddl v0.4s, v0.4h, v0.4h
+uaddl v0.8h, v0.8b, v0.8b
+uaddl2 v0.2d, v0.4s, v0.4s
+uaddl2 v0.4s, v0.8h, v0.8h
+uaddl2 v0.8h, v0.16b, v0.16b
+uaddlp v0.1d, v0.2s
+uaddlp v0.2d, v0.4s
+uaddlp v0.2s, v0.4h
+uaddlp v0.4h, v0.8b
+uaddlp v0.4s, v0.8h
+uaddlp v0.8h, v0.16b
+uaddw v0.2d, v0.2d, v0.2s
+uaddw v0.4s, v0.4s, v0.4h
+uaddw v0.8h, v0.8h, v0.8b
+uaddw2 v0.2d, v0.2d, v0.4s
+uaddw2 v0.4s, v0.4s, v0.8h
+uaddw2 v0.8h, v0.8h, v0.16b
+ucvtf d21, d14
+ucvtf d21, d14, #64
+ucvtf s22, s13
+ucvtf s22, s13, #32
+ucvtf v0.2d, v0.2d
+ucvtf v0.2d, v0.2d, #3
+ucvtf v0.2s, v0.2s
+ucvtf v0.2s, v0.2s, #3
+ucvtf v0.4h, v0.4h
+ucvtf v0.4s, v0.4s
+ucvtf v0.4s, v0.4s, #3
+ucvtf v0.8h, v0.8h
+uhadd v0.16b, v0.16b, v0.16b
+uhadd v0.8h, v0.8h, v0.8h
+uhsub v0.4s, v0.4s, v0.4s
+umax v0.16b, v0.16b, v0.16b
+umax v0.4s, v0.4s, v0.4s
+umax v0.8h, v0.8h, v0.8h
+umaxp v0.16b, v0.16b, v0.16b
+umaxp v0.4s, v0.4s, v0.4s
+umaxp v0.8h, v0.8h, v0.8h
+umin v0.2s, v0.2s, v0.2s
+umin v0.4h, v0.4h, v0.4h
+umin v0.8b, v0.8b, v0.8b
+uminp v0.2s, v0.2s, v0.2s
+uminp v0.4h, v0.4h, v0.4h
+uminp v0.8b, v0.8b, v0.8b
+umlal v0.2d, v0.2s, v0.2s
+umlal v0.4s, v0.4h, v0.4h
+umlal v0.8h, v0.8b, v0.8b
+umlal2 v0.2d, v0.4s, v0.4s
+umlal2 v0.4s, v0.8h, v0.8h
+umlal2 v0.8h, v0.16b, v0.16b
+umlsl v0.2d, v0.2s, v0.2s
+umlsl v0.4s, v0.4h, v0.4h
+umlsl v0.8h, v0.8b, v0.8b
+umlsl2 v0.2d, v0.4s, v0.4s
+umlsl2 v0.4s, v0.8h, v0.8h
+umlsl2 v0.8h, v0.16b, v0.16b
+umull v0.2d, v0.2s, v0.2s
+umull v0.4s, v0.4h, v0.4h
+umull v0.8h, v0.8b, v0.8b
+umull2 v0.2d, v0.4s, v0.4s
+umull2 v0.4s, v0.8h, v0.8h
+umull2 v0.8h, v0.16b, v0.16b
+uqadd h0, h1, h5
+uqadd v0.8h, v0.8h, v0.8h
+uqrshl b11, b20, b30
+uqrshl s23, s20, s16
+uqrshl v0.16b, v0.16b, v0.16b
+uqrshl v0.4s, v0.4s, v0.4s
+uqrshl v0.4s, v0.4s, v0.4s
+uqrshl v0.8h, v0.8h, v0.8h
+uqrshrn b10, h12, #5
+uqrshrn h12, s10, #14
+uqrshrn s10, d10, #25
+uqrshrn v0.2s, v0.2d, #3
+uqrshrn v0.4h, v0.4s, #3
+uqrshrn v0.8b, v0.8h, #3
+uqrshrn2 v0.16b, v0.8h, #3
+uqrshrn2 v0.4s, v0.2d, #3
+uqrshrn2 v0.8h, v0.4s, #3
+uqshl b11, b20, b30
+uqshl b18, b15, #6
+uqshl d15, d12, #19
+uqshl h11, h18, #7
+uqshl s14, s19, #18
+uqshl s23, s20, s16
+uqshl v0.16b, v0.16b, #3
+uqshl v0.16b, v0.16b, v0.16b
+uqshl v0.2d, v0.2d, #3
+uqshl v0.2d, v0.2d, v0.2d
+uqshl v0.2s, v0.2s, #3
+uqshl v0.4h, v0.4h, #3
+uqshl v0.4s, v0.4s, #3
+uqshl v0.4s, v0.4s, v0.4s
+uqshl v0.8b, v0.8b, #3
+uqshl v0.8h, v0.8h, #3
+uqshl v0.8h, v0.8h, v0.8h
+uqshrn b12, h10, #7
+uqshrn h10, s14, #5
+uqshrn s10, d12, #13
+uqshrn v0.2s, v0.2d, #3
+uqshrn v0.4h, v0.4s, #3
+uqshrn v0.8b, v0.8h, #3
+uqshrn2 v0.16b, v0.8h, #3
+uqshrn2 v0.4s, v0.2d, #3
+uqshrn2 v0.8h, v0.4s, #3
+uqsub d16, d16, d16
+uqsub v0.4h, v0.4h, v0.4h
+uqxtn b18, h18
+uqxtn h20, s17
+uqxtn s19, d14
+uqxtn v0.2s, v0.2d
+uqxtn v0.4h, v0.4s
+uqxtn v0.8b, v0.8h
+uqxtn2 v0.16b, v0.8h
+uqxtn2 v0.4s, v0.2d
+uqxtn2 v0.8h, v0.4s
+urecpe v0.2s, v0.2s
+urecpe v0.4s, v0.4s
+urhadd v0.16b, v0.16b, v0.16b
+urhadd v0.4s, v0.4s, v0.4s
+urhadd v0.8h, v0.8h, v0.8h
+urshl d8, d7, d4
+urshl v0.16b, v0.16b, v0.16b
+urshl v0.2d, v0.2d, v0.2d
+urshl v0.4s, v0.4s, v0.4s
+urshl v0.8h, v0.8h, v0.8h
+urshr d20, d23, #31
+urshr v0.16b, v0.16b, #3
+urshr v0.2d, v0.2d, #3
+urshr v0.2s, v0.2s, #3
+urshr v0.4h, v0.4h, #3
+urshr v0.4s, v0.4s, #3
+urshr v0.8b, v0.8b, #3
+urshr v0.8h, v0.8h, #3
+ursqrte v0.2s, v0.2s
+ursqrte v0.4s, v0.4s
+ursra d18, d10, #13
+ursra v0.16b, v0.16b, #3
+ursra v0.2d, v0.2d, #3
+ursra v0.2s, v0.2s, #3
+ursra v0.4h, v0.4h, #3
+ursra v0.4s, v0.4s, #3
+ursra v0.8b, v0.8b, #3
+ursra v0.8h, v0.8h, #3
+ushl d0, d0, d0
+ushl v0.16b, v0.16b, v0.16b
+ushl v0.4s, v0.4s, v0.4s
+ushl v0.8h, v0.8h, v0.8h
+ushll v0.4s, v0.4h, #3
+ushll2 v0.8h, v0.16b, #3
+ushr d10, d17, #18
+ushr v0.16b, v0.16b, #3
+ushr v0.2d, v0.2d, #3
+ushr v0.2s, v0.2s, #3
+ushr v0.4h, v0.4h, #3
+ushr v0.4s, v0.4s, #3
+ushr v0.8b, v0.8b, #3
+ushr v0.8h, v0.8h, #3
+usqadd b19, b14
+usqadd d18, d22
+usqadd h20, h15
+usqadd s21, s12
+usqadd v0.16b, v0.16b
+usqadd v0.2d, v0.2d
+usqadd v0.2s, v0.2s
+usqadd v0.4h, v0.4h
+usqadd v0.4s, v0.4s
+usqadd v0.8b, v0.8b
+usqadd v0.8h, v0.8h
+usra d20, d13, #61
+usra v0.16b, v0.16b, #3
+usra v0.2d, v0.2d, #3
+usra v0.2s, v0.2s, #3
+usra v0.4h, v0.4h, #3
+usra v0.4s, v0.4s, #3
+usra v0.8b, v0.8b, #3
+usra v0.8h, v0.8h, #3
+usubl v0.2d, v0.2s, v0.2s
+usubl v0.4s, v0.4h, v0.4h
+usubl v0.8h, v0.8b, v0.8b
+usubl2 v0.2d, v0.4s, v0.4s
+usubl2 v0.4s, v0.8h, v0.8h
+usubl2 v0.8h, v0.16b, v0.16b
+usubw v0.2d, v0.2d, v0.2s
+usubw v0.4s, v0.4s, v0.4h
+usubw v0.8h, v0.8h, v0.8b
+usubw2 v0.2d, v0.2d, v0.4s
+usubw2 v0.4s, v0.4s, v0.8h
+usubw2 v0.8h, v0.8h, v0.16b
+uzp1 v0.16b, v0.16b, v0.16b
+uzp1 v0.2d, v0.2d, v0.2d
+uzp1 v0.2s, v0.2s, v0.2s
+uzp1 v0.4h, v0.4h, v0.4h
+uzp1 v0.4s, v0.4s, v0.4s
+uzp1 v0.8b, v0.8b, v0.8b
+uzp1 v0.8h, v0.8h, v0.8h
+uzp2 v0.16b, v0.16b, v0.16b
+uzp2 v0.2d, v0.2d, v0.2d
+uzp2 v0.2s, v0.2s, v0.2s
+uzp2 v0.4h, v0.4h, v0.4h
+uzp2 v0.4s, v0.4s, v0.4s
+uzp2 v0.8b, v0.8b, v0.8b
+uzp2 v0.8h, v0.8h, v0.8h
+xtn v0.2s, v0.2d
+xtn v0.4h, v0.4s
+xtn v0.8b, v0.8h
+xtn2 v0.16b, v0.8h
+xtn2 v0.4s, v0.2d
+xtn2 v0.8h, v0.4s
+zip1 v0.16b, v0.16b, v0.16b
+zip1 v0.2d, v0.2d, v0.2d
+zip1 v0.2s, v0.2s, v0.2s
+zip1 v0.4h, v0.4h, v0.4h
+zip1 v0.4s, v0.4s, v0.4s
+zip1 v0.8b, v0.8b, v0.8b
+zip1 v0.8h, v0.8h, v0.8h
+zip2 v0.16b, v0.16b, v0.16b
+zip2 v0.2d, v0.2d, v0.2d
+zip2 v0.2s, v0.2s, v0.2s
+zip2 v0.4h, v0.4h, v0.4h
+zip2 v0.4s, v0.4s, v0.4s
+zip2 v0.8b, v0.8b, v0.8b
+zip2 v0.8h, v0.8h, v0.8h
+
+# CHECK: Instruction Info:
+# CHECK-NEXT: [1]: #uOps
+# CHECK-NEXT: [2]: Latency
+# CHECK-NEXT: [3]: RThroughput
+# CHECK-NEXT: [4]: MayLoad
+# CHECK-NEXT: [5]: MayStore
+# CHECK-NEXT: [6]: HasSideEffects (U)
+
+# CHECK: [1] [2] [3] [4] [5] [6] Instructions:
+# CHECK-NEXT: 1 2 0.50 abs d29, d24
+# CHECK-NEXT: 1 2 0.50 abs v0.16b, v0.16b
+# CHECK-NEXT: 1 2 0.50 abs v0.2d, v0.2d
+# CHECK-NEXT: 1 2 0.50 abs v0.2s, v0.2s
+# CHECK-NEXT: 1 2 0.50 abs v0.4h, v0.4h
+# CHECK-NEXT: 1 2 0.50 abs v0.4s, v0.4s
+# CHECK-NEXT: 1 2 0.50 abs v0.8b, v0.8b
+# CHECK-NEXT: 1 2 0.50 abs v0.8h, v0.8h
+# CHECK-NEXT: 1 2 0.50 add d17, d31, d29
+# CHECK-NEXT: 1 2 0.50 add v0.8b, v0.8b, v0.8b
+# CHECK-NEXT: 2 6 1.00 addhn v0.2s, v0.2d, v0.2d
+# CHECK-NEXT: 2 6 1.00 addhn v0.4h, v0.4s, v0.4s
+# CHECK-NEXT: 2 6 1.00 addhn v0.8b, v0.8h, v0.8h
+# CHECK-NEXT: 2 6 1.00 addhn2 v0.16b, v0.8h, v0.8h
+# CHECK-NEXT: 2 6 1.00 addhn2 v0.4s, v0.2d, v0.2d
+# CHECK-NEXT: 2 6 1.00 addhn2 v0.8h, v0.4s, v0.4s
+# CHECK-NEXT: 1 2 0.50 addp v0.2d, v0.2d, v0.2d
+# CHECK-NEXT: 1 2 0.50 addp v0.8b, v0.8b, v0.8b
+# CHECK-NEXT: 1 2 0.50 and v0.8b, v0.8b, v0.8b
+# CHECK-NEXT: 1 2 0.50 bic v0.4h, #15, lsl #8
+# CHECK-NEXT: 1 2 0.50 bic v0.8b, v0.8b, v0.8b
+# CHECK-NEXT: 1 3 0.50 bif v0.16b, v0.16b, v0.16b
+# CHECK-NEXT: 1 3 0.50 bit v0.16b, v0.16b, v0.16b
+# CHECK-NEXT: 1 3 0.50 bsl v0.8b, v0.8b, v0.8b
+# CHECK-NEXT: 1 2 0.50 cls v0.16b, v0.16b
+# CHECK-NEXT: 1 2 0.50 cls v0.2s, v0.2s
+# CHECK-NEXT: 1 2 0.50 cls v0.4h, v0.4h
+# CHECK-NEXT: 1 2 0.50 cls v0.4s, v0.4s
+# CHECK-NEXT: 1 2 0.50 cls v0.8b, v0.8b
+# CHECK-NEXT: 1 2 0.50 cls v0.8h, v0.8h
+# CHECK-NEXT: 1 2 0.50 clz v0.16b, v0.16b
+# CHECK-NEXT: 1 2 0.50 clz v0.2s, v0.2s
+# CHECK-NEXT: 1 2 0.50 clz v0.4h, v0.4h
+# CHECK-NEXT: 1 2 0.50 clz v0.4s, v0.4s
+# CHECK-NEXT: 1 2 0.50 clz v0.8b, v0.8b
+# CHECK-NEXT: 1 2 0.50 clz v0.8h, v0.8h
+# CHECK-NEXT: 1 2 0.50 cmeq d20, d21, #0
+# CHECK-NEXT: 1 2 0.50 cmeq d20, d21, d22
+# CHECK-NEXT: 1 2 0.50 cmeq v0.16b, v0.16b, #0
+# CHECK-NEXT: 1 2 0.50 cmeq v0.16b, v0.16b, v0.16b
+# CHECK-NEXT: 1 2 0.50 cmge d20, d21, #0
+# CHECK-NEXT: 1 2 0.50 cmge d20, d21, d22
+# CHECK-NEXT: 1 2 0.50 cmge v0.4h, v0.4h, v0.4h
+# CHECK-NEXT: 1 2 0.50 cmge v0.8b, v0.8b, #0
+# CHECK-NEXT: 1 2 0.50 cmgt d20, d21, #0
+# CHECK-NEXT: 1 2 0.50 cmgt d20, d21, d22
+# CHECK-NEXT: 1 2 0.50 cmgt v0.2s, v0.2s, #0
+# CHECK-NEXT: 1 2 0.50 cmgt v0.4s, v0.4s, v0.4s
+# CHECK-NEXT: 1 2 0.50 cmhi d20, d21, d22
+# CHECK-NEXT: 1 2 0.50 cmhi v0.8h, v0.8h, v0.8h
+# CHECK-NEXT: 1 2 0.50 cmhs d20, d21, d22
+# CHECK-NEXT: 1 2 0.50 cmhs v0.8b, v0.8b, v0.8b
+# CHECK-NEXT: 1 2 0.50 cmle d20, d21, #0
+# CHECK-NEXT: 1 2 0.50 cmle v0.2d, v0.2d, #0
+# CHECK-NEXT: 1 2 0.50 cmlt d20, d21, #0
+# CHECK-NEXT: 1 2 0.50 cmlt v0.8h, v0.8h, #0
+# CHECK-NEXT: 1 2 0.50 cmtst d20, d21, d22
+# CHECK-NEXT: 1 2 0.50 cmtst v0.2s, v0.2s, v0.2s
+# CHECK-NEXT: 1 2 0.50 cnt v0.16b, v0.16b
+# CHECK-NEXT: 1 2 0.50 cnt v0.8b, v0.8b
+# CHECK-NEXT: 1 5 1.00 dup v0.16b, w28
+# CHECK-NEXT: 1 5 1.00 dup v0.2d, x28
+# CHECK-NEXT: 1 5 1.00 dup v0.2s, w28
+# CHECK-NEXT: 1 5 1.00 dup v0.4h, w28
+# CHECK-NEXT: 1 5 1.00 dup v0.4s, w28
+# CHECK-NEXT: 1 5 1.00 dup v0.8b, w28
+# CHECK-NEXT: 1 5 1.00 dup v0.8h, w28
+# CHECK-NEXT: 1 2 0.50 eor v0.16b, v0.16b, v0.16b
+# CHECK-NEXT: 1 2 0.50 ext v0.16b, v0.16b, v0.16b, #3
+# CHECK-NEXT: 1 2 0.50 ext v0.8b, v0.8b, v0.8b, #3
+# CHECK-NEXT: 1 3 0.50 fabd d29, d24, d20
+# CHECK-NEXT: 1 3 0.50 fabd s29, s24, s20
+# CHECK-NEXT: 1 3 0.50 fabd v0.4s, v0.4s, v0.4s
+# CHECK-NEXT: 1 3 0.50 fabs v0.2d, v0.2d
+# CHECK-NEXT: 1 3 0.50 fabs v0.2s, v0.2s
+# CHECK-NEXT: 1 3 0.50 fabs v0.4h, v0.4h
+# CHECK-NEXT: 1 3 0.50 fabs v0.4s, v0.4s
+# CHECK-NEXT: 1 3 0.50 fabs v0.8h, v0.8h
+# CHECK-NEXT: 1 3 0.50 facge d20, d21, d22
+# CHECK-NEXT: 1 3 0.50 facge s10, s11, s12
+# CHECK-NEXT: 1 3 0.50 facge v0.4s, v0.4s, v0.4s
+# CHECK-NEXT: 1 3 0.50 facgt d20, d21, d22
+# CHECK-NEXT: 1 3 0.50 facgt s10, s11, s12
+# CHECK-NEXT: 1 3 0.50 facgt v0.2d, v0.2d, v0.2d
+# CHECK-NEXT: 1 3 0.50 fadd v0.4s, v0.4s, v0.4s
+# CHECK-NEXT: 1 3 0.50 faddp v0.2s, v0.2s, v0.2s
+# CHECK-NEXT: 1 3 0.50 faddp v0.4s, v0.4s, v0.4s
+# CHECK-NEXT: 1 3 0.50 fcmeq d20, d21, #0.0
+# CHECK-NEXT: 1 3 0.50 fcmeq d20, d21, d22
+# CHECK-NEXT: 1 3 0.50 fcmeq s10, s11, #0.0
+# CHECK-NEXT: 1 3 0.50 fcmeq s10, s11, s12
+# CHECK-NEXT: 1 3 0.50 fcmeq v0.2s, v0.2s, #0.0
+# CHECK-NEXT: 1 3 0.50 fcmeq v0.2s, v0.2s, v0.2s
+# CHECK-NEXT: 1 3 0.50 fcmge d20, d21, #0.0
+# CHECK-NEXT: 1 3 0.50 fcmge d20, d21, d22
+# CHECK-NEXT: 1 3 0.50 fcmge s10, s11, #0.0
+# CHECK-NEXT: 1 3 0.50 fcmge s10, s11, s12
+# CHECK-NEXT: 1 3 0.50 fcmge v0.2d, v0.2d, #0.0
+# CHECK-NEXT: 1 3 0.50 fcmge v0.4s, v0.4s, v0.4s
+# CHECK-NEXT: 1 3 0.50 fcmgt d20, d21, #0.0
+# CHECK-NEXT: 1 3 0.50 fcmgt d20, d21, d22
+# CHECK-NEXT: 1 3 0.50 fcmgt s10, s11, #0.0
+# CHECK-NEXT: 1 3 0.50 fcmgt s10, s11, s12
+# CHECK-NEXT: 1 3 0.50 fcmgt v0.4s, v0.4s, #0.0
+# CHECK-NEXT: 1 3 0.50 fcmgt v0.4s, v0.4s, v0.4s
+# CHECK-NEXT: 1 3 0.50 fcmle d20, d21, #0.0
+# CHECK-NEXT: 1 3 0.50 fcmle s10, s11, #0.0
+# CHECK-NEXT: 1 3 0.50 fcmle v0.2d, v0.2d, #0.0
+# CHECK-NEXT: 1 3 0.50 fcmlt d20, d21, #0.0
+# CHECK-NEXT: 1 3 0.50 fcmlt s10, s11, #0.0
+# CHECK-NEXT: 1 3 0.50 fcmlt v0.4s, v0.4s, #0.0
+# CHECK-NEXT: 1 3 0.50 fcvtas d21, d14
+# CHECK-NEXT: 1 3 0.50 fcvtas s12, s13
+# CHECK-NEXT: 1 3 0.50 fcvtas v0.2d, v0.2d
+# CHECK-NEXT: 1 3 0.50 fcvtas v0.2s, v0.2s
+# CHECK-NEXT: 1 3 0.50 fcvtas v0.4h, v0.4h
+# CHECK-NEXT: 1 3 0.50 fcvtas v0.4s, v0.4s
+# CHECK-NEXT: 1 3 0.50 fcvtas v0.8h, v0.8h
+# CHECK-NEXT: 1 3 0.50 fcvtau d21, d14
+# CHECK-NEXT: 1 3 0.50 fcvtau s12, s13
+# CHECK-NEXT: 1 3 0.50 fcvtau v0.2d, v0.2d
+# CHECK-NEXT: 1 3 0.50 fcvtau v0.2s, v0.2s
+# CHECK-NEXT: 1 3 0.50 fcvtau v0.4h, v0.4h
+# CHECK-NEXT: 1 3 0.50 fcvtau v0.4s, v0.4s
+# CHECK-NEXT: 1 3 0.50 fcvtau v0.8h, v0.8h
+# CHECK-NEXT: 1 3 0.50 fcvtl v0.2d, v0.2s
+# CHECK-NEXT: 1 3 0.50 fcvtl v0.4s, v0.4h
+# CHECK-NEXT: 1 3 0.50 fcvtl2 v0.2d, v0.4s
+# CHECK-NEXT: 1 3 0.50 fcvtl2 v0.4s, v0.8h
+# CHECK-NEXT: 1 3 0.50 fcvtms d21, d14
+# CHECK-NEXT: 1 3 0.50 fcvtms s22, s13
+# CHECK-NEXT: 1 3 0.50 fcvtms v0.2d, v0.2d
+# CHECK-NEXT: 1 3 0.50 fcvtms v0.2s, v0.2s
+# CHECK-NEXT: 1 3 0.50 fcvtms v0.4h, v0.4h
+# CHECK-NEXT: 1 3 0.50 fcvtms v0.4s, v0.4s
+# CHECK-NEXT: 1 3 0.50 fcvtms v0.8h, v0.8h
+# CHECK-NEXT: 1 3 0.50 fcvtmu d21, d14
+# CHECK-NEXT: 1 3 0.50 fcvtmu s12, s13
+# CHECK-NEXT: 1 3 0.50 fcvtmu v0.2d, v0.2d
+# CHECK-NEXT: 1 3 0.50 fcvtmu v0.2s, v0.2s
+# CHECK-NEXT: 1 3 0.50 fcvtmu v0.4h, v0.4h
+# CHECK-NEXT: 1 3 0.50 fcvtmu v0.4s, v0.4s
+# CHECK-NEXT: 1 3 0.50 fcvtmu v0.8h, v0.8h
+# CHECK-NEXT: 1 3 0.50 fcvtn v0.2s, v0.2d
+# CHECK-NEXT: 1 3 0.50 fcvtn v0.4h, v0.4s
+# CHECK-NEXT: 1 3 0.50 fcvtn2 v0.4s, v0.2d
+# CHECK-NEXT: 1 3 0.50 fcvtn2 v0.8h, v0.4s
+# CHECK-NEXT: 1 3 0.50 fcvtns d21, d14
+# CHECK-NEXT: 1 3 0.50 fcvtns s22, s13
+# CHECK-NEXT: 1 3 0.50 fcvtns v0.2d, v0.2d
+# CHECK-NEXT: 1 3 0.50 fcvtns v0.2s, v0.2s
+# CHECK-NEXT: 1 3 0.50 fcvtns v0.4h, v0.4h
+# CHECK-NEXT: 1 3 0.50 fcvtns v0.4s, v0.4s
+# CHECK-NEXT: 1 3 0.50 fcvtns v0.8h, v0.8h
+# CHECK-NEXT: 1 3 0.50 fcvtnu d21, d14
+# CHECK-NEXT: 1 3 0.50 fcvtnu s12, s13
+# CHECK-NEXT: 1 3 0.50 fcvtnu v0.2d, v0.2d
+# CHECK-NEXT: 1 3 0.50 fcvtnu v0.2s, v0.2s
+# CHECK-NEXT: 1 3 0.50 fcvtnu v0.4h, v0.4h
+# CHECK-NEXT: 1 3 0.50 fcvtnu v0.4s, v0.4s
+# CHECK-NEXT: 1 3 0.50 fcvtnu v0.8h, v0.8h
+# CHECK-NEXT: 1 3 0.50 fcvtps d21, d14
+# CHECK-NEXT: 1 3 0.50 fcvtps s22, s13
+# CHECK-NEXT: 1 3 0.50 fcvtps v0.2d, v0.2d
+# CHECK-NEXT: 1 3 0.50 fcvtps v0.2s, v0.2s
+# CHECK-NEXT: 1 3 0.50 fcvtps v0.4h, v0.4h
+# CHECK-NEXT: 1 3 0.50 fcvtps v0.4s, v0.4s
+# CHECK-NEXT: 1 3 0.50 fcvtps v0.8h, v0.8h
+# CHECK-NEXT: 1 3 0.50 fcvtpu d21, d14
+# CHECK-NEXT: 1 3 0.50 fcvtpu s12, s13
+# CHECK-NEXT: 1 3 0.50 fcvtpu v0.2d, v0.2d
+# CHECK-NEXT: 1 3 0.50 fcvtpu v0.2s, v0.2s
+# CHECK-NEXT: 1 3 0.50 fcvtpu v0.4h, v0.4h
+# CHECK-NEXT: 1 3 0.50 fcvtpu v0.4s, v0.4s
+# CHECK-NEXT: 1 3 0.50 fcvtpu v0.8h, v0.8h
+# CHECK-NEXT: 1 3 0.50 fcvtxn s22, d13
+# CHECK-NEXT: 1 3 0.50 fcvtxn v0.2s, v0.2d
+# CHECK-NEXT: 1 3 0.50 fcvtxn2 v0.4s, v0.2d
+# CHECK-NEXT: 1 3 0.50 fcvtzs d21, d12, #1
+# CHECK-NEXT: 1 3 0.50 fcvtzs d21, d14
+# CHECK-NEXT: 1 3 0.50 fcvtzs s12, s13
+# CHECK-NEXT: 1 3 0.50 fcvtzs s21, s12, #1
+# CHECK-NEXT: 1 3 0.50 fcvtzs v0.2d, v0.2d
+# CHECK-NEXT: 1 3 0.50 fcvtzs v0.2d, v0.2d, #3
+# CHECK-NEXT: 1 3 0.50 fcvtzs v0.2s, v0.2s
+# CHECK-NEXT: 1 3 0.50 fcvtzs v0.2s, v0.2s, #3
+# CHECK-NEXT: 1 3 0.50 fcvtzs v0.4h, v0.4h
+# CHECK-NEXT: 1 3 0.50 fcvtzs v0.4s, v0.4s
+# CHECK-NEXT: 1 3 0.50 fcvtzs v0.4s, v0.4s, #3
+# CHECK-NEXT: 1 3 0.50 fcvtzs v0.8h, v0.8h
+# CHECK-NEXT: 1 3 0.50 fcvtzu d21, d12, #1
+# CHECK-NEXT: 1 3 0.50 fcvtzu d21, d14
+# CHECK-NEXT: 1 3 0.50 fcvtzu s12, s13
+# CHECK-NEXT: 1 3 0.50 fcvtzu s21, s12, #1
+# CHECK-NEXT: 1 3 0.50 fcvtzu v0.2d, v0.2d
+# CHECK-NEXT: 1 3 0.50 fcvtzu v0.2d, v0.2d, #3
+# CHECK-NEXT: 1 3 0.50 fcvtzu v0.2s, v0.2s
+# CHECK-NEXT: 1 3 0.50 fcvtzu v0.2s, v0.2s, #3
+# CHECK-NEXT: 1 3 0.50 fcvtzu v0.4h, v0.4h
+# CHECK-NEXT: 1 3 0.50 fcvtzu v0.4s, v0.4s
+# CHECK-NEXT: 1 3 0.50 fcvtzu v0.4s, v0.4s, #3
+# CHECK-NEXT: 1 3 0.50 fcvtzu v0.8h, v0.8h
+# CHECK-NEXT: 1 12 1.00 fdiv v0.2s, v0.2s, v0.2s
+# CHECK-NEXT: 1 3 0.50 fmax v0.2d, v0.2d, v0.2d
+# CHECK-NEXT: 1 3 0.50 fmax v0.2s, v0.2s, v0.2s
+# CHECK-NEXT: 1 3 0.50 fmax v0.4s, v0.4s, v0.4s
+# CHECK-NEXT: 1 3 0.50 fmaxnm v0.2d, v0.2d, v0.2d
+# CHECK-NEXT: 1 3 0.50 fmaxnm v0.2s, v0.2s, v0.2s
+# CHECK-NEXT: 1 3 0.50 fmaxnm v0.4s, v0.4s, v0.4s
+# CHECK-NEXT: 1 3 0.50 fmaxnmp v0.2d, v0.2d, v0.2d
+# CHECK-NEXT: 1 3 0.50 fmaxnmp v0.2s, v0.2s, v0.2s
+# CHECK-NEXT: 1 3 0.50 fmaxnmp v0.4s, v0.4s, v0.4s
+# CHECK-NEXT: 1 3 0.50 fmaxp v0.2d, v0.2d, v0.2d
+# CHECK-NEXT: 1 3 0.50 fmaxp v0.2s, v0.2s, v0.2s
+# CHECK-NEXT: 1 3 0.50 fmaxp v0.4s, v0.4s, v0.4s
+# CHECK-NEXT: 1 3 0.50 fmin v0.2d, v0.2d, v0.2d
+# CHECK-NEXT: 1 3 0.50 fmin v0.2s, v0.2s, v0.2s
+# CHECK-NEXT: 1 3 0.50 fmin v0.4s, v0.4s, v0.4s
+# CHECK-NEXT: 1 3 0.50 fminnm v0.2d, v0.2d, v0.2d
+# CHECK-NEXT: 1 3 0.50 fminnm v0.2s, v0.2s, v0.2s
+# CHECK-NEXT: 1 3 0.50 fminnm v0.4s, v0.4s, v0.4s
+# CHECK-NEXT: 1 3 0.50 fminnmp v0.2d, v0.2d, v0.2d
+# CHECK-NEXT: 1 3 0.50 fminnmp v0.2s, v0.2s, v0.2s
+# CHECK-NEXT: 1 3 0.50 fminnmp v0.4s, v0.4s, v0.4s
+# CHECK-NEXT: 1 3 0.50 fminp v0.2d, v0.2d, v0.2d
+# CHECK-NEXT: 1 3 0.50 fminp v0.2s, v0.2s, v0.2s
+# CHECK-NEXT: 1 3 0.50 fminp v0.4s, v0.4s, v0.4s
+# CHECK-NEXT: 1 4 0.50 fmla d0, d1, v0.d[1]
+# CHECK-NEXT: 1 4 0.50 fmla s0, s1, v0.s[3]
+# CHECK-NEXT: 1 4 0.50 fmla v0.2s, v0.2s, v0.2s
+# CHECK-NEXT: 1 4 0.50 fmls d0, d4, v0.d[1]
+# CHECK-NEXT: 1 4 0.50 fmls s3, s5, v0.s[3]
+# CHECK-NEXT: 1 4 0.50 fmls v0.2s, v0.2s, v0.2s
+# CHECK-NEXT: 1 2 0.50 fmov v0.2d, #-1.25000000
+# CHECK-NEXT: 1 2 0.50 fmov v0.2s, #13.00000000
+# CHECK-NEXT: 1 2 0.50 fmov v0.4s, #1.00000000
+# CHECK-NEXT: 1 4 0.50 fmul d0, d1, v0.d[1]
+# CHECK-NEXT: 1 4 0.50 fmul s0, s1, v0.s[3]
+# CHECK-NEXT: 1 4 0.50 fmul v0.2s, v0.2s, v0.2s
+# CHECK-NEXT: 1 4 0.50 fmulx d0, d4, v0.d[1]
+# CHECK-NEXT: 1 4 0.50 fmulx d23, d11, d1
+# CHECK-NEXT: 1 4 0.50 fmulx s20, s22, s15
+# CHECK-NEXT: 1 4 0.50 fmulx s3, s5, v0.s[3]
+# CHECK-NEXT: 1 4 0.50 fmulx v0.2d, v0.2d, v0.2d
+# CHECK-NEXT: 1 4 0.50 fmulx v0.2s, v0.2s, v0.2s
+# CHECK-NEXT: 1 4 0.50 fmulx v0.4s, v0.4s, v0.4s
+# CHECK-NEXT: 1 3 0.50 fneg v0.2d, v0.2d
+# CHECK-NEXT: 1 3 0.50 fneg v0.2s, v0.2s
+# CHECK-NEXT: 1 3 0.50 fneg v0.4h, v0.4h
+# CHECK-NEXT: 1 3 0.50 fneg v0.4s, v0.4s
+# CHECK-NEXT: 1 3 0.50 fneg v0.8h, v0.8h
+# CHECK-NEXT: 2 6 1.00 frecpe d13, d13
+# CHECK-NEXT: 2 6 1.00 frecpe s19, s14
+# CHECK-NEXT: 2 6 1.00 frecpe v0.2d, v0.2d
+# CHECK-NEXT: 2 6 1.00 frecpe v0.2s, v0.2s
+# CHECK-NEXT: 2 6 1.00 frecpe v0.4h, v0.4h
+# CHECK-NEXT: 2 6 1.00 frecpe v0.4s, v0.4s
+# CHECK-NEXT: 2 6 1.00 frecpe v0.8h, v0.8h
+# CHECK-NEXT: 1 3 0.50 frecps v0.4s, v0.4s, v0.4s
+# CHECK-NEXT: 1 3 0.50 frecps d22, d30, d21
+# CHECK-NEXT: 1 3 0.50 frecps s21, s16, s13
+# CHECK-NEXT: 1 3 0.50 frecpx d16, d19
+# CHECK-NEXT: 1 3 0.50 frecpx s18, s10
+# CHECK-NEXT: 1 3 0.50 frinta v0.2d, v0.2d
+# CHECK-NEXT: 1 3 0.50 frinta v0.2s, v0.2s
+# CHECK-NEXT: 1 3 0.50 frinta v0.4h, v0.4h
+# CHECK-NEXT: 1 3 0.50 frinta v0.4s, v0.4s
+# CHECK-NEXT: 1 3 0.50 frinta v0.8h, v0.8h
+# CHECK-NEXT: 1 3 0.50 frinti v0.2d, v0.2d
+# CHECK-NEXT: 1 3 0.50 frinti v0.2s, v0.2s
+# CHECK-NEXT: 1 3 0.50 frinti v0.4h, v0.4h
+# CHECK-NEXT: 1 3 0.50 frinti v0.4s, v0.4s
+# CHECK-NEXT: 1 3 0.50 frinti v0.8h, v0.8h
+# CHECK-NEXT: 1 3 0.50 frintm v0.2d, v0.2d
+# CHECK-NEXT: 1 3 0.50 frintm v0.2s, v0.2s
+# CHECK-NEXT: 1 3 0.50 frintm v0.4h, v0.4h
+# CHECK-NEXT: 1 3 0.50 frintm v0.4s, v0.4s
+# CHECK-NEXT: 1 3 0.50 frintm v0.8h, v0.8h
+# CHECK-NEXT: 1 3 0.50 frintn v0.2d, v0.2d
+# CHECK-NEXT: 1 3 0.50 frintn v0.2s, v0.2s
+# CHECK-NEXT: 1 3 0.50 frintn v0.4h, v0.4h
+# CHECK-NEXT: 1 3 0.50 frintn v0.4s, v0.4s
+# CHECK-NEXT: 1 3 0.50 frintn v0.8h, v0.8h
+# CHECK-NEXT: 1 3 0.50 frintp v0.2d, v0.2d
+# CHECK-NEXT: 1 3 0.50 frintp v0.2s, v0.2s
+# CHECK-NEXT: 1 3 0.50 frintp v0.4h, v0.4h
+# CHECK-NEXT: 1 3 0.50 frintp v0.4s, v0.4s
+# CHECK-NEXT: 1 3 0.50 frintp v0.8h, v0.8h
+# CHECK-NEXT: 1 3 0.50 frintx v0.2d, v0.2d
+# CHECK-NEXT: 1 3 0.50 frintx v0.2s, v0.2s
+# CHECK-NEXT: 1 3 0.50 frintx v0.4h, v0.4h
+# CHECK-NEXT: 1 3 0.50 frintx v0.4s, v0.4s
+# CHECK-NEXT: 1 3 0.50 frintx v0.8h, v0.8h
+# CHECK-NEXT: 1 3 0.50 frintz v0.2d, v0.2d
+# CHECK-NEXT: 1 3 0.50 frintz v0.2s, v0.2s
+# CHECK-NEXT: 1 3 0.50 frintz v0.4h, v0.4h
+# CHECK-NEXT: 1 3 0.50 frintz v0.4s, v0.4s
+# CHECK-NEXT: 1 3 0.50 frintz v0.8h, v0.8h
+# CHECK-NEXT: 2 6 1.00 frsqrte d21, d12
+# CHECK-NEXT: 2 6 1.00 frsqrte s22, s13
+# CHECK-NEXT: 2 6 1.00 frsqrte v0.2d, v0.2d
+# CHECK-NEXT: 2 6 1.00 frsqrte v0.2s, v0.2s
+# CHECK-NEXT: 2 6 1.00 frsqrte v0.4h, v0.4h
+# CHECK-NEXT: 2 6 1.00 frsqrte v0.4s, v0.4s
+# CHECK-NEXT: 2 6 1.00 frsqrte v0.8h, v0.8h
+# CHECK-NEXT: 1 3 0.50 frsqrts d8, d22, d18
+# CHECK-NEXT: 1 3 0.50 frsqrts s21, s5, s12
+# CHECK-NEXT: 1 3 0.50 frsqrts v0.2d, v0.2d, v0.2d
+# CHECK-NEXT: 1 63 1.00 fsqrt v0.2d, v0.2d
+# CHECK-NEXT: 1 33 1.00 fsqrt v0.2s, v0.2s
+# CHECK-NEXT: 1 39 1.00 fsqrt v0.4h, v0.4h
+# CHECK-NEXT: 1 33 1.00 fsqrt v0.4s, v0.4s
+# CHECK-NEXT: 1 39 1.00 fsqrt v0.8h, v0.8h
+# CHECK-NEXT: 1 3 0.50 fsub v0.2s, v0.2s, v0.2s
+# CHECK-NEXT: 1 4 0.50 * ld1 { v0.16b }, [x0]
+# CHECK-NEXT: 3 5 1.50 * ld1 { v0.2d, v1.2d, v2.2d }, [x0], #48
+# CHECK-NEXT: 4 5 2.00 * ld1 { v0.2d, v1.2d, v2.2d, v3.2d }, [x0]
+# CHECK-NEXT: 2 4 1.00 * ld1 { v0.4s, v1.4s }, [sp], #32
+# CHECK-NEXT: 3 5 1.50 * ld1 { v0.4s, v1.4s, v2.4s }, [sp]
+# CHECK-NEXT: 4 5 2.00 * ld1 { v0.8b, v1.8b, v2.8b, v3.8b }, [x0], x3
+# CHECK-NEXT: 1 4 0.50 * ld1 { v0.8h }, [x15], x2
+# CHECK-NEXT: 2 4 1.00 * ld1 { v0.8h, v1.8h }, [x15]
+# CHECK-NEXT: 2 6 0.50 * ld1 { v0.b }[9], [x0]
+# CHECK-NEXT: 2 6 0.50 * ld1 { v0.b }[9], [x0], #1
+# CHECK-NEXT: 2 6 0.50 * ld1r { v0.16b }, [x0]
+# CHECK-NEXT: 2 6 0.50 * ld1r { v0.16b }, [x0], #1
+# CHECK-NEXT: 2 6 0.50 * ld1r { v0.8h }, [x15]
+# CHECK-NEXT: 2 6 0.50 * ld1r { v0.8h }, [x15], #2
+# CHECK-NEXT: 4 6 1.00 * ld2 { v0.16b, v1.16b }, [x0], x1
+# CHECK-NEXT: 5 8 1.50 * ld2 { v0.8b, v1.8b }, [x0]
+# CHECK-NEXT: 4 6 1.00 * ld2 { v0.h, v1.h }[7], [x15]
+# CHECK-NEXT: 4 6 1.00 * ld2 { v0.h, v1.h }[7], [x15], #4
+# CHECK-NEXT: 4 6 1.00 * ld2r { v0.2d, v1.2d }, [x0]
+# CHECK-NEXT: 4 6 1.00 * ld2r { v0.2d, v1.2d }, [x0], #16
+# CHECK-NEXT: 4 6 1.00 * ld2r { v0.4s, v1.4s }, [sp]
+# CHECK-NEXT: 4 6 1.00 * ld2r { v0.4s, v1.4s }, [sp], #8
+# CHECK-NEXT: 6 9 1.50 * ld3 { v0.4h, v1.4h, v2.4h }, [x15]
+# CHECK-NEXT: 6 8 1.50 * ld3 { v0.8h, v1.8h, v2.8h }, [x15], x2
+# CHECK-NEXT: 6 7 1.50 * ld3 { v0.s, v1.s, v2.s }[3], [sp]
+# CHECK-NEXT: 6 7 1.50 * ld3 { v0.s, v1.s, v2.s }[3], [sp], x3
+# CHECK-NEXT: 6 7 1.50 * ld3r { v0.4h, v1.4h, v2.4h }, [x15]
+# CHECK-NEXT: 6 7 1.50 * ld3r { v0.4h, v1.4h, v2.4h }, [x15], #6
+# CHECK-NEXT: 6 7 1.50 * ld3r { v0.8b, v1.8b, v2.8b }, [x0]
+# CHECK-NEXT: 6 7 1.50 * ld3r { v0.8b, v1.8b, v2.8b }, [x0], #3
+# CHECK-NEXT: 12 11 2.00 * ld4 { v0.2s, v1.2s, v2.2s, v3.2s }, [sp]
+# CHECK-NEXT: 12 10 2.00 * ld4 { v0.4s, v1.4s, v2.4s, v3.4s }, [sp], #64
+# CHECK-NEXT: 8 7 2.00 * ld4 { v0.d, v1.d, v2.d, v3.d }[1], [x0]
+# CHECK-NEXT: 8 7 2.00 * ld4 { v0.d, v1.d, v2.d, v3.d }[1], [x0], #32
+# CHECK-NEXT: 8 7 2.00 * ld4 { v0.h, v1.h, v2.h, v3.h }[7], [x0], x0
+# CHECK-NEXT: 4 5 2.00 * ld4r { v0.1d, v1.1d, v2.1d, v3.1d }, [sp]
+# CHECK-NEXT: 4 5 2.00 * ld4r { v0.1d, v1.1d, v2.1d, v3.1d }, [sp], x7
+# CHECK-NEXT: 8 7 2.00 * ld4r { v0.2s, v1.2s, v2.2s, v3.2s }, [sp]
+# CHECK-NEXT: 8 7 2.00 * ld4r { v0.2s, v1.2s, v2.2s, v3.2s }, [sp], x30
+# CHECK-NEXT: 1 3 0.50 mla v0.8b, v0.8b, v0.8b
+# CHECK-NEXT: 1 3 0.50 mls v0.4h, v0.4h, v0.4h
+# CHECK-NEXT: 1 3 0.50 mov b0, v0.b[15]
+# CHECK-NEXT: 1 3 0.50 mov d6, v0.d[1]
+# CHECK-NEXT: 1 3 0.50 mov h2, v0.h[5]
+# CHECK-NEXT: 1 3 0.50 mov s17, v0.s[2]
+# CHECK-NEXT: 1 2 0.50 mov v2.b[0], v0.b[0]
+# CHECK-NEXT: 1 2 0.50 mov v2.h[1], v0.h[1]
+# CHECK-NEXT: 1 2 0.50 mov v2.s[2], v0.s[2]
+# CHECK-NEXT: 1 2 0.50 mov v2.d[1], v0.d[1]
+# CHECK-NEXT: 2 7 1.00 mov v0.b[0], w8
+# CHECK-NEXT: 2 7 1.00 mov v0.h[1], w8
+# CHECK-NEXT: 2 7 1.00 mov v0.s[2], w8
+# CHECK-NEXT: 2 7 1.00 mov v0.d[1], x8
+# CHECK-NEXT: 1 2 0.50 mov v0.16b, v0.16b
+# CHECK-NEXT: 1 2 0.50 mov v0.8b, v0.8b
+# CHECK-NEXT: 1 3 0.50 movi d15, #0xff00ff00ff00ff
+# CHECK-NEXT: 1 2 0.50 movi v0.16b, #31
+# CHECK-NEXT: 1 2 0.50 movi v0.2d, #0xff0000ff0000ffff
+# CHECK-NEXT: 1 2 0.50 movi v0.2s, #8, msl #8
+# CHECK-NEXT: 1 2 0.50 movi v0.4s, #255, lsl #24
+# CHECK-NEXT: 1 2 0.50 movi v0.8b, #255
+# CHECK-NEXT: 1 3 0.50 mul v0.8b, v0.8b, v0.8b
+# CHECK-NEXT: 1 2 0.50 mvni v0.2s, #0
+# CHECK-NEXT: 1 2 0.50 mvni v0.4s, #16, msl #16
+# CHECK-NEXT: 1 3 0.50 neg d29, d24
+# CHECK-NEXT: 1 3 0.50 neg v0.16b, v0.16b
+# CHECK-NEXT: 1 3 0.50 neg v0.2d, v0.2d
+# CHECK-NEXT: 1 3 0.50 neg v0.2s, v0.2s
+# CHECK-NEXT: 1 3 0.50 neg v0.4h, v0.4h
+# CHECK-NEXT: 1 3 0.50 neg v0.4s, v0.4s
+# CHECK-NEXT: 1 3 0.50 neg v0.8b, v0.8b
+# CHECK-NEXT: 1 3 0.50 neg v0.8h, v0.8h
+# CHECK-NEXT: 1 2 0.50 mvn v0.16b, v0.16b
+# CHECK-NEXT: 1 2 0.50 mvn v0.8b, v0.8b
+# CHECK-NEXT: 1 2 0.50 orn v0.16b, v0.16b, v0.16b
+# CHECK-NEXT: 1 2 0.50 mov v0.16b, v0.16b
+# CHECK-NEXT: 1 2 0.50 orr v0.8h, #31
+# CHECK-NEXT: 1 2 0.50 pmul v0.16b, v0.16b, v0.16b
+# CHECK-NEXT: 1 2 0.50 pmul v0.8b, v0.8b, v0.8b
+# CHECK-NEXT: 1 2 0.50 pmull v0.8h, v0.8b, v0.8b
+# CHECK-NEXT: 1 2 0.50 pmull2 v0.8h, v0.16b, v0.16b
+# CHECK-NEXT: 2 6 1.00 raddhn v0.2s, v0.2d, v0.2d
+# CHECK-NEXT: 2 6 1.00 raddhn v0.4h, v0.4s, v0.4s
+# CHECK-NEXT: 2 6 1.00 raddhn v0.8b, v0.8h, v0.8h
+# CHECK-NEXT: 2 6 1.00 raddhn2 v0.16b, v0.8h, v0.8h
+# CHECK-NEXT: 2 6 1.00 raddhn2 v0.4s, v0.2d, v0.2d
+# CHECK-NEXT: 2 6 1.00 raddhn2 v0.8h, v0.4s, v0.4s
+# CHECK-NEXT: 1 2 0.50 rbit v0.16b, v0.16b
+# CHECK-NEXT: 1 2 0.50 rbit v0.8b, v0.8b
+# CHECK-NEXT: 1 2 0.50 rev16 v21.8b, v1.8b
+# CHECK-NEXT: 1 2 0.50 rev16 v30.16b, v31.16b
+# CHECK-NEXT: 1 2 0.50 rev32 v0.4h, v9.4h
+# CHECK-NEXT: 1 2 0.50 rev32 v21.8b, v1.8b
+# CHECK-NEXT: 1 2 0.50 rev32 v30.16b, v31.16b
+# CHECK-NEXT: 1 2 0.50 rev32 v4.8h, v7.8h
+# CHECK-NEXT: 1 2 0.50 rev64 v0.16b, v31.16b
+# CHECK-NEXT: 1 2 0.50 rev64 v1.8b, v9.8b
+# CHECK-NEXT: 1 2 0.50 rev64 v13.4h, v21.4h
+# CHECK-NEXT: 1 2 0.50 rev64 v2.8h, v4.8h
+# CHECK-NEXT: 1 2 0.50 rev64 v4.2s, v0.2s
+# CHECK-NEXT: 1 2 0.50 rev64 v6.4s, v8.4s
+# CHECK-NEXT: 2 6 1.00 rshrn v0.2s, v0.2d, #3
+# CHECK-NEXT: 2 6 1.00 rshrn v0.4h, v0.4s, #3
+# CHECK-NEXT: 2 6 1.00 rshrn v0.8b, v0.8h, #3
+# CHECK-NEXT: 2 6 1.00 rshrn2 v0.16b, v0.8h, #3
+# CHECK-NEXT: 2 6 1.00 rshrn2 v0.4s, v0.2d, #3
+# CHECK-NEXT: 2 6 1.00 rshrn2 v0.8h, v0.4s, #3
+# CHECK-NEXT: 2 6 1.00 rsubhn v0.2s, v0.2d, v0.2d
+# CHECK-NEXT: 2 6 1.00 rsubhn v0.4h, v0.4s, v0.4s
+# CHECK-NEXT: 2 6 1.00 rsubhn v0.8b, v0.8h, v0.8h
+# CHECK-NEXT: 2 6 1.00 rsubhn2 v0.16b, v0.8h, v0.8h
+# CHECK-NEXT: 2 6 1.00 rsubhn2 v0.4s, v0.2d, v0.2d
+# CHECK-NEXT: 2 6 1.00 rsubhn2 v0.8h, v0.4s, v0.4s
+# CHECK-NEXT: 1 2 0.50 saba v0.16b, v0.16b, v0.16b
+# CHECK-NEXT: 1 2 0.50 sabal v0.2d, v0.2s, v0.2s
+# CHECK-NEXT: 1 2 0.50 sabal v0.4s, v0.4h, v0.4h
+# CHECK-NEXT: 1 2 0.50 sabal v0.8h, v0.8b, v0.8b
+# CHECK-NEXT: 1 2 0.50 sabal2 v0.2d, v0.4s, v0.4s
+# CHECK-NEXT: 1 2 0.50 sabal2 v0.4s, v0.8h, v0.8h
+# CHECK-NEXT: 1 2 0.50 sabal2 v0.8h, v0.16b, v0.16b
+# CHECK-NEXT: 1 2 0.50 sabd v0.4h, v0.4h, v0.4h
+# CHECK-NEXT: 1 2 0.50 sabdl v0.2d, v0.2s, v0.2s
+# CHECK-NEXT: 1 2 0.50 sabdl v0.4s, v0.4h, v0.4h
+# CHECK-NEXT: 1 2 0.50 sabdl v0.8h, v0.8b, v0.8b
+# CHECK-NEXT: 1 2 0.50 sabdl2 v0.2d, v0.4s, v0.4s
+# CHECK-NEXT: 1 2 0.50 sabdl2 v0.4s, v0.8h, v0.8h
+# CHECK-NEXT: 1 2 0.50 sabdl2 v0.8h, v0.16b, v0.16b
+# CHECK-NEXT: 1 2 0.50 sadalp v0.1d, v0.2s
+# CHECK-NEXT: 1 2 0.50 sadalp v0.2d, v0.4s
+# CHECK-NEXT: 1 2 0.50 sadalp v0.2s, v0.4h
+# CHECK-NEXT: 1 2 0.50 sadalp v0.4h, v0.8b
+# CHECK-NEXT: 1 2 0.50 sadalp v0.4s, v0.8h
+# CHECK-NEXT: 1 2 0.50 sadalp v0.8h, v0.16b
+# CHECK-NEXT: 1 2 0.50 saddl v0.2d, v0.2s, v0.2s
+# CHECK-NEXT: 1 2 0.50 saddl v0.4s, v0.4h, v0.4h
+# CHECK-NEXT: 1 2 0.50 saddl v0.8h, v0.8b, v0.8b
+# CHECK-NEXT: 1 2 0.50 saddl2 v0.2d, v0.4s, v0.4s
+# CHECK-NEXT: 1 2 0.50 saddl2 v0.4s, v0.8h, v0.8h
+# CHECK-NEXT: 1 2 0.50 saddl2 v0.8h, v0.16b, v0.16b
+# CHECK-NEXT: 1 2 0.50 saddlp v0.1d, v0.2s
+# CHECK-NEXT: 1 2 0.50 saddlp v0.2d, v0.4s
+# CHECK-NEXT: 1 2 0.50 saddlp v0.2s, v0.4h
+# CHECK-NEXT: 1 2 0.50 saddlp v0.4h, v0.8b
+# CHECK-NEXT: 1 2 0.50 saddlp v0.4s, v0.8h
+# CHECK-NEXT: 1 2 0.50 saddlp v0.8h, v0.16b
+# CHECK-NEXT: 1 2 0.50 saddw v0.2d, v0.2d, v0.2s
+# CHECK-NEXT: 1 2 0.50 saddw v0.4s, v0.4s, v0.4h
+# CHECK-NEXT: 1 2 0.50 saddw v0.8h, v0.8h, v0.8b
+# CHECK-NEXT: 1 2 0.50 saddw2 v0.2d, v0.2d, v0.4s
+# CHECK-NEXT: 1 2 0.50 saddw2 v0.4s, v0.4s, v0.8h
+# CHECK-NEXT: 1 2 0.50 saddw2 v0.8h, v0.8h, v0.16b
+# CHECK-NEXT: 1 3 0.50 scvtf d21, d12
+# CHECK-NEXT: 1 3 0.50 scvtf d21, d12, #64
+# CHECK-NEXT: 1 3 0.50 scvtf s22, s13
+# CHECK-NEXT: 1 3 0.50 scvtf s22, s13, #32
+# CHECK-NEXT: 1 3 0.50 scvtf v0.2d, v0.2d
+# CHECK-NEXT: 1 3 0.50 scvtf v0.2d, v0.2d, #3
+# CHECK-NEXT: 1 3 0.50 scvtf v0.2s, v0.2s
+# CHECK-NEXT: 1 3 0.50 scvtf v0.2s, v0.2s, #3
+# CHECK-NEXT: 1 3 0.50 scvtf v0.4h, v0.4h
+# CHECK-NEXT: 1 3 0.50 scvtf v0.4s, v0.4s
+# CHECK-NEXT: 1 3 0.50 scvtf v0.4s, v0.4s, #3
+# CHECK-NEXT: 1 3 0.50 scvtf v0.8h, v0.8h
+# CHECK-NEXT: 1 2 0.50 shadd v0.8b, v0.8b, v0.8b
+# CHECK-NEXT: 1 3 0.50 shl d7, d10, #12
+# CHECK-NEXT: 1 3 0.50 shl v0.16b, v0.16b, #3
+# CHECK-NEXT: 1 3 0.50 shl v0.2d, v0.2d, #3
+# CHECK-NEXT: 1 3 0.50 shl v0.4h, v0.4h, #3
+# CHECK-NEXT: 1 3 0.50 shl v0.4s, v0.4s, #3
+# CHECK-NEXT: 1 3 0.50 shll v0.2d, v0.2s, #32
+# CHECK-NEXT: 1 3 0.50 shll v0.4s, v0.4h, #16
+# CHECK-NEXT: 1 3 0.50 shll v0.8h, v0.8b, #8
+# CHECK-NEXT: 1 3 0.50 shll v0.2d, v0.2s, #32
+# CHECK-NEXT: 1 3 0.50 shll v0.4s, v0.4h, #16
+# CHECK-NEXT: 1 3 0.50 shll v0.8h, v0.8b, #8
+# CHECK-NEXT: 1 3 0.50 shll2 v0.2d, v0.4s, #32
+# CHECK-NEXT: 1 3 0.50 shll2 v0.4s, v0.8h, #16
+# CHECK-NEXT: 1 3 0.50 shll2 v0.8h, v0.16b, #8
+# CHECK-NEXT: 1 3 0.50 shll2 v0.2d, v0.4s, #32
+# CHECK-NEXT: 1 3 0.50 shll2 v0.4s, v0.8h, #16
+# CHECK-NEXT: 1 3 0.50 shll2 v0.8h, v0.16b, #8
+# CHECK-NEXT: 2 6 1.00 shrn v0.2s, v0.2d, #3
+# CHECK-NEXT: 2 6 1.00 shrn v0.4h, v0.4s, #3
+# CHECK-NEXT: 2 6 1.00 shrn v0.8b, v0.8h, #3
+# CHECK-NEXT: 2 6 1.00 shrn2 v0.16b, v0.8h, #3
+# CHECK-NEXT: 2 6 1.00 shrn2 v0.4s, v0.2d, #3
+# CHECK-NEXT: 2 6 1.00 shrn2 v0.8h, v0.4s, #3
+# CHECK-NEXT: 1 2 0.50 shsub v0.2s, v0.2s, v0.2s
+# CHECK-NEXT: 1 2 0.50 shsub v0.4h, v0.4h, v0.4h
+# CHECK-NEXT: 1 3 0.50 sli d10, d14, #12
+# CHECK-NEXT: 1 3 0.50 sli v0.16b, v0.16b, #3
+# CHECK-NEXT: 1 3 0.50 sli v0.2d, v0.2d, #3
+# CHECK-NEXT: 1 3 0.50 sli v0.2s, v0.2s, #3
+# CHECK-NEXT: 1 3 0.50 sli v0.4h, v0.4h, #3
+# CHECK-NEXT: 1 3 0.50 sli v0.4s, v0.4s, #3
+# CHECK-NEXT: 1 3 0.50 sli v0.8b, v0.8b, #3
+# CHECK-NEXT: 1 3 0.50 sli v0.8h, v0.8h, #3
+# CHECK-NEXT: 1 2 0.50 smax v0.2s, v0.2s, v0.2s
+# CHECK-NEXT: 1 2 0.50 smax v0.4h, v0.4h, v0.4h
+# CHECK-NEXT: 1 2 0.50 smax v0.8b, v0.8b, v0.8b
+# CHECK-NEXT: 1 2 0.50 smaxp v0.2s, v0.2s, v0.2s
+# CHECK-NEXT: 1 2 0.50 smaxp v0.4h, v0.4h, v0.4h
+# CHECK-NEXT: 1 2 0.50 smaxp v0.8b, v0.8b, v0.8b
+# CHECK-NEXT: 1 2 0.50 smin v0.16b, v0.16b, v0.16b
+# CHECK-NEXT: 1 2 0.50 smin v0.4s, v0.4s, v0.4s
+# CHECK-NEXT: 1 2 0.50 smin v0.8h, v0.8h, v0.8h
+# CHECK-NEXT: 1 2 0.50 sminp v0.16b, v0.16b, v0.16b
+# CHECK-NEXT: 1 2 0.50 sminp v0.4s, v0.4s, v0.4s
+# CHECK-NEXT: 1 2 0.50 sminp v0.8h, v0.8h, v0.8h
+# CHECK-NEXT: 1 3 0.50 smlal v0.2d, v0.2s, v0.2s
+# CHECK-NEXT: 1 3 0.50 smlal v0.4s, v0.4h, v0.4h
+# CHECK-NEXT: 1 3 0.50 smlal v0.8h, v0.8b, v0.8b
+# CHECK-NEXT: 1 3 0.50 smlal2 v0.2d, v0.4s, v0.4s
+# CHECK-NEXT: 1 3 0.50 smlal2 v0.4s, v0.8h, v0.8h
+# CHECK-NEXT: 1 3 0.50 smlal2 v0.8h, v0.16b, v0.16b
+# CHECK-NEXT: 1 3 0.50 smlsl v0.2d, v0.2s, v0.2s
+# CHECK-NEXT: 1 3 0.50 smlsl v0.4s, v0.4h, v0.4h
+# CHECK-NEXT: 1 3 0.50 smlsl v0.8h, v0.8b, v0.8b
+# CHECK-NEXT: 1 3 0.50 smlsl2 v0.2d, v0.4s, v0.4s
+# CHECK-NEXT: 1 3 0.50 smlsl2 v0.4s, v0.8h, v0.8h
+# CHECK-NEXT: 1 3 0.50 smlsl2 v0.8h, v0.16b, v0.16b
+# CHECK-NEXT: 1 3 0.50 smull v0.2d, v0.2s, v0.2s
+# CHECK-NEXT: 1 3 0.50 smull v0.4s, v0.4h, v0.4h
+# CHECK-NEXT: 1 3 0.50 smull v0.8h, v0.8b, v0.8b
+# CHECK-NEXT: 1 3 0.50 smull2 v0.2d, v0.4s, v0.4s
+# CHECK-NEXT: 1 3 0.50 smull2 v0.4s, v0.8h, v0.8h
+# CHECK-NEXT: 1 3 0.50 smull2 v0.8h, v0.16b, v0.16b
+# CHECK-NEXT: 1 2 0.50 sqabs b19, b14
+# CHECK-NEXT: 1 2 0.50 sqabs d18, d12
+# CHECK-NEXT: 1 2 0.50 sqabs h21, h15
+# CHECK-NEXT: 1 2 0.50 sqabs s20, s12
+# CHECK-NEXT: 1 2 0.50 sqabs v0.16b, v0.16b
+# CHECK-NEXT: 1 2 0.50 sqabs v0.2d, v0.2d
+# CHECK-NEXT: 1 2 0.50 sqabs v0.2s, v0.2s
+# CHECK-NEXT: 1 2 0.50 sqabs v0.4h, v0.4h
+# CHECK-NEXT: 1 2 0.50 sqabs v0.4s, v0.4s
+# CHECK-NEXT: 1 2 0.50 sqabs v0.8b, v0.8b
+# CHECK-NEXT: 1 2 0.50 sqabs v0.8h, v0.8h
+# CHECK-NEXT: 1 2 0.50 sqadd b20, b11, b15
+# CHECK-NEXT: 1 2 0.50 sqadd v0.16b, v0.16b, v0.16b
+# CHECK-NEXT: 1 2 0.50 sqadd v0.2s, v0.2s, v0.2s
+# CHECK-NEXT: 1 3 0.50 sqdmlal d19, s24, s12
+# CHECK-NEXT: 1 3 0.50 sqdmlal d8, s9, v0.s[1]
+# CHECK-NEXT: 1 3 0.50 sqdmlal s0, h0, v0.h[3]
+# CHECK-NEXT: 1 3 0.50 sqdmlal s17, h27, h12
+# CHECK-NEXT: 1 3 0.50 sqdmlal v0.2d, v0.2s, v0.2s
+# CHECK-NEXT: 1 3 0.50 sqdmlal v0.4s, v0.4h, v0.4h
+# CHECK-NEXT: 1 3 0.50 sqdmlal2 v0.2d, v0.4s, v0.4s
+# CHECK-NEXT: 1 3 0.50 sqdmlal2 v0.4s, v0.8h, v0.8h
+# CHECK-NEXT: 1 3 0.50 sqdmlsl d12, s23, s13
+# CHECK-NEXT: 1 3 0.50 sqdmlsl d8, s9, v0.s[1]
+# CHECK-NEXT: 1 3 0.50 sqdmlsl s0, h0, v0.h[3]
+# CHECK-NEXT: 1 3 0.50 sqdmlsl s14, h12, h25
+# CHECK-NEXT: 1 3 0.50 sqdmlsl v0.2d, v0.2s, v0.2s
+# CHECK-NEXT: 1 3 0.50 sqdmlsl v0.4s, v0.4h, v0.4h
+# CHECK-NEXT: 1 3 0.50 sqdmlsl2 v0.2d, v0.4s, v0.4s
+# CHECK-NEXT: 1 3 0.50 sqdmlsl2 v0.4s, v0.8h, v0.8h
+# CHECK-NEXT: 1 3 0.50 sqdmulh h10, h11, h12
+# CHECK-NEXT: 1 3 0.50 sqdmulh h7, h15, v0.h[3]
+# CHECK-NEXT: 1 3 0.50 sqdmulh s15, s14, v0.s[1]
+# CHECK-NEXT: 1 3 0.50 sqdmulh s20, s21, s2
+# CHECK-NEXT: 1 3 0.50 sqdmulh v0.2s, v0.2s, v0.2s
+# CHECK-NEXT: 1 3 0.50 sqdmulh v0.4s, v0.4s, v0.4s
+# CHECK-NEXT: 1 3 0.50 sqdmull d1, s1, v0.s[1]
+# CHECK-NEXT: 1 3 0.50 sqdmull d15, s22, s12
+# CHECK-NEXT: 1 3 0.50 sqdmull s1, h1, v0.h[3]
+# CHECK-NEXT: 1 3 0.50 sqdmull s12, h22, h12
+# CHECK-NEXT: 1 3 0.50 sqdmull v0.2d, v0.2s, v0.2s
+# CHECK-NEXT: 1 3 0.50 sqdmull v0.4s, v0.4h, v0.4h
+# CHECK-NEXT: 1 3 0.50 sqdmull2 v0.2d, v0.4s, v0.4s
+# CHECK-NEXT: 1 3 0.50 sqdmull2 v0.4s, v0.8h, v0.8h
+# CHECK-NEXT: 1 2 0.50 sqneg b19, b14
+# CHECK-NEXT: 1 2 0.50 sqneg d18, d12
+# CHECK-NEXT: 1 2 0.50 sqneg h21, h15
+# CHECK-NEXT: 1 2 0.50 sqneg s20, s12
+# CHECK-NEXT: 1 2 0.50 sqneg v0.16b, v0.16b
+# CHECK-NEXT: 1 2 0.50 sqneg v0.2d, v0.2d
+# CHECK-NEXT: 1 2 0.50 sqneg v0.2s, v0.2s
+# CHECK-NEXT: 1 2 0.50 sqneg v0.4h, v0.4h
+# CHECK-NEXT: 1 2 0.50 sqneg v0.4s, v0.4s
+# CHECK-NEXT: 1 2 0.50 sqneg v0.8b, v0.8b
+# CHECK-NEXT: 1 2 0.50 sqneg v0.8h, v0.8h
+# CHECK-NEXT: 1 3 0.50 sqrdmulh h10, h11, h12
+# CHECK-NEXT: 1 3 0.50 sqrdmulh h7, h15, v0.h[3]
+# CHECK-NEXT: 1 3 0.50 sqrdmulh s15, s14, v0.s[1]
+# CHECK-NEXT: 1 3 0.50 sqrdmulh s20, s21, s2
+# CHECK-NEXT: 1 3 0.50 sqrdmulh v0.4h, v0.4h, v0.4h
+# CHECK-NEXT: 1 3 0.50 sqrdmulh v0.8h, v0.8h, v0.8h
+# CHECK-NEXT: 1 2 0.50 sqrshl d31, d31, d31
+# CHECK-NEXT: 1 2 0.50 sqrshl h3, h4, h15
+# CHECK-NEXT: 1 2 0.50 sqrshl v0.2s, v0.2s, v0.2s
+# CHECK-NEXT: 1 2 0.50 sqrshl v0.4h, v0.4h, v0.4h
+# CHECK-NEXT: 1 2 0.50 sqrshl v0.8b, v0.8b, v0.8b
+# CHECK-NEXT: 1 3 0.50 sqrshrn b10, h13, #2
+# CHECK-NEXT: 1 3 0.50 sqrshrn h15, s10, #6
+# CHECK-NEXT: 1 3 0.50 sqrshrn s15, d12, #9
+# CHECK-NEXT: 1 2 0.50 sqrshrn v0.2s, v0.2d, #3
+# CHECK-NEXT: 1 2 0.50 sqrshrn v0.4h, v0.4s, #3
+# CHECK-NEXT: 1 2 0.50 sqrshrn v0.8b, v0.8h, #3
+# CHECK-NEXT: 1 2 0.50 sqrshrn2 v0.16b, v0.8h, #3
+# CHECK-NEXT: 1 2 0.50 sqrshrn2 v0.4s, v0.2d, #3
+# CHECK-NEXT: 1 2 0.50 sqrshrn2 v0.8h, v0.4s, #3
+# CHECK-NEXT: 1 3 0.50 sqrshrun b17, h10, #6
+# CHECK-NEXT: 1 3 0.50 sqrshrun h10, s13, #15
+# CHECK-NEXT: 1 3 0.50 sqrshrun s22, d16, #31
+# CHECK-NEXT: 1 2 0.50 sqrshrun v0.2s, v0.2d, #3
+# CHECK-NEXT: 1 2 0.50 sqrshrun v0.4h, v0.4s, #3
+# CHECK-NEXT: 1 2 0.50 sqrshrun v0.8b, v0.8h, #3
+# CHECK-NEXT: 1 2 0.50 sqrshrun2 v0.16b, v0.8h, #3
+# CHECK-NEXT: 1 2 0.50 sqrshrun2 v0.4s, v0.2d, #3
+# CHECK-NEXT: 1 2 0.50 sqrshrun2 v0.8h, v0.4s, #3
+# CHECK-NEXT: 1 2 0.50 sqshl b11, b19, #7
+# CHECK-NEXT: 1 2 0.50 sqshl d15, d16, #51
+# CHECK-NEXT: 1 2 0.50 sqshl d31, d31, d31
+# CHECK-NEXT: 1 2 0.50 sqshl h13, h18, #11
+# CHECK-NEXT: 1 2 0.50 sqshl h3, h4, h15
+# CHECK-NEXT: 1 2 0.50 sqshl s14, s17, #22
+# CHECK-NEXT: 1 2 0.50 sqshl v0.16b, v0.16b, #3
+# CHECK-NEXT: 1 2 0.50 sqshl v0.2d, v0.2d, #3
+# CHECK-NEXT: 1 2 0.50 sqshl v0.2s, v0.2s, #3
+# CHECK-NEXT: 1 2 0.50 sqshl v0.2s, v0.2s, v0.2s
+# CHECK-NEXT: 1 2 0.50 sqshl v0.4h, v0.4h, #3
+# CHECK-NEXT: 1 2 0.50 sqshl v0.4h, v0.4h, v0.4h
+# CHECK-NEXT: 1 2 0.50 sqshl v0.4s, v0.4s, #3
+# CHECK-NEXT: 1 2 0.50 sqshl v0.8b, v0.8b, #3
+# CHECK-NEXT: 1 2 0.50 sqshl v0.8b, v0.8b, v0.8b
+# CHECK-NEXT: 1 2 0.50 sqshl v0.8h, v0.8h, #3
+# CHECK-NEXT: 1 2 0.50 sqshlu b15, b18, #6
+# CHECK-NEXT: 1 2 0.50 sqshlu d11, d13, #32
+# CHECK-NEXT: 1 2 0.50 sqshlu h19, h17, #6
+# CHECK-NEXT: 1 2 0.50 sqshlu s16, s14, #25
+# CHECK-NEXT: 1 2 0.50 sqshlu v0.16b, v0.16b, #3
+# CHECK-NEXT: 1 2 0.50 sqshlu v0.2d, v0.2d, #3
+# CHECK-NEXT: 1 2 0.50 sqshlu v0.2s, v0.2s, #3
+# CHECK-NEXT: 1 2 0.50 sqshlu v0.4h, v0.4h, #3
+# CHECK-NEXT: 1 2 0.50 sqshlu v0.4s, v0.4s, #3
+# CHECK-NEXT: 1 2 0.50 sqshlu v0.8b, v0.8b, #3
+# CHECK-NEXT: 1 2 0.50 sqshlu v0.8h, v0.8h, #3
+# CHECK-NEXT: 1 3 0.50 sqshrn b10, h15, #5
+# CHECK-NEXT: 1 3 0.50 sqshrn h17, s10, #4
+# CHECK-NEXT: 1 3 0.50 sqshrn s18, d10, #31
+# CHECK-NEXT: 2 6 1.00 sqshrn v0.2s, v0.2d, #3
+# CHECK-NEXT: 2 6 1.00 sqshrn v0.4h, v0.4s, #3
+# CHECK-NEXT: 2 6 1.00 sqshrn v0.8b, v0.8h, #3
+# CHECK-NEXT: 2 6 1.00 sqshrn2 v0.16b, v0.8h, #3
+# CHECK-NEXT: 2 6 1.00 sqshrn2 v0.4s, v0.2d, #3
+# CHECK-NEXT: 2 6 1.00 sqshrn2 v0.8h, v0.4s, #3
+# CHECK-NEXT: 1 3 0.50 sqshrun b15, h10, #7
+# CHECK-NEXT: 1 3 0.50 sqshrun h20, s14, #3
+# CHECK-NEXT: 1 3 0.50 sqshrun s10, d15, #15
+# CHECK-NEXT: 2 6 1.00 sqshrun v0.2s, v0.2d, #3
+# CHECK-NEXT: 2 6 1.00 sqshrun v0.4h, v0.4s, #3
+# CHECK-NEXT: 2 6 1.00 sqshrun v0.8b, v0.8h, #3
+# CHECK-NEXT: 2 6 1.00 sqshrun2 v0.16b, v0.8h, #3
+# CHECK-NEXT: 2 6 1.00 sqshrun2 v0.4s, v0.2d, #3
+# CHECK-NEXT: 2 6 1.00 sqshrun2 v0.8h, v0.4s, #3
+# CHECK-NEXT: 1 2 0.50 sqsub s20, s10, s7
+# CHECK-NEXT: 1 2 0.50 sqsub v0.2d, v0.2d, v0.2d
+# CHECK-NEXT: 1 2 0.50 sqsub v0.4s, v0.4s, v0.4s
+# CHECK-NEXT: 1 2 0.50 sqsub v0.8b, v0.8b, v0.8b
+# CHECK-NEXT: 1 2 0.50 sqxtn b18, h18
+# CHECK-NEXT: 1 2 0.50 sqxtn h20, s17
+# CHECK-NEXT: 1 2 0.50 sqxtn s19, d14
+# CHECK-NEXT: 1 2 0.50 sqxtn v0.2s, v0.2d
+# CHECK-NEXT: 1 2 0.50 sqxtn v0.4h, v0.4s
+# CHECK-NEXT: 1 2 0.50 sqxtn v0.8b, v0.8h
+# CHECK-NEXT: 1 2 0.50 sqxtn2 v0.16b, v0.8h
+# CHECK-NEXT: 1 2 0.50 sqxtn2 v0.4s, v0.2d
+# CHECK-NEXT: 1 2 0.50 sqxtn2 v0.8h, v0.4s
+# CHECK-NEXT: 1 2 0.50 sqxtun b19, h14
+# CHECK-NEXT: 1 2 0.50 sqxtun h21, s15
+# CHECK-NEXT: 1 2 0.50 sqxtun s20, d12
+# CHECK-NEXT: 1 2 0.50 sqxtun v0.2s, v0.2d
+# CHECK-NEXT: 1 2 0.50 sqxtun v0.4h, v0.4s
+# CHECK-NEXT: 1 2 0.50 sqxtun v0.8b, v0.8h
+# CHECK-NEXT: 1 2 0.50 sqxtun2 v0.16b, v0.8h
+# CHECK-NEXT: 1 2 0.50 sqxtun2 v0.4s, v0.2d
+# CHECK-NEXT: 1 2 0.50 sqxtun2 v0.8h, v0.4s
+# CHECK-NEXT: 1 2 0.50 srhadd v0.2s, v0.2s, v0.2s
+# CHECK-NEXT: 1 2 0.50 srhadd v0.4h, v0.4h, v0.4h
+# CHECK-NEXT: 1 2 0.50 srhadd v0.8b, v0.8b, v0.8b
+# CHECK-NEXT: 1 3 0.50 sri d10, d12, #14
+# CHECK-NEXT: 1 3 0.50 sri v0.16b, v0.16b, #3
+# CHECK-NEXT: 1 3 0.50 sri v0.2d, v0.2d, #3
+# CHECK-NEXT: 1 3 0.50 sri v0.2s, v0.2s, #3
+# CHECK-NEXT: 1 3 0.50 sri v0.4h, v0.4h, #3
+# CHECK-NEXT: 1 3 0.50 sri v0.4s, v0.4s, #3
+# CHECK-NEXT: 1 3 0.50 sri v0.8b, v0.8b, #3
+# CHECK-NEXT: 1 3 0.50 sri v0.8h, v0.8h, #3
+# CHECK-NEXT: 1 3 0.50 srshl d16, d16, d16
+# CHECK-NEXT: 1 3 0.50 srshl v0.2s, v0.2s, v0.2s
+# CHECK-NEXT: 1 3 0.50 srshl v0.4h, v0.4h, v0.4h
+# CHECK-NEXT: 1 3 0.50 srshl v0.8b, v0.8b, v0.8b
+# CHECK-NEXT: 1 3 0.50 srshr d19, d18, #7
+# CHECK-NEXT: 1 3 0.50 srshr v0.16b, v0.16b, #3
+# CHECK-NEXT: 1 3 0.50 srshr v0.2d, v0.2d, #3
+# CHECK-NEXT: 1 3 0.50 srshr v0.2s, v0.2s, #3
+# CHECK-NEXT: 1 3 0.50 srshr v0.4h, v0.4h, #3
+# CHECK-NEXT: 1 3 0.50 srshr v0.4s, v0.4s, #3
+# CHECK-NEXT: 1 3 0.50 srshr v0.8b, v0.8b, #3
+# CHECK-NEXT: 1 3 0.50 srshr v0.8h, v0.8h, #3
+# CHECK-NEXT: 1 3 0.50 srsra d15, d11, #19
+# CHECK-NEXT: 1 2 0.50 srsra v0.16b, v0.16b, #3
+# CHECK-NEXT: 1 2 0.50 srsra v0.2d, v0.2d, #3
+# CHECK-NEXT: 1 2 0.50 srsra v0.2s, v0.2s, #3
+# CHECK-NEXT: 1 2 0.50 srsra v0.4h, v0.4h, #3
+# CHECK-NEXT: 1 2 0.50 srsra v0.4s, v0.4s, #3
+# CHECK-NEXT: 1 2 0.50 srsra v0.8b, v0.8b, #3
+# CHECK-NEXT: 1 2 0.50 srsra v0.8h, v0.8h, #3
+# CHECK-NEXT: 1 3 0.50 sshl d31, d31, d31
+# CHECK-NEXT: 1 2 0.50 sshl v0.2d, v0.2d, v0.2d
+# CHECK-NEXT: 1 3 0.50 sshl v0.2s, v0.2s, v0.2s
+# CHECK-NEXT: 1 3 0.50 sshl v0.4h, v0.4h, v0.4h
+# CHECK-NEXT: 1 3 0.50 sshl v0.8b, v0.8b, v0.8b
+# CHECK-NEXT: 1 3 0.50 sshll v0.2d, v0.2s, #3
+# CHECK-NEXT: 1 3 0.50 sshll2 v0.4s, v0.8h, #3
+# CHECK-NEXT: 1 3 0.50 sshr d15, d16, #12
+# CHECK-NEXT: 1 3 0.50 sshr v0.16b, v0.16b, #3
+# CHECK-NEXT: 1 3 0.50 sshr v0.2d, v0.2d, #3
+# CHECK-NEXT: 1 3 0.50 sshr v0.2s, v0.2s, #3
+# CHECK-NEXT: 1 3 0.50 sshr v0.4h, v0.4h, #3
+# CHECK-NEXT: 1 3 0.50 sshr v0.4s, v0.4s, #3
+# CHECK-NEXT: 1 3 0.50 sshr v0.8b, v0.8b, #3
+# CHECK-NEXT: 1 3 0.50 sshr v0.8h, v0.8h, #3
+# CHECK-NEXT: 1 3 0.50 ssra d18, d12, #21
+# CHECK-NEXT: 1 2 0.50 ssra v0.16b, v0.16b, #3
+# CHECK-NEXT: 1 2 0.50 ssra v0.2d, v0.2d, #3
+# CHECK-NEXT: 1 2 0.50 ssra v0.2s, v0.2s, #3
+# CHECK-NEXT: 1 2 0.50 ssra v0.4h, v0.4h, #3
+# CHECK-NEXT: 1 2 0.50 ssra v0.4s, v0.4s, #3
+# CHECK-NEXT: 1 2 0.50 ssra v0.8b, v0.8b, #3
+# CHECK-NEXT: 1 2 0.50 ssra v0.8h, v0.8h, #3
+# CHECK-NEXT: 1 2 0.50 ssubl v0.2d, v0.2s, v0.2s
+# CHECK-NEXT: 1 2 0.50 ssubl v0.4s, v0.4h, v0.4h
+# CHECK-NEXT: 1 2 0.50 ssubl v0.8h, v0.8b, v0.8b
+# CHECK-NEXT: 1 2 0.50 ssubl2 v0.2d, v0.4s, v0.4s
+# CHECK-NEXT: 1 2 0.50 ssubl2 v0.4s, v0.8h, v0.8h
+# CHECK-NEXT: 1 2 0.50 ssubl2 v0.8h, v0.16b, v0.16b
+# CHECK-NEXT: 1 2 0.50 ssubw v0.2d, v0.2d, v0.2s
+# CHECK-NEXT: 1 2 0.50 ssubw v0.4s, v0.4s, v0.4h
+# CHECK-NEXT: 1 2 0.50 ssubw v0.8h, v0.8h, v0.8b
+# CHECK-NEXT: 1 2 0.50 ssubw2 v0.2d, v0.2d, v0.4s
+# CHECK-NEXT: 1 2 0.50 ssubw2 v0.4s, v0.4s, v0.8h
+# CHECK-NEXT: 1 2 0.50 ssubw2 v0.8h, v0.8h, v0.16b
+# CHECK-NEXT: 2 2 1.00 * st1 { v0.16b }, [x0]
+# CHECK-NEXT: 6 4 3.00 * st1 { v0.2d, v1.2d, v2.2d }, [x0], #48
+# CHECK-NEXT: 8 5 4.00 * st1 { v0.2d, v1.2d, v2.2d, v3.2d }, [x0]
+# CHECK-NEXT: 4 3 2.00 * st1 { v0.4s, v1.4s }, [sp], #32
+# CHECK-NEXT: 6 4 3.00 * st1 { v0.4s, v1.4s, v2.4s }, [sp]
+# CHECK-NEXT: 8 5 4.00 * st1 { v0.8b, v1.8b, v2.8b, v3.8b }, [x0], x3
+# CHECK-NEXT: 2 2 1.00 * st1 { v0.8h }, [x15], x2
+# CHECK-NEXT: 4 3 2.00 * st1 { v0.8h, v1.8h }, [x15]
+# CHECK-NEXT: 3 4 1.00 * st1 { v0.d }[1], [x0]
+# CHECK-NEXT: 3 4 1.00 * st1 { v0.d }[1], [x0], #8
+# CHECK-NEXT: 6 5 2.00 * st2 { v0.16b, v1.16b }, [x0], x1
+# CHECK-NEXT: 6 6 2.00 * st2 { v0.8b, v1.8b }, [x0]
+# CHECK-NEXT: 6 5 2.00 * st2 { v0.s, v1.s }[3], [sp]
+# CHECK-NEXT: 6 5 2.00 * st2 { v0.s, v1.s }[3], [sp], #8
+# CHECK-NEXT: 9 6 3.00 * st3 { v0.4h, v1.4h, v2.4h }, [x15]
+# CHECK-NEXT: 9 6 3.00 * st3 { v0.8h, v1.8h, v2.8h }, [x15], x2
+# CHECK-NEXT: 9 6 3.00 * st3 { v0.h, v1.h, v2.h }[7], [x15]
+# CHECK-NEXT: 9 6 3.00 * st3 { v0.h, v1.h, v2.h }[7], [x15], #6
+# CHECK-NEXT: 14 9 4.00 * st4 { v0.2s, v1.2s, v2.2s, v3.2s }, [sp]
+# CHECK-NEXT: 12 7 4.00 * st4 { v0.4s, v1.4s, v2.4s, v3.4s }, [sp], #64
+# CHECK-NEXT: 12 7 4.00 * st4 { v0.b, v1.b, v2.b, v3.b }[9], [x0]
+# CHECK-NEXT: 12 7 4.00 * st4 { v0.b, v1.b, v2.b, v3.b }[9], [x0], x5
+# CHECK-NEXT: 1 2 0.50 sub d15, d5, d16
+# CHECK-NEXT: 1 2 0.50 sub v0.2d, v0.2d, v0.2d
+# CHECK-NEXT: 1 2 0.50 suqadd b19, b14
+# CHECK-NEXT: 1 2 0.50 suqadd d18, d22
+# CHECK-NEXT: 1 2 0.50 suqadd h20, h15
+# CHECK-NEXT: 1 2 0.50 suqadd s21, s12
+# CHECK-NEXT: 1 2 0.50 suqadd v0.16b, v0.16b
+# CHECK-NEXT: 1 2 0.50 suqadd v0.2d, v0.2d
+# CHECK-NEXT: 1 2 0.50 suqadd v0.2s, v0.2s
+# CHECK-NEXT: 1 2 0.50 suqadd v0.4h, v0.4h
+# CHECK-NEXT: 1 2 0.50 suqadd v0.4s, v0.4s
+# CHECK-NEXT: 1 2 0.50 suqadd v0.8b, v0.8b
+# CHECK-NEXT: 1 2 0.50 suqadd v0.8h, v0.8h
+# CHECK-NEXT: 1 2 0.50 tbl v0.16b, { v0.16b }, v0.16b
+# CHECK-NEXT: 2 4 1.00 tbl v0.16b, { v0.16b, v1.16b }, v0.16b
+# CHECK-NEXT: 3 6 1.50 tbl v0.16b, { v0.16b, v1.16b, v2.16b }, v0.16b
+# CHECK-NEXT: 4 8 2.00 tbl v0.16b, { v0.16b, v1.16b, v2.16b, v3.16b }, v0.16b
+# CHECK-NEXT: 1 2 0.50 tbl v0.8b, { v0.16b }, v0.8b
+# CHECK-NEXT: 2 4 1.00 tbl v0.8b, { v0.16b, v1.16b }, v0.8b
+# CHECK-NEXT: 3 6 1.50 tbl v0.8b, { v0.16b, v1.16b, v2.16b }, v0.8b
+# CHECK-NEXT: 4 8 2.00 tbl v0.8b, { v0.16b, v1.16b, v2.16b, v3.16b }, v0.8b
+# CHECK-NEXT: 1 2 0.50 tbx v0.16b, { v0.16b }, v0.16b
+# CHECK-NEXT: 2 4 1.00 tbx v0.16b, { v0.16b, v1.16b }, v0.16b
+# CHECK-NEXT: 3 6 1.50 tbx v0.16b, { v0.16b, v1.16b, v2.16b }, v0.16b
+# CHECK-NEXT: 4 8 2.00 tbx v0.16b, { v0.16b, v1.16b, v2.16b, v3.16b }, v0.16b
+# CHECK-NEXT: 1 2 0.50 tbx v0.8b, { v0.16b }, v0.8b
+# CHECK-NEXT: 2 4 1.00 tbx v0.8b, { v0.16b, v1.16b }, v0.8b
+# CHECK-NEXT: 3 6 1.50 tbx v0.8b, { v0.16b, v1.16b, v2.16b }, v0.8b
+# CHECK-NEXT: 4 8 2.00 tbx v0.8b, { v0.16b, v1.16b, v2.16b, v3.16b }, v0.8b
+# CHECK-NEXT: 1 2 0.50 trn1 v0.16b, v0.16b, v0.16b
+# CHECK-NEXT: 1 2 0.50 trn1 v0.2d, v0.2d, v0.2d
+# CHECK-NEXT: 1 2 0.50 trn1 v0.2s, v0.2s, v0.2s
+# CHECK-NEXT: 1 2 0.50 trn1 v0.4h, v0.4h, v0.4h
+# CHECK-NEXT: 1 2 0.50 trn1 v0.4s, v0.4s, v0.4s
+# CHECK-NEXT: 1 2 0.50 trn1 v0.8b, v0.8b, v0.8b
+# CHECK-NEXT: 1 2 0.50 trn1 v0.8h, v0.8h, v0.8h
+# CHECK-NEXT: 1 2 0.50 trn2 v0.16b, v0.16b, v0.16b
+# CHECK-NEXT: 1 2 0.50 trn2 v0.2d, v0.2d, v0.2d
+# CHECK-NEXT: 1 2 0.50 trn2 v0.2s, v0.2s, v0.2s
+# CHECK-NEXT: 1 2 0.50 trn2 v0.4h, v0.4h, v0.4h
+# CHECK-NEXT: 1 2 0.50 trn2 v0.4s, v0.4s, v0.4s
+# CHECK-NEXT: 1 2 0.50 trn2 v0.8b, v0.8b, v0.8b
+# CHECK-NEXT: 1 2 0.50 trn2 v0.8h, v0.8h, v0.8h
+# CHECK-NEXT: 1 2 0.50 uaba v0.8b, v0.8b, v0.8b
+# CHECK-NEXT: 1 2 0.50 uabal v0.2d, v0.2s, v0.2s
+# CHECK-NEXT: 1 2 0.50 uabal v0.4s, v0.4h, v0.4h
+# CHECK-NEXT: 1 2 0.50 uabal v0.8h, v0.8b, v0.8b
+# CHECK-NEXT: 1 2 0.50 uabal2 v0.2d, v0.4s, v0.4s
+# CHECK-NEXT: 1 2 0.50 uabal2 v0.4s, v0.8h, v0.8h
+# CHECK-NEXT: 1 2 0.50 uabal2 v0.8h, v0.16b, v0.16b
+# CHECK-NEXT: 1 2 0.50 uabd v0.4h, v0.4h, v0.4h
+# CHECK-NEXT: 1 2 0.50 uabdl v0.2d, v0.2s, v0.2s
+# CHECK-NEXT: 1 2 0.50 uabdl v0.4s, v0.4h, v0.4h
+# CHECK-NEXT: 1 2 0.50 uabdl v0.8h, v0.8b, v0.8b
+# CHECK-NEXT: 1 2 0.50 uabdl2 v0.2d, v0.4s, v0.4s
+# CHECK-NEXT: 1 2 0.50 uabdl2 v0.4s, v0.8h, v0.8h
+# CHECK-NEXT: 1 2 0.50 uabdl2 v0.8h, v0.16b, v0.16b
+# CHECK-NEXT: 1 2 0.50 uadalp v0.1d, v0.2s
+# CHECK-NEXT: 1 2 0.50 uadalp v0.2d, v0.4s
+# CHECK-NEXT: 1 2 0.50 uadalp v0.2s, v0.4h
+# CHECK-NEXT: 1 2 0.50 uadalp v0.4h, v0.8b
+# CHECK-NEXT: 1 2 0.50 uadalp v0.4s, v0.8h
+# CHECK-NEXT: 1 2 0.50 uadalp v0.8h, v0.16b
+# CHECK-NEXT: 1 2 0.50 uaddl v0.2d, v0.2s, v0.2s
+# CHECK-NEXT: 1 2 0.50 uaddl v0.4s, v0.4h, v0.4h
+# CHECK-NEXT: 1 2 0.50 uaddl v0.8h, v0.8b, v0.8b
+# CHECK-NEXT: 1 2 0.50 uaddl2 v0.2d, v0.4s, v0.4s
+# CHECK-NEXT: 1 2 0.50 uaddl2 v0.4s, v0.8h, v0.8h
+# CHECK-NEXT: 1 2 0.50 uaddl2 v0.8h, v0.16b, v0.16b
+# CHECK-NEXT: 1 2 0.50 uaddlp v0.1d, v0.2s
+# CHECK-NEXT: 1 2 0.50 uaddlp v0.2d, v0.4s
+# CHECK-NEXT: 1 2 0.50 uaddlp v0.2s, v0.4h
+# CHECK-NEXT: 1 2 0.50 uaddlp v0.4h, v0.8b
+# CHECK-NEXT: 1 2 0.50 uaddlp v0.4s, v0.8h
+# CHECK-NEXT: 1 2 0.50 uaddlp v0.8h, v0.16b
+# CHECK-NEXT: 1 2 0.50 uaddw v0.2d, v0.2d, v0.2s
+# CHECK-NEXT: 1 2 0.50 uaddw v0.4s, v0.4s, v0.4h
+# CHECK-NEXT: 1 2 0.50 uaddw v0.8h, v0.8h, v0.8b
+# CHECK-NEXT: 1 2 0.50 uaddw2 v0.2d, v0.2d, v0.4s
+# CHECK-NEXT: 1 2 0.50 uaddw2 v0.4s, v0.4s, v0.8h
+# CHECK-NEXT: 1 2 0.50 uaddw2 v0.8h, v0.8h, v0.16b
+# CHECK-NEXT: 1 3 0.50 ucvtf d21, d14
+# CHECK-NEXT: 1 3 0.50 ucvtf d21, d14, #64
+# CHECK-NEXT: 1 3 0.50 ucvtf s22, s13
+# CHECK-NEXT: 1 3 0.50 ucvtf s22, s13, #32
+# CHECK-NEXT: 1 3 0.50 ucvtf v0.2d, v0.2d
+# CHECK-NEXT: 1 3 0.50 ucvtf v0.2d, v0.2d, #3
+# CHECK-NEXT: 1 3 0.50 ucvtf v0.2s, v0.2s
+# CHECK-NEXT: 1 3 0.50 ucvtf v0.2s, v0.2s, #3
+# CHECK-NEXT: 1 3 0.50 ucvtf v0.4h, v0.4h
+# CHECK-NEXT: 1 3 0.50 ucvtf v0.4s, v0.4s
+# CHECK-NEXT: 1 3 0.50 ucvtf v0.4s, v0.4s, #3
+# CHECK-NEXT: 1 3 0.50 ucvtf v0.8h, v0.8h
+# CHECK-NEXT: 1 2 0.50 uhadd v0.16b, v0.16b, v0.16b
+# CHECK-NEXT: 1 2 0.50 uhadd v0.8h, v0.8h, v0.8h
+# CHECK-NEXT: 1 2 0.50 uhsub v0.4s, v0.4s, v0.4s
+# CHECK-NEXT: 1 2 0.50 umax v0.16b, v0.16b, v0.16b
+# CHECK-NEXT: 1 2 0.50 umax v0.4s, v0.4s, v0.4s
+# CHECK-NEXT: 1 2 0.50 umax v0.8h, v0.8h, v0.8h
+# CHECK-NEXT: 1 2 0.50 umaxp v0.16b, v0.16b, v0.16b
+# CHECK-NEXT: 1 2 0.50 umaxp v0.4s, v0.4s, v0.4s
+# CHECK-NEXT: 1 2 0.50 umaxp v0.8h, v0.8h, v0.8h
+# CHECK-NEXT: 1 2 0.50 umin v0.2s, v0.2s, v0.2s
+# CHECK-NEXT: 1 2 0.50 umin v0.4h, v0.4h, v0.4h
+# CHECK-NEXT: 1 2 0.50 umin v0.8b, v0.8b, v0.8b
+# CHECK-NEXT: 1 2 0.50 uminp v0.2s, v0.2s, v0.2s
+# CHECK-NEXT: 1 2 0.50 uminp v0.4h, v0.4h, v0.4h
+# CHECK-NEXT: 1 2 0.50 uminp v0.8b, v0.8b, v0.8b
+# CHECK-NEXT: 1 3 0.50 umlal v0.2d, v0.2s, v0.2s
+# CHECK-NEXT: 1 3 0.50 umlal v0.4s, v0.4h, v0.4h
+# CHECK-NEXT: 1 3 0.50 umlal v0.8h, v0.8b, v0.8b
+# CHECK-NEXT: 1 3 0.50 umlal2 v0.2d, v0.4s, v0.4s
+# CHECK-NEXT: 1 3 0.50 umlal2 v0.4s, v0.8h, v0.8h
+# CHECK-NEXT: 1 3 0.50 umlal2 v0.8h, v0.16b, v0.16b
+# CHECK-NEXT: 1 3 0.50 umlsl v0.2d, v0.2s, v0.2s
+# CHECK-NEXT: 1 3 0.50 umlsl v0.4s, v0.4h, v0.4h
+# CHECK-NEXT: 1 3 0.50 umlsl v0.8h, v0.8b, v0.8b
+# CHECK-NEXT: 1 3 0.50 umlsl2 v0.2d, v0.4s, v0.4s
+# CHECK-NEXT: 1 3 0.50 umlsl2 v0.4s, v0.8h, v0.8h
+# CHECK-NEXT: 1 3 0.50 umlsl2 v0.8h, v0.16b, v0.16b
+# CHECK-NEXT: 1 3 0.50 umull v0.2d, v0.2s, v0.2s
+# CHECK-NEXT: 1 3 0.50 umull v0.4s, v0.4h, v0.4h
+# CHECK-NEXT: 1 3 0.50 umull v0.8h, v0.8b, v0.8b
+# CHECK-NEXT: 1 3 0.50 umull2 v0.2d, v0.4s, v0.4s
+# CHECK-NEXT: 1 3 0.50 umull2 v0.4s, v0.8h, v0.8h
+# CHECK-NEXT: 1 3 0.50 umull2 v0.8h, v0.16b, v0.16b
+# CHECK-NEXT: 1 2 0.50 uqadd h0, h1, h5
+# CHECK-NEXT: 1 2 0.50 uqadd v0.8h, v0.8h, v0.8h
+# CHECK-NEXT: 1 2 0.50 uqrshl b11, b20, b30
+# CHECK-NEXT: 1 2 0.50 uqrshl s23, s20, s16
+# CHECK-NEXT: 1 2 0.50 uqrshl v0.16b, v0.16b, v0.16b
+# CHECK-NEXT: 1 2 0.50 uqrshl v0.4s, v0.4s, v0.4s
+# CHECK-NEXT: 1 2 0.50 uqrshl v0.4s, v0.4s, v0.4s
+# CHECK-NEXT: 1 2 0.50 uqrshl v0.8h, v0.8h, v0.8h
+# CHECK-NEXT: 1 3 0.50 uqrshrn b10, h12, #5
+# CHECK-NEXT: 1 3 0.50 uqrshrn h12, s10, #14
+# CHECK-NEXT: 1 3 0.50 uqrshrn s10, d10, #25
+# CHECK-NEXT: 1 2 0.50 uqrshrn v0.2s, v0.2d, #3
+# CHECK-NEXT: 1 2 0.50 uqrshrn v0.4h, v0.4s, #3
+# CHECK-NEXT: 1 2 0.50 uqrshrn v0.8b, v0.8h, #3
+# CHECK-NEXT: 1 2 0.50 uqrshrn2 v0.16b, v0.8h, #3
+# CHECK-NEXT: 1 2 0.50 uqrshrn2 v0.4s, v0.2d, #3
+# CHECK-NEXT: 1 2 0.50 uqrshrn2 v0.8h, v0.4s, #3
+# CHECK-NEXT: 1 2 0.50 uqshl b11, b20, b30
+# CHECK-NEXT: 1 2 0.50 uqshl b18, b15, #6
+# CHECK-NEXT: 1 2 0.50 uqshl d15, d12, #19
+# CHECK-NEXT: 1 2 0.50 uqshl h11, h18, #7
+# CHECK-NEXT: 1 2 0.50 uqshl s14, s19, #18
+# CHECK-NEXT: 1 2 0.50 uqshl s23, s20, s16
+# CHECK-NEXT: 1 2 0.50 uqshl v0.16b, v0.16b, #3
+# CHECK-NEXT: 1 2 0.50 uqshl v0.16b, v0.16b, v0.16b
+# CHECK-NEXT: 1 2 0.50 uqshl v0.2d, v0.2d, #3
+# CHECK-NEXT: 1 2 0.50 uqshl v0.2d, v0.2d, v0.2d
+# CHECK-NEXT: 1 2 0.50 uqshl v0.2s, v0.2s, #3
+# CHECK-NEXT: 1 2 0.50 uqshl v0.4h, v0.4h, #3
+# CHECK-NEXT: 1 2 0.50 uqshl v0.4s, v0.4s, #3
+# CHECK-NEXT: 1 2 0.50 uqshl v0.4s, v0.4s, v0.4s
+# CHECK-NEXT: 1 2 0.50 uqshl v0.8b, v0.8b, #3
+# CHECK-NEXT: 1 2 0.50 uqshl v0.8h, v0.8h, #3
+# CHECK-NEXT: 1 2 0.50 uqshl v0.8h, v0.8h, v0.8h
+# CHECK-NEXT: 1 3 0.50 uqshrn b12, h10, #7
+# CHECK-NEXT: 1 3 0.50 uqshrn h10, s14, #5
+# CHECK-NEXT: 1 3 0.50 uqshrn s10, d12, #13
+# CHECK-NEXT: 1 2 0.50 uqshrn v0.2s, v0.2d, #3
+# CHECK-NEXT: 1 2 0.50 uqshrn v0.4h, v0.4s, #3
+# CHECK-NEXT: 1 2 0.50 uqshrn v0.8b, v0.8h, #3
+# CHECK-NEXT: 1 2 0.50 uqshrn2 v0.16b, v0.8h, #3
+# CHECK-NEXT: 1 2 0.50 uqshrn2 v0.4s, v0.2d, #3
+# CHECK-NEXT: 1 2 0.50 uqshrn2 v0.8h, v0.4s, #3
+# CHECK-NEXT: 1 2 0.50 uqsub d16, d16, d16
+# CHECK-NEXT: 1 2 0.50 uqsub v0.4h, v0.4h, v0.4h
+# CHECK-NEXT: 2 6 1.00 uqxtn b18, h18
+# CHECK-NEXT: 2 6 1.00 uqxtn h20, s17
+# CHECK-NEXT: 2 6 1.00 uqxtn s19, d14
+# CHECK-NEXT: 2 6 1.00 uqxtn v0.2s, v0.2d
+# CHECK-NEXT: 2 6 1.00 uqxtn v0.4h, v0.4s
+# CHECK-NEXT: 2 6 1.00 uqxtn v0.8b, v0.8h
+# CHECK-NEXT: 2 6 1.00 uqxtn2 v0.16b, v0.8h
+# CHECK-NEXT: 2 6 1.00 uqxtn2 v0.4s, v0.2d
+# CHECK-NEXT: 2 6 1.00 uqxtn2 v0.8h, v0.4s
+# CHECK-NEXT: 1 2 0.50 urecpe v0.2s, v0.2s
+# CHECK-NEXT: 1 2 0.50 urecpe v0.4s, v0.4s
+# CHECK-NEXT: 1 2 0.50 urhadd v0.16b, v0.16b, v0.16b
+# CHECK-NEXT: 1 2 0.50 urhadd v0.4s, v0.4s, v0.4s
+# CHECK-NEXT: 1 2 0.50 urhadd v0.8h, v0.8h, v0.8h
+# CHECK-NEXT: 1 3 0.50 urshl d8, d7, d4
+# CHECK-NEXT: 1 3 0.50 urshl v0.16b, v0.16b, v0.16b
+# CHECK-NEXT: 1 3 0.50 urshl v0.2d, v0.2d, v0.2d
+# CHECK-NEXT: 1 3 0.50 urshl v0.4s, v0.4s, v0.4s
+# CHECK-NEXT: 1 3 0.50 urshl v0.8h, v0.8h, v0.8h
+# CHECK-NEXT: 1 3 0.50 urshr d20, d23, #31
+# CHECK-NEXT: 1 3 0.50 urshr v0.16b, v0.16b, #3
+# CHECK-NEXT: 1 3 0.50 urshr v0.2d, v0.2d, #3
+# CHECK-NEXT: 1 3 0.50 urshr v0.2s, v0.2s, #3
+# CHECK-NEXT: 1 3 0.50 urshr v0.4h, v0.4h, #3
+# CHECK-NEXT: 1 3 0.50 urshr v0.4s, v0.4s, #3
+# CHECK-NEXT: 1 3 0.50 urshr v0.8b, v0.8b, #3
+# CHECK-NEXT: 1 3 0.50 urshr v0.8h, v0.8h, #3
+# CHECK-NEXT: 1 2 0.50 ursqrte v0.2s, v0.2s
+# CHECK-NEXT: 1 2 0.50 ursqrte v0.4s, v0.4s
+# CHECK-NEXT: 1 3 0.50 ursra d18, d10, #13
+# CHECK-NEXT: 1 2 0.50 ursra v0.16b, v0.16b, #3
+# CHECK-NEXT: 1 2 0.50 ursra v0.2d, v0.2d, #3
+# CHECK-NEXT: 1 2 0.50 ursra v0.2s, v0.2s, #3
+# CHECK-NEXT: 1 2 0.50 ursra v0.4h, v0.4h, #3
+# CHECK-NEXT: 1 2 0.50 ursra v0.4s, v0.4s, #3
+# CHECK-NEXT: 1 2 0.50 ursra v0.8b, v0.8b, #3
+# CHECK-NEXT: 1 2 0.50 ursra v0.8h, v0.8h, #3
+# CHECK-NEXT: 1 3 0.50 ushl d0, d0, d0
+# CHECK-NEXT: 1 2 0.50 ushl v0.16b, v0.16b, v0.16b
+# CHECK-NEXT: 1 2 0.50 ushl v0.4s, v0.4s, v0.4s
+# CHECK-NEXT: 1 2 0.50 ushl v0.8h, v0.8h, v0.8h
+# CHECK-NEXT: 1 3 0.50 ushll v0.4s, v0.4h, #3
+# CHECK-NEXT: 1 3 0.50 ushll2 v0.8h, v0.16b, #3
+# CHECK-NEXT: 1 3 0.50 ushr d10, d17, #18
+# CHECK-NEXT: 1 3 0.50 ushr v0.16b, v0.16b, #3
+# CHECK-NEXT: 1 3 0.50 ushr v0.2d, v0.2d, #3
+# CHECK-NEXT: 1 3 0.50 ushr v0.2s, v0.2s, #3
+# CHECK-NEXT: 1 3 0.50 ushr v0.4h, v0.4h, #3
+# CHECK-NEXT: 1 3 0.50 ushr v0.4s, v0.4s, #3
+# CHECK-NEXT: 1 3 0.50 ushr v0.8b, v0.8b, #3
+# CHECK-NEXT: 1 3 0.50 ushr v0.8h, v0.8h, #3
+# CHECK-NEXT: 1 2 0.50 usqadd b19, b14
+# CHECK-NEXT: 1 2 0.50 usqadd d18, d22
+# CHECK-NEXT: 1 2 0.50 usqadd h20, h15
+# CHECK-NEXT: 1 2 0.50 usqadd s21, s12
+# CHECK-NEXT: 1 2 0.50 usqadd v0.16b, v0.16b
+# CHECK-NEXT: 1 2 0.50 usqadd v0.2d, v0.2d
+# CHECK-NEXT: 1 2 0.50 usqadd v0.2s, v0.2s
+# CHECK-NEXT: 1 2 0.50 usqadd v0.4h, v0.4h
+# CHECK-NEXT: 1 2 0.50 usqadd v0.4s, v0.4s
+# CHECK-NEXT: 1 2 0.50 usqadd v0.8b, v0.8b
+# CHECK-NEXT: 1 2 0.50 usqadd v0.8h, v0.8h
+# CHECK-NEXT: 1 3 0.50 usra d20, d13, #61
+# CHECK-NEXT: 1 2 0.50 usra v0.16b, v0.16b, #3
+# CHECK-NEXT: 1 2 0.50 usra v0.2d, v0.2d, #3
+# CHECK-NEXT: 1 2 0.50 usra v0.2s, v0.2s, #3
+# CHECK-NEXT: 1 2 0.50 usra v0.4h, v0.4h, #3
+# CHECK-NEXT: 1 2 0.50 usra v0.4s, v0.4s, #3
+# CHECK-NEXT: 1 2 0.50 usra v0.8b, v0.8b, #3
+# CHECK-NEXT: 1 2 0.50 usra v0.8h, v0.8h, #3
+# CHECK-NEXT: 1 2 0.50 usubl v0.2d, v0.2s, v0.2s
+# CHECK-NEXT: 1 2 0.50 usubl v0.4s, v0.4h, v0.4h
+# CHECK-NEXT: 1 2 0.50 usubl v0.8h, v0.8b, v0.8b
+# CHECK-NEXT: 1 2 0.50 usubl2 v0.2d, v0.4s, v0.4s
+# CHECK-NEXT: 1 2 0.50 usubl2 v0.4s, v0.8h, v0.8h
+# CHECK-NEXT: 1 2 0.50 usubl2 v0.8h, v0.16b, v0.16b
+# CHECK-NEXT: 1 2 0.50 usubw v0.2d, v0.2d, v0.2s
+# CHECK-NEXT: 1 2 0.50 usubw v0.4s, v0.4s, v0.4h
+# CHECK-NEXT: 1 2 0.50 usubw v0.8h, v0.8h, v0.8b
+# CHECK-NEXT: 1 2 0.50 usubw2 v0.2d, v0.2d, v0.4s
+# CHECK-NEXT: 1 2 0.50 usubw2 v0.4s, v0.4s, v0.8h
+# CHECK-NEXT: 1 2 0.50 usubw2 v0.8h, v0.8h, v0.16b
+# CHECK-NEXT: 1 2 0.50 uzp1 v0.16b, v0.16b, v0.16b
+# CHECK-NEXT: 1 2 0.50 uzp1 v0.2d, v0.2d, v0.2d
+# CHECK-NEXT: 1 2 0.50 uzp1 v0.2s, v0.2s, v0.2s
+# CHECK-NEXT: 1 2 0.50 uzp1 v0.4h, v0.4h, v0.4h
+# CHECK-NEXT: 1 2 0.50 uzp1 v0.4s, v0.4s, v0.4s
+# CHECK-NEXT: 1 2 0.50 uzp1 v0.8b, v0.8b, v0.8b
+# CHECK-NEXT: 1 2 0.50 uzp1 v0.8h, v0.8h, v0.8h
+# CHECK-NEXT: 1 2 0.50 uzp2 v0.16b, v0.16b, v0.16b
+# CHECK-NEXT: 1 2 0.50 uzp2 v0.2d, v0.2d, v0.2d
+# CHECK-NEXT: 1 2 0.50 uzp2 v0.2s, v0.2s, v0.2s
+# CHECK-NEXT: 1 2 0.50 uzp2 v0.4h, v0.4h, v0.4h
+# CHECK-NEXT: 1 2 0.50 uzp2 v0.4s, v0.4s, v0.4s
+# CHECK-NEXT: 1 2 0.50 uzp2 v0.8b, v0.8b, v0.8b
+# CHECK-NEXT: 1 2 0.50 uzp2 v0.8h, v0.8h, v0.8h
+# CHECK-NEXT: 1 2 0.50 xtn v0.2s, v0.2d
+# CHECK-NEXT: 1 2 0.50 xtn v0.4h, v0.4s
+# CHECK-NEXT: 1 2 0.50 xtn v0.8b, v0.8h
+# CHECK-NEXT: 1 2 0.50 xtn2 v0.16b, v0.8h
+# CHECK-NEXT: 1 2 0.50 xtn2 v0.4s, v0.2d
+# CHECK-NEXT: 1 2 0.50 xtn2 v0.8h, v0.4s
+# CHECK-NEXT: 1 2 0.50 zip1 v0.16b, v0.16b, v0.16b
+# CHECK-NEXT: 1 2 0.50 zip1 v0.2d, v0.2d, v0.2d
+# CHECK-NEXT: 1 2 0.50 zip1 v0.2s, v0.2s, v0.2s
+# CHECK-NEXT: 1 2 0.50 zip1 v0.4h, v0.4h, v0.4h
+# CHECK-NEXT: 1 2 0.50 zip1 v0.4s, v0.4s, v0.4s
+# CHECK-NEXT: 1 2 0.50 zip1 v0.8b, v0.8b, v0.8b
+# CHECK-NEXT: 1 2 0.50 zip1 v0.8h, v0.8h, v0.8h
+# CHECK-NEXT: 1 2 0.50 zip2 v0.16b, v0.16b, v0.16b
+# CHECK-NEXT: 1 2 0.50 zip2 v0.2d, v0.2d, v0.2d
+# CHECK-NEXT: 1 2 0.50 zip2 v0.2s, v0.2s, v0.2s
+# CHECK-NEXT: 1 2 0.50 zip2 v0.4h, v0.4h, v0.4h
+# CHECK-NEXT: 1 2 0.50 zip2 v0.4s, v0.4s, v0.4s
+# CHECK-NEXT: 1 2 0.50 zip2 v0.8b, v0.8b, v0.8b
+# CHECK-NEXT: 1 2 0.50 zip2 v0.8h, v0.8h, v0.8h
+
+# CHECK: Resources:
+# CHECK-NEXT: [0.0] - Ampere1BUnitA
+# CHECK-NEXT: [0.1] - Ampere1BUnitA
+# CHECK-NEXT: [1.0] - Ampere1BUnitB
+# CHECK-NEXT: [1.1] - Ampere1BUnitB
+# CHECK-NEXT: [2] - Ampere1BUnitBS
+# CHECK-NEXT: [3.0] - Ampere1BUnitL
+# CHECK-NEXT: [3.1] - Ampere1BUnitL
+# CHECK-NEXT: [4.0] - Ampere1BUnitS
+# CHECK-NEXT: [4.1] - Ampere1BUnitS
+# CHECK-NEXT: [5] - Ampere1BUnitX
+# CHECK-NEXT: [6] - Ampere1BUnitY
+# CHECK-NEXT: [7] - Ampere1BUnitZ
+
+# CHECK: Resource pressure per iteration:
+# CHECK-NEXT: [0.0] [0.1] [1.0] [1.1] [2] [3.0] [3.1] [4.0] [4.1] [5] [6] [7]
+# CHECK-NEXT: - - - - 11.00 51.00 51.00 29.00 29.00 604.50 584.50 58.00
+
+# CHECK: Resource pressure by instruction:
+# CHECK-NEXT: [0.0] [0.1] [1.0] [1.1] [2] [3.0] [3.1] [4.0] [4.1] [5] [6] [7] Instructions:
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - abs d29, d24
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - abs v0.16b, v0.16b
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - abs v0.2d, v0.2d
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - abs v0.2s, v0.2s
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - abs v0.4h, v0.4h
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - abs v0.4s, v0.4s
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - abs v0.8b, v0.8b
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - abs v0.8h, v0.8h
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - add d17, d31, d29
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - add v0.8b, v0.8b, v0.8b
+# CHECK-NEXT: - - - - - - - - - 1.00 1.00 - addhn v0.2s, v0.2d, v0.2d
+# CHECK-NEXT: - - - - - - - - - 1.00 1.00 - addhn v0.4h, v0.4s, v0.4s
+# CHECK-NEXT: - - - - - - - - - 1.00 1.00 - addhn v0.8b, v0.8h, v0.8h
+# CHECK-NEXT: - - - - - - - - - 1.00 1.00 - addhn2 v0.16b, v0.8h, v0.8h
+# CHECK-NEXT: - - - - - - - - - 1.00 1.00 - addhn2 v0.4s, v0.2d, v0.2d
+# CHECK-NEXT: - - - - - - - - - 1.00 1.00 - addhn2 v0.8h, v0.4s, v0.4s
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - addp v0.2d, v0.2d, v0.2d
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - addp v0.8b, v0.8b, v0.8b
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - and v0.8b, v0.8b, v0.8b
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - bic v0.4h, #15, lsl #8
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - bic v0.8b, v0.8b, v0.8b
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - bif v0.16b, v0.16b, v0.16b
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - bit v0.16b, v0.16b, v0.16b
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - bsl v0.8b, v0.8b, v0.8b
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - cls v0.16b, v0.16b
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - cls v0.2s, v0.2s
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - cls v0.4h, v0.4h
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - cls v0.4s, v0.4s
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - cls v0.8b, v0.8b
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - cls v0.8h, v0.8h
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - clz v0.16b, v0.16b
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - clz v0.2s, v0.2s
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - clz v0.4h, v0.4h
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - clz v0.4s, v0.4s
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - clz v0.8b, v0.8b
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - clz v0.8h, v0.8h
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - cmeq d20, d21, #0
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - cmeq d20, d21, d22
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - cmeq v0.16b, v0.16b, #0
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - cmeq v0.16b, v0.16b, v0.16b
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - cmge d20, d21, #0
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - cmge d20, d21, d22
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - cmge v0.4h, v0.4h, v0.4h
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - cmge v0.8b, v0.8b, #0
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - cmgt d20, d21, #0
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - cmgt d20, d21, d22
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - cmgt v0.2s, v0.2s, #0
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - cmgt v0.4s, v0.4s, v0.4s
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - cmhi d20, d21, d22
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - cmhi v0.8h, v0.8h, v0.8h
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - cmhs d20, d21, d22
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - cmhs v0.8b, v0.8b, v0.8b
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - cmle d20, d21, #0
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - cmle v0.2d, v0.2d, #0
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - cmlt d20, d21, #0
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - cmlt v0.8h, v0.8h, #0
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - cmtst d20, d21, d22
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - cmtst v0.2s, v0.2s, v0.2s
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - cnt v0.16b, v0.16b
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - cnt v0.8b, v0.8b
+# CHECK-NEXT: - - - - 1.00 - - - - - - - dup v0.16b, w28
+# CHECK-NEXT: - - - - 1.00 - - - - - - - dup v0.2d, x28
+# CHECK-NEXT: - - - - 1.00 - - - - - - - dup v0.2s, w28
+# CHECK-NEXT: - - - - 1.00 - - - - - - - dup v0.4h, w28
+# CHECK-NEXT: - - - - 1.00 - - - - - - - dup v0.4s, w28
+# CHECK-NEXT: - - - - 1.00 - - - - - - - dup v0.8b, w28
+# CHECK-NEXT: - - - - 1.00 - - - - - - - dup v0.8h, w28
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - eor v0.16b, v0.16b, v0.16b
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - ext v0.16b, v0.16b, v0.16b, #3
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - ext v0.8b, v0.8b, v0.8b, #3
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - fabd d29, d24, d20
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - fabd s29, s24, s20
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - fabd v0.4s, v0.4s, v0.4s
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - fabs v0.2d, v0.2d
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - fabs v0.2s, v0.2s
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - fabs v0.4h, v0.4h
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - fabs v0.4s, v0.4s
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - fabs v0.8h, v0.8h
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - facge d20, d21, d22
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - facge s10, s11, s12
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - facge v0.4s, v0.4s, v0.4s
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - facgt d20, d21, d22
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - facgt s10, s11, s12
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - facgt v0.2d, v0.2d, v0.2d
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - fadd v0.4s, v0.4s, v0.4s
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - faddp v0.2s, v0.2s, v0.2s
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - faddp v0.4s, v0.4s, v0.4s
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - fcmeq d20, d21, #0.0
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - fcmeq d20, d21, d22
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - fcmeq s10, s11, #0.0
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - fcmeq s10, s11, s12
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - fcmeq v0.2s, v0.2s, #0.0
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - fcmeq v0.2s, v0.2s, v0.2s
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - fcmge d20, d21, #0.0
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - fcmge d20, d21, d22
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - fcmge s10, s11, #0.0
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - fcmge s10, s11, s12
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - fcmge v0.2d, v0.2d, #0.0
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - fcmge v0.4s, v0.4s, v0.4s
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - fcmgt d20, d21, #0.0
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - fcmgt d20, d21, d22
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - fcmgt s10, s11, #0.0
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - fcmgt s10, s11, s12
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - fcmgt v0.4s, v0.4s, #0.0
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - fcmgt v0.4s, v0.4s, v0.4s
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - fcmle d20, d21, #0.0
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - fcmle s10, s11, #0.0
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - fcmle v0.2d, v0.2d, #0.0
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - fcmlt d20, d21, #0.0
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - fcmlt s10, s11, #0.0
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - fcmlt v0.4s, v0.4s, #0.0
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - fcvtas d21, d14
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - fcvtas s12, s13
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - fcvtas v0.2d, v0.2d
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - fcvtas v0.2s, v0.2s
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - fcvtas v0.4h, v0.4h
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - fcvtas v0.4s, v0.4s
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - fcvtas v0.8h, v0.8h
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - fcvtau d21, d14
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - fcvtau s12, s13
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - fcvtau v0.2d, v0.2d
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - fcvtau v0.2s, v0.2s
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - fcvtau v0.4h, v0.4h
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - fcvtau v0.4s, v0.4s
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - fcvtau v0.8h, v0.8h
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - fcvtl v0.2d, v0.2s
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - fcvtl v0.4s, v0.4h
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - fcvtl2 v0.2d, v0.4s
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - fcvtl2 v0.4s, v0.8h
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - fcvtms d21, d14
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - fcvtms s22, s13
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - fcvtms v0.2d, v0.2d
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - fcvtms v0.2s, v0.2s
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - fcvtms v0.4h, v0.4h
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - fcvtms v0.4s, v0.4s
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - fcvtms v0.8h, v0.8h
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - fcvtmu d21, d14
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - fcvtmu s12, s13
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - fcvtmu v0.2d, v0.2d
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - fcvtmu v0.2s, v0.2s
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - fcvtmu v0.4h, v0.4h
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - fcvtmu v0.4s, v0.4s
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - fcvtmu v0.8h, v0.8h
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - fcvtn v0.2s, v0.2d
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - fcvtn v0.4h, v0.4s
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - fcvtn2 v0.4s, v0.2d
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - fcvtn2 v0.8h, v0.4s
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - fcvtns d21, d14
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - fcvtns s22, s13
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - fcvtns v0.2d, v0.2d
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - fcvtns v0.2s, v0.2s
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - fcvtns v0.4h, v0.4h
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - fcvtns v0.4s, v0.4s
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - fcvtns v0.8h, v0.8h
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - fcvtnu d21, d14
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - fcvtnu s12, s13
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - fcvtnu v0.2d, v0.2d
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - fcvtnu v0.2s, v0.2s
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - fcvtnu v0.4h, v0.4h
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - fcvtnu v0.4s, v0.4s
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - fcvtnu v0.8h, v0.8h
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - fcvtps d21, d14
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - fcvtps s22, s13
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - fcvtps v0.2d, v0.2d
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - fcvtps v0.2s, v0.2s
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - fcvtps v0.4h, v0.4h
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - fcvtps v0.4s, v0.4s
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - fcvtps v0.8h, v0.8h
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - fcvtpu d21, d14
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - fcvtpu s12, s13
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - fcvtpu v0.2d, v0.2d
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - fcvtpu v0.2s, v0.2s
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - fcvtpu v0.4h, v0.4h
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - fcvtpu v0.4s, v0.4s
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - fcvtpu v0.8h, v0.8h
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - fcvtxn s22, d13
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - fcvtxn v0.2s, v0.2d
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - fcvtxn2 v0.4s, v0.2d
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - fcvtzs d21, d12, #1
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - fcvtzs d21, d14
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - fcvtzs s12, s13
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - fcvtzs s21, s12, #1
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - fcvtzs v0.2d, v0.2d
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - fcvtzs v0.2d, v0.2d, #3
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - fcvtzs v0.2s, v0.2s
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - fcvtzs v0.2s, v0.2s, #3
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - fcvtzs v0.4h, v0.4h
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - fcvtzs v0.4s, v0.4s
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - fcvtzs v0.4s, v0.4s, #3
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - fcvtzs v0.8h, v0.8h
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - fcvtzu d21, d12, #1
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - fcvtzu d21, d14
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - fcvtzu s12, s13
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - fcvtzu s21, s12, #1
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - fcvtzu v0.2d, v0.2d
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - fcvtzu v0.2d, v0.2d, #3
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - fcvtzu v0.2s, v0.2s
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - fcvtzu v0.2s, v0.2s, #3
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - fcvtzu v0.4h, v0.4h
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - fcvtzu v0.4s, v0.4s
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - fcvtzu v0.4s, v0.4s, #3
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - fcvtzu v0.8h, v0.8h
+# CHECK-NEXT: - - - - - - - - - 1.00 - - fdiv v0.2s, v0.2s, v0.2s
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - fmax v0.2d, v0.2d, v0.2d
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - fmax v0.2s, v0.2s, v0.2s
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - fmax v0.4s, v0.4s, v0.4s
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - fmaxnm v0.2d, v0.2d, v0.2d
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - fmaxnm v0.2s, v0.2s, v0.2s
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - fmaxnm v0.4s, v0.4s, v0.4s
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - fmaxnmp v0.2d, v0.2d, v0.2d
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - fmaxnmp v0.2s, v0.2s, v0.2s
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - fmaxnmp v0.4s, v0.4s, v0.4s
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - fmaxp v0.2d, v0.2d, v0.2d
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - fmaxp v0.2s, v0.2s, v0.2s
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - fmaxp v0.4s, v0.4s, v0.4s
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - fmin v0.2d, v0.2d, v0.2d
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - fmin v0.2s, v0.2s, v0.2s
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - fmin v0.4s, v0.4s, v0.4s
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - fminnm v0.2d, v0.2d, v0.2d
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - fminnm v0.2s, v0.2s, v0.2s
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - fminnm v0.4s, v0.4s, v0.4s
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - fminnmp v0.2d, v0.2d, v0.2d
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - fminnmp v0.2s, v0.2s, v0.2s
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - fminnmp v0.4s, v0.4s, v0.4s
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - fminp v0.2d, v0.2d, v0.2d
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - fminp v0.2s, v0.2s, v0.2s
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - fminp v0.4s, v0.4s, v0.4s
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - fmla d0, d1, v0.d[1]
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - fmla s0, s1, v0.s[3]
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - fmla v0.2s, v0.2s, v0.2s
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - fmls d0, d4, v0.d[1]
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - fmls s3, s5, v0.s[3]
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - fmls v0.2s, v0.2s, v0.2s
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - fmov v0.2d, #-1.25000000
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - fmov v0.2s, #13.00000000
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - fmov v0.4s, #1.00000000
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - fmul d0, d1, v0.d[1]
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - fmul s0, s1, v0.s[3]
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - fmul v0.2s, v0.2s, v0.2s
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - fmulx d0, d4, v0.d[1]
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - fmulx d23, d11, d1
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - fmulx s20, s22, s15
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - fmulx s3, s5, v0.s[3]
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - fmulx v0.2d, v0.2d, v0.2d
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - fmulx v0.2s, v0.2s, v0.2s
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - fmulx v0.4s, v0.4s, v0.4s
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - fneg v0.2d, v0.2d
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - fneg v0.2s, v0.2s
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - fneg v0.4h, v0.4h
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - fneg v0.4s, v0.4s
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - fneg v0.8h, v0.8h
+# CHECK-NEXT: - - - - - - - - - 1.00 - - frecpe d13, d13
+# CHECK-NEXT: - - - - - - - - - 1.00 - - frecpe s19, s14
+# CHECK-NEXT: - - - - - - - - - 1.00 - - frecpe v0.2d, v0.2d
+# CHECK-NEXT: - - - - - - - - - 1.00 - - frecpe v0.2s, v0.2s
+# CHECK-NEXT: - - - - - - - - - 1.00 - - frecpe v0.4h, v0.4h
+# CHECK-NEXT: - - - - - - - - - 1.00 - - frecpe v0.4s, v0.4s
+# CHECK-NEXT: - - - - - - - - - 1.00 - - frecpe v0.8h, v0.8h
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - frecps v0.4s, v0.4s, v0.4s
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - frecps d22, d30, d21
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - frecps s21, s16, s13
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - frecpx d16, d19
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - frecpx s18, s10
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - frinta v0.2d, v0.2d
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - frinta v0.2s, v0.2s
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - frinta v0.4h, v0.4h
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - frinta v0.4s, v0.4s
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - frinta v0.8h, v0.8h
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - frinti v0.2d, v0.2d
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - frinti v0.2s, v0.2s
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - frinti v0.4h, v0.4h
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - frinti v0.4s, v0.4s
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - frinti v0.8h, v0.8h
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - frintm v0.2d, v0.2d
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - frintm v0.2s, v0.2s
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - frintm v0.4h, v0.4h
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - frintm v0.4s, v0.4s
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - frintm v0.8h, v0.8h
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - frintn v0.2d, v0.2d
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - frintn v0.2s, v0.2s
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - frintn v0.4h, v0.4h
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - frintn v0.4s, v0.4s
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - frintn v0.8h, v0.8h
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - frintp v0.2d, v0.2d
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - frintp v0.2s, v0.2s
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - frintp v0.4h, v0.4h
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - frintp v0.4s, v0.4s
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - frintp v0.8h, v0.8h
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - frintx v0.2d, v0.2d
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - frintx v0.2s, v0.2s
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - frintx v0.4h, v0.4h
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - frintx v0.4s, v0.4s
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - frintx v0.8h, v0.8h
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - frintz v0.2d, v0.2d
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - frintz v0.2s, v0.2s
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - frintz v0.4h, v0.4h
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - frintz v0.4s, v0.4s
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - frintz v0.8h, v0.8h
+# CHECK-NEXT: - - - - - - - - - 1.00 - - frsqrte d21, d12
+# CHECK-NEXT: - - - - - - - - - 1.00 - - frsqrte s22, s13
+# CHECK-NEXT: - - - - - - - - - 1.00 - - frsqrte v0.2d, v0.2d
+# CHECK-NEXT: - - - - - - - - - 1.00 - - frsqrte v0.2s, v0.2s
+# CHECK-NEXT: - - - - - - - - - 1.00 - - frsqrte v0.4h, v0.4h
+# CHECK-NEXT: - - - - - - - - - 1.00 - - frsqrte v0.4s, v0.4s
+# CHECK-NEXT: - - - - - - - - - 1.00 - - frsqrte v0.8h, v0.8h
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - frsqrts d8, d22, d18
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - frsqrts s21, s5, s12
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - frsqrts v0.2d, v0.2d, v0.2d
+# CHECK-NEXT: - - - - - - - - - 1.00 - - fsqrt v0.2d, v0.2d
+# CHECK-NEXT: - - - - - - - - - 1.00 - - fsqrt v0.2s, v0.2s
+# CHECK-NEXT: - - - - - - - - - 1.00 - - fsqrt v0.4h, v0.4h
+# CHECK-NEXT: - - - - - - - - - 1.00 - - fsqrt v0.4s, v0.4s
+# CHECK-NEXT: - - - - - - - - - 1.00 - - fsqrt v0.8h, v0.8h
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - fsub v0.2s, v0.2s, v0.2s
+# CHECK-NEXT: - - - - - 0.50 0.50 - - - - - ld1 { v0.16b }, [x0]
+# CHECK-NEXT: - - - - - 1.50 1.50 - - - - - ld1 { v0.2d, v1.2d, v2.2d }, [x0], #48
+# CHECK-NEXT: - - - - - 2.00 2.00 - - - - - ld1 { v0.2d, v1.2d, v2.2d, v3.2d }, [x0]
+# CHECK-NEXT: - - - - - 1.00 1.00 - - - - - ld1 { v0.4s, v1.4s }, [sp], #32
+# CHECK-NEXT: - - - - - 1.50 1.50 - - - - - ld1 { v0.4s, v1.4s, v2.4s }, [sp]
+# CHECK-NEXT: - - - - - 2.00 2.00 - - - - - ld1 { v0.8b, v1.8b, v2.8b, v3.8b }, [x0], x3
+# CHECK-NEXT: - - - - - 0.50 0.50 - - - - - ld1 { v0.8h }, [x15], x2
+# CHECK-NEXT: - - - - - 1.00 1.00 - - - - - ld1 { v0.8h, v1.8h }, [x15]
+# CHECK-NEXT: - - - - - 0.50 0.50 - - 0.50 0.50 - ld1 { v0.b }[9], [x0]
+# CHECK-NEXT: - - - - - 0.50 0.50 - - 0.50 0.50 - ld1 { v0.b }[9], [x0], #1
+# CHECK-NEXT: - - - - - 0.50 0.50 - - 0.50 0.50 - ld1r { v0.16b }, [x0]
+# CHECK-NEXT: - - - - - 0.50 0.50 - - 0.50 0.50 - ld1r { v0.16b }, [x0], #1
+# CHECK-NEXT: - - - - - 0.50 0.50 - - 0.50 0.50 - ld1r { v0.8h }, [x15]
+# CHECK-NEXT: - - - - - 0.50 0.50 - - 0.50 0.50 - ld1r { v0.8h }, [x15], #2
+# CHECK-NEXT: - - - - - 1.00 1.00 - - 1.00 1.00 - ld2 { v0.16b, v1.16b }, [x0], x1
+# CHECK-NEXT: - - - - - 1.00 1.00 - - 1.50 1.50 - ld2 { v0.8b, v1.8b }, [x0]
+# CHECK-NEXT: - - - - - 1.00 1.00 - - 1.00 1.00 - ld2 { v0.h, v1.h }[7], [x15]
+# CHECK-NEXT: - - - - - 1.00 1.00 - - 1.00 1.00 - ld2 { v0.h, v1.h }[7], [x15], #4
+# CHECK-NEXT: - - - - - 1.00 1.00 - - 1.00 1.00 - ld2r { v0.2d, v1.2d }, [x0]
+# CHECK-NEXT: - - - - - 1.00 1.00 - - 1.00 1.00 - ld2r { v0.2d, v1.2d }, [x0], #16
+# CHECK-NEXT: - - - - - 1.00 1.00 - - 1.00 1.00 - ld2r { v0.4s, v1.4s }, [sp]
+# CHECK-NEXT: - - - - - 1.00 1.00 - - 1.00 1.00 - ld2r { v0.4s, v1.4s }, [sp], #8
+# CHECK-NEXT: - - - - - 1.50 1.50 - - 1.50 1.50 - ld3 { v0.4h, v1.4h, v2.4h }, [x15]
+# CHECK-NEXT: - - - - - 1.50 1.50 - - 1.50 1.50 - ld3 { v0.8h, v1.8h, v2.8h }, [x15], x2
+# CHECK-NEXT: - - - - - 1.50 1.50 - - 1.50 1.50 - ld3 { v0.s, v1.s, v2.s }[3], [sp]
+# CHECK-NEXT: - - - - - 1.50 1.50 - - 1.50 1.50 - ld3 { v0.s, v1.s, v2.s }[3], [sp], x3
+# CHECK-NEXT: - - - - - 1.50 1.50 - - 1.50 1.50 - ld3r { v0.4h, v1.4h, v2.4h }, [x15]
+# CHECK-NEXT: - - - - - 1.50 1.50 - - 1.50 1.50 - ld3r { v0.4h, v1.4h, v2.4h }, [x15], #6
+# CHECK-NEXT: - - - - - 1.50 1.50 - - 1.50 1.50 - ld3r { v0.8b, v1.8b, v2.8b }, [x0]
+# CHECK-NEXT: - - - - - 1.50 1.50 - - 1.50 1.50 - ld3r { v0.8b, v1.8b, v2.8b }, [x0], #3
+# CHECK-NEXT: - - - - - 2.00 2.00 - - 2.00 2.00 - ld4 { v0.2s, v1.2s, v2.2s, v3.2s }, [sp]
+# CHECK-NEXT: - - - - - 2.00 2.00 - - 2.00 2.00 - ld4 { v0.4s, v1.4s, v2.4s, v3.4s }, [sp], #64
+# CHECK-NEXT: - - - - - 2.00 2.00 - - 2.00 2.00 - ld4 { v0.d, v1.d, v2.d, v3.d }[1], [x0]
+# CHECK-NEXT: - - - - - 2.00 2.00 - - 2.00 2.00 - ld4 { v0.d, v1.d, v2.d, v3.d }[1], [x0], #32
+# CHECK-NEXT: - - - - - 2.00 2.00 - - 2.00 2.00 - ld4 { v0.h, v1.h, v2.h, v3.h }[7], [x0], x0
+# CHECK-NEXT: - - - - - 2.00 2.00 - - - - - ld4r { v0.1d, v1.1d, v2.1d, v3.1d }, [sp]
+# CHECK-NEXT: - - - - - 2.00 2.00 - - - - - ld4r { v0.1d, v1.1d, v2.1d, v3.1d }, [sp], x7
+# CHECK-NEXT: - - - - - 2.00 2.00 - - 2.00 2.00 - ld4r { v0.2s, v1.2s, v2.2s, v3.2s }, [sp]
+# CHECK-NEXT: - - - - - 2.00 2.00 - - 2.00 2.00 - ld4r { v0.2s, v1.2s, v2.2s, v3.2s }, [sp], x30
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - mla v0.8b, v0.8b, v0.8b
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - mls v0.4h, v0.4h, v0.4h
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - mov b0, v0.b[15]
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - mov d6, v0.d[1]
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - mov h2, v0.h[5]
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - mov s17, v0.s[2]
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - mov v2.b[0], v0.b[0]
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - mov v2.h[1], v0.h[1]
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - mov v2.s[2], v0.s[2]
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - mov v2.d[1], v0.d[1]
+# CHECK-NEXT: - - - - 1.00 - - - - 0.50 0.50 - mov v0.b[0], w8
+# CHECK-NEXT: - - - - 1.00 - - - - 0.50 0.50 - mov v0.h[1], w8
+# CHECK-NEXT: - - - - 1.00 - - - - 0.50 0.50 - mov v0.s[2], w8
+# CHECK-NEXT: - - - - 1.00 - - - - 0.50 0.50 - mov v0.d[1], x8
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - mov v0.16b, v0.16b
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - mov v0.8b, v0.8b
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - movi d15, #0xff00ff00ff00ff
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - movi v0.16b, #31
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - movi v0.2d, #0xff0000ff0000ffff
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - movi v0.2s, #8, msl #8
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - movi v0.4s, #255, lsl #24
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - movi v0.8b, #255
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - mul v0.8b, v0.8b, v0.8b
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - mvni v0.2s, #0
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - mvni v0.4s, #16, msl #16
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - neg d29, d24
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - neg v0.16b, v0.16b
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - neg v0.2d, v0.2d
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - neg v0.2s, v0.2s
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - neg v0.4h, v0.4h
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - neg v0.4s, v0.4s
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - neg v0.8b, v0.8b
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - neg v0.8h, v0.8h
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - mvn v0.16b, v0.16b
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - mvn v0.8b, v0.8b
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - orn v0.16b, v0.16b, v0.16b
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - mov v0.16b, v0.16b
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - orr v0.8h, #31
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - pmul v0.16b, v0.16b, v0.16b
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - pmul v0.8b, v0.8b, v0.8b
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - pmull v0.8h, v0.8b, v0.8b
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - pmull2 v0.8h, v0.16b, v0.16b
+# CHECK-NEXT: - - - - - - - - - 1.00 1.00 - raddhn v0.2s, v0.2d, v0.2d
+# CHECK-NEXT: - - - - - - - - - 1.00 1.00 - raddhn v0.4h, v0.4s, v0.4s
+# CHECK-NEXT: - - - - - - - - - 1.00 1.00 - raddhn v0.8b, v0.8h, v0.8h
+# CHECK-NEXT: - - - - - - - - - 1.00 1.00 - raddhn2 v0.16b, v0.8h, v0.8h
+# CHECK-NEXT: - - - - - - - - - 1.00 1.00 - raddhn2 v0.4s, v0.2d, v0.2d
+# CHECK-NEXT: - - - - - - - - - 1.00 1.00 - raddhn2 v0.8h, v0.4s, v0.4s
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - rbit v0.16b, v0.16b
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - rbit v0.8b, v0.8b
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - rev16 v21.8b, v1.8b
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - rev16 v30.16b, v31.16b
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - rev32 v0.4h, v9.4h
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - rev32 v21.8b, v1.8b
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - rev32 v30.16b, v31.16b
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - rev32 v4.8h, v7.8h
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - rev64 v0.16b, v31.16b
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - rev64 v1.8b, v9.8b
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - rev64 v13.4h, v21.4h
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - rev64 v2.8h, v4.8h
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - rev64 v4.2s, v0.2s
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - rev64 v6.4s, v8.4s
+# CHECK-NEXT: - - - - - - - - - 1.00 1.00 - rshrn v0.2s, v0.2d, #3
+# CHECK-NEXT: - - - - - - - - - 1.00 1.00 - rshrn v0.4h, v0.4s, #3
+# CHECK-NEXT: - - - - - - - - - 1.00 1.00 - rshrn v0.8b, v0.8h, #3
+# CHECK-NEXT: - - - - - - - - - 1.00 1.00 - rshrn2 v0.16b, v0.8h, #3
+# CHECK-NEXT: - - - - - - - - - 1.00 1.00 - rshrn2 v0.4s, v0.2d, #3
+# CHECK-NEXT: - - - - - - - - - 1.00 1.00 - rshrn2 v0.8h, v0.4s, #3
+# CHECK-NEXT: - - - - - - - - - 1.00 1.00 - rsubhn v0.2s, v0.2d, v0.2d
+# CHECK-NEXT: - - - - - - - - - 1.00 1.00 - rsubhn v0.4h, v0.4s, v0.4s
+# CHECK-NEXT: - - - - - - - - - 1.00 1.00 - rsubhn v0.8b, v0.8h, v0.8h
+# CHECK-NEXT: - - - - - - - - - 1.00 1.00 - rsubhn2 v0.16b, v0.8h, v0.8h
+# CHECK-NEXT: - - - - - - - - - 1.00 1.00 - rsubhn2 v0.4s, v0.2d, v0.2d
+# CHECK-NEXT: - - - - - - - - - 1.00 1.00 - rsubhn2 v0.8h, v0.4s, v0.4s
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - saba v0.16b, v0.16b, v0.16b
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - sabal v0.2d, v0.2s, v0.2s
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - sabal v0.4s, v0.4h, v0.4h
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - sabal v0.8h, v0.8b, v0.8b
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - sabal2 v0.2d, v0.4s, v0.4s
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - sabal2 v0.4s, v0.8h, v0.8h
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - sabal2 v0.8h, v0.16b, v0.16b
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - sabd v0.4h, v0.4h, v0.4h
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - sabdl v0.2d, v0.2s, v0.2s
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - sabdl v0.4s, v0.4h, v0.4h
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - sabdl v0.8h, v0.8b, v0.8b
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - sabdl2 v0.2d, v0.4s, v0.4s
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - sabdl2 v0.4s, v0.8h, v0.8h
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - sabdl2 v0.8h, v0.16b, v0.16b
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - sadalp v0.1d, v0.2s
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - sadalp v0.2d, v0.4s
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - sadalp v0.2s, v0.4h
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - sadalp v0.4h, v0.8b
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - sadalp v0.4s, v0.8h
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - sadalp v0.8h, v0.16b
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - saddl v0.2d, v0.2s, v0.2s
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - saddl v0.4s, v0.4h, v0.4h
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - saddl v0.8h, v0.8b, v0.8b
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - saddl2 v0.2d, v0.4s, v0.4s
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - saddl2 v0.4s, v0.8h, v0.8h
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - saddl2 v0.8h, v0.16b, v0.16b
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - saddlp v0.1d, v0.2s
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - saddlp v0.2d, v0.4s
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - saddlp v0.2s, v0.4h
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - saddlp v0.4h, v0.8b
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - saddlp v0.4s, v0.8h
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - saddlp v0.8h, v0.16b
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - saddw v0.2d, v0.2d, v0.2s
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - saddw v0.4s, v0.4s, v0.4h
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - saddw v0.8h, v0.8h, v0.8b
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - saddw2 v0.2d, v0.2d, v0.4s
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - saddw2 v0.4s, v0.4s, v0.8h
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - saddw2 v0.8h, v0.8h, v0.16b
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - scvtf d21, d12
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - scvtf d21, d12, #64
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - scvtf s22, s13
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - scvtf s22, s13, #32
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - scvtf v0.2d, v0.2d
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - scvtf v0.2d, v0.2d, #3
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - scvtf v0.2s, v0.2s
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - scvtf v0.2s, v0.2s, #3
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - scvtf v0.4h, v0.4h
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - scvtf v0.4s, v0.4s
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - scvtf v0.4s, v0.4s, #3
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - scvtf v0.8h, v0.8h
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - shadd v0.8b, v0.8b, v0.8b
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - shl d7, d10, #12
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - shl v0.16b, v0.16b, #3
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - shl v0.2d, v0.2d, #3
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - shl v0.4h, v0.4h, #3
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - shl v0.4s, v0.4s, #3
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - shll v0.2d, v0.2s, #32
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - shll v0.4s, v0.4h, #16
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - shll v0.8h, v0.8b, #8
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - shll v0.2d, v0.2s, #32
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - shll v0.4s, v0.4h, #16
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - shll v0.8h, v0.8b, #8
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - shll2 v0.2d, v0.4s, #32
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - shll2 v0.4s, v0.8h, #16
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - shll2 v0.8h, v0.16b, #8
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - shll2 v0.2d, v0.4s, #32
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - shll2 v0.4s, v0.8h, #16
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - shll2 v0.8h, v0.16b, #8
+# CHECK-NEXT: - - - - - - - - - 1.00 1.00 - shrn v0.2s, v0.2d, #3
+# CHECK-NEXT: - - - - - - - - - 1.00 1.00 - shrn v0.4h, v0.4s, #3
+# CHECK-NEXT: - - - - - - - - - 1.00 1.00 - shrn v0.8b, v0.8h, #3
+# CHECK-NEXT: - - - - - - - - - 1.00 1.00 - shrn2 v0.16b, v0.8h, #3
+# CHECK-NEXT: - - - - - - - - - 1.00 1.00 - shrn2 v0.4s, v0.2d, #3
+# CHECK-NEXT: - - - - - - - - - 1.00 1.00 - shrn2 v0.8h, v0.4s, #3
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - shsub v0.2s, v0.2s, v0.2s
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - shsub v0.4h, v0.4h, v0.4h
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - sli d10, d14, #12
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - sli v0.16b, v0.16b, #3
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - sli v0.2d, v0.2d, #3
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - sli v0.2s, v0.2s, #3
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - sli v0.4h, v0.4h, #3
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - sli v0.4s, v0.4s, #3
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - sli v0.8b, v0.8b, #3
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - sli v0.8h, v0.8h, #3
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - smax v0.2s, v0.2s, v0.2s
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - smax v0.4h, v0.4h, v0.4h
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - smax v0.8b, v0.8b, v0.8b
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - smaxp v0.2s, v0.2s, v0.2s
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - smaxp v0.4h, v0.4h, v0.4h
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - smaxp v0.8b, v0.8b, v0.8b
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - smin v0.16b, v0.16b, v0.16b
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - smin v0.4s, v0.4s, v0.4s
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - smin v0.8h, v0.8h, v0.8h
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - sminp v0.16b, v0.16b, v0.16b
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - sminp v0.4s, v0.4s, v0.4s
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - sminp v0.8h, v0.8h, v0.8h
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - smlal v0.2d, v0.2s, v0.2s
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - smlal v0.4s, v0.4h, v0.4h
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - smlal v0.8h, v0.8b, v0.8b
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - smlal2 v0.2d, v0.4s, v0.4s
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - smlal2 v0.4s, v0.8h, v0.8h
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - smlal2 v0.8h, v0.16b, v0.16b
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - smlsl v0.2d, v0.2s, v0.2s
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - smlsl v0.4s, v0.4h, v0.4h
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - smlsl v0.8h, v0.8b, v0.8b
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - smlsl2 v0.2d, v0.4s, v0.4s
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - smlsl2 v0.4s, v0.8h, v0.8h
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - smlsl2 v0.8h, v0.16b, v0.16b
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - smull v0.2d, v0.2s, v0.2s
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - smull v0.4s, v0.4h, v0.4h
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - smull v0.8h, v0.8b, v0.8b
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - smull2 v0.2d, v0.4s, v0.4s
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - smull2 v0.4s, v0.8h, v0.8h
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - smull2 v0.8h, v0.16b, v0.16b
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - sqabs b19, b14
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - sqabs d18, d12
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - sqabs h21, h15
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - sqabs s20, s12
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - sqabs v0.16b, v0.16b
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - sqabs v0.2d, v0.2d
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - sqabs v0.2s, v0.2s
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - sqabs v0.4h, v0.4h
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - sqabs v0.4s, v0.4s
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - sqabs v0.8b, v0.8b
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - sqabs v0.8h, v0.8h
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - sqadd b20, b11, b15
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - sqadd v0.16b, v0.16b, v0.16b
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - sqadd v0.2s, v0.2s, v0.2s
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - sqdmlal d19, s24, s12
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - sqdmlal d8, s9, v0.s[1]
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - sqdmlal s0, h0, v0.h[3]
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - sqdmlal s17, h27, h12
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - sqdmlal v0.2d, v0.2s, v0.2s
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - sqdmlal v0.4s, v0.4h, v0.4h
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - sqdmlal2 v0.2d, v0.4s, v0.4s
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - sqdmlal2 v0.4s, v0.8h, v0.8h
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - sqdmlsl d12, s23, s13
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - sqdmlsl d8, s9, v0.s[1]
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - sqdmlsl s0, h0, v0.h[3]
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - sqdmlsl s14, h12, h25
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - sqdmlsl v0.2d, v0.2s, v0.2s
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - sqdmlsl v0.4s, v0.4h, v0.4h
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - sqdmlsl2 v0.2d, v0.4s, v0.4s
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - sqdmlsl2 v0.4s, v0.8h, v0.8h
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - sqdmulh h10, h11, h12
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - sqdmulh h7, h15, v0.h[3]
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - sqdmulh s15, s14, v0.s[1]
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - sqdmulh s20, s21, s2
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - sqdmulh v0.2s, v0.2s, v0.2s
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - sqdmulh v0.4s, v0.4s, v0.4s
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - sqdmull d1, s1, v0.s[1]
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - sqdmull d15, s22, s12
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - sqdmull s1, h1, v0.h[3]
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - sqdmull s12, h22, h12
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - sqdmull v0.2d, v0.2s, v0.2s
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - sqdmull v0.4s, v0.4h, v0.4h
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - sqdmull2 v0.2d, v0.4s, v0.4s
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - sqdmull2 v0.4s, v0.8h, v0.8h
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - sqneg b19, b14
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - sqneg d18, d12
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - sqneg h21, h15
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - sqneg s20, s12
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - sqneg v0.16b, v0.16b
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - sqneg v0.2d, v0.2d
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - sqneg v0.2s, v0.2s
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - sqneg v0.4h, v0.4h
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - sqneg v0.4s, v0.4s
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - sqneg v0.8b, v0.8b
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - sqneg v0.8h, v0.8h
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - sqrdmulh h10, h11, h12
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - sqrdmulh h7, h15, v0.h[3]
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - sqrdmulh s15, s14, v0.s[1]
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - sqrdmulh s20, s21, s2
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - sqrdmulh v0.4h, v0.4h, v0.4h
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - sqrdmulh v0.8h, v0.8h, v0.8h
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - sqrshl d31, d31, d31
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - sqrshl h3, h4, h15
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - sqrshl v0.2s, v0.2s, v0.2s
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - sqrshl v0.4h, v0.4h, v0.4h
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - sqrshl v0.8b, v0.8b, v0.8b
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - sqrshrn b10, h13, #2
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - sqrshrn h15, s10, #6
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - sqrshrn s15, d12, #9
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - sqrshrn v0.2s, v0.2d, #3
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - sqrshrn v0.4h, v0.4s, #3
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - sqrshrn v0.8b, v0.8h, #3
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - sqrshrn2 v0.16b, v0.8h, #3
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - sqrshrn2 v0.4s, v0.2d, #3
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - sqrshrn2 v0.8h, v0.4s, #3
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - sqrshrun b17, h10, #6
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - sqrshrun h10, s13, #15
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - sqrshrun s22, d16, #31
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - sqrshrun v0.2s, v0.2d, #3
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - sqrshrun v0.4h, v0.4s, #3
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - sqrshrun v0.8b, v0.8h, #3
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - sqrshrun2 v0.16b, v0.8h, #3
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - sqrshrun2 v0.4s, v0.2d, #3
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - sqrshrun2 v0.8h, v0.4s, #3
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - sqshl b11, b19, #7
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - sqshl d15, d16, #51
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - sqshl d31, d31, d31
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - sqshl h13, h18, #11
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - sqshl h3, h4, h15
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - sqshl s14, s17, #22
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - sqshl v0.16b, v0.16b, #3
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - sqshl v0.2d, v0.2d, #3
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - sqshl v0.2s, v0.2s, #3
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - sqshl v0.2s, v0.2s, v0.2s
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - sqshl v0.4h, v0.4h, #3
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - sqshl v0.4h, v0.4h, v0.4h
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - sqshl v0.4s, v0.4s, #3
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - sqshl v0.8b, v0.8b, #3
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - sqshl v0.8b, v0.8b, v0.8b
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - sqshl v0.8h, v0.8h, #3
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - sqshlu b15, b18, #6
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - sqshlu d11, d13, #32
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - sqshlu h19, h17, #6
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - sqshlu s16, s14, #25
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - sqshlu v0.16b, v0.16b, #3
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - sqshlu v0.2d, v0.2d, #3
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - sqshlu v0.2s, v0.2s, #3
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - sqshlu v0.4h, v0.4h, #3
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - sqshlu v0.4s, v0.4s, #3
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - sqshlu v0.8b, v0.8b, #3
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - sqshlu v0.8h, v0.8h, #3
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - sqshrn b10, h15, #5
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - sqshrn h17, s10, #4
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - sqshrn s18, d10, #31
+# CHECK-NEXT: - - - - - - - - - 1.00 1.00 - sqshrn v0.2s, v0.2d, #3
+# CHECK-NEXT: - - - - - - - - - 1.00 1.00 - sqshrn v0.4h, v0.4s, #3
+# CHECK-NEXT: - - - - - - - - - 1.00 1.00 - sqshrn v0.8b, v0.8h, #3
+# CHECK-NEXT: - - - - - - - - - 1.00 1.00 - sqshrn2 v0.16b, v0.8h, #3
+# CHECK-NEXT: - - - - - - - - - 1.00 1.00 - sqshrn2 v0.4s, v0.2d, #3
+# CHECK-NEXT: - - - - - - - - - 1.00 1.00 - sqshrn2 v0.8h, v0.4s, #3
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - sqshrun b15, h10, #7
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - sqshrun h20, s14, #3
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - sqshrun s10, d15, #15
+# CHECK-NEXT: - - - - - - - - - 1.00 1.00 - sqshrun v0.2s, v0.2d, #3
+# CHECK-NEXT: - - - - - - - - - 1.00 1.00 - sqshrun v0.4h, v0.4s, #3
+# CHECK-NEXT: - - - - - - - - - 1.00 1.00 - sqshrun v0.8b, v0.8h, #3
+# CHECK-NEXT: - - - - - - - - - 1.00 1.00 - sqshrun2 v0.16b, v0.8h, #3
+# CHECK-NEXT: - - - - - - - - - 1.00 1.00 - sqshrun2 v0.4s, v0.2d, #3
+# CHECK-NEXT: - - - - - - - - - 1.00 1.00 - sqshrun2 v0.8h, v0.4s, #3
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - sqsub s20, s10, s7
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - sqsub v0.2d, v0.2d, v0.2d
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - sqsub v0.4s, v0.4s, v0.4s
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - sqsub v0.8b, v0.8b, v0.8b
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - sqxtn b18, h18
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - sqxtn h20, s17
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - sqxtn s19, d14
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - sqxtn v0.2s, v0.2d
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - sqxtn v0.4h, v0.4s
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - sqxtn v0.8b, v0.8h
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - sqxtn2 v0.16b, v0.8h
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - sqxtn2 v0.4s, v0.2d
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - sqxtn2 v0.8h, v0.4s
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - sqxtun b19, h14
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - sqxtun h21, s15
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - sqxtun s20, d12
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - sqxtun v0.2s, v0.2d
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - sqxtun v0.4h, v0.4s
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - sqxtun v0.8b, v0.8h
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - sqxtun2 v0.16b, v0.8h
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - sqxtun2 v0.4s, v0.2d
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - sqxtun2 v0.8h, v0.4s
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - srhadd v0.2s, v0.2s, v0.2s
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - srhadd v0.4h, v0.4h, v0.4h
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - srhadd v0.8b, v0.8b, v0.8b
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - sri d10, d12, #14
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - sri v0.16b, v0.16b, #3
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - sri v0.2d, v0.2d, #3
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - sri v0.2s, v0.2s, #3
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - sri v0.4h, v0.4h, #3
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - sri v0.4s, v0.4s, #3
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - sri v0.8b, v0.8b, #3
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - sri v0.8h, v0.8h, #3
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - srshl d16, d16, d16
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - srshl v0.2s, v0.2s, v0.2s
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - srshl v0.4h, v0.4h, v0.4h
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - srshl v0.8b, v0.8b, v0.8b
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - srshr d19, d18, #7
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - srshr v0.16b, v0.16b, #3
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - srshr v0.2d, v0.2d, #3
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - srshr v0.2s, v0.2s, #3
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - srshr v0.4h, v0.4h, #3
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - srshr v0.4s, v0.4s, #3
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - srshr v0.8b, v0.8b, #3
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - srshr v0.8h, v0.8h, #3
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - srsra d15, d11, #19
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - srsra v0.16b, v0.16b, #3
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - srsra v0.2d, v0.2d, #3
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - srsra v0.2s, v0.2s, #3
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - srsra v0.4h, v0.4h, #3
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - srsra v0.4s, v0.4s, #3
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - srsra v0.8b, v0.8b, #3
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - srsra v0.8h, v0.8h, #3
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - sshl d31, d31, d31
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - sshl v0.2d, v0.2d, v0.2d
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - sshl v0.2s, v0.2s, v0.2s
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - sshl v0.4h, v0.4h, v0.4h
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - sshl v0.8b, v0.8b, v0.8b
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - sshll v0.2d, v0.2s, #3
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - sshll2 v0.4s, v0.8h, #3
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - sshr d15, d16, #12
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - sshr v0.16b, v0.16b, #3
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - sshr v0.2d, v0.2d, #3
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - sshr v0.2s, v0.2s, #3
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - sshr v0.4h, v0.4h, #3
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - sshr v0.4s, v0.4s, #3
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - sshr v0.8b, v0.8b, #3
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - sshr v0.8h, v0.8h, #3
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - ssra d18, d12, #21
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - ssra v0.16b, v0.16b, #3
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - ssra v0.2d, v0.2d, #3
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - ssra v0.2s, v0.2s, #3
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - ssra v0.4h, v0.4h, #3
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - ssra v0.4s, v0.4s, #3
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - ssra v0.8b, v0.8b, #3
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - ssra v0.8h, v0.8h, #3
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - ssubl v0.2d, v0.2s, v0.2s
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - ssubl v0.4s, v0.4h, v0.4h
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - ssubl v0.8h, v0.8b, v0.8b
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - ssubl2 v0.2d, v0.4s, v0.4s
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - ssubl2 v0.4s, v0.8h, v0.8h
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - ssubl2 v0.8h, v0.16b, v0.16b
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - ssubw v0.2d, v0.2d, v0.2s
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - ssubw v0.4s, v0.4s, v0.4h
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - ssubw v0.8h, v0.8h, v0.8b
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - ssubw2 v0.2d, v0.2d, v0.4s
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - ssubw2 v0.4s, v0.4s, v0.8h
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - ssubw2 v0.8h, v0.8h, v0.16b
+# CHECK-NEXT: - - - - - - - 0.50 0.50 - - 1.00 st1 { v0.16b }, [x0]
+# CHECK-NEXT: - - - - - - - 1.50 1.50 - - 3.00 st1 { v0.2d, v1.2d, v2.2d }, [x0], #48
+# CHECK-NEXT: - - - - - - - 2.00 2.00 - - 4.00 st1 { v0.2d, v1.2d, v2.2d, v3.2d }, [x0]
+# CHECK-NEXT: - - - - - - - 1.00 1.00 - - 2.00 st1 { v0.4s, v1.4s }, [sp], #32
+# CHECK-NEXT: - - - - - - - 1.50 1.50 - - 3.00 st1 { v0.4s, v1.4s, v2.4s }, [sp]
+# CHECK-NEXT: - - - - - - - 2.00 2.00 - - 4.00 st1 { v0.8b, v1.8b, v2.8b, v3.8b }, [x0], x3
+# CHECK-NEXT: - - - - - - - 0.50 0.50 - - 1.00 st1 { v0.8h }, [x15], x2
+# CHECK-NEXT: - - - - - - - 1.00 1.00 - - 2.00 st1 { v0.8h, v1.8h }, [x15]
+# CHECK-NEXT: - - - - - - - 0.50 0.50 0.50 0.50 1.00 st1 { v0.d }[1], [x0]
+# CHECK-NEXT: - - - - - - - 0.50 0.50 0.50 0.50 1.00 st1 { v0.d }[1], [x0], #8
+# CHECK-NEXT: - - - - - - - 1.00 1.00 1.00 1.00 2.00 st2 { v0.16b, v1.16b }, [x0], x1
+# CHECK-NEXT: - - - - - - - 1.00 1.00 1.00 1.00 2.00 st2 { v0.8b, v1.8b }, [x0]
+# CHECK-NEXT: - - - - - - - 1.00 1.00 1.00 1.00 2.00 st2 { v0.s, v1.s }[3], [sp]
+# CHECK-NEXT: - - - - - - - 1.00 1.00 1.00 1.00 2.00 st2 { v0.s, v1.s }[3], [sp], #8
+# CHECK-NEXT: - - - - - - - 1.50 1.50 1.50 1.50 3.00 st3 { v0.4h, v1.4h, v2.4h }, [x15]
+# CHECK-NEXT: - - - - - - - 1.50 1.50 1.50 1.50 3.00 st3 { v0.8h, v1.8h, v2.8h }, [x15], x2
+# CHECK-NEXT: - - - - - - - 1.50 1.50 1.50 1.50 3.00 st3 { v0.h, v1.h, v2.h }[7], [x15]
+# CHECK-NEXT: - - - - - - - 1.50 1.50 1.50 1.50 3.00 st3 { v0.h, v1.h, v2.h }[7], [x15], #6
+# CHECK-NEXT: - - - - - - - 2.00 2.00 3.00 3.00 4.00 st4 { v0.2s, v1.2s, v2.2s, v3.2s }, [sp]
+# CHECK-NEXT: - - - - - - - 2.00 2.00 2.00 2.00 4.00 st4 { v0.4s, v1.4s, v2.4s, v3.4s }, [sp], #64
+# CHECK-NEXT: - - - - - - - 2.00 2.00 2.00 2.00 4.00 st4 { v0.b, v1.b, v2.b, v3.b }[9], [x0]
+# CHECK-NEXT: - - - - - - - 2.00 2.00 2.00 2.00 4.00 st4 { v0.b, v1.b, v2.b, v3.b }[9], [x0], x5
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - sub d15, d5, d16
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - sub v0.2d, v0.2d, v0.2d
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - suqadd b19, b14
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - suqadd d18, d22
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - suqadd h20, h15
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - suqadd s21, s12
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - suqadd v0.16b, v0.16b
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - suqadd v0.2d, v0.2d
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - suqadd v0.2s, v0.2s
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - suqadd v0.4h, v0.4h
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - suqadd v0.4s, v0.4s
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - suqadd v0.8b, v0.8b
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - suqadd v0.8h, v0.8h
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - tbl v0.16b, { v0.16b }, v0.16b
+# CHECK-NEXT: - - - - - - - - - 1.00 1.00 - tbl v0.16b, { v0.16b, v1.16b }, v0.16b
+# CHECK-NEXT: - - - - - - - - - 1.50 1.50 - tbl v0.16b, { v0.16b, v1.16b, v2.16b }, v0.16b
+# CHECK-NEXT: - - - - - - - - - 2.00 2.00 - tbl v0.16b, { v0.16b, v1.16b, v2.16b, v3.16b }, v0.16b
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - tbl v0.8b, { v0.16b }, v0.8b
+# CHECK-NEXT: - - - - - - - - - 1.00 1.00 - tbl v0.8b, { v0.16b, v1.16b }, v0.8b
+# CHECK-NEXT: - - - - - - - - - 1.50 1.50 - tbl v0.8b, { v0.16b, v1.16b, v2.16b }, v0.8b
+# CHECK-NEXT: - - - - - - - - - 2.00 2.00 - tbl v0.8b, { v0.16b, v1.16b, v2.16b, v3.16b }, v0.8b
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - tbx v0.16b, { v0.16b }, v0.16b
+# CHECK-NEXT: - - - - - - - - - 1.00 1.00 - tbx v0.16b, { v0.16b, v1.16b }, v0.16b
+# CHECK-NEXT: - - - - - - - - - 1.50 1.50 - tbx v0.16b, { v0.16b, v1.16b, v2.16b }, v0.16b
+# CHECK-NEXT: - - - - - - - - - 2.00 2.00 - tbx v0.16b, { v0.16b, v1.16b, v2.16b, v3.16b }, v0.16b
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - tbx v0.8b, { v0.16b }, v0.8b
+# CHECK-NEXT: - - - - - - - - - 1.00 1.00 - tbx v0.8b, { v0.16b, v1.16b }, v0.8b
+# CHECK-NEXT: - - - - - - - - - 1.50 1.50 - tbx v0.8b, { v0.16b, v1.16b, v2.16b }, v0.8b
+# CHECK-NEXT: - - - - - - - - - 2.00 2.00 - tbx v0.8b, { v0.16b, v1.16b, v2.16b, v3.16b }, v0.8b
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - trn1 v0.16b, v0.16b, v0.16b
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - trn1 v0.2d, v0.2d, v0.2d
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - trn1 v0.2s, v0.2s, v0.2s
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - trn1 v0.4h, v0.4h, v0.4h
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - trn1 v0.4s, v0.4s, v0.4s
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - trn1 v0.8b, v0.8b, v0.8b
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - trn1 v0.8h, v0.8h, v0.8h
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - trn2 v0.16b, v0.16b, v0.16b
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - trn2 v0.2d, v0.2d, v0.2d
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - trn2 v0.2s, v0.2s, v0.2s
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - trn2 v0.4h, v0.4h, v0.4h
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - trn2 v0.4s, v0.4s, v0.4s
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - trn2 v0.8b, v0.8b, v0.8b
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - trn2 v0.8h, v0.8h, v0.8h
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - uaba v0.8b, v0.8b, v0.8b
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - uabal v0.2d, v0.2s, v0.2s
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - uabal v0.4s, v0.4h, v0.4h
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - uabal v0.8h, v0.8b, v0.8b
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - uabal2 v0.2d, v0.4s, v0.4s
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - uabal2 v0.4s, v0.8h, v0.8h
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - uabal2 v0.8h, v0.16b, v0.16b
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - uabd v0.4h, v0.4h, v0.4h
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - uabdl v0.2d, v0.2s, v0.2s
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - uabdl v0.4s, v0.4h, v0.4h
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - uabdl v0.8h, v0.8b, v0.8b
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - uabdl2 v0.2d, v0.4s, v0.4s
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - uabdl2 v0.4s, v0.8h, v0.8h
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - uabdl2 v0.8h, v0.16b, v0.16b
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - uadalp v0.1d, v0.2s
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - uadalp v0.2d, v0.4s
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - uadalp v0.2s, v0.4h
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - uadalp v0.4h, v0.8b
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - uadalp v0.4s, v0.8h
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - uadalp v0.8h, v0.16b
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - uaddl v0.2d, v0.2s, v0.2s
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - uaddl v0.4s, v0.4h, v0.4h
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - uaddl v0.8h, v0.8b, v0.8b
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - uaddl2 v0.2d, v0.4s, v0.4s
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - uaddl2 v0.4s, v0.8h, v0.8h
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - uaddl2 v0.8h, v0.16b, v0.16b
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - uaddlp v0.1d, v0.2s
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - uaddlp v0.2d, v0.4s
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - uaddlp v0.2s, v0.4h
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - uaddlp v0.4h, v0.8b
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - uaddlp v0.4s, v0.8h
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - uaddlp v0.8h, v0.16b
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - uaddw v0.2d, v0.2d, v0.2s
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - uaddw v0.4s, v0.4s, v0.4h
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - uaddw v0.8h, v0.8h, v0.8b
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - uaddw2 v0.2d, v0.2d, v0.4s
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - uaddw2 v0.4s, v0.4s, v0.8h
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - uaddw2 v0.8h, v0.8h, v0.16b
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - ucvtf d21, d14
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - ucvtf d21, d14, #64
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - ucvtf s22, s13
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - ucvtf s22, s13, #32
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - ucvtf v0.2d, v0.2d
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - ucvtf v0.2d, v0.2d, #3
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - ucvtf v0.2s, v0.2s
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - ucvtf v0.2s, v0.2s, #3
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - ucvtf v0.4h, v0.4h
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - ucvtf v0.4s, v0.4s
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - ucvtf v0.4s, v0.4s, #3
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - ucvtf v0.8h, v0.8h
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - uhadd v0.16b, v0.16b, v0.16b
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - uhadd v0.8h, v0.8h, v0.8h
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - uhsub v0.4s, v0.4s, v0.4s
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - umax v0.16b, v0.16b, v0.16b
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - umax v0.4s, v0.4s, v0.4s
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - umax v0.8h, v0.8h, v0.8h
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - umaxp v0.16b, v0.16b, v0.16b
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - umaxp v0.4s, v0.4s, v0.4s
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - umaxp v0.8h, v0.8h, v0.8h
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - umin v0.2s, v0.2s, v0.2s
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - umin v0.4h, v0.4h, v0.4h
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - umin v0.8b, v0.8b, v0.8b
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - uminp v0.2s, v0.2s, v0.2s
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - uminp v0.4h, v0.4h, v0.4h
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - uminp v0.8b, v0.8b, v0.8b
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - umlal v0.2d, v0.2s, v0.2s
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - umlal v0.4s, v0.4h, v0.4h
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - umlal v0.8h, v0.8b, v0.8b
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - umlal2 v0.2d, v0.4s, v0.4s
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - umlal2 v0.4s, v0.8h, v0.8h
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - umlal2 v0.8h, v0.16b, v0.16b
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - umlsl v0.2d, v0.2s, v0.2s
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - umlsl v0.4s, v0.4h, v0.4h
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - umlsl v0.8h, v0.8b, v0.8b
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - umlsl2 v0.2d, v0.4s, v0.4s
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - umlsl2 v0.4s, v0.8h, v0.8h
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - umlsl2 v0.8h, v0.16b, v0.16b
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - umull v0.2d, v0.2s, v0.2s
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - umull v0.4s, v0.4h, v0.4h
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - umull v0.8h, v0.8b, v0.8b
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - umull2 v0.2d, v0.4s, v0.4s
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - umull2 v0.4s, v0.8h, v0.8h
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - umull2 v0.8h, v0.16b, v0.16b
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - uqadd h0, h1, h5
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - uqadd v0.8h, v0.8h, v0.8h
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - uqrshl b11, b20, b30
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - uqrshl s23, s20, s16
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - uqrshl v0.16b, v0.16b, v0.16b
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - uqrshl v0.4s, v0.4s, v0.4s
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - uqrshl v0.4s, v0.4s, v0.4s
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - uqrshl v0.8h, v0.8h, v0.8h
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - uqrshrn b10, h12, #5
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - uqrshrn h12, s10, #14
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - uqrshrn s10, d10, #25
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - uqrshrn v0.2s, v0.2d, #3
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - uqrshrn v0.4h, v0.4s, #3
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - uqrshrn v0.8b, v0.8h, #3
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - uqrshrn2 v0.16b, v0.8h, #3
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - uqrshrn2 v0.4s, v0.2d, #3
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - uqrshrn2 v0.8h, v0.4s, #3
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - uqshl b11, b20, b30
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - uqshl b18, b15, #6
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - uqshl d15, d12, #19
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - uqshl h11, h18, #7
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - uqshl s14, s19, #18
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - uqshl s23, s20, s16
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - uqshl v0.16b, v0.16b, #3
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - uqshl v0.16b, v0.16b, v0.16b
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - uqshl v0.2d, v0.2d, #3
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - uqshl v0.2d, v0.2d, v0.2d
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - uqshl v0.2s, v0.2s, #3
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - uqshl v0.4h, v0.4h, #3
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - uqshl v0.4s, v0.4s, #3
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - uqshl v0.4s, v0.4s, v0.4s
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - uqshl v0.8b, v0.8b, #3
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - uqshl v0.8h, v0.8h, #3
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - uqshl v0.8h, v0.8h, v0.8h
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - uqshrn b12, h10, #7
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - uqshrn h10, s14, #5
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - uqshrn s10, d12, #13
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - uqshrn v0.2s, v0.2d, #3
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - uqshrn v0.4h, v0.4s, #3
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - uqshrn v0.8b, v0.8h, #3
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - uqshrn2 v0.16b, v0.8h, #3
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - uqshrn2 v0.4s, v0.2d, #3
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - uqshrn2 v0.8h, v0.4s, #3
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - uqsub d16, d16, d16
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - uqsub v0.4h, v0.4h, v0.4h
+# CHECK-NEXT: - - - - - - - - - 1.00 1.00 - uqxtn b18, h18
+# CHECK-NEXT: - - - - - - - - - 1.00 1.00 - uqxtn h20, s17
+# CHECK-NEXT: - - - - - - - - - 1.00 1.00 - uqxtn s19, d14
+# CHECK-NEXT: - - - - - - - - - 1.00 1.00 - uqxtn v0.2s, v0.2d
+# CHECK-NEXT: - - - - - - - - - 1.00 1.00 - uqxtn v0.4h, v0.4s
+# CHECK-NEXT: - - - - - - - - - 1.00 1.00 - uqxtn v0.8b, v0.8h
+# CHECK-NEXT: - - - - - - - - - 1.00 1.00 - uqxtn2 v0.16b, v0.8h
+# CHECK-NEXT: - - - - - - - - - 1.00 1.00 - uqxtn2 v0.4s, v0.2d
+# CHECK-NEXT: - - - - - - - - - 1.00 1.00 - uqxtn2 v0.8h, v0.4s
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - urecpe v0.2s, v0.2s
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - urecpe v0.4s, v0.4s
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - urhadd v0.16b, v0.16b, v0.16b
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - urhadd v0.4s, v0.4s, v0.4s
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - urhadd v0.8h, v0.8h, v0.8h
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - urshl d8, d7, d4
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - urshl v0.16b, v0.16b, v0.16b
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - urshl v0.2d, v0.2d, v0.2d
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - urshl v0.4s, v0.4s, v0.4s
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - urshl v0.8h, v0.8h, v0.8h
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - urshr d20, d23, #31
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - urshr v0.16b, v0.16b, #3
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - urshr v0.2d, v0.2d, #3
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - urshr v0.2s, v0.2s, #3
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - urshr v0.4h, v0.4h, #3
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - urshr v0.4s, v0.4s, #3
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - urshr v0.8b, v0.8b, #3
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - urshr v0.8h, v0.8h, #3
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - ursqrte v0.2s, v0.2s
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - ursqrte v0.4s, v0.4s
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - ursra d18, d10, #13
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - ursra v0.16b, v0.16b, #3
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - ursra v0.2d, v0.2d, #3
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - ursra v0.2s, v0.2s, #3
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - ursra v0.4h, v0.4h, #3
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - ursra v0.4s, v0.4s, #3
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - ursra v0.8b, v0.8b, #3
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - ursra v0.8h, v0.8h, #3
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - ushl d0, d0, d0
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - ushl v0.16b, v0.16b, v0.16b
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - ushl v0.4s, v0.4s, v0.4s
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - ushl v0.8h, v0.8h, v0.8h
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - ushll v0.4s, v0.4h, #3
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - ushll2 v0.8h, v0.16b, #3
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - ushr d10, d17, #18
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - ushr v0.16b, v0.16b, #3
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - ushr v0.2d, v0.2d, #3
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - ushr v0.2s, v0.2s, #3
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - ushr v0.4h, v0.4h, #3
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - ushr v0.4s, v0.4s, #3
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - ushr v0.8b, v0.8b, #3
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - ushr v0.8h, v0.8h, #3
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - usqadd b19, b14
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - usqadd d18, d22
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - usqadd h20, h15
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - usqadd s21, s12
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - usqadd v0.16b, v0.16b
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - usqadd v0.2d, v0.2d
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - usqadd v0.2s, v0.2s
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - usqadd v0.4h, v0.4h
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - usqadd v0.4s, v0.4s
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - usqadd v0.8b, v0.8b
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - usqadd v0.8h, v0.8h
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - usra d20, d13, #61
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - usra v0.16b, v0.16b, #3
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - usra v0.2d, v0.2d, #3
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - usra v0.2s, v0.2s, #3
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - usra v0.4h, v0.4h, #3
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - usra v0.4s, v0.4s, #3
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - usra v0.8b, v0.8b, #3
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - usra v0.8h, v0.8h, #3
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - usubl v0.2d, v0.2s, v0.2s
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - usubl v0.4s, v0.4h, v0.4h
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - usubl v0.8h, v0.8b, v0.8b
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - usubl2 v0.2d, v0.4s, v0.4s
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - usubl2 v0.4s, v0.8h, v0.8h
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - usubl2 v0.8h, v0.16b, v0.16b
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - usubw v0.2d, v0.2d, v0.2s
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - usubw v0.4s, v0.4s, v0.4h
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - usubw v0.8h, v0.8h, v0.8b
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - usubw2 v0.2d, v0.2d, v0.4s
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - usubw2 v0.4s, v0.4s, v0.8h
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - usubw2 v0.8h, v0.8h, v0.16b
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - uzp1 v0.16b, v0.16b, v0.16b
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - uzp1 v0.2d, v0.2d, v0.2d
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - uzp1 v0.2s, v0.2s, v0.2s
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - uzp1 v0.4h, v0.4h, v0.4h
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - uzp1 v0.4s, v0.4s, v0.4s
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - uzp1 v0.8b, v0.8b, v0.8b
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - uzp1 v0.8h, v0.8h, v0.8h
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - uzp2 v0.16b, v0.16b, v0.16b
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - uzp2 v0.2d, v0.2d, v0.2d
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - uzp2 v0.2s, v0.2s, v0.2s
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - uzp2 v0.4h, v0.4h, v0.4h
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - uzp2 v0.4s, v0.4s, v0.4s
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - uzp2 v0.8b, v0.8b, v0.8b
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - uzp2 v0.8h, v0.8h, v0.8h
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - xtn v0.2s, v0.2d
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - xtn v0.4h, v0.4s
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - xtn v0.8b, v0.8h
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - xtn2 v0.16b, v0.8h
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - xtn2 v0.4s, v0.2d
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - xtn2 v0.8h, v0.4s
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - zip1 v0.16b, v0.16b, v0.16b
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - zip1 v0.2d, v0.2d, v0.2d
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - zip1 v0.2s, v0.2s, v0.2s
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - zip1 v0.4h, v0.4h, v0.4h
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - zip1 v0.4s, v0.4s, v0.4s
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - zip1 v0.8b, v0.8b, v0.8b
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - zip1 v0.8h, v0.8h, v0.8h
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - zip2 v0.16b, v0.16b, v0.16b
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - zip2 v0.2d, v0.2d, v0.2d
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - zip2 v0.2s, v0.2s, v0.2s
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - zip2 v0.4h, v0.4h, v0.4h
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - zip2 v0.4s, v0.4s, v0.4s
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - zip2 v0.8b, v0.8b, v0.8b
+# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - zip2 v0.8h, v0.8h, v0.8h
diff --git a/llvm/test/tools/llvm-mca/AArch64/Ampere/Ampere1B/shifted-register.s b/llvm/test/tools/llvm-mca/AArch64/Ampere/Ampere1B/shifted-register.s
new file mode 100644
index 0000000..27e0279
--- /dev/null
+++ b/llvm/test/tools/llvm-mca/AArch64/Ampere/Ampere1B/shifted-register.s
@@ -0,0 +1,31 @@
+# NOTE: Assertions have been autogenerated by utils/update_mca_test_checks.py
+# RUN: llvm-mca -march=aarch64 -mcpu=ampere1b -resource-pressure=false < %s | FileCheck %s
+
+ add w0, w1, w2, lsl #0
+ sub x3, x4, x5, lsl #1
+ adds x6, x7, x8, lsr #2
+ subs x9, x10, x11, asr #3
+
+# CHECK: Iterations: 100
+# CHECK-NEXT: Instructions: 400
+# CHECK-NEXT: Total Cycles: 156
+# CHECK-NEXT: Total uOps: 600
+
+# CHECK: Dispatch Width: 12
+# CHECK-NEXT: uOps Per Cycle: 3.85
+# CHECK-NEXT: IPC: 2.56
+# CHECK-NEXT: Block RThroughput: 1.0
+
+# CHECK: Instruction Info:
+# CHECK-NEXT: [1]: #uOps
+# CHECK-NEXT: [2]: Latency
+# CHECK-NEXT: [3]: RThroughput
+# CHECK-NEXT: [4]: MayLoad
+# CHECK-NEXT: [5]: MayStore
+# CHECK-NEXT: [6]: HasSideEffects (U)
+
+# CHECK: [1] [2] [3] [4] [5] [6] Instructions:
+# CHECK-NEXT: 1 1 0.25 add w0, w1, w2
+# CHECK-NEXT: 1 1 0.25 sub x3, x4, x5, lsl #1
+# CHECK-NEXT: 2 2 0.50 adds x6, x7, x8, lsr #2
+# CHECK-NEXT: 2 2 0.50 subs x9, x10, x11, asr #3
diff --git a/llvm/test/tools/llvm-mca/X86/IceLakeServer/resources-avx512.s b/llvm/test/tools/llvm-mca/X86/IceLakeServer/resources-avx512.s
index 6d33fdb..5c12c52 100644
--- a/llvm/test/tools/llvm-mca/X86/IceLakeServer/resources-avx512.s
+++ b/llvm/test/tools/llvm-mca/X86/IceLakeServer/resources-avx512.s
@@ -1084,24 +1084,24 @@ vunpcklps (%rax){1to16}, %zmm17, %zmm19 {z}{k1}
# CHECK-NEXT: 1 4 1.00 kshiftlw $2, %k1, %k2
# CHECK-NEXT: 1 4 1.00 kshiftrw $2, %k1, %k2
# CHECK-NEXT: 1 4 1.00 kunpckbw %k0, %k1, %k2
-# CHECK-NEXT: 1 4 0.50 vaddpd %zmm16, %zmm17, %zmm19
-# CHECK-NEXT: 2 11 0.50 * vaddpd (%rax), %zmm17, %zmm19
-# CHECK-NEXT: 2 11 0.50 * vaddpd (%rax){1to8}, %zmm17, %zmm19
-# CHECK-NEXT: 1 4 0.50 vaddpd %zmm16, %zmm17, %zmm19 {%k1}
-# CHECK-NEXT: 2 11 0.50 * vaddpd (%rax), %zmm17, %zmm19 {%k1}
-# CHECK-NEXT: 2 11 0.50 * vaddpd (%rax){1to8}, %zmm17, %zmm19 {%k1}
-# CHECK-NEXT: 1 4 0.50 vaddpd %zmm16, %zmm17, %zmm19 {%k1} {z}
-# CHECK-NEXT: 2 11 0.50 * vaddpd (%rax), %zmm17, %zmm19 {%k1} {z}
-# CHECK-NEXT: 2 11 0.50 * vaddpd (%rax){1to8}, %zmm17, %zmm19 {%k1} {z}
-# CHECK-NEXT: 1 4 0.50 vaddps %zmm16, %zmm17, %zmm19
-# CHECK-NEXT: 2 11 0.50 * vaddps (%rax), %zmm17, %zmm19
-# CHECK-NEXT: 2 11 0.50 * vaddps (%rax){1to16}, %zmm17, %zmm19
-# CHECK-NEXT: 1 4 0.50 vaddps %zmm16, %zmm17, %zmm19 {%k1}
-# CHECK-NEXT: 2 11 0.50 * vaddps (%rax), %zmm17, %zmm19 {%k1}
-# CHECK-NEXT: 2 11 0.50 * vaddps (%rax){1to16}, %zmm17, %zmm19 {%k1}
-# CHECK-NEXT: 1 4 0.50 vaddps %zmm16, %zmm17, %zmm19 {%k1} {z}
-# CHECK-NEXT: 2 11 0.50 * vaddps (%rax), %zmm17, %zmm19 {%k1} {z}
-# CHECK-NEXT: 2 11 0.50 * vaddps (%rax){1to16}, %zmm17, %zmm19 {%k1} {z}
+# CHECK-NEXT: 1 4 1.00 vaddpd %zmm16, %zmm17, %zmm19
+# CHECK-NEXT: 2 11 1.00 * vaddpd (%rax), %zmm17, %zmm19
+# CHECK-NEXT: 2 11 1.00 * vaddpd (%rax){1to8}, %zmm17, %zmm19
+# CHECK-NEXT: 1 4 1.00 vaddpd %zmm16, %zmm17, %zmm19 {%k1}
+# CHECK-NEXT: 2 11 1.00 * vaddpd (%rax), %zmm17, %zmm19 {%k1}
+# CHECK-NEXT: 2 11 1.00 * vaddpd (%rax){1to8}, %zmm17, %zmm19 {%k1}
+# CHECK-NEXT: 1 4 1.00 vaddpd %zmm16, %zmm17, %zmm19 {%k1} {z}
+# CHECK-NEXT: 2 11 1.00 * vaddpd (%rax), %zmm17, %zmm19 {%k1} {z}
+# CHECK-NEXT: 2 11 1.00 * vaddpd (%rax){1to8}, %zmm17, %zmm19 {%k1} {z}
+# CHECK-NEXT: 1 4 1.00 vaddps %zmm16, %zmm17, %zmm19
+# CHECK-NEXT: 2 11 1.00 * vaddps (%rax), %zmm17, %zmm19
+# CHECK-NEXT: 2 11 1.00 * vaddps (%rax){1to16}, %zmm17, %zmm19
+# CHECK-NEXT: 1 4 1.00 vaddps %zmm16, %zmm17, %zmm19 {%k1}
+# CHECK-NEXT: 2 11 1.00 * vaddps (%rax), %zmm17, %zmm19 {%k1}
+# CHECK-NEXT: 2 11 1.00 * vaddps (%rax){1to16}, %zmm17, %zmm19 {%k1}
+# CHECK-NEXT: 1 4 1.00 vaddps %zmm16, %zmm17, %zmm19 {%k1} {z}
+# CHECK-NEXT: 2 11 1.00 * vaddps (%rax), %zmm17, %zmm19 {%k1} {z}
+# CHECK-NEXT: 2 11 1.00 * vaddps (%rax){1to16}, %zmm17, %zmm19 {%k1} {z}
# CHECK-NEXT: 1 3 1.00 valignd $1, %zmm16, %zmm17, %zmm19
# CHECK-NEXT: 2 10 1.00 * valignd $1, (%rax), %zmm17, %zmm19
# CHECK-NEXT: 2 10 1.00 * valignd $1, (%rax){1to16}, %zmm17, %zmm19
@@ -1510,24 +1510,24 @@ vunpcklps (%rax){1to16}, %zmm17, %zmm19 {z}{k1}
# CHECK-NEXT: 2 1 0.50 * vmovups %zmm16, (%rax) {%k1}
# CHECK-NEXT: 1 1 0.50 vmovups %zmm16, %zmm19 {%k1} {z}
# CHECK-NEXT: 2 8 0.50 * vmovups (%rax), %zmm19 {%k1} {z}
-# CHECK-NEXT: 1 4 0.50 vmulpd %zmm16, %zmm17, %zmm19
-# CHECK-NEXT: 2 11 0.50 * vmulpd (%rax), %zmm17, %zmm19
-# CHECK-NEXT: 2 11 0.50 * vmulpd (%rax){1to8}, %zmm17, %zmm19
-# CHECK-NEXT: 1 4 0.50 vmulpd %zmm16, %zmm17, %zmm19 {%k1}
-# CHECK-NEXT: 2 11 0.50 * vmulpd (%rax), %zmm17, %zmm19 {%k1}
-# CHECK-NEXT: 2 11 0.50 * vmulpd (%rax){1to8}, %zmm17, %zmm19 {%k1}
-# CHECK-NEXT: 1 4 0.50 vmulpd %zmm16, %zmm17, %zmm19 {%k1} {z}
-# CHECK-NEXT: 2 11 0.50 * vmulpd (%rax), %zmm17, %zmm19 {%k1} {z}
-# CHECK-NEXT: 2 11 0.50 * vmulpd (%rax){1to8}, %zmm17, %zmm19 {%k1} {z}
-# CHECK-NEXT: 1 4 0.50 vmulps %zmm16, %zmm17, %zmm19
-# CHECK-NEXT: 2 11 0.50 * vmulps (%rax), %zmm17, %zmm19
-# CHECK-NEXT: 2 11 0.50 * vmulps (%rax){1to16}, %zmm17, %zmm19
-# CHECK-NEXT: 1 4 0.50 vmulps %zmm16, %zmm17, %zmm19 {%k1}
-# CHECK-NEXT: 2 11 0.50 * vmulps (%rax), %zmm17, %zmm19 {%k1}
-# CHECK-NEXT: 2 11 0.50 * vmulps (%rax){1to16}, %zmm17, %zmm19 {%k1}
-# CHECK-NEXT: 1 4 0.50 vmulps %zmm16, %zmm17, %zmm19 {%k1} {z}
-# CHECK-NEXT: 2 11 0.50 * vmulps (%rax), %zmm17, %zmm19 {%k1} {z}
-# CHECK-NEXT: 2 11 0.50 * vmulps (%rax){1to16}, %zmm17, %zmm19 {%k1} {z}
+# CHECK-NEXT: 1 4 1.00 vmulpd %zmm16, %zmm17, %zmm19
+# CHECK-NEXT: 2 11 1.00 * vmulpd (%rax), %zmm17, %zmm19
+# CHECK-NEXT: 2 11 1.00 * vmulpd (%rax){1to8}, %zmm17, %zmm19
+# CHECK-NEXT: 1 4 1.00 vmulpd %zmm16, %zmm17, %zmm19 {%k1}
+# CHECK-NEXT: 2 11 1.00 * vmulpd (%rax), %zmm17, %zmm19 {%k1}
+# CHECK-NEXT: 2 11 1.00 * vmulpd (%rax){1to8}, %zmm17, %zmm19 {%k1}
+# CHECK-NEXT: 1 4 1.00 vmulpd %zmm16, %zmm17, %zmm19 {%k1} {z}
+# CHECK-NEXT: 2 11 1.00 * vmulpd (%rax), %zmm17, %zmm19 {%k1} {z}
+# CHECK-NEXT: 2 11 1.00 * vmulpd (%rax){1to8}, %zmm17, %zmm19 {%k1} {z}
+# CHECK-NEXT: 1 4 1.00 vmulps %zmm16, %zmm17, %zmm19
+# CHECK-NEXT: 2 11 1.00 * vmulps (%rax), %zmm17, %zmm19
+# CHECK-NEXT: 2 11 1.00 * vmulps (%rax){1to16}, %zmm17, %zmm19
+# CHECK-NEXT: 1 4 1.00 vmulps %zmm16, %zmm17, %zmm19 {%k1}
+# CHECK-NEXT: 2 11 1.00 * vmulps (%rax), %zmm17, %zmm19 {%k1}
+# CHECK-NEXT: 2 11 1.00 * vmulps (%rax){1to16}, %zmm17, %zmm19 {%k1}
+# CHECK-NEXT: 1 4 1.00 vmulps %zmm16, %zmm17, %zmm19 {%k1} {z}
+# CHECK-NEXT: 2 11 1.00 * vmulps (%rax), %zmm17, %zmm19 {%k1} {z}
+# CHECK-NEXT: 2 11 1.00 * vmulps (%rax){1to16}, %zmm17, %zmm19 {%k1} {z}
# CHECK-NEXT: 1 1 1.00 vpabsd %zmm16, %zmm19
# CHECK-NEXT: 2 8 1.00 * vpabsd (%rax), %zmm19
# CHECK-NEXT: 2 8 1.00 * vpabsd (%rax){1to16}, %zmm19
@@ -1958,24 +1958,24 @@ vunpcklps (%rax){1to16}, %zmm17, %zmm19 {z}{k1}
# CHECK-NEXT: 1 4 1.00 vptestnmq %zmm0, %zmm1, %k2 {%k3}
# CHECK-NEXT: 2 11 1.00 * vptestnmq (%rax), %zmm1, %k2 {%k3}
# CHECK-NEXT: 2 11 1.00 * vptestnmq (%rax){1to8}, %zmm1, %k2 {%k3}
-# CHECK-NEXT: 1 4 0.50 vsubpd %zmm16, %zmm17, %zmm19
-# CHECK-NEXT: 2 11 0.50 * vsubpd (%rax), %zmm17, %zmm19
-# CHECK-NEXT: 2 11 0.50 * vsubpd (%rax){1to8}, %zmm17, %zmm19
-# CHECK-NEXT: 1 4 0.50 vsubpd %zmm16, %zmm17, %zmm19 {%k1}
-# CHECK-NEXT: 2 11 0.50 * vsubpd (%rax), %zmm17, %zmm19 {%k1}
-# CHECK-NEXT: 2 11 0.50 * vsubpd (%rax){1to8}, %zmm17, %zmm19 {%k1}
-# CHECK-NEXT: 1 4 0.50 vsubpd %zmm16, %zmm17, %zmm19 {%k1} {z}
-# CHECK-NEXT: 2 11 0.50 * vsubpd (%rax), %zmm17, %zmm19 {%k1} {z}
-# CHECK-NEXT: 2 11 0.50 * vsubpd (%rax){1to8}, %zmm17, %zmm19 {%k1} {z}
-# CHECK-NEXT: 1 4 0.50 vsubps %zmm16, %zmm17, %zmm19
-# CHECK-NEXT: 2 11 0.50 * vsubps (%rax), %zmm17, %zmm19
-# CHECK-NEXT: 2 11 0.50 * vsubps (%rax){1to16}, %zmm17, %zmm19
-# CHECK-NEXT: 1 4 0.50 vsubps %zmm16, %zmm17, %zmm19 {%k1}
-# CHECK-NEXT: 2 11 0.50 * vsubps (%rax), %zmm17, %zmm19 {%k1}
-# CHECK-NEXT: 2 11 0.50 * vsubps (%rax){1to16}, %zmm17, %zmm19 {%k1}
-# CHECK-NEXT: 1 4 0.50 vsubps %zmm16, %zmm17, %zmm19 {%k1} {z}
-# CHECK-NEXT: 2 11 0.50 * vsubps (%rax), %zmm17, %zmm19 {%k1} {z}
-# CHECK-NEXT: 2 11 0.50 * vsubps (%rax){1to16}, %zmm17, %zmm19 {%k1} {z}
+# CHECK-NEXT: 1 4 1.00 vsubpd %zmm16, %zmm17, %zmm19
+# CHECK-NEXT: 2 11 1.00 * vsubpd (%rax), %zmm17, %zmm19
+# CHECK-NEXT: 2 11 1.00 * vsubpd (%rax){1to8}, %zmm17, %zmm19
+# CHECK-NEXT: 1 4 1.00 vsubpd %zmm16, %zmm17, %zmm19 {%k1}
+# CHECK-NEXT: 2 11 1.00 * vsubpd (%rax), %zmm17, %zmm19 {%k1}
+# CHECK-NEXT: 2 11 1.00 * vsubpd (%rax){1to8}, %zmm17, %zmm19 {%k1}
+# CHECK-NEXT: 1 4 1.00 vsubpd %zmm16, %zmm17, %zmm19 {%k1} {z}
+# CHECK-NEXT: 2 11 1.00 * vsubpd (%rax), %zmm17, %zmm19 {%k1} {z}
+# CHECK-NEXT: 2 11 1.00 * vsubpd (%rax){1to8}, %zmm17, %zmm19 {%k1} {z}
+# CHECK-NEXT: 1 4 1.00 vsubps %zmm16, %zmm17, %zmm19
+# CHECK-NEXT: 2 11 1.00 * vsubps (%rax), %zmm17, %zmm19
+# CHECK-NEXT: 2 11 1.00 * vsubps (%rax){1to16}, %zmm17, %zmm19
+# CHECK-NEXT: 1 4 1.00 vsubps %zmm16, %zmm17, %zmm19 {%k1}
+# CHECK-NEXT: 2 11 1.00 * vsubps (%rax), %zmm17, %zmm19 {%k1}
+# CHECK-NEXT: 2 11 1.00 * vsubps (%rax){1to16}, %zmm17, %zmm19 {%k1}
+# CHECK-NEXT: 1 4 1.00 vsubps %zmm16, %zmm17, %zmm19 {%k1} {z}
+# CHECK-NEXT: 2 11 1.00 * vsubps (%rax), %zmm17, %zmm19 {%k1} {z}
+# CHECK-NEXT: 2 11 1.00 * vsubps (%rax){1to16}, %zmm17, %zmm19 {%k1} {z}
# CHECK-NEXT: 1 2 1.00 vucomiss %xmm16, %xmm17
# CHECK-NEXT: 2 7 1.00 * vucomiss (%rax), %xmm17
# CHECK-NEXT: 1 1 1.00 vunpckhpd %zmm16, %zmm17, %zmm19
@@ -2031,7 +2031,7 @@ vunpcklps (%rax){1to16}, %zmm17, %zmm19 {z}{k1}
# CHECK: Resource pressure per iteration:
# CHECK-NEXT: [0] [1] [2] [3] [4] [5] [6] [7] [8] [9] [10] [11]
-# CHECK-NEXT: - 612.00 371.17 99.67 327.50 327.50 8.00 612.17 2.00 8.00 8.00 8.00
+# CHECK-NEXT: - 612.00 398.17 99.67 327.50 327.50 8.00 585.17 2.00 8.00 8.00 8.00
# CHECK: Resource pressure by instruction:
# CHECK-NEXT: [0] [1] [2] [3] [4] [5] [6] [7] [8] [9] [10] [11] Instructions:
@@ -2044,24 +2044,24 @@ vunpcklps (%rax){1to16}, %zmm17, %zmm19 {z}{k1}
# CHECK-NEXT: - - - - - - - 1.00 - - - - kshiftlw $2, %k1, %k2
# CHECK-NEXT: - - - - - - - 1.00 - - - - kshiftrw $2, %k1, %k2
# CHECK-NEXT: - - - - - - - 1.00 - - - - kunpckbw %k0, %k1, %k2
-# CHECK-NEXT: - - 0.50 - - - - 0.50 - - - - vaddpd %zmm16, %zmm17, %zmm19
-# CHECK-NEXT: - - 0.50 - 0.50 0.50 - 0.50 - - - - vaddpd (%rax), %zmm17, %zmm19
-# CHECK-NEXT: - - 0.50 - 0.50 0.50 - 0.50 - - - - vaddpd (%rax){1to8}, %zmm17, %zmm19
-# CHECK-NEXT: - - 0.50 - - - - 0.50 - - - - vaddpd %zmm16, %zmm17, %zmm19 {%k1}
-# CHECK-NEXT: - - 0.50 - 0.50 0.50 - 0.50 - - - - vaddpd (%rax), %zmm17, %zmm19 {%k1}
-# CHECK-NEXT: - - 0.50 - 0.50 0.50 - 0.50 - - - - vaddpd (%rax){1to8}, %zmm17, %zmm19 {%k1}
-# CHECK-NEXT: - - 0.50 - - - - 0.50 - - - - vaddpd %zmm16, %zmm17, %zmm19 {%k1} {z}
-# CHECK-NEXT: - - 0.50 - 0.50 0.50 - 0.50 - - - - vaddpd (%rax), %zmm17, %zmm19 {%k1} {z}
-# CHECK-NEXT: - - 0.50 - 0.50 0.50 - 0.50 - - - - vaddpd (%rax){1to8}, %zmm17, %zmm19 {%k1} {z}
-# CHECK-NEXT: - - 0.50 - - - - 0.50 - - - - vaddps %zmm16, %zmm17, %zmm19
-# CHECK-NEXT: - - 0.50 - 0.50 0.50 - 0.50 - - - - vaddps (%rax), %zmm17, %zmm19
-# CHECK-NEXT: - - 0.50 - 0.50 0.50 - 0.50 - - - - vaddps (%rax){1to16}, %zmm17, %zmm19
-# CHECK-NEXT: - - 0.50 - - - - 0.50 - - - - vaddps %zmm16, %zmm17, %zmm19 {%k1}
-# CHECK-NEXT: - - 0.50 - 0.50 0.50 - 0.50 - - - - vaddps (%rax), %zmm17, %zmm19 {%k1}
-# CHECK-NEXT: - - 0.50 - 0.50 0.50 - 0.50 - - - - vaddps (%rax){1to16}, %zmm17, %zmm19 {%k1}
-# CHECK-NEXT: - - 0.50 - - - - 0.50 - - - - vaddps %zmm16, %zmm17, %zmm19 {%k1} {z}
-# CHECK-NEXT: - - 0.50 - 0.50 0.50 - 0.50 - - - - vaddps (%rax), %zmm17, %zmm19 {%k1} {z}
-# CHECK-NEXT: - - 0.50 - 0.50 0.50 - 0.50 - - - - vaddps (%rax){1to16}, %zmm17, %zmm19 {%k1} {z}
+# CHECK-NEXT: - - 1.00 - - - - - - - - - vaddpd %zmm16, %zmm17, %zmm19
+# CHECK-NEXT: - - 1.00 - 0.50 0.50 - - - - - - vaddpd (%rax), %zmm17, %zmm19
+# CHECK-NEXT: - - 1.00 - 0.50 0.50 - - - - - - vaddpd (%rax){1to8}, %zmm17, %zmm19
+# CHECK-NEXT: - - 1.00 - - - - - - - - - vaddpd %zmm16, %zmm17, %zmm19 {%k1}
+# CHECK-NEXT: - - 1.00 - 0.50 0.50 - - - - - - vaddpd (%rax), %zmm17, %zmm19 {%k1}
+# CHECK-NEXT: - - 1.00 - 0.50 0.50 - - - - - - vaddpd (%rax){1to8}, %zmm17, %zmm19 {%k1}
+# CHECK-NEXT: - - 1.00 - - - - - - - - - vaddpd %zmm16, %zmm17, %zmm19 {%k1} {z}
+# CHECK-NEXT: - - 1.00 - 0.50 0.50 - - - - - - vaddpd (%rax), %zmm17, %zmm19 {%k1} {z}
+# CHECK-NEXT: - - 1.00 - 0.50 0.50 - - - - - - vaddpd (%rax){1to8}, %zmm17, %zmm19 {%k1} {z}
+# CHECK-NEXT: - - 1.00 - - - - - - - - - vaddps %zmm16, %zmm17, %zmm19
+# CHECK-NEXT: - - 1.00 - 0.50 0.50 - - - - - - vaddps (%rax), %zmm17, %zmm19
+# CHECK-NEXT: - - 1.00 - 0.50 0.50 - - - - - - vaddps (%rax){1to16}, %zmm17, %zmm19
+# CHECK-NEXT: - - 1.00 - - - - - - - - - vaddps %zmm16, %zmm17, %zmm19 {%k1}
+# CHECK-NEXT: - - 1.00 - 0.50 0.50 - - - - - - vaddps (%rax), %zmm17, %zmm19 {%k1}
+# CHECK-NEXT: - - 1.00 - 0.50 0.50 - - - - - - vaddps (%rax){1to16}, %zmm17, %zmm19 {%k1}
+# CHECK-NEXT: - - 1.00 - - - - - - - - - vaddps %zmm16, %zmm17, %zmm19 {%k1} {z}
+# CHECK-NEXT: - - 1.00 - 0.50 0.50 - - - - - - vaddps (%rax), %zmm17, %zmm19 {%k1} {z}
+# CHECK-NEXT: - - 1.00 - 0.50 0.50 - - - - - - vaddps (%rax){1to16}, %zmm17, %zmm19 {%k1} {z}
# CHECK-NEXT: - - - - - - - 1.00 - - - - valignd $1, %zmm16, %zmm17, %zmm19
# CHECK-NEXT: - - - - 0.50 0.50 - 1.00 - - - - valignd $1, (%rax), %zmm17, %zmm19
# CHECK-NEXT: - - - - 0.50 0.50 - 1.00 - - - - valignd $1, (%rax){1to16}, %zmm17, %zmm19
@@ -2470,24 +2470,24 @@ vunpcklps (%rax){1to16}, %zmm17, %zmm19 {z}{k1}
# CHECK-NEXT: - - - - - - 0.50 - - 0.50 0.50 0.50 vmovups %zmm16, (%rax) {%k1}
# CHECK-NEXT: - - 0.50 - - - - 0.50 - - - - vmovups %zmm16, %zmm19 {%k1} {z}
# CHECK-NEXT: - - 0.33 0.33 0.50 0.50 - 0.33 - - - - vmovups (%rax), %zmm19 {%k1} {z}
-# CHECK-NEXT: - - 0.50 - - - - 0.50 - - - - vmulpd %zmm16, %zmm17, %zmm19
-# CHECK-NEXT: - - 0.50 - 0.50 0.50 - 0.50 - - - - vmulpd (%rax), %zmm17, %zmm19
-# CHECK-NEXT: - - 0.50 - 0.50 0.50 - 0.50 - - - - vmulpd (%rax){1to8}, %zmm17, %zmm19
-# CHECK-NEXT: - - 0.50 - - - - 0.50 - - - - vmulpd %zmm16, %zmm17, %zmm19 {%k1}
-# CHECK-NEXT: - - 0.50 - 0.50 0.50 - 0.50 - - - - vmulpd (%rax), %zmm17, %zmm19 {%k1}
-# CHECK-NEXT: - - 0.50 - 0.50 0.50 - 0.50 - - - - vmulpd (%rax){1to8}, %zmm17, %zmm19 {%k1}
-# CHECK-NEXT: - - 0.50 - - - - 0.50 - - - - vmulpd %zmm16, %zmm17, %zmm19 {%k1} {z}
-# CHECK-NEXT: - - 0.50 - 0.50 0.50 - 0.50 - - - - vmulpd (%rax), %zmm17, %zmm19 {%k1} {z}
-# CHECK-NEXT: - - 0.50 - 0.50 0.50 - 0.50 - - - - vmulpd (%rax){1to8}, %zmm17, %zmm19 {%k1} {z}
-# CHECK-NEXT: - - 0.50 - - - - 0.50 - - - - vmulps %zmm16, %zmm17, %zmm19
-# CHECK-NEXT: - - 0.50 - 0.50 0.50 - 0.50 - - - - vmulps (%rax), %zmm17, %zmm19
-# CHECK-NEXT: - - 0.50 - 0.50 0.50 - 0.50 - - - - vmulps (%rax){1to16}, %zmm17, %zmm19
-# CHECK-NEXT: - - 0.50 - - - - 0.50 - - - - vmulps %zmm16, %zmm17, %zmm19 {%k1}
-# CHECK-NEXT: - - 0.50 - 0.50 0.50 - 0.50 - - - - vmulps (%rax), %zmm17, %zmm19 {%k1}
-# CHECK-NEXT: - - 0.50 - 0.50 0.50 - 0.50 - - - - vmulps (%rax){1to16}, %zmm17, %zmm19 {%k1}
-# CHECK-NEXT: - - 0.50 - - - - 0.50 - - - - vmulps %zmm16, %zmm17, %zmm19 {%k1} {z}
-# CHECK-NEXT: - - 0.50 - 0.50 0.50 - 0.50 - - - - vmulps (%rax), %zmm17, %zmm19 {%k1} {z}
-# CHECK-NEXT: - - 0.50 - 0.50 0.50 - 0.50 - - - - vmulps (%rax){1to16}, %zmm17, %zmm19 {%k1} {z}
+# CHECK-NEXT: - - 1.00 - - - - - - - - - vmulpd %zmm16, %zmm17, %zmm19
+# CHECK-NEXT: - - 1.00 - 0.50 0.50 - - - - - - vmulpd (%rax), %zmm17, %zmm19
+# CHECK-NEXT: - - 1.00 - 0.50 0.50 - - - - - - vmulpd (%rax){1to8}, %zmm17, %zmm19
+# CHECK-NEXT: - - 1.00 - - - - - - - - - vmulpd %zmm16, %zmm17, %zmm19 {%k1}
+# CHECK-NEXT: - - 1.00 - 0.50 0.50 - - - - - - vmulpd (%rax), %zmm17, %zmm19 {%k1}
+# CHECK-NEXT: - - 1.00 - 0.50 0.50 - - - - - - vmulpd (%rax){1to8}, %zmm17, %zmm19 {%k1}
+# CHECK-NEXT: - - 1.00 - - - - - - - - - vmulpd %zmm16, %zmm17, %zmm19 {%k1} {z}
+# CHECK-NEXT: - - 1.00 - 0.50 0.50 - - - - - - vmulpd (%rax), %zmm17, %zmm19 {%k1} {z}
+# CHECK-NEXT: - - 1.00 - 0.50 0.50 - - - - - - vmulpd (%rax){1to8}, %zmm17, %zmm19 {%k1} {z}
+# CHECK-NEXT: - - 1.00 - - - - - - - - - vmulps %zmm16, %zmm17, %zmm19
+# CHECK-NEXT: - - 1.00 - 0.50 0.50 - - - - - - vmulps (%rax), %zmm17, %zmm19
+# CHECK-NEXT: - - 1.00 - 0.50 0.50 - - - - - - vmulps (%rax){1to16}, %zmm17, %zmm19
+# CHECK-NEXT: - - 1.00 - - - - - - - - - vmulps %zmm16, %zmm17, %zmm19 {%k1}
+# CHECK-NEXT: - - 1.00 - 0.50 0.50 - - - - - - vmulps (%rax), %zmm17, %zmm19 {%k1}
+# CHECK-NEXT: - - 1.00 - 0.50 0.50 - - - - - - vmulps (%rax){1to16}, %zmm17, %zmm19 {%k1}
+# CHECK-NEXT: - - 1.00 - - - - - - - - - vmulps %zmm16, %zmm17, %zmm19 {%k1} {z}
+# CHECK-NEXT: - - 1.00 - 0.50 0.50 - - - - - - vmulps (%rax), %zmm17, %zmm19 {%k1} {z}
+# CHECK-NEXT: - - 1.00 - 0.50 0.50 - - - - - - vmulps (%rax){1to16}, %zmm17, %zmm19 {%k1} {z}
# CHECK-NEXT: - - 1.00 - - - - - - - - - vpabsd %zmm16, %zmm19
# CHECK-NEXT: - - 1.00 - 0.50 0.50 - - - - - - vpabsd (%rax), %zmm19
# CHECK-NEXT: - - 1.00 - 0.50 0.50 - - - - - - vpabsd (%rax){1to16}, %zmm19
@@ -2918,24 +2918,24 @@ vunpcklps (%rax){1to16}, %zmm17, %zmm19 {z}{k1}
# CHECK-NEXT: - - - - - - - 1.00 - - - - vptestnmq %zmm0, %zmm1, %k2 {%k3}
# CHECK-NEXT: - - - - 0.50 0.50 - 1.00 - - - - vptestnmq (%rax), %zmm1, %k2 {%k3}
# CHECK-NEXT: - - - - 0.50 0.50 - 1.00 - - - - vptestnmq (%rax){1to8}, %zmm1, %k2 {%k3}
-# CHECK-NEXT: - - 0.50 - - - - 0.50 - - - - vsubpd %zmm16, %zmm17, %zmm19
-# CHECK-NEXT: - - 0.50 - 0.50 0.50 - 0.50 - - - - vsubpd (%rax), %zmm17, %zmm19
-# CHECK-NEXT: - - 0.50 - 0.50 0.50 - 0.50 - - - - vsubpd (%rax){1to8}, %zmm17, %zmm19
-# CHECK-NEXT: - - 0.50 - - - - 0.50 - - - - vsubpd %zmm16, %zmm17, %zmm19 {%k1}
-# CHECK-NEXT: - - 0.50 - 0.50 0.50 - 0.50 - - - - vsubpd (%rax), %zmm17, %zmm19 {%k1}
-# CHECK-NEXT: - - 0.50 - 0.50 0.50 - 0.50 - - - - vsubpd (%rax){1to8}, %zmm17, %zmm19 {%k1}
-# CHECK-NEXT: - - 0.50 - - - - 0.50 - - - - vsubpd %zmm16, %zmm17, %zmm19 {%k1} {z}
-# CHECK-NEXT: - - 0.50 - 0.50 0.50 - 0.50 - - - - vsubpd (%rax), %zmm17, %zmm19 {%k1} {z}
-# CHECK-NEXT: - - 0.50 - 0.50 0.50 - 0.50 - - - - vsubpd (%rax){1to8}, %zmm17, %zmm19 {%k1} {z}
-# CHECK-NEXT: - - 0.50 - - - - 0.50 - - - - vsubps %zmm16, %zmm17, %zmm19
-# CHECK-NEXT: - - 0.50 - 0.50 0.50 - 0.50 - - - - vsubps (%rax), %zmm17, %zmm19
-# CHECK-NEXT: - - 0.50 - 0.50 0.50 - 0.50 - - - - vsubps (%rax){1to16}, %zmm17, %zmm19
-# CHECK-NEXT: - - 0.50 - - - - 0.50 - - - - vsubps %zmm16, %zmm17, %zmm19 {%k1}
-# CHECK-NEXT: - - 0.50 - 0.50 0.50 - 0.50 - - - - vsubps (%rax), %zmm17, %zmm19 {%k1}
-# CHECK-NEXT: - - 0.50 - 0.50 0.50 - 0.50 - - - - vsubps (%rax){1to16}, %zmm17, %zmm19 {%k1}
-# CHECK-NEXT: - - 0.50 - - - - 0.50 - - - - vsubps %zmm16, %zmm17, %zmm19 {%k1} {z}
-# CHECK-NEXT: - - 0.50 - 0.50 0.50 - 0.50 - - - - vsubps (%rax), %zmm17, %zmm19 {%k1} {z}
-# CHECK-NEXT: - - 0.50 - 0.50 0.50 - 0.50 - - - - vsubps (%rax){1to16}, %zmm17, %zmm19 {%k1} {z}
+# CHECK-NEXT: - - 1.00 - - - - - - - - - vsubpd %zmm16, %zmm17, %zmm19
+# CHECK-NEXT: - - 1.00 - 0.50 0.50 - - - - - - vsubpd (%rax), %zmm17, %zmm19
+# CHECK-NEXT: - - 1.00 - 0.50 0.50 - - - - - - vsubpd (%rax){1to8}, %zmm17, %zmm19
+# CHECK-NEXT: - - 1.00 - - - - - - - - - vsubpd %zmm16, %zmm17, %zmm19 {%k1}
+# CHECK-NEXT: - - 1.00 - 0.50 0.50 - - - - - - vsubpd (%rax), %zmm17, %zmm19 {%k1}
+# CHECK-NEXT: - - 1.00 - 0.50 0.50 - - - - - - vsubpd (%rax){1to8}, %zmm17, %zmm19 {%k1}
+# CHECK-NEXT: - - 1.00 - - - - - - - - - vsubpd %zmm16, %zmm17, %zmm19 {%k1} {z}
+# CHECK-NEXT: - - 1.00 - 0.50 0.50 - - - - - - vsubpd (%rax), %zmm17, %zmm19 {%k1} {z}
+# CHECK-NEXT: - - 1.00 - 0.50 0.50 - - - - - - vsubpd (%rax){1to8}, %zmm17, %zmm19 {%k1} {z}
+# CHECK-NEXT: - - 1.00 - - - - - - - - - vsubps %zmm16, %zmm17, %zmm19
+# CHECK-NEXT: - - 1.00 - 0.50 0.50 - - - - - - vsubps (%rax), %zmm17, %zmm19
+# CHECK-NEXT: - - 1.00 - 0.50 0.50 - - - - - - vsubps (%rax){1to16}, %zmm17, %zmm19
+# CHECK-NEXT: - - 1.00 - - - - - - - - - vsubps %zmm16, %zmm17, %zmm19 {%k1}
+# CHECK-NEXT: - - 1.00 - 0.50 0.50 - - - - - - vsubps (%rax), %zmm17, %zmm19 {%k1}
+# CHECK-NEXT: - - 1.00 - 0.50 0.50 - - - - - - vsubps (%rax){1to16}, %zmm17, %zmm19 {%k1}
+# CHECK-NEXT: - - 1.00 - - - - - - - - - vsubps %zmm16, %zmm17, %zmm19 {%k1} {z}
+# CHECK-NEXT: - - 1.00 - 0.50 0.50 - - - - - - vsubps (%rax), %zmm17, %zmm19 {%k1} {z}
+# CHECK-NEXT: - - 1.00 - 0.50 0.50 - - - - - - vsubps (%rax){1to16}, %zmm17, %zmm19 {%k1} {z}
# CHECK-NEXT: - - 1.00 - - - - - - - - - vucomiss %xmm16, %xmm17
# CHECK-NEXT: - - 1.00 - 0.50 0.50 - - - - - - vucomiss (%rax), %xmm17
# CHECK-NEXT: - - - - - - - 1.00 - - - - vunpckhpd %zmm16, %zmm17, %zmm19
diff --git a/llvm/test/tools/llvm-mca/X86/IceLakeServer/resources-avx512dq.s b/llvm/test/tools/llvm-mca/X86/IceLakeServer/resources-avx512dq.s
index 4da4ceb..42041cd 100644
--- a/llvm/test/tools/llvm-mca/X86/IceLakeServer/resources-avx512dq.s
+++ b/llvm/test/tools/llvm-mca/X86/IceLakeServer/resources-avx512dq.s
@@ -753,30 +753,30 @@ vxorps (%rax){1to16}, %zmm17, %zmm19 {z}{k1}
# CHECK-NEXT: 4 22 3.00 * vpmullq (%rax), %zmm17, %zmm19 {%k1}
# CHECK-NEXT: 3 15 3.00 vpmullq %zmm16, %zmm17, %zmm19 {%k1} {z}
# CHECK-NEXT: 4 22 3.00 * vpmullq (%rax), %zmm17, %zmm19 {%k1} {z}
-# CHECK-NEXT: 1 4 0.50 vrangepd $ab, %zmm16, %zmm17, %zmm19
-# CHECK-NEXT: 2 11 0.50 * vrangepd $ab, (%rax), %zmm17, %zmm19
-# CHECK-NEXT: 2 11 0.50 * vrangepd $ab, (%rax){1to8}, %zmm17, %zmm19
-# CHECK-NEXT: 1 4 0.50 vrangepd $ab, %zmm16, %zmm17, %zmm19 {%k1}
-# CHECK-NEXT: 2 11 0.50 * vrangepd $ab, (%rax), %zmm17, %zmm19 {%k1}
-# CHECK-NEXT: 2 11 0.50 * vrangepd $ab, (%rax){1to8}, %zmm17, %zmm19 {%k1}
-# CHECK-NEXT: 1 4 0.50 vrangepd $ab, %zmm16, %zmm17, %zmm19 {%k1} {z}
-# CHECK-NEXT: 2 11 0.50 * vrangepd $ab, (%rax), %zmm17, %zmm19 {%k1} {z}
-# CHECK-NEXT: 2 11 0.50 * vrangepd $ab, (%rax){1to8}, %zmm17, %zmm19 {%k1} {z}
-# CHECK-NEXT: 1 4 0.50 vrangepd $ab, {sae}, %zmm16, %zmm17, %zmm19
-# CHECK-NEXT: 1 4 0.50 vrangepd $ab, {sae}, %zmm16, %zmm17, %zmm19 {%k1}
-# CHECK-NEXT: 1 4 0.50 vrangepd $ab, {sae}, %zmm16, %zmm17, %zmm19 {%k1} {z}
-# CHECK-NEXT: 1 4 0.50 vrangeps $ab, %zmm16, %zmm17, %zmm19
-# CHECK-NEXT: 2 11 0.50 * vrangeps $ab, (%rax), %zmm17, %zmm19
-# CHECK-NEXT: 2 11 0.50 * vrangeps $ab, (%rax){1to16}, %zmm17, %zmm19
-# CHECK-NEXT: 1 4 0.50 vrangeps $ab, %zmm16, %zmm17, %zmm19 {%k1}
-# CHECK-NEXT: 2 11 0.50 * vrangeps $ab, (%rax), %zmm17, %zmm19 {%k1}
-# CHECK-NEXT: 2 11 0.50 * vrangeps $ab, (%rax){1to16}, %zmm17, %zmm19 {%k1}
-# CHECK-NEXT: 1 4 0.50 vrangeps $ab, %zmm16, %zmm17, %zmm19 {%k1} {z}
-# CHECK-NEXT: 2 11 0.50 * vrangeps $ab, (%rax), %zmm17, %zmm19 {%k1} {z}
-# CHECK-NEXT: 2 11 0.50 * vrangeps $ab, (%rax){1to16}, %zmm17, %zmm19 {%k1} {z}
-# CHECK-NEXT: 1 4 0.50 vrangeps $ab, {sae}, %zmm16, %zmm17, %zmm19
-# CHECK-NEXT: 1 4 0.50 vrangeps $ab, {sae}, %zmm16, %zmm17, %zmm19 {%k1}
-# CHECK-NEXT: 1 4 0.50 vrangeps $ab, {sae}, %zmm16, %zmm17, %zmm19 {%k1} {z}
+# CHECK-NEXT: 1 4 1.00 vrangepd $ab, %zmm16, %zmm17, %zmm19
+# CHECK-NEXT: 2 11 1.00 * vrangepd $ab, (%rax), %zmm17, %zmm19
+# CHECK-NEXT: 2 11 1.00 * vrangepd $ab, (%rax){1to8}, %zmm17, %zmm19
+# CHECK-NEXT: 1 4 1.00 vrangepd $ab, %zmm16, %zmm17, %zmm19 {%k1}
+# CHECK-NEXT: 2 11 1.00 * vrangepd $ab, (%rax), %zmm17, %zmm19 {%k1}
+# CHECK-NEXT: 2 11 1.00 * vrangepd $ab, (%rax){1to8}, %zmm17, %zmm19 {%k1}
+# CHECK-NEXT: 1 4 1.00 vrangepd $ab, %zmm16, %zmm17, %zmm19 {%k1} {z}
+# CHECK-NEXT: 2 11 1.00 * vrangepd $ab, (%rax), %zmm17, %zmm19 {%k1} {z}
+# CHECK-NEXT: 2 11 1.00 * vrangepd $ab, (%rax){1to8}, %zmm17, %zmm19 {%k1} {z}
+# CHECK-NEXT: 1 4 1.00 vrangepd $ab, {sae}, %zmm16, %zmm17, %zmm19
+# CHECK-NEXT: 1 4 1.00 vrangepd $ab, {sae}, %zmm16, %zmm17, %zmm19 {%k1}
+# CHECK-NEXT: 1 4 1.00 vrangepd $ab, {sae}, %zmm16, %zmm17, %zmm19 {%k1} {z}
+# CHECK-NEXT: 1 4 1.00 vrangeps $ab, %zmm16, %zmm17, %zmm19
+# CHECK-NEXT: 2 11 1.00 * vrangeps $ab, (%rax), %zmm17, %zmm19
+# CHECK-NEXT: 2 11 1.00 * vrangeps $ab, (%rax){1to16}, %zmm17, %zmm19
+# CHECK-NEXT: 1 4 1.00 vrangeps $ab, %zmm16, %zmm17, %zmm19 {%k1}
+# CHECK-NEXT: 2 11 1.00 * vrangeps $ab, (%rax), %zmm17, %zmm19 {%k1}
+# CHECK-NEXT: 2 11 1.00 * vrangeps $ab, (%rax){1to16}, %zmm17, %zmm19 {%k1}
+# CHECK-NEXT: 1 4 1.00 vrangeps $ab, %zmm16, %zmm17, %zmm19 {%k1} {z}
+# CHECK-NEXT: 2 11 1.00 * vrangeps $ab, (%rax), %zmm17, %zmm19 {%k1} {z}
+# CHECK-NEXT: 2 11 1.00 * vrangeps $ab, (%rax){1to16}, %zmm17, %zmm19 {%k1} {z}
+# CHECK-NEXT: 1 4 1.00 vrangeps $ab, {sae}, %zmm16, %zmm17, %zmm19
+# CHECK-NEXT: 1 4 1.00 vrangeps $ab, {sae}, %zmm16, %zmm17, %zmm19 {%k1}
+# CHECK-NEXT: 1 4 1.00 vrangeps $ab, {sae}, %zmm16, %zmm17, %zmm19 {%k1} {z}
# CHECK-NEXT: 1 4 0.50 vrangesd $ab, %xmm16, %xmm17, %xmm19
# CHECK-NEXT: 2 10 0.50 * vrangesd $ab, (%rax), %xmm17, %xmm19
# CHECK-NEXT: 1 4 0.50 vrangesd $ab, %xmm16, %xmm17, %xmm19 {%k1}
@@ -872,7 +872,7 @@ vxorps (%rax){1to16}, %zmm17, %zmm19 {z}{k1}
# CHECK: Resource pressure per iteration:
# CHECK-NEXT: [0] [1] [2] [3] [4] [5] [6] [7] [8] [9] [10] [11]
-# CHECK-NEXT: - - 197.25 73.25 102.00 102.00 5.50 209.25 0.25 5.50 5.50 5.50
+# CHECK-NEXT: - - 209.25 73.25 102.00 102.00 5.50 197.25 0.25 5.50 5.50 5.50
# CHECK: Resource pressure by instruction:
# CHECK-NEXT: [0] [1] [2] [3] [4] [5] [6] [7] [8] [9] [10] [11] Instructions:
@@ -1162,30 +1162,30 @@ vxorps (%rax){1to16}, %zmm17, %zmm19 {z}{k1}
# CHECK-NEXT: - - 3.00 - 0.50 0.50 - - - - - - vpmullq (%rax), %zmm17, %zmm19 {%k1}
# CHECK-NEXT: - - 3.00 - - - - - - - - - vpmullq %zmm16, %zmm17, %zmm19 {%k1} {z}
# CHECK-NEXT: - - 3.00 - 0.50 0.50 - - - - - - vpmullq (%rax), %zmm17, %zmm19 {%k1} {z}
-# CHECK-NEXT: - - 0.50 - - - - 0.50 - - - - vrangepd $ab, %zmm16, %zmm17, %zmm19
-# CHECK-NEXT: - - 0.50 - 0.50 0.50 - 0.50 - - - - vrangepd $ab, (%rax), %zmm17, %zmm19
-# CHECK-NEXT: - - 0.50 - 0.50 0.50 - 0.50 - - - - vrangepd $ab, (%rax){1to8}, %zmm17, %zmm19
-# CHECK-NEXT: - - 0.50 - - - - 0.50 - - - - vrangepd $ab, %zmm16, %zmm17, %zmm19 {%k1}
-# CHECK-NEXT: - - 0.50 - 0.50 0.50 - 0.50 - - - - vrangepd $ab, (%rax), %zmm17, %zmm19 {%k1}
-# CHECK-NEXT: - - 0.50 - 0.50 0.50 - 0.50 - - - - vrangepd $ab, (%rax){1to8}, %zmm17, %zmm19 {%k1}
-# CHECK-NEXT: - - 0.50 - - - - 0.50 - - - - vrangepd $ab, %zmm16, %zmm17, %zmm19 {%k1} {z}
-# CHECK-NEXT: - - 0.50 - 0.50 0.50 - 0.50 - - - - vrangepd $ab, (%rax), %zmm17, %zmm19 {%k1} {z}
-# CHECK-NEXT: - - 0.50 - 0.50 0.50 - 0.50 - - - - vrangepd $ab, (%rax){1to8}, %zmm17, %zmm19 {%k1} {z}
-# CHECK-NEXT: - - 0.50 - - - - 0.50 - - - - vrangepd $ab, {sae}, %zmm16, %zmm17, %zmm19
-# CHECK-NEXT: - - 0.50 - - - - 0.50 - - - - vrangepd $ab, {sae}, %zmm16, %zmm17, %zmm19 {%k1}
-# CHECK-NEXT: - - 0.50 - - - - 0.50 - - - - vrangepd $ab, {sae}, %zmm16, %zmm17, %zmm19 {%k1} {z}
-# CHECK-NEXT: - - 0.50 - - - - 0.50 - - - - vrangeps $ab, %zmm16, %zmm17, %zmm19
-# CHECK-NEXT: - - 0.50 - 0.50 0.50 - 0.50 - - - - vrangeps $ab, (%rax), %zmm17, %zmm19
-# CHECK-NEXT: - - 0.50 - 0.50 0.50 - 0.50 - - - - vrangeps $ab, (%rax){1to16}, %zmm17, %zmm19
-# CHECK-NEXT: - - 0.50 - - - - 0.50 - - - - vrangeps $ab, %zmm16, %zmm17, %zmm19 {%k1}
-# CHECK-NEXT: - - 0.50 - 0.50 0.50 - 0.50 - - - - vrangeps $ab, (%rax), %zmm17, %zmm19 {%k1}
-# CHECK-NEXT: - - 0.50 - 0.50 0.50 - 0.50 - - - - vrangeps $ab, (%rax){1to16}, %zmm17, %zmm19 {%k1}
-# CHECK-NEXT: - - 0.50 - - - - 0.50 - - - - vrangeps $ab, %zmm16, %zmm17, %zmm19 {%k1} {z}
-# CHECK-NEXT: - - 0.50 - 0.50 0.50 - 0.50 - - - - vrangeps $ab, (%rax), %zmm17, %zmm19 {%k1} {z}
-# CHECK-NEXT: - - 0.50 - 0.50 0.50 - 0.50 - - - - vrangeps $ab, (%rax){1to16}, %zmm17, %zmm19 {%k1} {z}
-# CHECK-NEXT: - - 0.50 - - - - 0.50 - - - - vrangeps $ab, {sae}, %zmm16, %zmm17, %zmm19
-# CHECK-NEXT: - - 0.50 - - - - 0.50 - - - - vrangeps $ab, {sae}, %zmm16, %zmm17, %zmm19 {%k1}
-# CHECK-NEXT: - - 0.50 - - - - 0.50 - - - - vrangeps $ab, {sae}, %zmm16, %zmm17, %zmm19 {%k1} {z}
+# CHECK-NEXT: - - 1.00 - - - - - - - - - vrangepd $ab, %zmm16, %zmm17, %zmm19
+# CHECK-NEXT: - - 1.00 - 0.50 0.50 - - - - - - vrangepd $ab, (%rax), %zmm17, %zmm19
+# CHECK-NEXT: - - 1.00 - 0.50 0.50 - - - - - - vrangepd $ab, (%rax){1to8}, %zmm17, %zmm19
+# CHECK-NEXT: - - 1.00 - - - - - - - - - vrangepd $ab, %zmm16, %zmm17, %zmm19 {%k1}
+# CHECK-NEXT: - - 1.00 - 0.50 0.50 - - - - - - vrangepd $ab, (%rax), %zmm17, %zmm19 {%k1}
+# CHECK-NEXT: - - 1.00 - 0.50 0.50 - - - - - - vrangepd $ab, (%rax){1to8}, %zmm17, %zmm19 {%k1}
+# CHECK-NEXT: - - 1.00 - - - - - - - - - vrangepd $ab, %zmm16, %zmm17, %zmm19 {%k1} {z}
+# CHECK-NEXT: - - 1.00 - 0.50 0.50 - - - - - - vrangepd $ab, (%rax), %zmm17, %zmm19 {%k1} {z}
+# CHECK-NEXT: - - 1.00 - 0.50 0.50 - - - - - - vrangepd $ab, (%rax){1to8}, %zmm17, %zmm19 {%k1} {z}
+# CHECK-NEXT: - - 1.00 - - - - - - - - - vrangepd $ab, {sae}, %zmm16, %zmm17, %zmm19
+# CHECK-NEXT: - - 1.00 - - - - - - - - - vrangepd $ab, {sae}, %zmm16, %zmm17, %zmm19 {%k1}
+# CHECK-NEXT: - - 1.00 - - - - - - - - - vrangepd $ab, {sae}, %zmm16, %zmm17, %zmm19 {%k1} {z}
+# CHECK-NEXT: - - 1.00 - - - - - - - - - vrangeps $ab, %zmm16, %zmm17, %zmm19
+# CHECK-NEXT: - - 1.00 - 0.50 0.50 - - - - - - vrangeps $ab, (%rax), %zmm17, %zmm19
+# CHECK-NEXT: - - 1.00 - 0.50 0.50 - - - - - - vrangeps $ab, (%rax){1to16}, %zmm17, %zmm19
+# CHECK-NEXT: - - 1.00 - - - - - - - - - vrangeps $ab, %zmm16, %zmm17, %zmm19 {%k1}
+# CHECK-NEXT: - - 1.00 - 0.50 0.50 - - - - - - vrangeps $ab, (%rax), %zmm17, %zmm19 {%k1}
+# CHECK-NEXT: - - 1.00 - 0.50 0.50 - - - - - - vrangeps $ab, (%rax){1to16}, %zmm17, %zmm19 {%k1}
+# CHECK-NEXT: - - 1.00 - - - - - - - - - vrangeps $ab, %zmm16, %zmm17, %zmm19 {%k1} {z}
+# CHECK-NEXT: - - 1.00 - 0.50 0.50 - - - - - - vrangeps $ab, (%rax), %zmm17, %zmm19 {%k1} {z}
+# CHECK-NEXT: - - 1.00 - 0.50 0.50 - - - - - - vrangeps $ab, (%rax){1to16}, %zmm17, %zmm19 {%k1} {z}
+# CHECK-NEXT: - - 1.00 - - - - - - - - - vrangeps $ab, {sae}, %zmm16, %zmm17, %zmm19
+# CHECK-NEXT: - - 1.00 - - - - - - - - - vrangeps $ab, {sae}, %zmm16, %zmm17, %zmm19 {%k1}
+# CHECK-NEXT: - - 1.00 - - - - - - - - - vrangeps $ab, {sae}, %zmm16, %zmm17, %zmm19 {%k1} {z}
# CHECK-NEXT: - - 0.50 0.50 - - - - - - - - vrangesd $ab, %xmm16, %xmm17, %xmm19
# CHECK-NEXT: - - 0.50 0.50 0.50 0.50 - - - - - - vrangesd $ab, (%rax), %xmm17, %xmm19
# CHECK-NEXT: - - 0.50 0.50 - - - - - - - - vrangesd $ab, %xmm16, %xmm17, %xmm19 {%k1}
diff --git a/llvm/test/tools/llvm-objdump/ELF/AMDGPU/subtarget.ll b/llvm/test/tools/llvm-objdump/ELF/AMDGPU/subtarget.ll
index ca136a6..bcec6ce 100644
--- a/llvm/test/tools/llvm-objdump/ELF/AMDGPU/subtarget.ll
+++ b/llvm/test/tools/llvm-objdump/ELF/AMDGPU/subtarget.ll
@@ -54,8 +54,8 @@ define amdgpu_kernel void @test_kernel() {
; RUN: diff %t-specify.txt %t-detect.txt
; ----------------------------------GFX10--------------------------------------
-; RUN: llc -mtriple=amdgcn-amd-amdhsa --amdhsa-code-object-version=6 -mcpu=gfx10.3-generic -filetype=obj -O0 -o %t.o %s
-; RUN: llvm-objdump -D --arch-name=amdgcn -mllvm --amdhsa-code-object-version=6 --mcpu=gfx10.3-generic %t.o > %t-specify.txt
+; RUN: llc -mtriple=amdgcn-amd-amdhsa --amdhsa-code-object-version=6 -mcpu=gfx10-3-generic -filetype=obj -O0 -o %t.o %s
+; RUN: llvm-objdump -D --arch-name=amdgcn -mllvm --amdhsa-code-object-version=6 --mcpu=gfx10-3-generic %t.o > %t-specify.txt
; RUN: llvm-objdump -D -mllvm --amdhsa-code-object-version=6 %t.o > %t-detect.txt
; RUN: diff %t-specify.txt %t-detect.txt
@@ -94,8 +94,8 @@ define amdgpu_kernel void @test_kernel() {
; RUN: llvm-objdump -D %t.o > %t-detect.txt
; RUN: diff %t-specify.txt %t-detect.txt
-; RUN: llc -mtriple=amdgcn-amd-amdhsa --amdhsa-code-object-version=6 -mcpu=gfx10.1-generic -filetype=obj -O0 -o %t.o %s
-; RUN: llvm-objdump -D --arch-name=amdgcn -mllvm --amdhsa-code-object-version=6 --mcpu=gfx10.1-generic %t.o > %t-specify.txt
+; RUN: llc -mtriple=amdgcn-amd-amdhsa --amdhsa-code-object-version=6 -mcpu=gfx10-1-generic -filetype=obj -O0 -o %t.o %s
+; RUN: llvm-objdump -D --arch-name=amdgcn -mllvm --amdhsa-code-object-version=6 --mcpu=gfx10-1-generic %t.o > %t-specify.txt
; RUN: llvm-objdump -D -mllvm --amdhsa-code-object-version=6 %t.o > %t-detect.txt
; RUN: diff %t-specify.txt %t-detect.txt
diff --git a/llvm/test/tools/obj2yaml/ELF/bb-addr-map-pgo-analysis-map.yaml b/llvm/test/tools/obj2yaml/ELF/bb-addr-map-pgo-analysis-map.yaml
new file mode 100644
index 0000000..299bf46
--- /dev/null
+++ b/llvm/test/tools/obj2yaml/ELF/bb-addr-map-pgo-analysis-map.yaml
@@ -0,0 +1,232 @@
+## Check how obj2yaml produces YAML PGO Analysis Map in .llvm_bb_addr_map.
+
+## Check that obj2yaml uses the "Entries" tag to describe an .llvm_bb_addr_map section.
+
+# RUN: yaml2obj --docnum=1 %s -o %t1
+# RUN: obj2yaml %t1 | FileCheck %s --check-prefix=VALID
+
+# VALID: --- !ELF
+# VALID-NEXT: FileHeader:
+# VALID-NEXT: Class: ELFCLASS64
+# VALID-NEXT: Data: ELFDATA2LSB
+# VALID-NEXT: Type: ET_EXEC
+# VALID-NEXT: Sections:
+# VALID-NEXT: - Name: .llvm_bb_addr_map
+# VALID-NEXT: Type: SHT_LLVM_BB_ADDR_MAP
+# VALID-NEXT: Entries:
+# VALID-NEXT: - Version: 2
+# VALID-NEXT: Feature: 0x7
+## The 'BaseAddress' field is omitted when it's zero.
+# VALID-NEXT: BBRanges:
+# VALID-NEXT: - BBEntries:
+# VALID-NEXT: - ID: 0
+# VALID-NEXT: AddressOffset: 0x1
+# VALID-NEXT: Size: 0x2
+# VALID-NEXT: Metadata: 0x3
+# VALID-NEXT: - ID: 2
+# VALID-NEXT: AddressOffset: 0x4
+# VALID-NEXT: Size: 0x5
+# VALID-NEXT: Metadata: 0x6
+# VALID-NEXT: - ID: 4
+# VALID-NEXT: AddressOffset: 0xFFFFFFFFFFFFFFF7
+# VALID-NEXT: Size: 0xFFFFFFFFFFFFFFF8
+# VALID-NEXT: Metadata: 0xFFFFFFFFFFFFFFF9
+# VALID-NEXT: - Version: 2
+# VALID-NEXT: Feature: 0xA
+# VALID-NEXT: BBRanges:
+# VALID-NEXT: - BaseAddress: 0xFFFFFFFFFFFFFF20
+# VALID-NEXT: BBEntries:
+# VALID-NEXT: - ID: 6
+# VALID-NEXT: AddressOffset: 0xA
+# VALID-NEXT: Size: 0xB
+# VALID-NEXT: Metadata: 0xC
+# VALID-NEXT: PGOAnalyses:
+# VALID-NEXT: - FuncEntryCount: 100
+# VALID-NEXT: PGOBBEntries:
+# VALID-NEXT: - BBFreq: 100
+# VALID-NEXT: Successors:
+# VALID-NEXT: - ID: 2
+# VALID-NEXT: BrProb: 0x80000000
+# VALID-NEXT: - ID: 4
+# VALID-NEXT: BrProb: 0x80000000
+# VALID-NEXT: - BBFreq: 50
+# VALID-NEXT: Successors:
+# VALID-NEXT: - ID: 4
+# VALID-NEXT: BrProb: 0xFFFFFFFF
+# VALID-NEXT: - BBFreq: 100
+# VALID-NEXT: Successors: []
+# VALID-NEXT: PGOBBEntries:
+# VALID-NEXT: - BBFreq: 20
+
+--- !ELF
+FileHeader:
+ Class: ELFCLASS64
+ Data: ELFDATA2LSB
+ Type: ET_EXEC
+Sections:
+ - Name: .llvm_bb_addr_map
+ Type: SHT_LLVM_BB_ADDR_MAP
+ ShSize: [[SIZE=<none>]]
+ Entries:
+ - Version: 2
+ Feature: 0x7
+ BBRanges:
+ - BaseAddress: 0x0
+ BBEntries:
+ - ID: 0
+ AddressOffset: 0x1
+ Size: 0x2
+ Metadata: 0x3
+ - ID: 2
+ AddressOffset: 0x4
+ Size: 0x5
+ Metadata: 0x6
+ - ID: 4
+ AddressOffset: 0xFFFFFFFFFFFFFFF7
+ Size: 0xFFFFFFFFFFFFFFF8
+ Metadata: 0xFFFFFFFFFFFFFFF9
+ - Version: 2
+ Feature: 0xA
+ BBRanges:
+ - BaseAddress: 0xFFFFFFFFFFFFFF20
+ BBEntries:
+ - ID: 6
+ AddressOffset: 0xA
+ Size: 0xB
+ Metadata: 0xC
+ PGOAnalyses:
+ - FuncEntryCount: 100
+ PGOBBEntries:
+ - BBFreq: 100
+ Successors:
+ - ID: 2
+ BrProb: 0x80000000
+ - ID: 4
+ BrProb: 0x80000000
+ - BBFreq: 50
+ Successors:
+ - ID: 4
+ BrProb: 0xFFFFFFFF
+ - BBFreq: 100
+ Successors: []
+ - PGOBBEntries:
+ - BBFreq: 20
+
+## Check obj2yaml can dump multiple .llvm_bb_addr_map sections.
+
+# RUN: yaml2obj --docnum=2 %s -o %t2
+# RUN: obj2yaml %t2 | FileCheck %s --check-prefix=MULTI
+
+# MULTI: --- !ELF
+# MULTI-NEXT: FileHeader:
+# MULTI-NEXT: Class: ELFCLASS64
+# MULTI-NEXT: Data: ELFDATA2LSB
+# MULTI-NEXT: Type: ET_EXEC
+# MULTI-NEXT: Sections:
+# MULTI-NEXT: - Name: .llvm_bb_addr_map
+# MULTI-NEXT: Type: SHT_LLVM_BB_ADDR_MAP
+# MULTI-NEXT: Entries:
+## Fieldx 'BaseAddress' is omitted when it is zero.
+# MULTI-NEXT: - Version: 0
+# MULTI-NEXT: Feature: 0x3
+# MULTI-NEXT: BBRanges:
+# MULTI-NEXT: - BBEntries:
+# MULTI-NEXT: - ID: 0
+# MULTI-NEXT: AddressOffset: 0x1
+# MULTI-NEXT: Size: 0x2
+# MULTI-NEXT: Metadata: 0x3
+# MULTI-NEXT: PGOAnalyses:
+# MULTI-NEXT: - FuncEntryCount: 0
+# MULTI-NEXT: PGOBBEntries:
+# MULTI-NEXT: - BBFreq: 0
+# MULTI-NEXT: - Name: '.llvm_bb_addr_map (1)'
+# MULTI-NEXT: Type: SHT_LLVM_BB_ADDR_MAP
+# MULTI-NEXT: Entries:
+# MULTI-NEXT: - Version: 0
+# MULTI-NEXT: Feature: 0x1
+# MULTI-NEXT: BBRanges:
+# MULTI-NEXT: - BaseAddress: 0x20
+# MULTI-NEXT: BBEntries: []
+# MULTI-NEXT: PGOAnalyses:
+# MULTI-NEXT: - FuncEntryCount: 0
+
+--- !ELF
+FileHeader:
+ Class: ELFCLASS64
+ Data: ELFDATA2LSB
+ Type: ET_EXEC
+Sections:
+ - Name: .llvm_bb_addr_map
+ Type: SHT_LLVM_BB_ADDR_MAP
+ Entries:
+## Check that obj2yaml does not emit the 'BaseAddress' and 'Feature' fields when
+## they are zero.
+ - Version: 0
+ Feature: 0x3
+ BBRanges:
+ - BaseAddress: 0x0
+ BBEntries:
+ - AddressOffset: 0x1
+ Size: 0x2
+ Metadata: 0x3
+ PGOAnalyses:
+ - FuncEntryCount: 0
+ PGOBBEntries:
+ - BBFreq: 0
+ - Name: '.llvm_bb_addr_map (1)'
+ Type: SHT_LLVM_BB_ADDR_MAP
+ Entries:
+ - Version: 0
+ Feature: 0x1
+ BBRanges:
+ - BaseAddress: 0x20
+ PGOAnalyses:
+ - FuncEntryCount: 0
+
+## Check that obj2yaml uses the "Content" tag to describe an .llvm_bb_addr_map section
+## when it can't extract the entries, for example, when the section is truncated.
+
+# RUN: yaml2obj --docnum=1 -DSIZE=0x1D %s -o %t3
+# RUN: obj2yaml %t3 | FileCheck %s --check-prefixes=TRUNCATED,INVALID
+
+# INVALID: --- !ELF
+# INVALID-NEXT: FileHeader:
+# INVALID-NEXT: Class: ELFCLASS64
+# INVALID-NEXT: Data: ELFDATA2LSB
+# INVALID-NEXT: Type: ET_EXEC
+# INVALID-NEXT: Sections:
+# INVALID-NEXT: - Name: .llvm_bb_addr_map
+# INVALID-NEXT: Type: SHT_LLVM_BB_ADDR_MAP
+# TRUNCATED-NEXT: Content: {{([[:xdigit:]]{58})}}{{$}}
+# TRUNCATED-NEXT: Content: {{([[:xdigit:]]{58})}}{{$}}
+
+## Check that obj2yaml uses the "Content" tag when original YAML does not
+## provide a PGO field that was enabled in the feature byte
+
+# RUN: yaml2obj --docnum=3 %s -o %t4
+# RUN: obj2yaml %t4 | FileCheck %s --check-prefix=MISSING-FEC
+
+--- !ELF
+FileHeader:
+ Class: ELFCLASS64
+ Data: ELFDATA2LSB
+ Type: ET_EXEC
+Sections:
+ - Name: '.llvm_bb_addr_map'
+ Type: SHT_LLVM_BB_ADDR_MAP
+ Entries:
+ - Version: 0
+ Feature: 0x1
+ BBRanges:
+ - BaseAddress: 0x20
+
+# MISSING-FEC: --- !ELF
+# MISSING-FEC-NEXT: FileHeader:
+# MISSING-FEC-NEXT: Class: ELFCLASS64
+# MISSING-FEC-NEXT: Data: ELFDATA2LSB
+# MISSING-FEC-NEXT: Type: ET_EXEC
+# MISSING-FEC-NEXT: Sections:
+# MISSING-FEC-NEXT: - Name: .llvm_bb_addr_map
+# MISSING-FEC-NEXT: Type: SHT_LLVM_BB_ADDR_MAP
+# MISSING-FEC-NEXT: Content: '{{([[:xdigit:]]+)}}'{{$}}
+
diff --git a/llvm/test/tools/yaml2obj/ELF/bb-addr-map-pgo-analysis-map.yaml b/llvm/test/tools/yaml2obj/ELF/bb-addr-map-pgo-analysis-map.yaml
new file mode 100644
index 0000000..4dfaf60
--- /dev/null
+++ b/llvm/test/tools/yaml2obj/ELF/bb-addr-map-pgo-analysis-map.yaml
@@ -0,0 +1,83 @@
+## Check how yaml2obj produces PGO Analysis Map in .llvm_bb_addr_map section.
+
+# RUN: yaml2obj --docnum=1 %s -o %t1
+# RUN: llvm-readobj --sections --section-data %t1 | FileCheck %s
+
+# Case 4: Specify Entries.
+# CHECK: Name: .llvm_bb_addr_map (1)
+# CHECK: SectionData (
+# CHECK-NEXT: 0000: 02072000 00000000 0000010B 010203E8
+# CHECK-NEXT: 0010: 07E80702 0CEEDDBB F70E0D91 A2C48801
+# CHECK-NEXT: )
+
+# Case 7: Not including a field which is enabled in feature doesn't emit value
+# CHECK: Name: .llvm_bb_addr_map (1)
+# CHECK: SectionData (
+# CHECK-NEXT: 0000: 02012000 00000000 0000020D 010203 |
+# CHECK-NEXT: )
+
+--- !ELF
+FileHeader:
+ Class: ELFCLASS64
+ Data: ELFDATA2LSB
+ Type: ET_EXEC
+Sections:
+
+## Test the following cases:
+
+## 1) We can produce an .llvm_bb_addr_map section from a description with
+## Entries and PGO Analysis data.
+ - Name: '.llvm_bb_addr_map (1)'
+ Type: SHT_LLVM_BB_ADDR_MAP
+ Entries:
+ - Version: 2
+ Feature: 0x7
+ BBRanges:
+ - BaseAddress: 0x0000000000000020
+ BBEntries:
+ - ID: 11
+ AddressOffset: 0x00000001
+ Size: 0x00000002
+ Metadata: 0x00000003
+ PGOAnalyses:
+ - FuncEntryCount: 1000
+ PGOBBEntries:
+ - BBFreq: 1000
+ Successors:
+ - ID: 12
+ BrProb: 0xeeeeeeee
+ - ID: 13
+ BrProb: 0x11111111
+
+## 2) According to feature we have FuncEntryCount but none is provided in yaml
+ - Name: '.llvm_bb_addr_map (2)'
+ Type: SHT_LLVM_BB_ADDR_MAP
+ Entries:
+ - Version: 2
+ Feature: 0x1
+ BBRanges:
+ - BaseAddress: 0x0000000000000020
+ NumBlocks: 2
+ BBEntries:
+ - ID: 13
+ AddressOffset: 0x00000001
+ Size: 0x00000002
+ Metadata: 0x00000003
+
+## Check that yaml2obj generates a warning when we use unsupported feature.
+# RUN: yaml2obj --docnum=2 %s 2>&1 | FileCheck %s --check-prefix=INVALID-FEATURE
+# INVALID-FEATURE: warning: invalid encoding for BBAddrMap::Features: 0xff
+
+--- !ELF
+FileHeader:
+ Class: ELFCLASS64
+ Data: ELFDATA2LSB
+ Type: ET_EXEC
+Sections:
+ - Name: '.llvm_bb_addr_map'
+ Type: SHT_LLVM_BB_ADDR_MAP
+ Entries:
+ - Version: 2
+## Specify unsupported feature
+ Feature: 0xFF
+
diff --git a/llvm/tools/llvm-dwarfdump/llvm-dwarfdump.cpp b/llvm/tools/llvm-dwarfdump/llvm-dwarfdump.cpp
index 8cdd84b..2b438a8 100644
--- a/llvm/tools/llvm-dwarfdump/llvm-dwarfdump.cpp
+++ b/llvm/tools/llvm-dwarfdump/llvm-dwarfdump.cpp
@@ -863,7 +863,7 @@ int main(int argc, char **argv) {
if (DumpAll)
DumpType = DIDT_All;
if (DumpType == DIDT_Null) {
- if (Verbose)
+ if (Verbose || Verify)
DumpType = DIDT_All;
else
DumpType = DIDT_DebugInfo;
diff --git a/llvm/tools/llvm-readobj/ELFDumper.cpp b/llvm/tools/llvm-readobj/ELFDumper.cpp
index 8e68f08..50ea63e 100644
--- a/llvm/tools/llvm-readobj/ELFDumper.cpp
+++ b/llvm/tools/llvm-readobj/ELFDumper.cpp
@@ -1622,8 +1622,8 @@ const EnumEntry<unsigned> ElfHeaderMipsFlags[] = {
ENUM_ENT(EF_AMDGPU_MACH_AMDGCN_GFX1200, "gfx1200"), \
ENUM_ENT(EF_AMDGPU_MACH_AMDGCN_GFX1201, "gfx1201"), \
ENUM_ENT(EF_AMDGPU_MACH_AMDGCN_GFX9_GENERIC, "gfx9-generic"), \
- ENUM_ENT(EF_AMDGPU_MACH_AMDGCN_GFX10_1_GENERIC, "gfx10.1-generic"), \
- ENUM_ENT(EF_AMDGPU_MACH_AMDGCN_GFX10_3_GENERIC, "gfx10.3-generic"), \
+ ENUM_ENT(EF_AMDGPU_MACH_AMDGCN_GFX10_1_GENERIC, "gfx10-1-generic"), \
+ ENUM_ENT(EF_AMDGPU_MACH_AMDGCN_GFX10_3_GENERIC, "gfx10-3-generic"), \
ENUM_ENT(EF_AMDGPU_MACH_AMDGCN_GFX11_GENERIC, "gfx11-generic")
// clang-format on
diff --git a/llvm/tools/obj2yaml/elf2yaml.cpp b/llvm/tools/obj2yaml/elf2yaml.cpp
index 38a69f7..6b9af90 100644
--- a/llvm/tools/obj2yaml/elf2yaml.cpp
+++ b/llvm/tools/obj2yaml/elf2yaml.cpp
@@ -889,6 +889,8 @@ ELFDumper<ELFT>::dumpBBAddrMapSection(const Elf_Shdr *Shdr) {
DataExtractor Data(Content, Obj.isLE(), ELFT::Is64Bits ? 8 : 4);
std::vector<ELFYAML::BBAddrMapEntry> Entries;
+ bool HasAnyPGOAnalysisMapEntry = false;
+ std::vector<ELFYAML::PGOAnalysisMapEntry> PGOAnalyses;
DataExtractor::Cursor Cur(0);
uint8_t Version = 0;
uint8_t Feature = 0;
@@ -905,6 +907,7 @@ ELFDumper<ELFT>::dumpBBAddrMapSection(const Elf_Shdr *Shdr) {
}
uint64_t NumBBRanges = 1;
uint64_t NumBlocks = 0;
+ uint32_t TotalNumBlocks = 0;
auto FeatureOrErr = llvm::object::BBAddrMap::Features::decode(Feature);
if (!FeatureOrErr)
return FeatureOrErr.takeError();
@@ -934,10 +937,42 @@ ELFDumper<ELFT>::dumpBBAddrMapSection(const Elf_Shdr *Shdr) {
uint64_t Metadata = Data.getULEB128(Cur);
BBEntries.push_back({ID, Offset, Size, Metadata});
}
+ TotalNumBlocks += BBEntries.size();
BBRanges.push_back({BaseAddress, /*NumBlocks=*/{}, BBEntries});
}
Entries.push_back(
{Version, Feature, /*NumBBRanges=*/{}, std::move(BBRanges)});
+
+ ELFYAML::PGOAnalysisMapEntry &PGOAnalysis = PGOAnalyses.emplace_back();
+ if (FeatureOrErr->hasPGOAnalysis()) {
+ HasAnyPGOAnalysisMapEntry = true;
+
+ if (FeatureOrErr->FuncEntryCount)
+ PGOAnalysis.FuncEntryCount = Data.getULEB128(Cur);
+
+ if (FeatureOrErr->hasPGOAnalysisBBData()) {
+ auto &PGOBBEntries = PGOAnalysis.PGOBBEntries.emplace();
+ for (uint64_t BlockIndex = 0; Cur && BlockIndex < TotalNumBlocks;
+ ++BlockIndex) {
+ auto &PGOBBEntry = PGOBBEntries.emplace_back();
+ if (FeatureOrErr->BBFreq) {
+ PGOBBEntry.BBFreq = Data.getULEB128(Cur);
+ if (!Cur)
+ break;
+ }
+
+ if (FeatureOrErr->BrProb) {
+ auto &SuccEntries = PGOBBEntry.Successors.emplace();
+ uint64_t SuccCount = Data.getULEB128(Cur);
+ for (uint64_t SuccIdx = 0; Cur && SuccIdx < SuccCount; ++SuccIdx) {
+ uint32_t ID = Data.getULEB128(Cur);
+ uint32_t BrProb = Data.getULEB128(Cur);
+ SuccEntries.push_back({ID, BrProb});
+ }
+ }
+ }
+ }
+ }
}
if (!Cur) {
@@ -946,6 +981,8 @@ ELFDumper<ELFT>::dumpBBAddrMapSection(const Elf_Shdr *Shdr) {
S->Content = yaml::BinaryRef(Content);
} else {
S->Entries = std::move(Entries);
+ if (HasAnyPGOAnalysisMapEntry)
+ S->PGOAnalyses = std::move(PGOAnalyses);
}
return S.release();
diff --git a/llvm/unittests/ADT/BitVectorTest.cpp b/llvm/unittests/ADT/BitVectorTest.cpp
index e00e11e..6a4780c 100644
--- a/llvm/unittests/ADT/BitVectorTest.cpp
+++ b/llvm/unittests/ADT/BitVectorTest.cpp
@@ -1143,6 +1143,9 @@ TYPED_TEST(BitVectorTest, EmptyVectorGetData) {
}
TYPED_TEST(BitVectorTest, Iterators) {
+ TypeParam Singleton(1, true);
+ EXPECT_EQ(std::next(Singleton.set_bits_begin()), Singleton.set_bits_end());
+
TypeParam Filled(10, true);
EXPECT_NE(Filled.set_bits_begin(), Filled.set_bits_end());
unsigned Counter = 0;
diff --git a/llvm/unittests/CodeGen/LowLevelTypeTest.cpp b/llvm/unittests/CodeGen/LowLevelTypeTest.cpp
index d13cfee..cb34802 100644
--- a/llvm/unittests/CodeGen/LowLevelTypeTest.cpp
+++ b/llvm/unittests/CodeGen/LowLevelTypeTest.cpp
@@ -259,6 +259,7 @@ TEST(LowLevelTypeTest, Pointer) {
// Test kind.
ASSERT_TRUE(Ty.isValid());
ASSERT_TRUE(Ty.isPointer());
+ ASSERT_TRUE(Ty.isPointerOrPointerVector());
ASSERT_FALSE(Ty.isScalar());
ASSERT_FALSE(Ty.isVector());
@@ -266,6 +267,8 @@ TEST(LowLevelTypeTest, Pointer) {
ASSERT_TRUE(VTy.isValid());
ASSERT_TRUE(VTy.isVector());
ASSERT_TRUE(VTy.getElementType().isPointer());
+ ASSERT_TRUE(VTy.isPointerVector());
+ ASSERT_TRUE(VTy.isPointerOrPointerVector());
EXPECT_EQ(Ty, VTy.getElementType());
EXPECT_EQ(Ty.getSizeInBits(), VTy.getScalarSizeInBits());
diff --git a/llvm/unittests/ProfileData/CoverageMappingTest.cpp b/llvm/unittests/ProfileData/CoverageMappingTest.cpp
index 2849781..425b3d1 100644
--- a/llvm/unittests/ProfileData/CoverageMappingTest.cpp
+++ b/llvm/unittests/ProfileData/CoverageMappingTest.cpp
@@ -192,23 +192,21 @@ struct CoverageMappingTest : ::testing::TestWithParam<std::tuple<bool, bool>> {
addCMR(Counter::getZero(), File, LS, CS, LE, CE, true);
}
- void addMCDCDecisionCMR(unsigned Mask, unsigned NC, StringRef File,
+ void addMCDCDecisionCMR(unsigned Mask, uint16_t NC, StringRef File,
unsigned LS, unsigned CS, unsigned LE, unsigned CE) {
auto &Regions = InputFunctions.back().Regions;
unsigned FileID = getFileIndexForFunction(File);
Regions.push_back(CounterMappingRegion::makeDecisionRegion(
- CounterMappingRegion::MCDCParameters{Mask, NC}, FileID, LS, CS, LE,
- CE));
+ mcdc::DecisionParameters{Mask, NC}, FileID, LS, CS, LE, CE));
}
- void addMCDCBranchCMR(Counter C1, Counter C2, unsigned ID, unsigned TrueID,
- unsigned FalseID, StringRef File, unsigned LS,
+ void addMCDCBranchCMR(Counter C1, Counter C2, mcdc::ConditionID ID,
+ mcdc::ConditionIDs Conds, StringRef File, unsigned LS,
unsigned CS, unsigned LE, unsigned CE) {
auto &Regions = InputFunctions.back().Regions;
unsigned FileID = getFileIndexForFunction(File);
Regions.push_back(CounterMappingRegion::makeBranchRegion(
- C1, C2, CounterMappingRegion::MCDCParameters{0, 0, ID, TrueID, FalseID},
- FileID, LS, CS, LE, CE));
+ C1, C2, FileID, LS, CS, LE, CE, mcdc::BranchParameters{ID, Conds}));
}
void addExpansionCMR(StringRef File, StringRef ExpandedFile, unsigned LS,
@@ -874,9 +872,9 @@ TEST_P(CoverageMappingTest, non_code_region_bitmask) {
addCMR(Counter::getCounter(3), "file", 1, 1, 5, 5);
addMCDCDecisionCMR(0, 2, "file", 7, 1, 7, 6);
- addMCDCBranchCMR(Counter::getCounter(0), Counter::getCounter(1), 1, 2, 0,
+ addMCDCBranchCMR(Counter::getCounter(0), Counter::getCounter(1), 0, {-1, 1},
"file", 7, 2, 7, 3);
- addMCDCBranchCMR(Counter::getCounter(2), Counter::getCounter(3), 2, 0, 0,
+ addMCDCBranchCMR(Counter::getCounter(2), Counter::getCounter(3), 1, {-1, -1},
"file", 7, 4, 7, 5);
EXPECT_THAT_ERROR(loadCoverageMapping(), Succeeded());
@@ -902,11 +900,11 @@ TEST_P(CoverageMappingTest, decision_before_expansion) {
addExpansionCMR("foo", "B", 4, 19, 4, 20);
addCMR(Counter::getCounter(0), "A", 1, 14, 1, 17);
addCMR(Counter::getCounter(0), "A", 1, 14, 1, 17);
- addMCDCBranchCMR(Counter::getCounter(0), Counter::getCounter(1), 1, 2, 0, "A",
- 1, 14, 1, 17);
+ addMCDCBranchCMR(Counter::getCounter(0), Counter::getCounter(1), 0, {-1, 1},
+ "A", 1, 14, 1, 17);
addCMR(Counter::getCounter(1), "B", 1, 14, 1, 17);
- addMCDCBranchCMR(Counter::getCounter(1), Counter::getCounter(2), 2, 0, 0, "B",
- 1, 14, 1, 17);
+ addMCDCBranchCMR(Counter::getCounter(1), Counter::getCounter(2), 1, {-1, -1},
+ "B", 1, 14, 1, 17);
// InputFunctionCoverageData::Regions is rewritten after the write.
auto InputRegions = InputFunctions.back().Regions;
diff --git a/llvm/unittests/Support/CMakeLists.txt b/llvm/unittests/Support/CMakeLists.txt
index df35a7b..15a1262 100644
--- a/llvm/unittests/Support/CMakeLists.txt
+++ b/llvm/unittests/Support/CMakeLists.txt
@@ -38,6 +38,7 @@ add_llvm_unittest(SupportTests
ErrnoTest.cpp
ErrorOrTest.cpp
ErrorTest.cpp
+ ExponentialBackoffTest.cpp
ExtensibleRTTITest.cpp
FileCollectorTest.cpp
FileOutputBufferTest.cpp
diff --git a/llvm/unittests/Support/ExponentialBackoffTest.cpp b/llvm/unittests/Support/ExponentialBackoffTest.cpp
new file mode 100644
index 0000000..2576049
--- /dev/null
+++ b/llvm/unittests/Support/ExponentialBackoffTest.cpp
@@ -0,0 +1,31 @@
+//===- unittests/ExponentialBackoffTest.cpp -------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/Support/ExponentialBackoff.h"
+#include "gtest/gtest.h"
+#include <chrono>
+
+using namespace llvm;
+using namespace std::chrono_literals;
+
+namespace {
+
+TEST(ExponentialBackoffTest, Timeout) {
+ auto Start = std::chrono::steady_clock::now();
+ // Use short enough times that this test runs quickly.
+ ExponentialBackoff Backoff(100ms, 1ms, 10ms);
+ do {
+ } while (Backoff.waitForNextAttempt());
+ auto Duration = std::chrono::steady_clock::now() - Start;
+ EXPECT_GE(Duration, 100ms);
+}
+
+// Testing individual wait duration is omitted as those tests would be
+// non-deterministic.
+
+} // end anonymous namespace
diff --git a/llvm/unittests/Target/SPIRV/CMakeLists.txt b/llvm/unittests/Target/SPIRV/CMakeLists.txt
index 326a74b..83ae215 100644
--- a/llvm/unittests/Target/SPIRV/CMakeLists.txt
+++ b/llvm/unittests/Target/SPIRV/CMakeLists.txt
@@ -4,6 +4,7 @@ include_directories(
)
set(LLVM_LINK_COMPONENTS
+ Analysis
AsmParser
Core
SPIRVCodeGen
diff --git a/llvm/unittests/TargetParser/TargetParserTest.cpp b/llvm/unittests/TargetParser/TargetParserTest.cpp
index e89fc68..c6ee39f 100644
--- a/llvm/unittests/TargetParser/TargetParserTest.cpp
+++ b/llvm/unittests/TargetParser/TargetParserTest.cpp
@@ -976,6 +976,14 @@ TEST(TargetParserTest, ARMparseArchVersion) {
EXPECT_EQ(5u, ARM::parseArchVersion(ARMArch[i]));
}
+TEST(TargetParserTest, ARMparseArchMinorVersion) {
+ for (unsigned i = 0; i < std::size(ARMArch); i++)
+ if (((std::string)ARMArch[i]).find(".") == 5)
+ EXPECT_EQ((ARMArch[i][6] - 48u), ARM::parseArchMinorVersion(ARMArch[i]));
+ else
+ EXPECT_EQ(0u, ARM::parseArchMinorVersion(ARMArch[i]));
+}
+
TEST(TargetParserTest, getARMCPUForArch) {
// Platform specific defaults.
{
diff --git a/llvm/utils/TableGen/AsmMatcherEmitter.cpp b/llvm/utils/TableGen/AsmMatcherEmitter.cpp
index 9065885..3fcf2d8 100644
--- a/llvm/utils/TableGen/AsmMatcherEmitter.cpp
+++ b/llvm/utils/TableGen/AsmMatcherEmitter.cpp
@@ -242,11 +242,10 @@ public:
if (!isRegisterClass() || !RHS.isRegisterClass())
return false;
- RegisterSet Tmp;
- std::insert_iterator<RegisterSet> II(Tmp, Tmp.begin());
+ std::vector<Record *> Tmp;
std::set_intersection(Registers.begin(), Registers.end(),
- RHS.Registers.begin(), RHS.Registers.end(), II,
- LessRecordByID());
+ RHS.Registers.begin(), RHS.Registers.end(),
+ std::back_inserter(Tmp), LessRecordByID());
return !Tmp.empty();
}
@@ -1270,16 +1269,15 @@ void AsmMatcherInfo::buildRegisterClasses(
}
RegisterSet Tmp;
- std::swap(Tmp, ContainingSet);
- std::insert_iterator<RegisterSet> II(ContainingSet,
- ContainingSet.begin());
- std::set_intersection(Tmp.begin(), Tmp.end(), RS.begin(), RS.end(), II,
- LessRecordByID());
+ std::set_intersection(ContainingSet.begin(), ContainingSet.end(),
+ RS.begin(), RS.end(),
+ std::inserter(Tmp, Tmp.begin()), LessRecordByID());
+ ContainingSet = std::move(Tmp);
}
if (!ContainingSet.empty()) {
RegisterSets.insert(ContainingSet);
- RegisterMap.insert(std::make_pair(CGR.TheDef, ContainingSet));
+ RegisterMap.insert(std::pair(CGR.TheDef, ContainingSet));
}
}
@@ -1300,7 +1298,7 @@ void AsmMatcherInfo::buildRegisterClasses(
CI->DiagnosticType = "";
CI->IsOptional = false;
CI->DefaultMethod = ""; // unused
- RegisterSetClasses.insert(std::make_pair(RS, CI));
+ RegisterSetClasses.insert(std::pair(RS, CI));
++Index;
}
@@ -1342,7 +1340,7 @@ void AsmMatcherInfo::buildRegisterClasses(
if (!CI->DiagnosticString.empty() && CI->DiagnosticType.empty())
CI->DiagnosticType = RC.getName();
- RegisterClassClasses.insert(std::make_pair(Def, CI));
+ RegisterClassClasses.insert(std::pair(Def, CI));
}
// Populate the map for individual registers.
@@ -2195,7 +2193,7 @@ emitConvertFuncs(CodeGenTarget &Target, StringRef ClassName,
ConversionRow.push_back(SrcOp2);
// Also create an 'enum' for this combination of tied operands.
- auto Key = std::make_tuple(TiedOp, SrcOp1, SrcOp2);
+ auto Key = std::tuple(TiedOp, SrcOp1, SrcOp2);
TiedOperandsEnumMap.emplace(Key, TiedTupleName);
break;
}
@@ -2344,9 +2342,9 @@ emitConvertFuncs(CodeGenTarget &Target, StringRef ClassName,
// For a tied operand, emit a reference to the TiedAsmOperandTable
// that contains the operand to copy, and the parsed operands to
// check for their tied constraints.
- auto Key = std::make_tuple((uint8_t)ConversionTable[Row][i + 1],
- (uint8_t)ConversionTable[Row][i + 2],
- (uint8_t)ConversionTable[Row][i + 3]);
+ auto Key = std::tuple((uint8_t)ConversionTable[Row][i + 1],
+ (uint8_t)ConversionTable[Row][i + 2],
+ (uint8_t)ConversionTable[Row][i + 3]);
auto TiedOpndEnum = TiedOperandsEnumMap.find(Key);
assert(TiedOpndEnum != TiedOperandsEnumMap.end() &&
"No record for tied operand pair");
@@ -2814,7 +2812,7 @@ emitMnemonicAliasVariant(raw_ostream &OS, const AsmMatcherInfo &Info,
MatchCode += "return;";
- Cases.push_back(std::make_pair(AliasEntry.first, MatchCode));
+ Cases.push_back(std::pair(AliasEntry.first, MatchCode));
}
StringMatcher("Mnemonic", Cases, OS).Emit(Indent);
}
@@ -2981,7 +2979,7 @@ emitCustomOperandParsing(raw_ostream &OS, CodeGenTarget &Target,
"std::end(OperandMatchTable),\n";
OS << " Mnemonic, LessOpcodeOperand());\n\n";
} else {
- OS << " auto MnemonicRange = std::make_pair(std::begin(OperandMatchTable),"
+ OS << " auto MnemonicRange = std::pair(std::begin(OperandMatchTable),"
" std::end(OperandMatchTable));\n";
OS << " if (!Mnemonic.empty())\n";
OS << " MnemonicRange =\n";
@@ -3156,7 +3154,7 @@ static void emitMnemonicChecker(raw_ostream &OS, CodeGenTarget &Target,
OS << " auto MnemonicRange = "
"std::equal_range(Start, End, Mnemonic, LessOpcode());\n\n";
} else {
- OS << " auto MnemonicRange = std::make_pair(Start, End);\n";
+ OS << " auto MnemonicRange = std::pair(Start, End);\n";
OS << " unsigned SIndex = Mnemonic.empty() ? 0 : 1;\n";
OS << " if (!Mnemonic.empty())\n";
OS << " MnemonicRange = "
@@ -3631,7 +3629,7 @@ void AsmMatcherEmitter::run(raw_ostream &OS) {
OS << " auto MnemonicRange = "
"std::equal_range(Start, End, Mnemonic, LessOpcode());\n\n";
} else {
- OS << " auto MnemonicRange = std::make_pair(Start, End);\n";
+ OS << " auto MnemonicRange = std::pair(Start, End);\n";
OS << " unsigned SIndex = Mnemonic.empty() ? 0 : 1;\n";
OS << " if (!Mnemonic.empty())\n";
OS << " MnemonicRange = "
diff --git a/llvm/utils/TableGen/AsmWriterEmitter.cpp b/llvm/utils/TableGen/AsmWriterEmitter.cpp
index c05991f..a27061e 100644
--- a/llvm/utils/TableGen/AsmWriterEmitter.cpp
+++ b/llvm/utils/TableGen/AsmWriterEmitter.cpp
@@ -144,13 +144,12 @@ static void EmitInstructions(std::vector<AsmWriterInst> &Insts, raw_ostream &O,
O << " switch (MI->getOpcode()) {\n";
O << " default: llvm_unreachable(\"Unexpected opcode.\");\n";
std::vector<std::pair<std::string, AsmWriterOperand>> OpsToPrint;
- OpsToPrint.push_back(
- std::make_pair(FirstInst.CGI->Namespace.str() +
- "::" + FirstInst.CGI->TheDef->getName().str(),
- FirstInst.Operands[i]));
+ OpsToPrint.push_back(std::pair(FirstInst.CGI->Namespace.str() + "::" +
+ FirstInst.CGI->TheDef->getName().str(),
+ FirstInst.Operands[i]));
for (const AsmWriterInst &AWI : SimilarInsts) {
- OpsToPrint.push_back(std::make_pair(
+ OpsToPrint.push_back(std::pair(
AWI.CGI->Namespace.str() + "::" + AWI.CGI->TheDef->getName().str(),
AWI.Operands[i]));
}
@@ -722,7 +721,7 @@ public:
void addOperand(StringRef Op, int OpIdx, int PrintMethodIdx = -1) {
assert(OpIdx >= 0 && OpIdx < 0xFE && "Idx out of range");
assert(PrintMethodIdx >= -1 && PrintMethodIdx < 0xFF && "Idx out of range");
- OpMap[Op] = std::make_pair(OpIdx, PrintMethodIdx);
+ OpMap[Op] = std::pair(OpIdx, PrintMethodIdx);
}
unsigned getNumMIOps() { return NumMIOps; }
@@ -753,7 +752,7 @@ public:
Next = I;
}
- return std::make_pair(StringRef(Start, I - Start), Next);
+ return std::pair(StringRef(Start, I - Start), Next);
}
std::string formatAliasString(uint32_t &UnescapedSize) {
@@ -858,7 +857,7 @@ void AsmWriterEmitter::EmitPrintAliasInstruction(raw_ostream &O) {
const DagInit *DI = R->getValueAsDag("ResultInst");
AliasMap[getQualifiedName(DI->getOperatorAsDef(R->getLoc()))].insert(
- std::make_pair(CodeGenInstAlias(R, Target), Priority));
+ std::pair(CodeGenInstAlias(R, Target), Priority));
}
// A map of which conditions need to be met for each instruction operand
diff --git a/llvm/utils/TableGen/CTagsEmitter.cpp b/llvm/utils/TableGen/CTagsEmitter.cpp
index bda18936..e21dc36 100644
--- a/llvm/utils/TableGen/CTagsEmitter.cpp
+++ b/llvm/utils/TableGen/CTagsEmitter.cpp
@@ -40,8 +40,8 @@ public:
Line = LineAndColumn.first;
}
int operator<(const Tag &B) const {
- return std::make_tuple(Id, BufferIdentifier, Line) <
- std::make_tuple(B.Id, B.BufferIdentifier, B.Line);
+ return std::tuple(Id, BufferIdentifier, Line) <
+ std::tuple(B.Id, B.BufferIdentifier, B.Line);
}
void emit(raw_ostream &OS) const {
OS << Id << "\t" << BufferIdentifier << "\t" << Line << "\n";
diff --git a/llvm/utils/TableGen/CodeEmitterGen.cpp b/llvm/utils/TableGen/CodeEmitterGen.cpp
index d7020d1..d80761d 100644
--- a/llvm/utils/TableGen/CodeEmitterGen.cpp
+++ b/llvm/utils/TableGen/CodeEmitterGen.cpp
@@ -300,11 +300,11 @@ CodeEmitterGen::getInstructionCases(Record *R, CodeGenTarget &Target) {
append(" }\n");
}
append(" }\n");
- return std::make_pair(std::move(Case), std::move(BitOffsetCase));
+ return std::pair(std::move(Case), std::move(BitOffsetCase));
}
}
addInstructionCasesForEncoding(R, R, Target, Case, BitOffsetCase);
- return std::make_pair(std::move(Case), std::move(BitOffsetCase));
+ return std::pair(std::move(Case), std::move(BitOffsetCase));
}
void CodeEmitterGen::addInstructionCasesForEncoding(
diff --git a/llvm/utils/TableGen/CodeGenDAGPatterns.cpp b/llvm/utils/TableGen/CodeGenDAGPatterns.cpp
index a9046e0..076d042 100644
--- a/llvm/utils/TableGen/CodeGenDAGPatterns.cpp
+++ b/llvm/utils/TableGen/CodeGenDAGPatterns.cpp
@@ -41,7 +41,6 @@ static inline bool isIntegerOrPtr(MVT VT) {
static inline bool isFloatingPoint(MVT VT) { return VT.isFloatingPoint(); }
static inline bool isVector(MVT VT) { return VT.isVector(); }
static inline bool isScalar(MVT VT) { return !VT.isVector(); }
-static inline bool isScalarInteger(MVT VT) { return VT.isScalarInteger(); }
template <typename Predicate>
static bool berase_if(MachineValueTypeSet &S, Predicate P) {
@@ -262,85 +261,91 @@ LLVM_DUMP_METHOD
void TypeSetByHwMode::dump() const { dbgs() << *this << '\n'; }
bool TypeSetByHwMode::intersect(SetType &Out, const SetType &In) {
- bool OutP = Out.count(MVT::iPTR), InP = In.count(MVT::iPTR);
- // Complement of In.
- auto CompIn = [&In](MVT T) -> bool { return !In.count(T); };
-
- if (OutP == InP)
- return berase_if(Out, CompIn);
-
- // Compute the intersection of scalars separately to account for only
- // one set containing iPTR.
- // The intersection of iPTR with a set of integer scalar types that does not
- // include iPTR will result in the most specific scalar type:
- // - iPTR is more specific than any set with two elements or more
- // - iPTR is less specific than any single integer scalar type.
- // For example
- // { iPTR } * { i32 } -> { i32 }
- // { iPTR } * { i32 i64 } -> { iPTR }
- // and
- // { iPTR i32 } * { i32 } -> { i32 }
- // { iPTR i32 } * { i32 i64 } -> { i32 i64 }
- // { iPTR i32 } * { i32 i64 i128 } -> { iPTR i32 }
-
- // Let In' = elements only in In, Out' = elements only in Out, and
- // IO = elements common to both. Normally IO would be returned as the result
- // of the intersection, but we need to account for iPTR being a "wildcard" of
- // sorts. Since elements in IO are those that match both sets exactly, they
- // will all belong to the output. If any of the "leftovers" (i.e. In' or
- // Out') contain iPTR, it means that the other set doesn't have it, but it
- // could have (1) a more specific type, or (2) a set of types that is less
- // specific. The "leftovers" from the other set is what we want to examine
- // more closely.
-
- auto subtract = [](const SetType &A, const SetType &B) {
- SetType Diff = A;
- berase_if(Diff, [&B](MVT T) { return B.count(T); });
- return Diff;
- };
-
- if (InP) {
- SetType OutOnly = subtract(Out, In);
- if (OutOnly.empty()) {
- // This means that Out \subset In, so no change to Out.
- return false;
- }
- unsigned NumI = llvm::count_if(OutOnly, isScalarInteger);
- if (NumI == 1 && OutOnly.size() == 1) {
- // There is only one element in Out', and it happens to be a scalar
- // integer that should be kept as a match for iPTR in In.
- return false;
+ auto IntersectP = [&](std::optional<MVT> WildVT, function_ref<bool(MVT)> P) {
+ // Complement of In within this partition.
+ auto CompIn = [&](MVT T) -> bool { return !In.count(T) && P(T); };
+
+ if (!WildVT)
+ return berase_if(Out, CompIn);
+
+ bool OutW = Out.count(*WildVT), InW = In.count(*WildVT);
+ if (OutW == InW)
+ return berase_if(Out, CompIn);
+
+ // Compute the intersection of scalars separately to account for only one
+ // set containing WildVT.
+ // The intersection of WildVT with a set of corresponding types that does
+ // not include WildVT will result in the most specific type:
+ // - WildVT is more specific than any set with two elements or more
+ // - WildVT is less specific than any single type.
+ // For example, for iPTR and scalar integer types
+ // { iPTR } * { i32 } -> { i32 }
+ // { iPTR } * { i32 i64 } -> { iPTR }
+ // and
+ // { iPTR i32 } * { i32 } -> { i32 }
+ // { iPTR i32 } * { i32 i64 } -> { i32 i64 }
+ // { iPTR i32 } * { i32 i64 i128 } -> { iPTR i32 }
+
+ // Looking at just this partition, let In' = elements only in In,
+ // Out' = elements only in Out, and IO = elements common to both. Normally
+ // IO would be returned as the result of the intersection, but we need to
+ // account for WildVT being a "wildcard" of sorts. Since elements in IO are
+ // those that match both sets exactly, they will all belong to the output.
+ // If any of the "leftovers" (i.e. In' or Out') contain WildVT, it means
+ // that the other set doesn't have it, but it could have (1) a more
+ // specific type, or (2) a set of types that is less specific. The
+ // "leftovers" from the other set is what we want to examine more closely.
+
+ auto Leftovers = [&](const SetType &A, const SetType &B) {
+ SetType Diff = A;
+ berase_if(Diff, [&](MVT T) { return B.count(T) || !P(T); });
+ return Diff;
+ };
+
+ if (InW) {
+ SetType OutLeftovers = Leftovers(Out, In);
+ if (OutLeftovers.size() < 2) {
+ // WildVT not added to Out. Keep the possible single leftover.
+ return false;
+ }
+ // WildVT replaces the leftovers.
+ berase_if(Out, CompIn);
+ Out.insert(*WildVT);
+ return true;
}
- berase_if(Out, CompIn);
- if (NumI == 1) {
- // Replace the iPTR with the leftover scalar integer.
- Out.insert(*llvm::find_if(OutOnly, isScalarInteger));
- } else if (NumI > 1) {
- Out.insert(MVT::iPTR);
+
+ // OutW == true
+ SetType InLeftovers = Leftovers(In, Out);
+ unsigned SizeOut = Out.size();
+ berase_if(Out, CompIn); // This will remove at least the WildVT.
+ if (InLeftovers.size() < 2) {
+ // WildVT deleted from Out. Add back the possible single leftover.
+ Out.insert(InLeftovers);
+ return true;
}
- return true;
- }
- // OutP == true
- SetType InOnly = subtract(In, Out);
- unsigned SizeOut = Out.size();
- berase_if(Out, CompIn); // This will remove at least the iPTR.
- unsigned NumI = llvm::count_if(InOnly, isScalarInteger);
- if (NumI == 0) {
- // iPTR deleted from Out.
- return true;
- }
- if (NumI == 1) {
- // Replace the iPTR with the leftover scalar integer.
- Out.insert(*llvm::find_if(InOnly, isScalarInteger));
- return true;
- }
+ // Keep the WildVT in Out.
+ Out.insert(*WildVT);
+ // If WildVT was the only element initially removed from Out, then Out
+ // has not changed.
+ return SizeOut != Out.size();
+ };
- // NumI > 1: Keep the iPTR in Out.
- Out.insert(MVT::iPTR);
- // If iPTR was the only element initially removed from Out, then Out
- // has not changed.
- return SizeOut != Out.size();
+ // Note: must be non-overlapping
+ using WildPartT = std::pair<MVT, std::function<bool(MVT)>>;
+ static const WildPartT WildParts[] = {
+ {MVT::iPTR, [](MVT T) { return T.isScalarInteger() || T == MVT::iPTR; }},
+ };
+
+ bool Changed = false;
+ for (const auto &I : WildParts)
+ Changed |= IntersectP(I.first, I.second);
+
+ Changed |= IntersectP(std::nullopt, [&](MVT T) {
+ return !any_of(WildParts, [=](const WildPartT &I) { return I.second(T); });
+ });
+
+ return Changed;
}
bool TypeSetByHwMode::validate() const {
@@ -530,24 +535,24 @@ bool TypeInfer::EnforceSmallerThan(TypeSetByHwMode &Small, TypeSetByHwMode &Big,
auto LT = [](MVT A, MVT B) -> bool {
// Always treat non-scalable MVTs as smaller than scalable MVTs for the
// purposes of ordering.
- auto ASize = std::make_tuple(A.isScalableVector(), A.getScalarSizeInBits(),
- A.getSizeInBits().getKnownMinValue());
- auto BSize = std::make_tuple(B.isScalableVector(), B.getScalarSizeInBits(),
- B.getSizeInBits().getKnownMinValue());
+ auto ASize = std::tuple(A.isScalableVector(), A.getScalarSizeInBits(),
+ A.getSizeInBits().getKnownMinValue());
+ auto BSize = std::tuple(B.isScalableVector(), B.getScalarSizeInBits(),
+ B.getSizeInBits().getKnownMinValue());
return ASize < BSize;
};
auto SameKindLE = [](MVT A, MVT B) -> bool {
// This function is used when removing elements: when a vector is compared
// to a non-vector or a scalable vector to any non-scalable MVT, it should
// return false (to avoid removal).
- if (std::make_tuple(A.isVector(), A.isScalableVector()) !=
- std::make_tuple(B.isVector(), B.isScalableVector()))
+ if (std::tuple(A.isVector(), A.isScalableVector()) !=
+ std::tuple(B.isVector(), B.isScalableVector()))
return false;
- return std::make_tuple(A.getScalarSizeInBits(),
- A.getSizeInBits().getKnownMinValue()) <=
- std::make_tuple(B.getScalarSizeInBits(),
- B.getSizeInBits().getKnownMinValue());
+ return std::tuple(A.getScalarSizeInBits(),
+ A.getSizeInBits().getKnownMinValue()) <=
+ std::tuple(B.getScalarSizeInBits(),
+ B.getSizeInBits().getKnownMinValue());
};
for (unsigned M : Modes) {
@@ -751,8 +756,8 @@ bool TypeInfer::EnforceSameNumElts(TypeSetByHwMode &V, TypeSetByHwMode &W) {
namespace {
struct TypeSizeComparator {
bool operator()(const TypeSize &LHS, const TypeSize &RHS) const {
- return std::make_tuple(LHS.isScalable(), LHS.getKnownMinValue()) <
- std::make_tuple(RHS.isScalable(), RHS.getKnownMinValue());
+ return std::tuple(LHS.isScalable(), LHS.getKnownMinValue()) <
+ std::tuple(RHS.isScalable(), RHS.getKnownMinValue());
}
};
} // end anonymous namespace
@@ -2988,7 +2993,7 @@ TreePatternNodePtr TreePattern::ParseTreePattern(Init *TheInit,
// Check that the ComplexPattern uses are consistent: "(MY_PAT $a, $b)"
// and "(MY_PAT $b, $a)" should not be allowed in the same pattern;
// neither should "(MY_PAT_1 $a, $b)" and "(MY_PAT_2 $a, $b)".
- auto OperandId = std::make_pair(Operator, i);
+ auto OperandId = std::pair(Operator, i);
auto PrevOp = ComplexPatternOperands.find(Child->getName());
if (PrevOp != ComplexPatternOperands.end()) {
if (PrevOp->getValue() != OperandId)
@@ -3197,7 +3202,7 @@ void CodeGenDAGPatterns::ParseNodeInfo() {
while (!Nodes.empty()) {
Record *R = Nodes.back();
- SDNodes.insert(std::make_pair(R, SDNodeInfo(R, CGH)));
+ SDNodes.insert(std::pair(R, SDNodeInfo(R, CGH)));
Nodes.pop_back();
}
@@ -3217,7 +3222,7 @@ void CodeGenDAGPatterns::ParseNodeTransforms() {
Record *SDNode = XFormNode->getValueAsDef("Opcode");
StringRef Code = XFormNode->getValueAsString("XFormFunction");
SDNodeXForms.insert(
- std::make_pair(XFormNode, NodeXForm(SDNode, std::string(Code))));
+ std::pair(XFormNode, NodeXForm(SDNode, std::string(Code))));
Xforms.pop_back();
}
@@ -3227,7 +3232,7 @@ void CodeGenDAGPatterns::ParseComplexPatterns() {
std::vector<Record *> AMs =
Records.getAllDerivedDefinitions("ComplexPattern");
while (!AMs.empty()) {
- ComplexPatterns.insert(std::make_pair(AMs.back(), AMs.back()));
+ ComplexPatterns.insert(std::pair(AMs.back(), AMs.back()));
AMs.pop_back();
}
}
@@ -3340,7 +3345,7 @@ void CodeGenDAGPatterns::ParseDefaultOperands() {
std::vector<std::pair<Init *, StringInit *>> Ops;
for (unsigned op = 0, e = DefaultInfo->getNumArgs(); op != e; ++op)
Ops.push_back(
- std::make_pair(DefaultInfo->getArg(op), DefaultInfo->getArgName(op)));
+ std::pair(DefaultInfo->getArg(op), DefaultInfo->getArgName(op)));
DagInit *DI = DagInit::get(SomeSDNode, nullptr, Ops);
// Create a TreePattern to parse this.
diff --git a/llvm/utils/TableGen/CodeGenHwModes.cpp b/llvm/utils/TableGen/CodeGenHwModes.cpp
index 7c08c75..fec74d2 100644
--- a/llvm/utils/TableGen/CodeGenHwModes.cpp
+++ b/llvm/utils/TableGen/CodeGenHwModes.cpp
@@ -52,7 +52,7 @@ HwModeSelect::HwModeSelect(Record *R, CodeGenHwModes &CGH) {
}
for (unsigned i = 0, e = Modes.size(); i != e; ++i) {
unsigned ModeId = CGH.getHwModeId(Modes[i]);
- Items.push_back(std::make_pair(ModeId, Objects[i]));
+ Items.push_back(std::pair(ModeId, Objects[i]));
}
}
@@ -71,11 +71,11 @@ CodeGenHwModes::CodeGenHwModes(RecordKeeper &RK) : Records(RK) {
if (R->getName() == DefaultModeName)
continue;
Modes.emplace_back(R);
- ModeIds.insert(std::make_pair(R, Modes.size()));
+ ModeIds.insert(std::pair(R, Modes.size()));
}
for (Record *R : Records.getAllDerivedDefinitions("HwModeSelect")) {
- auto P = ModeSelects.emplace(std::make_pair(R, HwModeSelect(R, *this)));
+ auto P = ModeSelects.emplace(std::pair(R, HwModeSelect(R, *this)));
assert(P.second);
(void)P;
}
diff --git a/llvm/utils/TableGen/CodeGenInstAlias.cpp b/llvm/utils/TableGen/CodeGenInstAlias.cpp
index 8634d45..d217059 100644
--- a/llvm/utils/TableGen/CodeGenInstAlias.cpp
+++ b/llvm/utils/TableGen/CodeGenInstAlias.cpp
@@ -227,7 +227,7 @@ CodeGenInstAlias::CodeGenInstAlias(Record *R, CodeGenTarget &T) : TheDef(R) {
InstOpRec->getValueAsDef("ParserMatchClass")
->getValueAsString("Name") != "Imm")) {
ResultOperands.push_back(ResOp);
- ResultInstOperandIndex.push_back(std::make_pair(i, -1));
+ ResultInstOperandIndex.push_back(std::pair(i, -1));
++AliasOpNo;
// Otherwise, we need to match each of the suboperands individually.
@@ -242,7 +242,7 @@ CodeGenInstAlias::CodeGenInstAlias(Record *R, CodeGenTarget &T) : TheDef(R) {
Result->getArgName(AliasOpNo)->getAsUnquotedString() + "." +
MIOI->getArgName(SubOp)->getAsUnquotedString(),
SubRec);
- ResultInstOperandIndex.push_back(std::make_pair(i, SubOp));
+ ResultInstOperandIndex.push_back(std::pair(i, SubOp));
}
++AliasOpNo;
}
@@ -260,7 +260,7 @@ CodeGenInstAlias::CodeGenInstAlias(Record *R, CodeGenTarget &T) : TheDef(R) {
if (tryAliasOpMatch(Result, AliasOpNo, SubRec, false, R->getLoc(), T,
ResOp)) {
ResultOperands.push_back(ResOp);
- ResultInstOperandIndex.push_back(std::make_pair(i, SubOp));
+ ResultInstOperandIndex.push_back(std::pair(i, SubOp));
++AliasOpNo;
} else {
PrintFatalError(
diff --git a/llvm/utils/TableGen/CodeGenInstruction.cpp b/llvm/utils/TableGen/CodeGenInstruction.cpp
index a569194..b00b95d 100644
--- a/llvm/utils/TableGen/CodeGenInstruction.cpp
+++ b/llvm/utils/TableGen/CodeGenInstruction.cpp
@@ -175,7 +175,7 @@ CGIOperandList::CGIOperandList(Record *R) : TheDef(R) {
}
OpInfo.SubOpNames[j] = SubArgName;
- SubOpAliases[SubArgName] = std::make_pair(i, j);
+ SubOpAliases[SubArgName] = std::pair(i, j);
}
} else if (!EncoderMethod.empty()) {
// If we have no explicit sub-op dag, but have an top-level encoder
@@ -276,7 +276,7 @@ CGIOperandList::ParseOperandName(StringRef Op, bool AllowWholeOp) {
Op + "'");
// Otherwise, return the operand.
- return std::make_pair(OpIdx, 0U);
+ return std::pair(OpIdx, 0U);
}
// Find the suboperand number involved.
@@ -289,13 +289,13 @@ CGIOperandList::ParseOperandName(StringRef Op, bool AllowWholeOp) {
// Find the operand with the right name.
for (unsigned i = 0, e = MIOpInfo->getNumArgs(); i != e; ++i)
if (MIOpInfo->getArgNameStr(i) == SubOpName)
- return std::make_pair(OpIdx, i);
+ return std::pair(OpIdx, i);
// Otherwise, didn't find it!
PrintFatalError(TheDef->getLoc(), TheDef->getName() +
": unknown suboperand name in '" + Op +
"'");
- return std::make_pair(0U, 0U);
+ return std::pair(0U, 0U);
}
static void ParseConstraint(StringRef CStr, CGIOperandList &Ops, Record *Rec) {
diff --git a/llvm/utils/TableGen/CodeGenInstruction.h b/llvm/utils/TableGen/CodeGenInstruction.h
index ca7b1e9..11a3acd 100644
--- a/llvm/utils/TableGen/CodeGenInstruction.h
+++ b/llvm/utils/TableGen/CodeGenInstruction.h
@@ -205,7 +205,7 @@ public:
for (unsigned i = 0;; ++i) {
assert(i < OperandList.size() && "Invalid flat operand #");
if (OperandList[i].MIOperandNo + OperandList[i].MINumOperands > Op)
- return std::make_pair(i, Op - OperandList[i].MIOperandNo);
+ return std::pair(i, Op - OperandList[i].MIOperandNo);
}
}
diff --git a/llvm/utils/TableGen/CodeGenRegisters.cpp b/llvm/utils/TableGen/CodeGenRegisters.cpp
index 25f3864..5890f0f 100644
--- a/llvm/utils/TableGen/CodeGenRegisters.cpp
+++ b/llvm/utils/TableGen/CodeGenRegisters.cpp
@@ -253,11 +253,6 @@ CodeGenRegister::RegUnitList RegUnitIterator::Sentinel;
} // end anonymous namespace
-// Return true of this unit appears in RegUnits.
-static bool hasRegUnit(CodeGenRegister::RegUnitList &RegUnits, unsigned Unit) {
- return RegUnits.test(Unit);
-}
-
// Inherit register units from subregisters.
// Return true if the RegUnits changed.
bool CodeGenRegister::inheritRegUnits(CodeGenRegBank &RegBank) {
@@ -286,13 +281,13 @@ CodeGenRegister::computeSubRegs(CodeGenRegBank &RegBank) {
CodeGenSubRegIndex *Idx = ExplicitSubRegIndices[i];
if (!SR->Artificial)
Idx->Artificial = false;
- if (!SubRegs.insert(std::make_pair(Idx, SR)).second)
+ if (!SubRegs.insert(std::pair(Idx, SR)).second)
PrintFatalError(TheDef->getLoc(), "SubRegIndex " + Idx->getName() +
" appears twice in Register " +
getName());
// Map explicit sub-registers first, so the names take precedence.
// The inherited sub-registers are mapped below.
- SubReg2Idx.insert(std::make_pair(SR, Idx));
+ SubReg2Idx.insert(std::pair(SR, Idx));
}
// Keep track of inherited subregs and how they can be reached.
@@ -332,7 +327,7 @@ CodeGenRegister::computeSubRegs(CodeGenRegBank &RegBank) {
if (SubRegs.count(Comp.second) || !Orphans.erase(SRI->second))
continue;
// We found a new name for the orphaned sub-register.
- SubRegs.insert(std::make_pair(Comp.second, SRI->second));
+ SubRegs.insert(std::pair(Comp.second, SRI->second));
Indices.push_back(Comp.second);
}
}
@@ -379,7 +374,7 @@ CodeGenRegister::computeSubRegs(CodeGenRegBank &RegBank) {
// Ensure that every sub-register has a unique name.
DenseMap<const CodeGenRegister *, CodeGenSubRegIndex *>::iterator Ins =
- SubReg2Idx.insert(std::make_pair(SubReg.second, SubReg.first)).first;
+ SubReg2Idx.insert(std::pair(SubReg.second, SubReg.first)).first;
if (Ins->second == SubReg.first)
continue;
// Trouble: Two different names for SubReg.second.
@@ -525,7 +520,7 @@ void CodeGenRegister::computeSecondarySubRegs(CodeGenRegBank &RegBank) {
// a sub-register with a concatenated sub-register index.
CodeGenSubRegIndex *Concat = RegBank.getConcatSubRegIndex(Parts);
std::pair<CodeGenSubRegIndex *, CodeGenRegister *> NewSubReg =
- std::make_pair(Concat, Cand);
+ std::pair(Concat, Cand);
if (!SubRegs.insert(NewSubReg).second)
continue;
@@ -533,7 +528,7 @@ void CodeGenRegister::computeSecondarySubRegs(CodeGenRegBank &RegBank) {
// We inserted a new subregister.
NewSubRegs.push_back(NewSubReg);
SubRegQueue.push(NewSubReg);
- SubReg2Idx.insert(std::make_pair(Cand, Concat));
+ SubReg2Idx.insert(std::pair(Cand, Concat));
}
}
@@ -1079,7 +1074,7 @@ CodeGenRegisterClass::getMatchingSubClassWithSubRegs(
BitVector SuperRegClassesBV(RegClasses.size());
RC.getSuperRegClasses(SubIdx, SuperRegClassesBV);
if (SuperRegClassesBV.any())
- SuperRegClasses.push_back(std::make_pair(&RC, SuperRegClassesBV));
+ SuperRegClasses.push_back(std::pair(&RC, SuperRegClassesBV));
}
llvm::stable_sort(SuperRegClasses,
[&](const std::pair<CodeGenRegisterClass *, BitVector> &A,
@@ -1115,14 +1110,14 @@ CodeGenRegisterClass::getMatchingSubClassWithSubRegs(
// aren't subregisters of SuperRegRC whereas GR32 has a direct 1:1
// mapping.
if (SuperRegRC->getMembers().size() >= SubRegRC->getMembers().size())
- return std::make_pair(ChosenSuperRegClass, SubRegRC);
+ return std::pair(ChosenSuperRegClass, SubRegRC);
}
}
// If we found a fit but it wasn't quite ideal because SubRegRC had excess
// registers, then we're done.
if (ChosenSuperRegClass)
- return std::make_pair(ChosenSuperRegClass, SubRegRC);
+ return std::pair(ChosenSuperRegClass, SubRegRC);
}
return std::nullopt;
@@ -1235,7 +1230,7 @@ CodeGenRegBank::CodeGenRegBank(RecordKeeper &Records,
// entries? (or maybe there's a reason for it - I don't know much about this
// code, just drive-by refactoring)
RegistersByName.insert(
- std::make_pair(Reg.TheDef->getValueAsString("AsmName"), &Reg));
+ std::pair(Reg.TheDef->getValueAsString("AsmName"), &Reg));
// Precompute all sub-register maps.
// This will create Composite entries for all inferred sub-register indices.
@@ -1247,10 +1242,10 @@ CodeGenRegBank::CodeGenRegBank(RecordKeeper &Records,
for (CodeGenSubRegIndex &SRI : SubRegIndices) {
SRI.computeConcatTransitiveClosure();
if (!SRI.ConcatenationOf.empty())
- ConcatIdx.insert(std::make_pair(
- SmallVector<CodeGenSubRegIndex *, 8>(SRI.ConcatenationOf.begin(),
- SRI.ConcatenationOf.end()),
- &SRI));
+ ConcatIdx.insert(
+ std::pair(SmallVector<CodeGenSubRegIndex *, 8>(
+ SRI.ConcatenationOf.begin(), SRI.ConcatenationOf.end()),
+ &SRI));
}
// Infer even more sub-registers by combining leading super-registers.
@@ -1341,12 +1336,12 @@ CodeGenRegister *CodeGenRegBank::getReg(Record *Def) {
void CodeGenRegBank::addToMaps(CodeGenRegisterClass *RC) {
if (Record *Def = RC->getDef())
- Def2RC.insert(std::make_pair(Def, RC));
+ Def2RC.insert(std::pair(Def, RC));
// Duplicate classes are rejected by insert().
// That's OK, we only care about the properties handled by CGRC::Key.
CodeGenRegisterClass::Key K(*RC);
- Key2RC.insert(std::make_pair(K, RC));
+ Key2RC.insert(std::pair(K, RC));
}
// Create a synthetic sub-class if it is missing.
@@ -1477,7 +1472,7 @@ void CodeGenRegBank::computeComposites() {
SmallSet<CompositePair, 4> UserDefined;
for (const CodeGenSubRegIndex &Idx : SubRegIndices)
for (auto P : Idx.getComposites())
- UserDefined.insert(std::make_pair(&Idx, P.first));
+ UserDefined.insert(std::pair(&Idx, P.first));
// Keep track of TopoSigs visited. We only need to visit each TopoSig once,
// and many registers will share TopoSigs on regular architectures.
@@ -1842,9 +1837,8 @@ static bool normalizeWeight(CodeGenRegister *Reg,
// for this register, has not been used to normalize a subregister's set,
// and has not already been used to singularly determine this UberRegSet.
unsigned AdjustUnit = *Reg->getRegUnits().begin();
- if (Reg->getRegUnits().count() != 1 ||
- hasRegUnit(NormalUnits, AdjustUnit) ||
- hasRegUnit(UberSet->SingularDeterminants, AdjustUnit)) {
+ if (Reg->getRegUnits().count() != 1 || NormalUnits.test(AdjustUnit) ||
+ UberSet->SingularDeterminants.test(AdjustUnit)) {
// We don't have an adjustable unit, so adopt a new one.
AdjustUnit = RegBank.newRegUnit(UberSet->Weight - RegWeight);
Reg->adoptRegUnit(AdjustUnit);
@@ -1964,9 +1958,9 @@ void CodeGenRegBank::pruneUnitSets() {
for (unsigned i = 0, e = SuperSetIDs.size(); i != e; ++i) {
unsigned SuperIdx = SuperSetIDs[i];
PrunedUnitSets[i].Name = RegUnitSets[SuperIdx].Name;
- PrunedUnitSets[i].Units.swap(RegUnitSets[SuperIdx].Units);
+ PrunedUnitSets[i].Units = std::move(RegUnitSets[SuperIdx].Units);
}
- RegUnitSets.swap(PrunedUnitSets);
+ RegUnitSets = std::move(PrunedUnitSets);
}
// Create a RegUnitSet for each RegClass that contains all units in the class
@@ -1985,18 +1979,14 @@ void CodeGenRegBank::computeRegUnitSets() {
if (!RC.Allocatable || RC.Artificial || !RC.GeneratePressureSet)
continue;
- // Speculatively grow the RegUnitSets to hold the new set.
- RegUnitSets.resize(RegUnitSets.size() + 1);
- RegUnitSets.back().Name = RC.getName();
-
// Compute a sorted list of units in this class.
- RC.buildRegUnitSet(*this, RegUnitSets.back().Units);
+ RegUnitSet RUSet;
+ RUSet.Name = RC.getName();
+ RC.buildRegUnitSet(*this, RUSet.Units);
// Find an existing RegUnitSet.
- std::vector<RegUnitSet>::const_iterator SetI =
- findRegUnitSet(RegUnitSets, RegUnitSets.back());
- if (SetI != std::prev(RegUnitSets.end()))
- RegUnitSets.pop_back();
+ if (findRegUnitSet(RegUnitSets, RUSet) == RegUnitSets.end())
+ RegUnitSets.push_back(std::move(RUSet));
}
if (RegUnitSets.empty())
@@ -2034,38 +2024,31 @@ void CodeGenRegBank::computeRegUnitSets() {
// Compare new sets with all original classes.
for (unsigned SearchIdx = (Idx >= NumRegUnitSubSets) ? 0 : Idx + 1;
SearchIdx != EndIdx; ++SearchIdx) {
- std::set<unsigned> Intersection;
- std::set_intersection(RegUnitSets[Idx].Units.begin(),
- RegUnitSets[Idx].Units.end(),
- RegUnitSets[SearchIdx].Units.begin(),
- RegUnitSets[SearchIdx].Units.end(),
- std::inserter(Intersection, Intersection.begin()));
+ std::vector<unsigned> Intersection;
+ std::set_intersection(
+ RegUnitSets[Idx].Units.begin(), RegUnitSets[Idx].Units.end(),
+ RegUnitSets[SearchIdx].Units.begin(),
+ RegUnitSets[SearchIdx].Units.end(), std::back_inserter(Intersection));
if (Intersection.empty())
continue;
- // Speculatively grow the RegUnitSets to hold the new set.
- RegUnitSets.resize(RegUnitSets.size() + 1);
- RegUnitSets.back().Name =
+ RegUnitSet RUSet;
+ RUSet.Name =
RegUnitSets[Idx].Name + "_with_" + RegUnitSets[SearchIdx].Name;
-
std::set_union(RegUnitSets[Idx].Units.begin(),
RegUnitSets[Idx].Units.end(),
RegUnitSets[SearchIdx].Units.begin(),
RegUnitSets[SearchIdx].Units.end(),
- std::inserter(RegUnitSets.back().Units,
- RegUnitSets.back().Units.begin()));
+ std::inserter(RUSet.Units, RUSet.Units.begin()));
// Find an existing RegUnitSet, or add the union to the unique sets.
- std::vector<RegUnitSet>::const_iterator SetI =
- findRegUnitSet(RegUnitSets, RegUnitSets.back());
- if (SetI != std::prev(RegUnitSets.end()))
- RegUnitSets.pop_back();
- else {
- LLVM_DEBUG(dbgs() << "UnitSet " << RegUnitSets.size() - 1 << " "
- << RegUnitSets.back().Name << ":";
+ if (findRegUnitSet(RegUnitSets, RUSet) == RegUnitSets.end()) {
+ LLVM_DEBUG(dbgs() << "UnitSet " << RegUnitSets.size() << " "
+ << RUSet.Name << ":";
for (auto &U
- : RegUnitSets.back().Units) printRegUnitName(U);
+ : RUSet.Units) printRegUnitName(U);
dbgs() << "\n";);
+ RegUnitSets.push_back(std::move(RUSet));
}
}
}
@@ -2124,10 +2107,8 @@ void CodeGenRegBank::computeRegUnitSets() {
++UnitIdx) {
std::vector<unsigned> RUSets;
for (unsigned i = 0, e = RegUnitSets.size(); i != e; ++i) {
- RegUnitSet &RUSet = RegUnitSets[i];
- if (!is_contained(RUSet.Units, UnitIdx))
- continue;
- RUSets.push_back(i);
+ if (is_contained(RegUnitSets[i].Units, UnitIdx))
+ RUSets.push_back(i);
}
unsigned RCUnitSetsIdx = 0;
for (unsigned e = RegClassUnitSets.size(); RCUnitSetsIdx != e;
@@ -2139,8 +2120,7 @@ void CodeGenRegBank::computeRegUnitSets() {
RegUnits[UnitIdx].RegClassUnitSetsIdx = RCUnitSetsIdx;
if (RCUnitSetsIdx == RegClassUnitSets.size()) {
// Create a new list of UnitSets as a "fake" register class.
- RegClassUnitSets.resize(RCUnitSetsIdx + 1);
- RegClassUnitSets[RCUnitSetsIdx].swap(RUSets);
+ RegClassUnitSets.push_back(std::move(RUSets));
}
}
}
diff --git a/llvm/utils/TableGen/CodeGenRegisters.h b/llvm/utils/TableGen/CodeGenRegisters.h
index cfc6d87..61e8e7c 100644
--- a/llvm/utils/TableGen/CodeGenRegisters.h
+++ b/llvm/utils/TableGen/CodeGenRegisters.h
@@ -111,8 +111,7 @@ public:
CodeGenSubRegIndex *addComposite(CodeGenSubRegIndex *A,
CodeGenSubRegIndex *B) {
assert(A && B);
- std::pair<CompMap::iterator, bool> Ins =
- Composed.insert(std::make_pair(A, B));
+ std::pair<CompMap::iterator, bool> Ins = Composed.insert(std::pair(A, B));
// Synthetic subreg indices that aren't contiguous (for instance ARM
// register tuples) don't have a bit range, so it's OK to let
// B->Offset == -1. For the other cases, accumulate the offset and set
@@ -706,14 +705,13 @@ public:
// This function is only for use by CodeGenRegister::computeSuperRegs().
// Others should simply use Reg->getTopoSig().
unsigned getTopoSig(const TopoSigId &Id) {
- return TopoSigs.insert(std::make_pair(Id, TopoSigs.size())).first->second;
+ return TopoSigs.insert(std::pair(Id, TopoSigs.size())).first->second;
}
// Create a native register unit that is associated with one or two root
// registers.
unsigned newRegUnit(CodeGenRegister *R0, CodeGenRegister *R1 = nullptr) {
- RegUnits.resize(RegUnits.size() + 1);
- RegUnit &RU = RegUnits.back();
+ RegUnit &RU = RegUnits.emplace_back();
RU.Roots[0] = R0;
RU.Roots[1] = R1;
RU.Artificial = R0->Artificial;
@@ -725,8 +723,8 @@ public:
// Create a new non-native register unit that can be adopted by a register
// to increase its pressure. Note that NumNativeRegUnits is not increased.
unsigned newRegUnit(unsigned Weight) {
- RegUnits.resize(RegUnits.size() + 1);
- RegUnits.back().Weight = Weight;
+ RegUnit &RU = RegUnits.emplace_back();
+ RU.Weight = Weight;
return RegUnits.size() - 1;
}
diff --git a/llvm/utils/TableGen/CodeGenSchedule.cpp b/llvm/utils/TableGen/CodeGenSchedule.cpp
index 9cebc42..b4c6247 100644
--- a/llvm/utils/TableGen/CodeGenSchedule.cpp
+++ b/llvm/utils/TableGen/CodeGenSchedule.cpp
@@ -338,7 +338,7 @@ static void processSTIPredicate(STIPredicateFunction &Fn,
APInt DefaultProcMask(ProcModelMap.size(), 0);
APInt DefaultPredMask(NumUniquePredicates, 0);
for (std::pair<APInt, APInt> &MaskPair : OpcodeMasks)
- MaskPair = std::make_pair(DefaultProcMask, DefaultPredMask);
+ MaskPair = std::pair(DefaultProcMask, DefaultPredMask);
// Construct a OpcodeInfo object for every unique opcode declared by an
// InstructionEquivalenceClass definition.
@@ -564,7 +564,7 @@ void CodeGenSchedModels::collectProcModels() {
/// ProcessorItineraries.
void CodeGenSchedModels::addProcModel(Record *ProcDef) {
Record *ModelKey = getModelOrItinDef(ProcDef);
- if (!ProcModelMap.insert(std::make_pair(ModelKey, ProcModels.size())).second)
+ if (!ProcModelMap.insert(std::pair(ModelKey, ProcModels.size())).second)
return;
std::string Name = std::string(ModelKey->getName());
@@ -1788,7 +1788,7 @@ void CodeGenSchedModels::inferFromRW(ArrayRef<unsigned> OperWrites,
for (const PredTransition &Trans : LastTransitions)
SubstitutedAny |= Transitions.substituteVariants(Trans);
LLVM_DEBUG(Transitions.dump());
- LastTransitions.swap(Transitions.TransVec);
+ LastTransitions = std::move(Transitions.TransVec);
} while (SubstitutedAny);
// WARNING: We are about to mutate the SchedClasses vector. Do not refer to
diff --git a/llvm/utils/TableGen/CodeGenTarget.cpp b/llvm/utils/TableGen/CodeGenTarget.cpp
index 8e2957e..f26815c 100644
--- a/llvm/utils/TableGen/CodeGenTarget.cpp
+++ b/llvm/utils/TableGen/CodeGenTarget.cpp
@@ -534,8 +534,8 @@ void CodeGenTarget::ComputeInstrsByEnum() const {
[](const CodeGenInstruction *Rec1, const CodeGenInstruction *Rec2) {
const auto &D1 = *Rec1->TheDef;
const auto &D2 = *Rec2->TheDef;
- return std::make_tuple(!D1.getValueAsBit("isPseudo"), D1.getName()) <
- std::make_tuple(!D2.getValueAsBit("isPseudo"), D2.getName());
+ return std::tuple(!D1.getValueAsBit("isPseudo"), D1.getName()) <
+ std::tuple(!D2.getValueAsBit("isPseudo"), D2.getName());
});
// Assign an enum value to each instruction according to the sorted order.
diff --git a/llvm/utils/TableGen/DAGISelMatcherEmitter.cpp b/llvm/utils/TableGen/DAGISelMatcherEmitter.cpp
index b475c98..533b8c4 100644
--- a/llvm/utils/TableGen/DAGISelMatcherEmitter.cpp
+++ b/llvm/utils/TableGen/DAGISelMatcherEmitter.cpp
@@ -73,7 +73,7 @@ class MatcherTableEmitter {
unsigned getPatternIdxFromTable(std::string &&P, std::string &&include_loc) {
const auto It = VecPatterns.find(P);
if (It == VecPatterns.end()) {
- VecPatterns.insert(make_pair(std::move(P), VecPatterns.size()));
+ VecPatterns.insert(std::pair(std::move(P), VecPatterns.size()));
VecIncludeStrings.push_back(std::move(include_loc));
return VecIncludeStrings.size() - 1;
}
diff --git a/llvm/utils/TableGen/DAGISelMatcherGen.cpp b/llvm/utils/TableGen/DAGISelMatcherGen.cpp
index 956cb5e..e8bdaba 100644
--- a/llvm/utils/TableGen/DAGISelMatcherGen.cpp
+++ b/llvm/utils/TableGen/DAGISelMatcherGen.cpp
@@ -252,7 +252,7 @@ void MatcherGen::EmitLeafMatchCode(const TreePatternNode &N) {
if (LeafRec->isSubClassOf("Register")) {
AddMatcher(new RecordMatcher("physreg input " + LeafRec->getName().str(),
NextRecordedOperandNo));
- PhysRegInputs.push_back(std::make_pair(LeafRec, NextRecordedOperandNo++));
+ PhysRegInputs.push_back(std::pair(LeafRec, NextRecordedOperandNo++));
return;
}
@@ -272,7 +272,7 @@ void MatcherGen::EmitLeafMatchCode(const TreePatternNode &N) {
// Remember this ComplexPattern so that we can emit it after all the other
// structural matches are done.
unsigned InputOperand = VariableMap[N.getName()] - 1;
- MatchedComplexPatterns.push_back(std::make_pair(&N, InputOperand));
+ MatchedComplexPatterns.push_back(std::pair(&N, InputOperand));
return;
}
@@ -313,7 +313,7 @@ void MatcherGen::EmitOperatorMatchCode(const TreePatternNode &N,
}
if (recordUniqueNode(PatternName)) {
- auto NodeAndOpNum = std::make_pair(&N, NextRecordedOperandNo - 1);
+ auto NodeAndOpNum = std::pair(&N, NextRecordedOperandNo - 1);
MatchedComplexPatterns.push_back(NodeAndOpNum);
}
diff --git a/llvm/utils/TableGen/DAGISelMatcherOpt.cpp b/llvm/utils/TableGen/DAGISelMatcherOpt.cpp
index f786d41..047d285 100644
--- a/llvm/utils/TableGen/DAGISelMatcherOpt.cpp
+++ b/llvm/utils/TableGen/DAGISelMatcherOpt.cpp
@@ -425,7 +425,7 @@ static void FactorNodes(std::unique_ptr<Matcher> &InputMatcherPtr) {
CheckOpcodeMatcher *COM = cast<CheckOpcodeMatcher>(OptionsToMatch[i]);
assert(Opcodes.insert(COM->getOpcode().getEnumName()).second &&
"Duplicate opcodes not factored?");
- Cases.push_back(std::make_pair(&COM->getOpcode(), COM->takeNext()));
+ Cases.push_back(std::pair(&COM->getOpcode(), COM->takeNext()));
delete COM;
}
@@ -462,7 +462,7 @@ static void FactorNodes(std::unique_ptr<Matcher> &InputMatcherPtr) {
}
Entry = Cases.size() + 1;
- Cases.push_back(std::make_pair(CTMTy, MatcherWithoutCTM));
+ Cases.push_back(std::pair(CTMTy, MatcherWithoutCTM));
}
// Make sure we recursively factor any scopes we may have created.
diff --git a/llvm/utils/TableGen/DFAEmitter.cpp b/llvm/utils/TableGen/DFAEmitter.cpp
index 0d22ad2..ce8cc2a 100644
--- a/llvm/utils/TableGen/DFAEmitter.cpp
+++ b/llvm/utils/TableGen/DFAEmitter.cpp
@@ -81,7 +81,7 @@ void DfaEmitter::visitDfaState(const DfaState &DS) {
sort(TI);
TI.erase(std::unique(TI.begin(), TI.end()), TI.end());
unsigned ToId = DfaStates.insert(NewStates);
- DfaTransitions.emplace(std::make_pair(FromId, A), std::make_pair(ToId, TI));
+ DfaTransitions.emplace(std::pair(FromId, A), std::pair(ToId, TI));
}
}
@@ -353,7 +353,7 @@ void CustomDfaEmitter::printActionType(raw_ostream &OS) { OS << TypeName; }
void CustomDfaEmitter::printActionValue(action_type A, raw_ostream &OS) {
const ActionTuple &AT = Actions[A];
if (AT.size() > 1)
- OS << "std::make_tuple(";
+ OS << "std::tuple(";
ListSeparator LS;
for (const auto &SingleAction : AT) {
OS << LS;
diff --git a/llvm/utils/TableGen/DXILEmitter.cpp b/llvm/utils/TableGen/DXILEmitter.cpp
index 25e818a..768e805 100644
--- a/llvm/utils/TableGen/DXILEmitter.cpp
+++ b/llvm/utils/TableGen/DXILEmitter.cpp
@@ -49,11 +49,11 @@ struct DXILOperationDesc {
StringRef Doc; // the documentation description of this instruction
SmallVector<DXILParameter> Params; // the operands that this instruction takes
- StringRef OverloadTypes; // overload types if applicable
- StringRef FnAttr; // attribute shorthands: rn=does not access
- // memory,ro=only reads from memory
+ SmallVector<ParameterKind> OverloadTypes; // overload types if applicable
+ StringRef Attr; // operation attribute; reference to string representation
+ // of llvm::Attribute::AttrKind
StringRef Intrinsic; // The llvm intrinsic map to OpName. Default is "" which
- // means no map exist
+ // means no map exists
bool IsDeriv = false; // whether this is some kind of derivative
bool IsGradient = false; // whether this requires a gradient calculation
bool IsFeedback = false; // whether this is a sampler feedback op
@@ -70,37 +70,32 @@ struct DXILOperationDesc {
int OverloadParamIndex; // parameter index which control the overload.
// When < 0, should be only 1 overload type.
SmallVector<StringRef, 4> counters; // counters for this inst.
- DXILOperationDesc(const Record *R) {
- OpName = R->getValueAsString("OpName");
- OpCode = R->getValueAsInt("OpCode");
- OpClass = R->getValueAsDef("OpClass")->getValueAsString("Name");
- Category = R->getValueAsDef("OpCategory")->getValueAsString("Name");
-
- if (R->getValue("llvm_intrinsic")) {
- auto *IntrinsicDef = R->getValueAsDef("llvm_intrinsic");
- auto DefName = IntrinsicDef->getName();
- assert(DefName.starts_with("int_") && "invalid intrinsic name");
- // Remove the int_ from intrinsic name.
- Intrinsic = DefName.substr(4);
- }
-
- Doc = R->getValueAsString("Doc");
-
- ListInit *ParamList = R->getValueAsListInit("Params");
- OverloadParamIndex = -1;
- for (unsigned I = 0; I < ParamList->size(); ++I) {
- Record *Param = ParamList->getElementAsRecord(I);
- Params.emplace_back(DXILParameter(Param));
- auto &CurParam = Params.back();
- if (CurParam.Kind >= ParameterKind::OVERLOAD)
- OverloadParamIndex = I;
- }
- OverloadTypes = R->getValueAsString("OverloadTypes");
- FnAttr = R->getValueAsString("Attributes");
- }
+ DXILOperationDesc(const Record *);
};
} // end anonymous namespace
+// Convert DXIL type name string to dxil::ParameterKind
+//
+// @param typeNameStr Type name string
+// @return ParameterKind as defined in llvm/Support/DXILABI.h
+static ParameterKind getDXILTypeNameToKind(StringRef typeNameStr) {
+ return StringSwitch<ParameterKind>(typeNameStr)
+ .Case("voidTy", ParameterKind::VOID)
+ .Case("f16Ty", ParameterKind::HALF)
+ .Case("f32Ty", ParameterKind::FLOAT)
+ .Case("f64Ty", ParameterKind::DOUBLE)
+ .Case("i1Ty", ParameterKind::I1)
+ .Case("i8Ty", ParameterKind::I8)
+ .Case("i16Ty", ParameterKind::I16)
+ .Case("i32Ty", ParameterKind::I32)
+ .Case("i64Ty", ParameterKind::I64)
+ .Case("overloadTy", ParameterKind::OVERLOAD)
+ .Case("handleTy", ParameterKind::DXIL_HANDLE)
+ .Case("cbufferRetTy", ParameterKind::CBUFFER_RET)
+ .Case("resourceRetTy", ParameterKind::RESOURCE_RET)
+ .Default(ParameterKind::INVALID);
+}
+
static ParameterKind parameterTypeNameToKind(StringRef Name) {
return StringSwitch<ParameterKind>(Name)
.Case("void", ParameterKind::VOID)
@@ -119,10 +114,44 @@ static ParameterKind parameterTypeNameToKind(StringRef Name) {
.Default(ParameterKind::INVALID);
}
+DXILOperationDesc::DXILOperationDesc(const Record *R) {
+ OpName = R->getValueAsString("OpName");
+ OpCode = R->getValueAsInt("OpCode");
+ OpClass = R->getValueAsDef("OpClass")->getValueAsString("Name");
+ Category = R->getValueAsDef("OpCategory")->getValueAsString("Name");
+
+ if (R->getValue("llvm_intrinsic")) {
+ auto *IntrinsicDef = R->getValueAsDef("llvm_intrinsic");
+ auto DefName = IntrinsicDef->getName();
+ assert(DefName.starts_with("int_") && "invalid intrinsic name");
+ // Remove the int_ from intrinsic name.
+ Intrinsic = DefName.substr(4);
+ }
+
+ Doc = R->getValueAsString("Doc");
+
+ ListInit *ParamList = R->getValueAsListInit("Params");
+ OverloadParamIndex = -1;
+ for (unsigned I = 0; I < ParamList->size(); ++I) {
+ Record *Param = ParamList->getElementAsRecord(I);
+ Params.emplace_back(DXILParameter(Param));
+ auto &CurParam = Params.back();
+ if (CurParam.Kind >= ParameterKind::OVERLOAD)
+ OverloadParamIndex = I;
+ }
+ ListInit *OverloadTypeList = R->getValueAsListInit("OverloadTypes");
+
+ for (unsigned I = 0; I < OverloadTypeList->size(); ++I) {
+ Record *R = OverloadTypeList->getElementAsRecord(I);
+ OverloadTypes.emplace_back(getDXILTypeNameToKind(R->getNameInitAsString()));
+ }
+ Attr = StringRef(R->getValue("Attribute")->getNameInitAsString());
+}
+
DXILParameter::DXILParameter(const Record *R) {
Name = R->getValueAsString("Name");
Pos = R->getValueAsInt("Pos");
- Kind = parameterTypeNameToKind(R->getValueAsString("LLVMType"));
+ Kind = parameterTypeNameToKind(R->getValueAsString("Type"));
if (R->getValue("Doc"))
Doc = R->getValueAsString("Doc");
IsConst = R->getValueAsBit("IsConstant");
@@ -216,7 +245,7 @@ static void emitDXILEnums(std::vector<DXILOperationDesc> &Ops,
std::vector<std::pair<std::string, std::string>> ClassVec;
for (auto &It : ClassMap) {
ClassVec.emplace_back(
- std::make_pair(It.getKey().str(), buildCategoryStr(It.second)));
+ std::pair(It.getKey().str(), buildCategoryStr(It.second)));
}
// Sort by Category + ClassName.
llvm::sort(ClassVec, [](std::pair<std::string, std::string> &A,
@@ -267,38 +296,51 @@ static void emitDXILIntrinsicMap(std::vector<DXILOperationDesc> &Ops,
OS << "\n";
}
-static std::string emitDXILOperationFnAttr(StringRef FnAttr) {
- return StringSwitch<std::string>(FnAttr)
- .Case("rn", "Attribute::ReadNone")
- .Case("ro", "Attribute::ReadOnly")
+// Convert operation attribute string to Attribute enum
+//
+// @param Attr string reference
+// @return std::string Attribute enum string
+static std::string emitDXILOperationAttr(StringRef Attr) {
+ return StringSwitch<std::string>(Attr)
+ .Case("ReadNone", "Attribute::ReadNone")
+ .Case("ReadOnly", "Attribute::ReadOnly")
.Default("Attribute::None");
}
-static std::string getOverloadKind(StringRef Overload) {
- return StringSwitch<std::string>(Overload)
- .Case("half", "OverloadKind::HALF")
- .Case("float", "OverloadKind::FLOAT")
- .Case("double", "OverloadKind::DOUBLE")
- .Case("i1", "OverloadKind::I1")
- .Case("i16", "OverloadKind::I16")
- .Case("i32", "OverloadKind::I32")
- .Case("i64", "OverloadKind::I64")
- .Case("udt", "OverloadKind::UserDefineType")
- .Case("obj", "OverloadKind::ObjectType")
- .Default("OverloadKind::VOID");
+static std::string overloadKindStr(ParameterKind Overload) {
+ switch (Overload) {
+ case ParameterKind::HALF:
+ return "OverloadKind::HALF";
+ case ParameterKind::FLOAT:
+ return "OverloadKind::FLOAT";
+ case ParameterKind::DOUBLE:
+ return "OverloadKind::DOUBLE";
+ case ParameterKind::I1:
+ return "OverloadKind::I1";
+ case ParameterKind::I8:
+ return "OverloadKind::I8";
+ case ParameterKind::I16:
+ return "OverloadKind::I16";
+ case ParameterKind::I32:
+ return "OverloadKind::I32";
+ case ParameterKind::I64:
+ return "OverloadKind::I64";
+ case ParameterKind::VOID:
+ return "OverloadKind::VOID";
+ default:
+ return "OverloadKind::UNKNOWN";
+ }
}
-static std::string getDXILOperationOverload(StringRef Overloads) {
- SmallVector<StringRef> OverloadStrs;
- Overloads.split(OverloadStrs, ';', /*MaxSplit*/ -1, /*KeepEmpty*/ false);
+static std::string
+getDXILOperationOverloads(SmallVector<ParameterKind> Overloads) {
// Format is: OverloadKind::FLOAT | OverloadKind::HALF
- assert(!OverloadStrs.empty() && "Invalid overloads");
- auto It = OverloadStrs.begin();
+ auto It = Overloads.begin();
std::string Result;
raw_string_ostream OS(Result);
- OS << getOverloadKind(*It);
- for (++It; It != OverloadStrs.end(); ++It) {
- OS << " | " << getOverloadKind(*It);
+ OS << overloadKindStr(*It);
+ for (++It; It != Overloads.end(); ++It) {
+ OS << " | " << overloadKindStr(*It);
}
return OS.str();
}
@@ -368,8 +410,8 @@ static void emitDXILOperationTable(std::vector<DXILOperationDesc> &Ops,
OS << " { dxil::OpCode::" << Op.OpName << ", "
<< OpStrings.get(Op.OpName.str()) << ", OpCodeClass::" << Op.OpClass
<< ", " << OpClassStrings.get(getDXILOpClassName(Op.OpClass)) << ", "
- << getDXILOperationOverload(Op.OverloadTypes) << ", "
- << emitDXILOperationFnAttr(Op.FnAttr) << ", " << Op.OverloadParamIndex
+ << getDXILOperationOverloads(Op.OverloadTypes) << ", "
+ << emitDXILOperationAttr(Op.Attr) << ", " << Op.OverloadParamIndex
<< ", " << Op.Params.size() << ", "
<< Parameters.get(ParameterMap[Op.OpClass]) << " },\n";
}
diff --git a/llvm/utils/TableGen/DecoderEmitter.cpp b/llvm/utils/TableGen/DecoderEmitter.cpp
index 2f28ccb..02d9527 100644
--- a/llvm/utils/TableGen/DecoderEmitter.cpp
+++ b/llvm/utils/TableGen/DecoderEmitter.cpp
@@ -614,7 +614,7 @@ void Filter::recurse() {
// Delegates to an inferior filter chooser for further processing on this
// group of instructions whose segment values are variable.
- FilterChooserMap.insert(std::make_pair(
+ FilterChooserMap.insert(std::pair(
NO_FIXED_SEGMENTS_SENTINEL,
std::make_unique<FilterChooser>(Owner->AllInstructions,
VariableInstructions, Owner->Operands,
@@ -641,10 +641,10 @@ void Filter::recurse() {
// Delegates to an inferior filter chooser for further processing on this
// category of instructions.
- FilterChooserMap.insert(std::make_pair(
- Inst.first, std::make_unique<FilterChooser>(
- Owner->AllInstructions, Inst.second, Owner->Operands,
- BitValueArray, *Owner)));
+ FilterChooserMap.insert(
+ std::pair(Inst.first, std::make_unique<FilterChooser>(
+ Owner->AllInstructions, Inst.second,
+ Owner->Operands, BitValueArray, *Owner)));
}
}
@@ -1908,7 +1908,7 @@ void parseVarLenInstOperand(const Record &Def,
int TiedReg = TiedTo[OpSubOpPair.first];
if (TiedReg != -1) {
unsigned OpIdx = CGI.Operands.getFlattenedOperandNumber(
- std::make_pair(TiedReg, OpSubOpPair.second));
+ std::pair(TiedReg, OpSubOpPair.second));
Operands[OpIdx].addField(CurrBitPos, EncodingSegment.BitWidth, Offset);
}
}
@@ -2005,11 +2005,9 @@ populateInstruction(CodeGenTarget &Target, const Record &EncodingDef,
DagInit *Out = Def.getValueAsDag("OutOperandList");
DagInit *In = Def.getValueAsDag("InOperandList");
for (unsigned i = 0; i < Out->getNumArgs(); ++i)
- InOutOperands.push_back(
- std::make_pair(Out->getArg(i), Out->getArgNameStr(i)));
+ InOutOperands.push_back(std::pair(Out->getArg(i), Out->getArgNameStr(i)));
for (unsigned i = 0; i < In->getNumArgs(); ++i)
- InOutOperands.push_back(
- std::make_pair(In->getArg(i), In->getArgNameStr(i)));
+ InOutOperands.push_back(std::pair(In->getArg(i), In->getArgNameStr(i)));
// Search for tied operands, so that we can correctly instantiate
// operands that are not explicitly represented in the encoding.
@@ -2545,7 +2543,7 @@ void DecoderEmitter::run(raw_ostream &o) {
if (!NumberedEncodings[i].HwModeName.empty())
DecoderNamespace +=
std::string("_") + NumberedEncodings[i].HwModeName.str();
- OpcMap[std::make_pair(DecoderNamespace, Size)].emplace_back(
+ OpcMap[std::pair(DecoderNamespace, Size)].emplace_back(
i, IndexOfInstruction.find(Def)->second);
} else {
NumEncodingsOmitted++;
diff --git a/llvm/utils/TableGen/FastISelEmitter.cpp b/llvm/utils/TableGen/FastISelEmitter.cpp
index 00a1650..f04c6e3 100644
--- a/llvm/utils/TableGen/FastISelEmitter.cpp
+++ b/llvm/utils/TableGen/FastISelEmitter.cpp
@@ -595,7 +595,7 @@ void FastISelMap::collectPatterns(CodeGenDAGPatterns &CGP) {
int complexity = Pattern.getPatternComplexity(CGP);
auto inserted_simple_pattern = SimplePatternsCheck.insert(
- std::make_tuple(Operands, OpcodeName, VT, RetVT, PredicateCheck));
+ std::tuple(Operands, OpcodeName, VT, RetVT, PredicateCheck));
if (!inserted_simple_pattern.second) {
PrintFatalError(Pattern.getSrcRecord()->getLoc(),
"Duplicate predicate in FastISel table!");
diff --git a/llvm/utils/TableGen/GlobalISelCombinerEmitter.cpp b/llvm/utils/TableGen/GlobalISelCombinerEmitter.cpp
index d9249cf..dee3cb4 100644
--- a/llvm/utils/TableGen/GlobalISelCombinerEmitter.cpp
+++ b/llvm/utils/TableGen/GlobalISelCombinerEmitter.cpp
@@ -2956,8 +2956,8 @@ GICombinerEmitter::buildMatchTable(MutableArrayRef<RuleMatcher> Rules) {
const Matcher *B) {
auto *L = static_cast<const RuleMatcher *>(A);
auto *R = static_cast<const RuleMatcher *>(B);
- return std::make_tuple(OpcodeOrder[L->getOpcode()], L->getNumOperands()) <
- std::make_tuple(OpcodeOrder[R->getOpcode()], R->getNumOperands());
+ return std::tuple(OpcodeOrder[L->getOpcode()], L->getNumOperands()) <
+ std::tuple(OpcodeOrder[R->getOpcode()], R->getNumOperands());
});
for (Matcher *Rule : InputRules)
diff --git a/llvm/utils/TableGen/GlobalISelEmitter.cpp b/llvm/utils/TableGen/GlobalISelEmitter.cpp
index 13f2384..618cb2f 100644
--- a/llvm/utils/TableGen/GlobalISelEmitter.cpp
+++ b/llvm/utils/TableGen/GlobalISelEmitter.cpp
@@ -2215,8 +2215,8 @@ GlobalISelEmitter::buildMatchTable(MutableArrayRef<RuleMatcher> Rules,
const Matcher *B) {
auto *L = static_cast<const RuleMatcher *>(A);
auto *R = static_cast<const RuleMatcher *>(B);
- return std::make_tuple(OpcodeOrder[L->getOpcode()], L->getNumOperands()) <
- std::make_tuple(OpcodeOrder[R->getOpcode()], R->getNumOperands());
+ return std::tuple(OpcodeOrder[L->getOpcode()], L->getNumOperands()) <
+ std::tuple(OpcodeOrder[R->getOpcode()], R->getNumOperands());
});
for (Matcher *Rule : InputRules)
diff --git a/llvm/utils/TableGen/GlobalISelMatchTable.cpp b/llvm/utils/TableGen/GlobalISelMatchTable.cpp
index f7166ea..45fb41b 100644
--- a/llvm/utils/TableGen/GlobalISelMatchTable.cpp
+++ b/llvm/utils/TableGen/GlobalISelMatchTable.cpp
@@ -408,18 +408,18 @@ bool LLTCodeGen::operator<(const LLTCodeGen &Other) const {
return Ty.getAddressSpace() < Other.Ty.getAddressSpace();
if (Ty.isVector() && Ty.getElementCount() != Other.Ty.getElementCount())
- return std::make_tuple(Ty.isScalable(),
- Ty.getElementCount().getKnownMinValue()) <
- std::make_tuple(Other.Ty.isScalable(),
- Other.Ty.getElementCount().getKnownMinValue());
+ return std::tuple(Ty.isScalable(),
+ Ty.getElementCount().getKnownMinValue()) <
+ std::tuple(Other.Ty.isScalable(),
+ Other.Ty.getElementCount().getKnownMinValue());
assert((!Ty.isVector() || Ty.isScalable() == Other.Ty.isScalable()) &&
"Unexpected mismatch of scalable property");
return Ty.isVector()
- ? std::make_tuple(Ty.isScalable(),
- Ty.getSizeInBits().getKnownMinValue()) <
- std::make_tuple(Other.Ty.isScalable(),
- Other.Ty.getSizeInBits().getKnownMinValue())
+ ? std::tuple(Ty.isScalable(),
+ Ty.getSizeInBits().getKnownMinValue()) <
+ std::tuple(Other.Ty.isScalable(),
+ Other.Ty.getSizeInBits().getKnownMinValue())
: Ty.getSizeInBits().getFixedValue() <
Other.Ty.getSizeInBits().getFixedValue();
}
@@ -545,8 +545,8 @@ void GroupMatcher::optimize() {
if (T != E)
F = ++T;
}
- optimizeRules<GroupMatcher>(Matchers, MatcherStorage).swap(Matchers);
- optimizeRules<SwitchMatcher>(Matchers, MatcherStorage).swap(Matchers);
+ Matchers = optimizeRules<GroupMatcher>(Matchers, MatcherStorage);
+ Matchers = optimizeRules<SwitchMatcher>(Matchers, MatcherStorage);
}
//===- SwitchMatcher ------------------------------------------------------===//
@@ -720,8 +720,8 @@ void RuleMatcher::optimize() {
}
llvm::sort(EpilogueMatchers, [](const std::unique_ptr<PredicateMatcher> &L,
const std::unique_ptr<PredicateMatcher> &R) {
- return std::make_tuple(L->getKind(), L->getInsnVarID(), L->getOpIdx()) <
- std::make_tuple(R->getKind(), R->getInsnVarID(), R->getOpIdx());
+ return std::tuple(L->getKind(), L->getInsnVarID(), L->getOpIdx()) <
+ std::tuple(R->getKind(), R->getInsnVarID(), R->getOpIdx());
});
}
@@ -822,7 +822,7 @@ Error RuleMatcher::defineComplexSubOperand(StringRef SymbolicName,
}
ComplexSubOperands[SymbolicName] =
- std::make_tuple(ComplexPattern, RendererID, SubOperandID);
+ std::tuple(ComplexPattern, RendererID, SubOperandID);
ComplexSubOperandsParentName[SymbolicName] = ParentName;
return Error::success();
diff --git a/llvm/utils/TableGen/GlobalISelMatchTable.h b/llvm/utils/TableGen/GlobalISelMatchTable.h
index 635552f..b1ab7da 100644
--- a/llvm/utils/TableGen/GlobalISelMatchTable.h
+++ b/llvm/utils/TableGen/GlobalISelMatchTable.h
@@ -232,7 +232,7 @@ public:
unsigned allocateLabelID() { return CurrentLabelID++; }
void defineLabel(unsigned LabelID) {
- LabelMap.insert(std::make_pair(LabelID, CurrentSize));
+ LabelMap.insert(std::pair(LabelID, CurrentSize));
}
unsigned getLabelIndex(unsigned LabelID) const {
diff --git a/llvm/utils/TableGen/InfoByHwMode.cpp b/llvm/utils/TableGen/InfoByHwMode.cpp
index 6d9a35a..4a64421 100644
--- a/llvm/utils/TableGen/InfoByHwMode.cpp
+++ b/llvm/utils/TableGen/InfoByHwMode.cpp
@@ -69,9 +69,9 @@ MVT &ValueTypeByHwMode::getOrCreateTypeForMode(unsigned Mode, MVT Type) {
// make a copy of it for Mode and return it.
auto D = Map.begin();
if (D != Map.end() && D->first == DefaultMode)
- return Map.insert(std::make_pair(Mode, D->second)).first->second;
+ return Map.insert(std::pair(Mode, D->second)).first->second;
// If default mode is not present either, use provided Type.
- return Map.insert(std::make_pair(Mode, Type)).first->second;
+ return Map.insert(std::pair(Mode, Type)).first->second;
}
StringRef ValueTypeByHwMode::getMVTName(MVT T) {
diff --git a/llvm/utils/TableGen/InfoByHwMode.h b/llvm/utils/TableGen/InfoByHwMode.h
index 5f53295..001509e 100644
--- a/llvm/utils/TableGen/InfoByHwMode.h
+++ b/llvm/utils/TableGen/InfoByHwMode.h
@@ -144,7 +144,7 @@ template <typename InfoT> struct InfoByHwMode {
assert(hasMode(Mode) || hasDefault());
InfoT I = get(Mode);
Map.clear();
- Map.insert(std::make_pair(DefaultMode, I));
+ Map.insert(std::pair(DefaultMode, I));
}
protected:
@@ -209,7 +209,7 @@ struct RegSizeInfoByHwMode : public InfoByHwMode<RegSizeInfo> {
void writeToStream(raw_ostream &OS) const;
void insertRegSizeForMode(unsigned Mode, RegSizeInfo Info) {
- Map.insert(std::make_pair(Mode, Info));
+ Map.insert(std::pair(Mode, Info));
}
};
diff --git a/llvm/utils/TableGen/IntrinsicEmitter.cpp b/llvm/utils/TableGen/IntrinsicEmitter.cpp
index f7ae5ed..50a34ea 100644
--- a/llvm/utils/TableGen/IntrinsicEmitter.cpp
+++ b/llvm/utils/TableGen/IntrinsicEmitter.cpp
@@ -637,7 +637,7 @@ void IntrinsicEmitter::EmitIntrinsicToBuiltinMap(
std::map<std::string, std::string> &BIM =
BuiltinMap[Ints[i].TargetPrefix];
- if (!BIM.insert(std::make_pair(BuiltinName, Ints[i].EnumName)).second)
+ if (!BIM.insert(std::pair(BuiltinName, Ints[i].EnumName)).second)
PrintFatalError(Ints[i].TheDef->getLoc(),
"Intrinsic '" + Ints[i].TheDef->getName() +
"': duplicate " + CompilerName + " builtin name!");
diff --git a/llvm/utils/TableGen/OptParserEmitter.cpp b/llvm/utils/TableGen/OptParserEmitter.cpp
index 0f08119..c25f6c5 100644
--- a/llvm/utils/TableGen/OptParserEmitter.cpp
+++ b/llvm/utils/TableGen/OptParserEmitter.cpp
@@ -207,14 +207,14 @@ static void EmitOptParser(RecordKeeper &Records, raw_ostream &OS) {
typedef SmallVector<SmallString<2>, 2> PrefixKeyT;
typedef std::map<PrefixKeyT, std::string> PrefixesT;
PrefixesT Prefixes;
- Prefixes.insert(std::make_pair(PrefixKeyT(), "prefix_0"));
+ Prefixes.insert(std::pair(PrefixKeyT(), "prefix_0"));
unsigned CurPrefix = 0;
for (const Record &R : llvm::make_pointee_range(Opts)) {
std::vector<StringRef> RPrefixes = R.getValueAsListOfStrings("Prefixes");
PrefixKeyT PrefixKey(RPrefixes.begin(), RPrefixes.end());
unsigned NewPrefix = CurPrefix + 1;
std::string Prefix = (Twine("prefix_") + Twine(NewPrefix)).str();
- if (Prefixes.insert(std::make_pair(PrefixKey, Prefix)).second)
+ if (Prefixes.insert(std::pair(PrefixKey, Prefix)).second)
CurPrefix = NewPrefix;
}
diff --git a/llvm/utils/TableGen/SearchableTableEmitter.cpp b/llvm/utils/TableGen/SearchableTableEmitter.cpp
index 0cce798a..5bab4ff 100644
--- a/llvm/utils/TableGen/SearchableTableEmitter.cpp
+++ b/llvm/utils/TableGen/SearchableTableEmitter.cpp
@@ -608,7 +608,7 @@ void SearchableTableEmitter::collectEnumEntries(
Value = getInt(EntryRec, ValueField);
Enum.Entries.push_back(std::make_unique<GenericEnum::Entry>(Name, Value));
- Enum.EntryMap.insert(std::make_pair(EntryRec, Enum.Entries.back().get()));
+ Enum.EntryMap.insert(std::pair(EntryRec, Enum.Entries.back().get()));
}
if (ValueField.empty()) {
@@ -708,7 +708,7 @@ void SearchableTableEmitter::run(raw_ostream &OS) {
collectEnumEntries(*Enum, NameField, ValueField,
Records.getAllDerivedDefinitions(FilterClass));
- EnumMap.insert(std::make_pair(EnumRec, Enum.get()));
+ EnumMap.insert(std::pair(EnumRec, Enum.get()));
Enums.emplace_back(std::move(Enum));
}
@@ -773,7 +773,7 @@ void SearchableTableEmitter::run(raw_ostream &OS) {
});
}
- TableMap.insert(std::make_pair(TableRec, Table.get()));
+ TableMap.insert(std::pair(TableRec, Table.get()));
Tables.emplace_back(std::move(Table));
}
diff --git a/llvm/utils/TableGen/SequenceToOffsetTable.h b/llvm/utils/TableGen/SequenceToOffsetTable.h
index 7db39a9..5766b68 100644
--- a/llvm/utils/TableGen/SequenceToOffsetTable.h
+++ b/llvm/utils/TableGen/SequenceToOffsetTable.h
@@ -87,7 +87,7 @@ public:
if (I != Seqs.end() && isSuffix(Seq, I->first))
return;
- I = Seqs.insert(I, std::make_pair(Seq, 0u));
+ I = Seqs.insert(I, std::pair(Seq, 0u));
// The entry before I may be a suffix of Seq that can now be erased.
if (I != Seqs.begin() && isSuffix((--I)->first, Seq))
diff --git a/llvm/utils/TableGen/SubtargetEmitter.cpp b/llvm/utils/TableGen/SubtargetEmitter.cpp
index b1502ea..2707f54 100644
--- a/llvm/utils/TableGen/SubtargetEmitter.cpp
+++ b/llvm/utils/TableGen/SubtargetEmitter.cpp
@@ -486,11 +486,10 @@ void SubtargetEmitter::EmitStageAndOperandCycleData(
std::map<std::string, unsigned> ItinStageMap, ItinOperandMap;
for (const CodeGenProcModel &ProcModel : SchedModels.procModels()) {
// Add process itinerary to the list.
- ProcItinLists.resize(ProcItinLists.size() + 1);
+ std::vector<InstrItinerary> &ItinList = ProcItinLists.emplace_back();
// If this processor defines no itineraries, then leave the itinerary list
// empty.
- std::vector<InstrItinerary> &ItinList = ProcItinLists.back();
if (!ProcModel.hasItineraries())
continue;
@@ -1029,17 +1028,16 @@ void SubtargetEmitter::ExpandProcResources(
// tables. Must be called for each processor in order.
void SubtargetEmitter::GenSchedClassTables(const CodeGenProcModel &ProcModel,
SchedClassTables &SchedTables) {
- SchedTables.ProcSchedClasses.resize(SchedTables.ProcSchedClasses.size() + 1);
+ std::vector<MCSchedClassDesc> &SCTab =
+ SchedTables.ProcSchedClasses.emplace_back();
if (!ProcModel.hasInstrSchedModel())
return;
- std::vector<MCSchedClassDesc> &SCTab = SchedTables.ProcSchedClasses.back();
LLVM_DEBUG(dbgs() << "\n+++ SCHED CLASSES (GenSchedClassTables) +++\n");
for (const CodeGenSchedClass &SC : SchedModels.schedClasses()) {
LLVM_DEBUG(SC.dump(&SchedModels));
- SCTab.resize(SCTab.size() + 1);
- MCSchedClassDesc &SCDesc = SCTab.back();
+ MCSchedClassDesc &SCDesc = SCTab.emplace_back();
// SCDesc.Name is guarded by NDEBUG
SCDesc.NumMicroOps = 0;
SCDesc.BeginGroup = false;
@@ -1649,7 +1647,7 @@ static void collectProcessorIndices(const CodeGenSchedClass &SC,
IdxVec PI;
std::set_union(&T.ProcIndex, &T.ProcIndex + 1, ProcIndices.begin(),
ProcIndices.end(), std::back_inserter(PI));
- ProcIndices.swap(PI);
+ ProcIndices = std::move(PI);
}
}
diff --git a/llvm/utils/TableGen/WebAssemblyDisassemblerEmitter.cpp b/llvm/utils/TableGen/WebAssemblyDisassemblerEmitter.cpp
index 2cf86d3..928129f 100644
--- a/llvm/utils/TableGen/WebAssemblyDisassemblerEmitter.cpp
+++ b/llvm/utils/TableGen/WebAssemblyDisassemblerEmitter.cpp
@@ -80,7 +80,7 @@ void emitWebAssemblyDisassemblerTables(
}
}
// Set this instruction as the one to use.
- CGIP = std::make_pair(I, &CGI);
+ CGIP = std::pair(I, &CGI);
}
OS << "#include \"MCTargetDesc/WebAssemblyMCTargetDesc.h\"\n";
OS << "\n";
diff --git a/llvm/utils/TableGen/X86CompressEVEXTablesEmitter.cpp b/llvm/utils/TableGen/X86CompressEVEXTablesEmitter.cpp
index e4db995..b96d16b 100644
--- a/llvm/utils/TableGen/X86CompressEVEXTablesEmitter.cpp
+++ b/llvm/utils/TableGen/X86CompressEVEXTablesEmitter.cpp
@@ -120,12 +120,11 @@ public:
RecognizableInstrBase OldRI(*OldInst);
// Return false if any of the following fields of does not match.
- if (std::make_tuple(OldRI.IsCodeGenOnly, OldRI.OpMap, NewRI.OpPrefix,
- OldRI.HasVEX_4V, OldRI.HasVEX_L, OldRI.HasREX_W,
- OldRI.Form) !=
- std::make_tuple(NewRI.IsCodeGenOnly, NewRI.OpMap, OldRI.OpPrefix,
- NewRI.HasVEX_4V, NewRI.HasVEX_L, NewRI.HasREX_W,
- NewRI.Form))
+ if (std::tuple(OldRI.IsCodeGenOnly, OldRI.OpMap, NewRI.OpPrefix,
+ OldRI.HasVEX_4V, OldRI.HasVEX_L, OldRI.HasREX_W,
+ OldRI.Form) !=
+ std::tuple(NewRI.IsCodeGenOnly, NewRI.OpMap, OldRI.OpPrefix,
+ NewRI.HasVEX_4V, NewRI.HasVEX_L, NewRI.HasREX_W, NewRI.Form))
return false;
for (unsigned I = 0, E = OldInst->Operands.size(); I < E; ++I) {
@@ -219,7 +218,7 @@ void X86CompressEVEXTablesEmitter::run(raw_ostream &OS) {
if (!NewInst)
continue;
- Table.push_back(std::make_pair(Inst, NewInst));
+ Table.push_back(std::pair(Inst, NewInst));
auto Predicates = NewInst->TheDef->getValueAsListOfDefs("Predicates");
auto It = llvm::find_if(Predicates, [](const Record *R) {
StringRef Name = R->getName();
diff --git a/llvm/utils/TableGen/X86DisassemblerTables.cpp b/llvm/utils/TableGen/X86DisassemblerTables.cpp
index 588d9b2..a48b9cf 100644
--- a/llvm/utils/TableGen/X86DisassemblerTables.cpp
+++ b/llvm/utils/TableGen/X86DisassemblerTables.cpp
@@ -845,7 +845,7 @@ void DisassemblerTables::emitInstructionInfo(raw_ostream &o,
for (auto Operand : InstructionSpecifiers[Index].operands) {
OperandEncoding Encoding = (OperandEncoding)Operand.encoding;
OperandType Type = (OperandType)Operand.type;
- OperandList.push_back(std::make_pair(Encoding, Type));
+ OperandList.push_back(std::pair(Encoding, Type));
}
unsigned &N = OperandSets[OperandList];
if (N != 0)
@@ -877,7 +877,7 @@ void DisassemblerTables::emitInstructionInfo(raw_ostream &o,
for (auto Operand : InstructionSpecifiers[index].operands) {
OperandEncoding Encoding = (OperandEncoding)Operand.encoding;
OperandType Type = (OperandType)Operand.type;
- OperandList.push_back(std::make_pair(Encoding, Type));
+ OperandList.push_back(std::pair(Encoding, Type));
}
o.indent(i * 2) << (OperandSets[OperandList] - 1) << ",\n";
diff --git a/llvm/utils/TableGen/X86FoldTablesEmitter.cpp b/llvm/utils/TableGen/X86FoldTablesEmitter.cpp
index 44c2817..1319042 100644
--- a/llvm/utils/TableGen/X86FoldTablesEmitter.cpp
+++ b/llvm/utils/TableGen/X86FoldTablesEmitter.cpp
@@ -171,8 +171,8 @@ class X86FoldTablesEmitter {
assert(LHS && RHS && "LHS and RHS shouldn't be nullptr");
const auto &D1 = *LHS->TheDef;
const auto &D2 = *RHS->TheDef;
- return std::make_tuple(!D1.getValueAsBit("isPseudo"), D1.getName()) <
- std::make_tuple(!D2.getValueAsBit("isPseudo"), D2.getName());
+ return std::tuple(!D1.getValueAsBit("isPseudo"), D1.getName()) <
+ std::tuple(!D2.getValueAsBit("isPseudo"), D2.getName());
}
};
@@ -372,22 +372,20 @@ public:
return false;
// Return false if any of the following fields of does not match.
- if (std::make_tuple(RegRI.Encoding, RegRI.Opcode, RegRI.OpPrefix,
- RegRI.OpMap, RegRI.OpSize, RegRI.AdSize, RegRI.HasREX_W,
- RegRI.HasVEX_4V, RegRI.HasVEX_L, RegRI.IgnoresVEX_L,
- RegRI.IgnoresW, RegRI.HasEVEX_K, RegRI.HasEVEX_KZ,
- RegRI.HasEVEX_L2, RegRI.HasEVEX_NF,
- RegRec->getValueAsBit("hasEVEX_RC"),
- RegRec->getValueAsBit("hasLockPrefix"),
- RegRec->getValueAsBit("hasNoTrackPrefix")) !=
- std::make_tuple(MemRI.Encoding, MemRI.Opcode, MemRI.OpPrefix,
- MemRI.OpMap, MemRI.OpSize, MemRI.AdSize, MemRI.HasREX_W,
- MemRI.HasVEX_4V, MemRI.HasVEX_L, MemRI.IgnoresVEX_L,
- MemRI.IgnoresW, MemRI.HasEVEX_K, MemRI.HasEVEX_KZ,
- MemRI.HasEVEX_L2, MemRI.HasEVEX_NF,
- MemRec->getValueAsBit("hasEVEX_RC"),
- MemRec->getValueAsBit("hasLockPrefix"),
- MemRec->getValueAsBit("hasNoTrackPrefix")))
+ if (std::tuple(RegRI.Encoding, RegRI.Opcode, RegRI.OpPrefix, RegRI.OpMap,
+ RegRI.OpSize, RegRI.AdSize, RegRI.HasREX_W, RegRI.HasVEX_4V,
+ RegRI.HasVEX_L, RegRI.IgnoresVEX_L, RegRI.IgnoresW,
+ RegRI.HasEVEX_K, RegRI.HasEVEX_KZ, RegRI.HasEVEX_L2,
+ RegRI.HasEVEX_NF, RegRec->getValueAsBit("hasEVEX_RC"),
+ RegRec->getValueAsBit("hasLockPrefix"),
+ RegRec->getValueAsBit("hasNoTrackPrefix")) !=
+ std::tuple(MemRI.Encoding, MemRI.Opcode, MemRI.OpPrefix, MemRI.OpMap,
+ MemRI.OpSize, MemRI.AdSize, MemRI.HasREX_W, MemRI.HasVEX_4V,
+ MemRI.HasVEX_L, MemRI.IgnoresVEX_L, MemRI.IgnoresW,
+ MemRI.HasEVEX_K, MemRI.HasEVEX_KZ, MemRI.HasEVEX_L2,
+ MemRI.HasEVEX_NF, MemRec->getValueAsBit("hasEVEX_RC"),
+ MemRec->getValueAsBit("hasLockPrefix"),
+ MemRec->getValueAsBit("hasNoTrackPrefix")))
return false;
// Make sure the sizes of the operands of both instructions suit each other.
diff --git a/llvm/utils/gn/secondary/clang/lib/AST/BUILD.gn b/llvm/utils/gn/secondary/clang/lib/AST/BUILD.gn
index ff79319..14cf759 100644
--- a/llvm/utils/gn/secondary/clang/lib/AST/BUILD.gn
+++ b/llvm/utils/gn/secondary/clang/lib/AST/BUILD.gn
@@ -141,6 +141,7 @@ static_library("AST") {
"StmtCXX.cpp",
"StmtIterator.cpp",
"StmtObjC.cpp",
+ "StmtOpenACC.cpp",
"StmtOpenMP.cpp",
"StmtPrinter.cpp",
"StmtProfile.cpp",
diff --git a/llvm/utils/gn/secondary/clang/lib/Frontend/BUILD.gn b/llvm/utils/gn/secondary/clang/lib/Frontend/BUILD.gn
index 948d140..5c4d7e1 100644
--- a/llvm/utils/gn/secondary/clang/lib/Frontend/BUILD.gn
+++ b/llvm/utils/gn/secondary/clang/lib/Frontend/BUILD.gn
@@ -8,6 +8,7 @@ static_library("Frontend") {
"//clang/lib/Basic",
"//clang/lib/Driver",
"//clang/lib/Edit",
+ "//clang/lib/InstallAPI",
"//clang/lib/Lex",
"//clang/lib/Parse",
"//clang/lib/Sema",
@@ -18,6 +19,7 @@ static_library("Frontend") {
"//llvm/lib/ProfileData",
"//llvm/lib/Support",
"//llvm/lib/TargetParser",
+ "//llvm/lib/TextAPI",
]
sources = [
"ASTConsumers.cpp",
@@ -38,6 +40,7 @@ static_library("Frontend") {
"InitPreprocessor.cpp",
"InterfaceStubFunctionsConsumer.cpp",
"LayoutOverrideSource.cpp",
+ "InstallAPIConsumer.cpp",
"LogDiagnosticPrinter.cpp",
"ModuleDependencyCollector.cpp",
"MultiplexConsumer.cpp",
diff --git a/llvm/utils/gn/secondary/clang/lib/InstallAPI/BUILD.gn b/llvm/utils/gn/secondary/clang/lib/InstallAPI/BUILD.gn
new file mode 100644
index 0000000..4d79ac8
--- /dev/null
+++ b/llvm/utils/gn/secondary/clang/lib/InstallAPI/BUILD.gn
@@ -0,0 +1,10 @@
+static_library("InstallAPI") {
+ output_name = "clangInstallAPI"
+ configs += [ "//llvm/utils/gn/build:clang_code" ]
+ deps = [
+ "//clang/lib/AST",
+ "//llvm/lib/Support",
+ "//llvm/lib/TextAPI",
+ ]
+ sources = [ "Context.cpp" ]
+}
diff --git a/llvm/utils/gn/secondary/clang/test/BUILD.gn b/llvm/utils/gn/secondary/clang/test/BUILD.gn
index 3e19ee5d..c7df803 100644
--- a/llvm/utils/gn/secondary/clang/test/BUILD.gn
+++ b/llvm/utils/gn/secondary/clang/test/BUILD.gn
@@ -187,6 +187,7 @@ group("test") {
"//llvm/tools/llvm-profdata",
"//llvm/tools/llvm-rc:symlinks",
"//llvm/tools/llvm-readobj:symlinks",
+ "//llvm/tools/llvm-readtapi:symlinks",
"//llvm/tools/llvm-symbolizer:symlinks",
"//llvm/tools/opt",
"//llvm/tools/yaml2obj",
diff --git a/llvm/utils/gn/secondary/libcxx/include/BUILD.gn b/llvm/utils/gn/secondary/libcxx/include/BUILD.gn
index 45125ce..b7c7de9 100644
--- a/llvm/utils/gn/secondary/libcxx/include/BUILD.gn
+++ b/llvm/utils/gn/secondary/libcxx/include/BUILD.gn
@@ -188,6 +188,7 @@ if (current_toolchain == default_toolchain) {
"__algorithm/ranges_binary_search.h",
"__algorithm/ranges_clamp.h",
"__algorithm/ranges_contains.h",
+ "__algorithm/ranges_contains_subrange.h",
"__algorithm/ranges_copy.h",
"__algorithm/ranges_copy_backward.h",
"__algorithm/ranges_copy_if.h",
diff --git a/llvm/utils/gn/secondary/lld/ELF/BUILD.gn b/llvm/utils/gn/secondary/lld/ELF/BUILD.gn
index bd4a4f5..d903725 100644
--- a/llvm/utils/gn/secondary/lld/ELF/BUILD.gn
+++ b/llvm/utils/gn/secondary/lld/ELF/BUILD.gn
@@ -39,6 +39,7 @@ static_library("ELF") {
"Arch/PPC64.cpp",
"Arch/RISCV.cpp",
"Arch/SPARCV9.cpp",
+ "Arch/SystemZ.cpp",
"Arch/X86.cpp",
"Arch/X86_64.cpp",
"CallGraphSort.cpp",
diff --git a/llvm/utils/gn/secondary/lldb/tools/lldb-dap/BUILD.gn b/llvm/utils/gn/secondary/lldb/tools/lldb-dap/BUILD.gn
index d8292df..98c2068 100644
--- a/llvm/utils/gn/secondary/lldb/tools/lldb-dap/BUILD.gn
+++ b/llvm/utils/gn/secondary/lldb/tools/lldb-dap/BUILD.gn
@@ -38,6 +38,7 @@ executable("lldb-dap") {
# FIXME: rpath/install_name stuff on macOS for framework on macOS
sources = [
+ "Breakpoint.cpp",
"BreakpointBase.cpp",
"DAP.cpp",
"ExceptionBreakpoint.cpp",
diff --git a/llvm/utils/gn/secondary/llvm/lib/Support/BUILD.gn b/llvm/utils/gn/secondary/llvm/lib/Support/BUILD.gn
index 472c3e43..6caad81 100644
--- a/llvm/utils/gn/secondary/llvm/lib/Support/BUILD.gn
+++ b/llvm/utils/gn/secondary/llvm/lib/Support/BUILD.gn
@@ -77,6 +77,7 @@ static_library("Support") {
"ELFAttributes.cpp",
"Error.cpp",
"ErrorHandling.cpp",
+ "ExponentialBackoff.cpp",
"ExtensibleRTTI.cpp",
"FileCollector.cpp",
"FileOutputBuffer.cpp",
diff --git a/llvm/utils/gn/secondary/llvm/unittests/Support/BUILD.gn b/llvm/utils/gn/secondary/llvm/unittests/Support/BUILD.gn
index c6d40ae..7a152fd 100644
--- a/llvm/utils/gn/secondary/llvm/unittests/Support/BUILD.gn
+++ b/llvm/utils/gn/secondary/llvm/unittests/Support/BUILD.gn
@@ -41,6 +41,7 @@ unittest("SupportTests") {
"ErrnoTest.cpp",
"ErrorOrTest.cpp",
"ErrorTest.cpp",
+ "ExponentialBackoffTest.cpp",
"ExtensibleRTTITest.cpp",
"FSUniqueIDTest.cpp",
"FileCollectorTest.cpp",
diff --git a/mlir/docs/TargetLLVMIR.md b/mlir/docs/TargetLLVMIR.md
index 27a399c..df281f6 100644
--- a/mlir/docs/TargetLLVMIR.md
+++ b/mlir/docs/TargetLLVMIR.md
@@ -336,7 +336,7 @@ func.func @bar() {
// is transformed into
llvm.func @foo(%arg0: i32, %arg1: i64) -> !llvm.struct<(i32, i64)> {
- // insert the vales into a structure
+ // insert the values into a structure
%0 = llvm.mlir.undef : !llvm.struct<(i32, i64)>
%1 = llvm.insertvalue %arg0, %0[0] : !llvm.struct<(i32, i64)>
%2 = llvm.insertvalue %arg1, %1[1] : !llvm.struct<(i32, i64)>
@@ -349,8 +349,8 @@ llvm.func @bar() {
%1 = llvm.mlir.constant(17 : i64) : i64
// call and extract the values from the structure
- %2 = llvm.call @bar(%0, %1)
- : (i32, i32) -> !llvm.struct<(i32, i64)>
+ %2 = llvm.call @foo(%0, %1)
+ : (i32, i64) -> !llvm.struct<(i32, i64)>
%3 = llvm.extractvalue %2[0] : !llvm.struct<(i32, i64)>
%4 = llvm.extractvalue %2[1] : !llvm.struct<(i32, i64)>
diff --git a/mlir/include/mlir-c/BuiltinTypes.h b/mlir/include/mlir-c/BuiltinTypes.h
index 881b6da..99c5e3f 100644
--- a/mlir/include/mlir-c/BuiltinTypes.h
+++ b/mlir/include/mlir-c/BuiltinTypes.h
@@ -73,6 +73,12 @@ MLIR_CAPI_EXPORTED MlirType mlirIndexTypeGet(MlirContext ctx);
// Floating-point types.
//===----------------------------------------------------------------------===//
+/// Checks whether the given type is a floating-point type.
+MLIR_CAPI_EXPORTED bool mlirTypeIsAFloat(MlirType type);
+
+/// Returns the bitwidth of a floating-point type.
+MLIR_CAPI_EXPORTED unsigned mlirFloatTypeGetWidth(MlirType type);
+
/// Returns the typeID of an Float8E5M2 type.
MLIR_CAPI_EXPORTED MlirTypeID mlirFloat8E5M2TypeGetTypeID(void);
diff --git a/mlir/include/mlir-c/Dialect/LLVM.h b/mlir/include/mlir-c/Dialect/LLVM.h
index 72701a8..ac216b0 100644
--- a/mlir/include/mlir-c/Dialect/LLVM.h
+++ b/mlir/include/mlir-c/Dialect/LLVM.h
@@ -34,11 +34,70 @@ MLIR_CAPI_EXPORTED MlirType
mlirLLVMFunctionTypeGet(MlirType resultType, intptr_t nArgumentTypes,
MlirType const *argumentTypes, bool isVarArg);
-/// Creates an LLVM literal (unnamed) struct type.
+/// Returns `true` if the type is an LLVM dialect struct type.
+MLIR_CAPI_EXPORTED bool mlirTypeIsALLVMStructType(MlirType type);
+
+/// Returns `true` if the type is a literal (unnamed) LLVM struct type.
+MLIR_CAPI_EXPORTED bool mlirLLVMStructTypeIsLiteral(MlirType type);
+
+/// Returns the number of fields in the struct. Asserts if the struct is opaque
+/// or not yet initialized.
+MLIR_CAPI_EXPORTED intptr_t mlirLLVMStructTypeGetNumElementTypes(MlirType type);
+
+/// Returns the `positions`-th field of the struct. Asserts if the struct is
+/// opaque, not yet initialized or if the position is out of range.
+MLIR_CAPI_EXPORTED MlirType mlirLLVMStructTypeGetElementType(MlirType type,
+ intptr_t position);
+
+/// Returns `true` if the struct is packed.
+MLIR_CAPI_EXPORTED bool mlirLLVMStructTypeIsPacked(MlirType type);
+
+/// Returns the identifier of the identified struct. Asserts that the struct is
+/// identified, i.e., not literal.
+MLIR_CAPI_EXPORTED MlirStringRef mlirLLVMStructTypeGetIdentifier(MlirType type);
+
+/// Returns `true` is the struct is explicitly opaque (will not have a body) or
+/// uninitialized (will eventually have a body).
+MLIR_CAPI_EXPORTED bool mlirLLVMStructTypeIsOpaque(MlirType type);
+
+/// Creates an LLVM literal (unnamed) struct type. This may assert if the fields
+/// have types not compatible with the LLVM dialect. For a graceful failure, use
+/// the checked version.
MLIR_CAPI_EXPORTED MlirType
mlirLLVMStructTypeLiteralGet(MlirContext ctx, intptr_t nFieldTypes,
MlirType const *fieldTypes, bool isPacked);
+/// Creates an LLVM literal (unnamed) struct type if possible. Emits a
+/// diagnostic at the given location and returns null otherwise.
+MLIR_CAPI_EXPORTED MlirType
+mlirLLVMStructTypeLiteralGetChecked(MlirLocation loc, intptr_t nFieldTypes,
+ MlirType const *fieldTypes, bool isPacked);
+
+/// Creates an LLVM identified struct type with no body. If a struct type with
+/// this name already exists in the context, returns that type. Use
+/// mlirLLVMStructTypeIdentifiedNewGet to create a fresh struct type,
+/// potentially renaming it. The body should be set separatelty by calling
+/// mlirLLVMStructTypeSetBody, if it isn't set already.
+MLIR_CAPI_EXPORTED MlirType mlirLLVMStructTypeIdentifiedGet(MlirContext ctx,
+ MlirStringRef name);
+
+/// Creates an LLVM identified struct type with no body and a name starting with
+/// the given prefix. If a struct with the exact name as the given prefix
+/// already exists, appends an unspecified suffix to the name so that the name
+/// is unique in context.
+MLIR_CAPI_EXPORTED MlirType mlirLLVMStructTypeIdentifiedNewGet(
+ MlirContext ctx, MlirStringRef name, intptr_t nFieldTypes,
+ MlirType const *fieldTypes, bool isPacked);
+
+MLIR_CAPI_EXPORTED MlirType mlirLLVMStructTypeOpaqueGet(MlirContext ctx,
+ MlirStringRef name);
+
+/// Sets the body of the identified struct if it hasn't been set yet. Returns
+/// whether the operation was successful.
+MLIR_CAPI_EXPORTED MlirLogicalResult
+mlirLLVMStructTypeSetBody(MlirType structType, intptr_t nFieldTypes,
+ MlirType const *fieldTypes, bool isPacked);
+
#ifdef __cplusplus
}
#endif
diff --git a/mlir/include/mlir-c/Dialect/SparseTensor.h b/mlir/include/mlir-c/Dialect/SparseTensor.h
index d549f5d..898d2f1 100644
--- a/mlir/include/mlir-c/Dialect/SparseTensor.h
+++ b/mlir/include/mlir-c/Dialect/SparseTensor.h
@@ -27,23 +27,19 @@ MLIR_DECLARE_CAPI_DIALECT_REGISTRATION(SparseTensor, sparse_tensor);
/// file.
typedef uint64_t MlirSparseTensorLevelType;
-enum MlirBaseSparseTensorLevelType {
+enum MlirSparseTensorLevelFormat {
MLIR_SPARSE_TENSOR_LEVEL_DENSE = 0x000000010000,
MLIR_SPARSE_TENSOR_LEVEL_COMPRESSED = 0x000000020000,
- MLIR_SPARSE_TENSOR_LEVEL_COMPRESSED_NU = 0x000000020001,
- MLIR_SPARSE_TENSOR_LEVEL_COMPRESSED_NO = 0x000000020002,
- MLIR_SPARSE_TENSOR_LEVEL_COMPRESSED_NU_NO = 0x000000020003,
MLIR_SPARSE_TENSOR_LEVEL_SINGLETON = 0x000000040000,
- MLIR_SPARSE_TENSOR_LEVEL_SINGLETON_NU = 0x000000040001,
- MLIR_SPARSE_TENSOR_LEVEL_SINGLETON_NO = 0x000000040002,
- MLIR_SPARSE_TENSOR_LEVEL_SINGLETON_NU_NO = 0x000000040003,
MLIR_SPARSE_TENSOR_LEVEL_LOOSE_COMPRESSED = 0x000000080000,
- MLIR_SPARSE_TENSOR_LEVEL_LOOSE_COMPRESSED_NU = 0x000000080001,
- MLIR_SPARSE_TENSOR_LEVEL_LOOSE_COMPRESSED_NO = 0x000000080002,
- MLIR_SPARSE_TENSOR_LEVEL_LOOSE_COMPRESSED_NU_NO = 0x000000080003,
MLIR_SPARSE_TENSOR_LEVEL_N_OUT_OF_M = 0x000000100000,
};
+enum MlirSparseTensorLevelPropertyNondefault {
+ MLIR_SPARSE_PROPERTY_NON_UNIQUE = 0x0001,
+ MLIR_SPARSE_PROPERTY_NON_ORDERED = 0x0002,
+};
+
//===----------------------------------------------------------------------===//
// SparseTensorEncodingAttr
//===----------------------------------------------------------------------===//
@@ -66,6 +62,10 @@ mlirSparseTensorEncodingGetLvlRank(MlirAttribute attr);
MLIR_CAPI_EXPORTED MlirSparseTensorLevelType
mlirSparseTensorEncodingAttrGetLvlType(MlirAttribute attr, intptr_t lvl);
+/// Returns a specified level-format of the `sparse_tensor.encoding` attribute.
+MLIR_CAPI_EXPORTED enum MlirSparseTensorLevelFormat
+mlirSparseTensorEncodingAttrGetLvlFmt(MlirAttribute attr, intptr_t lvl);
+
/// Returns the dimension-to-level mapping of the `sparse_tensor.encoding`
/// attribute.
MLIR_CAPI_EXPORTED MlirAffineMap
@@ -92,7 +92,9 @@ mlirSparseTensorEncodingAttrGetStructuredM(MlirSparseTensorLevelType lvlType);
MLIR_CAPI_EXPORTED MlirSparseTensorLevelType
mlirSparseTensorEncodingAttrBuildLvlType(
- enum MlirBaseSparseTensorLevelType lvlType, unsigned n, unsigned m);
+ enum MlirSparseTensorLevelFormat lvlFmt,
+ const enum MlirSparseTensorLevelPropertyNondefault *properties,
+ unsigned propSize, unsigned n, unsigned m);
#ifdef __cplusplus
}
diff --git a/mlir/include/mlir/Analysis/Presburger/Barvinok.h b/mlir/include/mlir/Analysis/Presburger/Barvinok.h
index b70ec33..f730a07 100644
--- a/mlir/include/mlir/Analysis/Presburger/Barvinok.h
+++ b/mlir/include/mlir/Analysis/Presburger/Barvinok.h
@@ -27,7 +27,9 @@
#include "mlir/Analysis/Presburger/GeneratingFunction.h"
#include "mlir/Analysis/Presburger/IntegerRelation.h"
#include "mlir/Analysis/Presburger/Matrix.h"
+#include "mlir/Analysis/Presburger/PresburgerRelation.h"
#include "mlir/Analysis/Presburger/QuasiPolynomial.h"
+#include <bitset>
#include <optional>
namespace mlir {
@@ -47,16 +49,22 @@ using PolyhedronV = IntMatrix;
using ConeH = PolyhedronH;
using ConeV = PolyhedronV;
-inline ConeH defineHRep(int numVars) {
+inline PolyhedronH defineHRep(int numVars, int numSymbols = 0) {
// We don't distinguish between domain and range variables, so
// we set the number of domain variables as 0 and the number of
// range variables as the number of actual variables.
- // There are no symbols (we don't work with parametric cones) and no local
- // (existentially quantified) variables.
+ //
+ // numSymbols is the number of parameters.
+ //
+ // There are no local (existentially quantified) variables.
+ //
+ // The number of symbols is the number of parameters. By default, we consider
+ // nonparametric polyhedra.
+ //
// Once the cone is defined, we use `addInequality()` to set inequalities.
- return ConeH(PresburgerSpace::getSetSpace(/*numDims=*/numVars,
- /*numSymbols=*/0,
- /*numLocals=*/0));
+ return PolyhedronH(PresburgerSpace::getSetSpace(/*numDims=*/numVars,
+ /*numSymbols=*/numSymbols,
+ /*numLocals=*/0));
}
/// Get the index of a cone, i.e., the volume of the parallelepiped
@@ -81,8 +89,38 @@ ConeH getDual(ConeV cone);
/// Compute the generating function for a unimodular cone.
/// The input cone must be unimodular; it assert-fails otherwise.
-GeneratingFunction unimodularConeGeneratingFunction(ParamPoint vertex, int sign,
- ConeH cone);
+GeneratingFunction computeUnimodularConeGeneratingFunction(ParamPoint vertex,
+ int sign,
+ ConeH cone);
+
+/// Find the solution of a set of equations that express affine constraints
+/// between a set of variables and a set of parameters. The solution expresses
+/// each variable as an affine function of the parameters.
+///
+/// If there is no solution, return null.
+std::optional<ParamPoint> solveParametricEquations(FracMatrix equations);
+
+/// Given a list of possibly intersecting regions (PresburgerSet) and the
+/// generating functions active in each region, produce a pairwise disjoint
+/// list of regions (chambers) and identify the generating function of the
+/// polytope in each chamber.
+///
+/// "Disjoint" here means that the intersection of two chambers is no full-
+/// dimensional.
+///
+/// The returned list partitions the universe into parts depending on which
+/// subset of GFs is active there, and gives the sum of active GFs for each
+/// part.
+std::vector<std::pair<PresburgerSet, GeneratingFunction>>
+computeChamberDecomposition(
+ unsigned numSymbols, ArrayRef<std::pair<PresburgerSet, GeneratingFunction>>
+ regionsAndGeneratingFunctions);
+
+/// Compute the generating function corresponding to a polytope.
+///
+/// All tangent cones of the polytope must be unimodular.
+std::vector<std::pair<PresburgerSet, GeneratingFunction>>
+computePolytopeGeneratingFunction(const PolyhedronH &poly);
/// Find a vector that is not orthogonal to any of the given vectors,
/// i.e., has nonzero dot product with those of the given vectors
diff --git a/mlir/include/mlir/Analysis/Presburger/GeneratingFunction.h b/mlir/include/mlir/Analysis/Presburger/GeneratingFunction.h
index c38eab6..db5b6b6 100644
--- a/mlir/include/mlir/Analysis/Presburger/GeneratingFunction.h
+++ b/mlir/include/mlir/Analysis/Presburger/GeneratingFunction.h
@@ -72,7 +72,7 @@ public:
return denominators;
}
- GeneratingFunction operator+(GeneratingFunction &gf) const {
+ GeneratingFunction operator+(const GeneratingFunction &gf) const {
assert(numParam == gf.getNumParams() &&
"two generating functions with different numbers of parameters "
"cannot be added!");
diff --git a/mlir/include/mlir/Analysis/Presburger/IntegerRelation.h b/mlir/include/mlir/Analysis/Presburger/IntegerRelation.h
index c476a02..27dc382 100644
--- a/mlir/include/mlir/Analysis/Presburger/IntegerRelation.h
+++ b/mlir/include/mlir/Analysis/Presburger/IntegerRelation.h
@@ -711,6 +711,17 @@ public:
/// return `this \ set`.
PresburgerRelation subtract(const PresburgerRelation &set) const;
+ // Remove equalities which have only zero coefficients.
+ void removeTrivialEqualities();
+
+ // Verify whether the relation is full-dimensional, i.e.,
+ // no equality holds for the relation.
+ //
+ // If there are no variables, it always returns true.
+ // If there is at least one variable and the relation is empty, it returns
+ // false.
+ bool isFullDim();
+
void print(raw_ostream &os) const;
void dump() const;
@@ -871,6 +882,26 @@ public:
/*numReservedEqualities=*/0,
/*numReservedCols=*/space.getNumVars() + 1, space) {}
+ /// Constructs a relation with the specified number of dimensions and symbols
+ /// and adds the given inequalities.
+ explicit IntegerPolyhedron(const PresburgerSpace &space,
+ IntMatrix inequalities)
+ : IntegerPolyhedron(space) {
+ for (unsigned i = 0, e = inequalities.getNumRows(); i < e; i++)
+ addInequality(inequalities.getRow(i));
+ }
+
+ /// Constructs a relation with the specified number of dimensions and symbols
+ /// and adds the given inequalities, after normalizing row-wise to integer
+ /// values.
+ explicit IntegerPolyhedron(const PresburgerSpace &space,
+ FracMatrix inequalities)
+ : IntegerPolyhedron(space) {
+ IntMatrix ineqsNormalized = inequalities.normalizeRows();
+ for (unsigned i = 0, e = inequalities.getNumRows(); i < e; i++)
+ addInequality(ineqsNormalized.getRow(i));
+ }
+
/// Construct a set from an IntegerRelation. The relation should have
/// no domain vars.
explicit IntegerPolyhedron(const IntegerRelation &rel)
diff --git a/mlir/include/mlir/Analysis/Presburger/Matrix.h b/mlir/include/mlir/Analysis/Presburger/Matrix.h
index 0d4a593..4484ebc 100644
--- a/mlir/include/mlir/Analysis/Presburger/Matrix.h
+++ b/mlir/include/mlir/Analysis/Presburger/Matrix.h
@@ -20,6 +20,7 @@
#include "llvm/ADT/ArrayRef.h"
#include "llvm/Support/raw_ostream.h"
+#include <bitset>
#include <cassert>
namespace mlir {
@@ -73,6 +74,8 @@ public:
T operator()(unsigned row, unsigned column) const { return at(row, column); }
+ bool operator==(const Matrix<T> &m) const;
+
/// Swap the given columns.
void swapColumns(unsigned column, unsigned otherColumn);
@@ -142,6 +145,9 @@ public:
/// Add `scale` multiples of the rowVec row to the specified row.
void addToRow(unsigned row, ArrayRef<T> rowVec, const T &scale);
+ /// Multiply the specified row by a factor of `scale`.
+ void scaleRow(unsigned row, const T &scale);
+
/// Add `scale` multiples of the source column to the target column.
void addToColumn(unsigned sourceColumn, unsigned targetColumn,
const T &scale);
@@ -156,6 +162,9 @@ public:
/// Negate the specified row.
void negateRow(unsigned row);
+ /// Negate the entire matrix.
+ void negateMatrix();
+
/// The given vector is interpreted as a row vector v. Post-multiply v with
/// this matrix, say M, and return vM.
SmallVector<T, 8> preMultiplyWithRow(ArrayRef<T> rowVec) const;
@@ -184,6 +193,19 @@ public:
// Transpose the matrix without modifying it.
Matrix<T> transpose() const;
+ // Copy the cells in the intersection of
+ // the rows between `fromRows` and `toRows` and
+ // the columns between `fromColumns` and `toColumns`, both inclusive.
+ Matrix<T> getSubMatrix(unsigned fromRow, unsigned toRow, unsigned fromColumn,
+ unsigned toColumn) const;
+
+ /// Split the rows of a matrix into two matrices according to which bits are
+ /// 1 and which are 0 in a given bitset.
+ ///
+ /// The first matrix returned has the rows corresponding to 1 and the second
+ /// corresponding to 2.
+ std::pair<Matrix<T>, Matrix<T>> splitByBitset(ArrayRef<int> indicator);
+
/// Print the matrix.
void print(raw_ostream &os) const;
void dump() const;
@@ -297,6 +319,10 @@ public:
// paper](https://www.cs.cmu.edu/~avrim/451f11/lectures/lect1129_LLL.pdf)
// calls `y`, usually 3/4.
void LLL(Fraction delta);
+
+ // Multiply each row of the matrix by the LCM of the denominators, thereby
+ // converting it to an integer matrix.
+ IntMatrix normalizeRows() const;
};
} // namespace presburger
diff --git a/mlir/include/mlir/Analysis/Presburger/PresburgerRelation.h b/mlir/include/mlir/Analysis/Presburger/PresburgerRelation.h
index c6b00ec..9634df6 100644
--- a/mlir/include/mlir/Analysis/Presburger/PresburgerRelation.h
+++ b/mlir/include/mlir/Analysis/Presburger/PresburgerRelation.h
@@ -217,6 +217,10 @@ public:
/// redundencies.
PresburgerRelation simplify() const;
+ /// Return whether the given PresburgerRelation is full-dimensional. By full-
+ /// dimensional we mean that it is not flat along any dimension.
+ bool isFullDim() const;
+
/// Print the set's internal state.
void print(raw_ostream &os) const;
void dump() const;
diff --git a/mlir/include/mlir/Analysis/Presburger/Simplex.h b/mlir/include/mlir/Analysis/Presburger/Simplex.h
index 9482f69b..7ee74c1 100644
--- a/mlir/include/mlir/Analysis/Presburger/Simplex.h
+++ b/mlir/include/mlir/Analysis/Presburger/Simplex.h
@@ -771,6 +771,12 @@ public:
std::pair<MaybeOptimum<MPInt>, MaybeOptimum<MPInt>>
computeIntegerBounds(ArrayRef<MPInt> coeffs);
+ /// Check if the simplex takes only one rational value along the
+ /// direction of `coeffs`.
+ ///
+ /// `this` must be nonempty.
+ bool isFlatAlong(ArrayRef<MPInt> coeffs);
+
/// Returns true if the polytope is unbounded, i.e., extends to infinity in
/// some direction. Otherwise, returns false.
bool isUnbounded();
diff --git a/mlir/include/mlir/Analysis/Presburger/Utils.h b/mlir/include/mlir/Analysis/Presburger/Utils.h
index e6d29e4..38262a6 100644
--- a/mlir/include/mlir/Analysis/Presburger/Utils.h
+++ b/mlir/include/mlir/Analysis/Presburger/Utils.h
@@ -286,6 +286,8 @@ Fraction dotProduct(ArrayRef<Fraction> a, ArrayRef<Fraction> b);
std::vector<Fraction> multiplyPolynomials(ArrayRef<Fraction> a,
ArrayRef<Fraction> b);
+bool isRangeZero(ArrayRef<Fraction> arr);
+
} // namespace presburger
} // namespace mlir
diff --git a/mlir/include/mlir/Dialect/AMDGPU/CMakeLists.txt b/mlir/include/mlir/Dialect/AMDGPU/CMakeLists.txt
index 9f57627..660deb2 100644
--- a/mlir/include/mlir/Dialect/AMDGPU/CMakeLists.txt
+++ b/mlir/include/mlir/Dialect/AMDGPU/CMakeLists.txt
@@ -1,2 +1,3 @@
add_subdirectory(IR)
+add_subdirectory(TransformOps)
add_subdirectory(Transforms)
diff --git a/mlir/include/mlir/Dialect/AMDGPU/TransformOps/AMDGPUTransformOps.h b/mlir/include/mlir/Dialect/AMDGPU/TransformOps/AMDGPUTransformOps.h
new file mode 100644
index 0000000..4fb4ab0
--- /dev/null
+++ b/mlir/include/mlir/Dialect/AMDGPU/TransformOps/AMDGPUTransformOps.h
@@ -0,0 +1,48 @@
+//===- AMDGPUTransformOps.h - AMDGPU transform ops ---------------*- C++-*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef MLIR_DIALECT_AMDGPU_TRANSFORMOPS_AMDGPUTRANSFORMOPS_H
+#define MLIR_DIALECT_AMDGPU_TRANSFORMOPS_AMDGPUTRANSFORMOPS_H
+
+#include "mlir/Dialect/Func/IR/FuncOps.h"
+#include "mlir/Dialect/Transform/IR/TransformAttrs.h"
+#include "mlir/Dialect/Transform/IR/TransformDialect.h"
+#include "mlir/Dialect/Transform/IR/TransformInterfaces.h"
+#include "mlir/IR/OpImplementation.h"
+#include "mlir/IR/RegionKindInterface.h"
+
+namespace mlir {
+namespace transform {
+class TransformHandleTypeInterface;
+} // namespace transform
+} // namespace mlir
+
+namespace mlir {
+class DialectRegistry;
+
+namespace linalg {
+class LinalgOp;
+} // namespace linalg
+
+namespace scf {
+class ForOp;
+} // namespace scf
+
+namespace amdgpu {
+void registerTransformDialectExtension(DialectRegistry &registry);
+} // namespace amdgpu
+} // namespace mlir
+
+//===----------------------------------------------------------------------===//
+// AMDGPU Transform Operations
+//===----------------------------------------------------------------------===//
+
+#define GET_OP_CLASSES
+#include "mlir/Dialect/AMDGPU/TransformOps/AMDGPUTransformOps.h.inc"
+
+#endif // MLIR_DIALECT_AMDGPU_TRANSFORMOPS_AMDGPUTRANSFORMOPS_H
diff --git a/mlir/include/mlir/Dialect/AMDGPU/TransformOps/AMDGPUTransformOps.td b/mlir/include/mlir/Dialect/AMDGPU/TransformOps/AMDGPUTransformOps.td
new file mode 100644
index 0000000..23873d8
--- /dev/null
+++ b/mlir/include/mlir/Dialect/AMDGPU/TransformOps/AMDGPUTransformOps.td
@@ -0,0 +1,45 @@
+//===- AMDGPUTransformOps.td - AMDGPU transform ops --------*- tablegen -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef AMDGPU_TRANSFORM_OPS
+#define AMDGPU_TRANSFORM_OPS
+
+include "mlir/Dialect/Transform/IR/TransformAttrs.td"
+include "mlir/Dialect/Transform/IR/TransformDialect.td"
+include "mlir/Dialect/Transform/IR/TransformInterfaces.td"
+include "mlir/Dialect/Transform/IR/TransformTypes.td"
+include "mlir/Interfaces/SideEffectInterfaces.td"
+
+//===----------------------------------------------------------------------===//
+// ApplyOptimizeSharedMemoryReadsAndWritesOp
+//===----------------------------------------------------------------------===//
+
+def ApplyOptimizeSharedMemoryReadsAndWritesOp :
+ Op<Transform_Dialect, "amdgpu.optimize_shared_memory_reads_and_writes",
+ [DeclareOpInterfaceMethods<MemoryEffectsOpInterface>,
+ TransformOpInterface, TransformEachOpTrait]> {
+ let summary = "Reduce shared memory bank conflicts";
+ let description = [{ This op attempts to optimize GPU Shared memory
+ reads/writes with the goal of avoiding bank conflicts.
+ }];
+
+ let arguments = (ins TransformHandleTypeInterface:$target);
+ let results = (outs);
+
+ let assemblyFormat = "$target attr-dict `:` functional-type(operands, results)";
+
+ let extraClassDeclaration = [{
+ ::mlir::DiagnosedSilenceableFailure applyToOne(
+ ::mlir::transform::TransformRewriter &rewriter,
+ ::mlir::func::FuncOp funcOp,
+ ::mlir::transform::ApplyToEachResultList &results,
+ ::mlir::transform::TransformState &state);
+ }];
+}
+
+#endif // AMDGPU_TRANSFORM_OPS
diff --git a/mlir/include/mlir/Dialect/AMDGPU/TransformOps/CMakeLists.txt b/mlir/include/mlir/Dialect/AMDGPU/TransformOps/CMakeLists.txt
new file mode 100644
index 0000000..07bfebc
--- /dev/null
+++ b/mlir/include/mlir/Dialect/AMDGPU/TransformOps/CMakeLists.txt
@@ -0,0 +1,4 @@
+set(LLVM_TARGET_DEFINITIONS AMDGPUTransformOps.td)
+mlir_tablegen(AMDGPUTransformOps.h.inc -gen-op-decls)
+mlir_tablegen(AMDGPUTransformOps.cpp.inc -gen-op-defs)
+add_public_tablegen_target(MLIRAMDGPUTransformOpsIncGen)
diff --git a/mlir/include/mlir/Dialect/AMDGPU/Transforms/Transforms.h b/mlir/include/mlir/Dialect/AMDGPU/Transforms/Transforms.h
index 140bc12..b4e9ad2 100644
--- a/mlir/include/mlir/Dialect/AMDGPU/Transforms/Transforms.h
+++ b/mlir/include/mlir/Dialect/AMDGPU/Transforms/Transforms.h
@@ -14,6 +14,7 @@
#ifndef MLIR_DIALECT_AMDGPU_TRANSFORMS_TRANSFORMS_H_
#define MLIR_DIALECT_AMDGPU_TRANSFORMS_TRANSFORMS_H_
+#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/Operation.h"
#include "mlir/Support/LogicalResult.h"
@@ -48,6 +49,8 @@ namespace amdgpu {
mlir::LogicalResult optimizeSharedMemoryReadsAndWrites(Operation *parentOp,
Value memrefValue);
+void optimizeSharedMemoryReadsAndWritesOp(mlir::func::FuncOp funcOp);
+
} // namespace amdgpu
} // namespace mlir
diff --git a/mlir/include/mlir/Dialect/LLVMIR/NVVMOps.td b/mlir/include/mlir/Dialect/LLVMIR/NVVMOps.td
index 5a75944..8ec8e16 100644
--- a/mlir/include/mlir/Dialect/LLVMIR/NVVMOps.td
+++ b/mlir/include/mlir/Dialect/LLVMIR/NVVMOps.td
@@ -390,6 +390,25 @@ def NVVM_Barrier0Op : NVVM_Op<"barrier0"> {
let assemblyFormat = "attr-dict";
}
+def NVVM_BarrierOp : NVVM_Op<"barrier", [AttrSizedOperandSegments]> {
+ let arguments = (ins
+ Optional<I32>:$barrierId,
+ Optional<I32>:$numberOfThreads);
+ string llvmBuilder = [{
+ if ($numberOfThreads && $barrierId) {
+ createIntrinsicCall(builder, llvm::Intrinsic::nvvm_barrier,
+ {$barrierId, $numberOfThreads});
+ } else if($barrierId) {
+ createIntrinsicCall(builder, llvm::Intrinsic::nvvm_barrier_n,
+ {$barrierId});
+ } else {
+ createIntrinsicCall(builder, llvm::Intrinsic::nvvm_barrier0);
+ }
+ }];
+ let hasVerifier = 1;
+ let assemblyFormat = "(`id` `=` $barrierId^)? (`number_of_threads` `=` $numberOfThreads^)? attr-dict";
+}
+
def NVVM_ClusterArriveOp : NVVM_Op<"cluster.arrive"> {
let arguments = (ins OptionalAttr<UnitAttr>:$aligned);
diff --git a/mlir/include/mlir/Dialect/LLVMIR/ROCDLOps.td b/mlir/include/mlir/Dialect/LLVMIR/ROCDLOps.td
index 638e46a..962c159 100644
--- a/mlir/include/mlir/Dialect/LLVMIR/ROCDLOps.td
+++ b/mlir/include/mlir/Dialect/LLVMIR/ROCDLOps.td
@@ -192,6 +192,22 @@ def ROCDL_BarrierOp : ROCDL_Op<"barrier"> {
let assemblyFormat = "attr-dict";
}
+def ROCDL_SetPrioOp : ROCDL_IntrOp<"s.setprio", [], [], [], 0>,
+ Arguments<(ins I16Attr:$priority)> {
+ let results = (outs);
+ let assemblyFormat = "$priority attr-dict";
+ string llvmBuilder =
+ "createIntrinsicCall(builder, llvm::Intrinsic::amdgcn_s_setprio,builder.getInt16(op.getPriority()));";
+}
+
+def ROCDL_SchedBarrier : ROCDL_IntrOp<"sched.barrier", [], [], [], 0>,
+ Arguments<(ins I32Attr:$mask)> {
+ let results = (outs);
+ let assemblyFormat = "$mask attr-dict";
+ string llvmBuilder =
+ "createIntrinsicCall(builder, llvm::Intrinsic::amdgcn_sched_barrier,builder.getInt32(op.getMask()));";
+}
+
//===---------------------------------------------------------------------===//
// Xdlops intrinsics
diff --git a/mlir/include/mlir/Dialect/NVGPU/IR/NVGPU.td b/mlir/include/mlir/Dialect/NVGPU/IR/NVGPU.td
index a0c0d4c..dda8f31 100644
--- a/mlir/include/mlir/Dialect/NVGPU/IR/NVGPU.td
+++ b/mlir/include/mlir/Dialect/NVGPU/IR/NVGPU.td
@@ -609,14 +609,16 @@ def NVGPU_MBarrierTryWaitParityOp : NVGPU_Op<"mbarrier.try_wait.parity", []> {
phase. Suspended thread resumes execution when the specified phase completes
OR before the phase completes following a system-dependent time limit.
+ The `$phaseParity` specifies either even phase (0) or odd phase (1) to
+ wait.
+
Example:
```mlir
- nvgpu.mbarrier.try_wait.parity %barrier, %phase, %ticks : !nvgpu.mbarrier.barrier<memorySpace = #gpu.address_space<workgroup>>
+ nvgpu.mbarrier.try_wait.parity %barrier, %phaseParity, %ticks : !nvgpu.mbarrier.barrier<memorySpace = #gpu.address_space<workgroup>>
```
-
}];
- let arguments = (ins NVGPU_MBarrierGroup:$barriers, Index:$phase, Index:$ticks, Index:$mbarId);
- let assemblyFormat = "$barriers `[` $mbarId `]` `,` $phase `,` $ticks attr-dict `:` type($barriers)";
+ let arguments = (ins NVGPU_MBarrierGroup:$barriers, I1:$phaseParity, Index:$ticks, Index:$mbarId);
+ let assemblyFormat = "$barriers `[` $mbarId `]` `,` $phaseParity `,` $ticks attr-dict `:` type($barriers)";
}
def NVGPU_TmaPrefetchOp : NVGPU_Op<"tma.prefetch.descriptor", []> {
diff --git a/mlir/include/mlir/Dialect/OpenMP/OpenMPOps.td b/mlir/include/mlir/Dialect/OpenMP/OpenMPOps.td
index 5d84217..0adf186 100644
--- a/mlir/include/mlir/Dialect/OpenMP/OpenMPOps.td
+++ b/mlir/include/mlir/Dialect/OpenMP/OpenMPOps.td
@@ -134,6 +134,97 @@ def DeclareTargetAttr : OpenMP_Attr<"DeclareTarget", "declaretarget"> {
}
//===----------------------------------------------------------------------===//
+// 2.19.4 Data-Sharing Attribute Clauses
+//===----------------------------------------------------------------------===//
+
+def DataSharingTypePrivate : I32EnumAttrCase<"Private", 0, "private">;
+def DataSharingTypeFirstPrivate : I32EnumAttrCase<"FirstPrivate", 1, "firstprivate">;
+
+def DataSharingClauseType : I32EnumAttr<
+ "DataSharingClauseType",
+ "Type of a data-sharing clause",
+ [DataSharingTypePrivate, DataSharingTypeFirstPrivate]> {
+ let genSpecializedAttr = 0;
+ let cppNamespace = "::mlir::omp";
+}
+
+def DataSharingClauseTypeAttr : EnumAttr<
+ OpenMP_Dialect, DataSharingClauseType, "data_sharing_type"> {
+ let assemblyFormat = "`{` `type` `=` $value `}`";
+}
+
+def PrivateClauseOp : OpenMP_Op<"private", [IsolatedFromAbove]> {
+ let summary = "Provides declaration of [first]private logic.";
+ let description = [{
+ This operation provides a declaration of how to implement the
+ [first]privatization of a variable. The dialect users should provide
+ information about how to create an instance of the type in the alloc region
+ and how to initialize the copy from the original item in the copy region.
+
+ Examples:
+ ---------
+ * `private(x)` would be emitted as:
+ ```mlir
+ omp.private {type = private} @x.privatizer : !fir.ref<i32> alloc {
+ ^bb0(%arg0: !fir.ref<i32>):
+ %0 = ... allocate proper memory for the private clone ...
+ omp.yield(%0 : !fir.ref<i32>)
+ }
+ ```
+
+ * `firstprivate(x)` would be emitted as:
+ ```mlir
+ omp.private {type = firstprivate} @x.privatizer : !fir.ref<i32> alloc {
+ ^bb0(%arg0: !fir.ref<i32>):
+ %0 = ... allocate proper memory for the private clone ...
+ omp.yield(%0 : !fir.ref<i32>)
+ } copy {
+ ^bb0(%arg0: !fir.ref<i32>, %arg1: !fir.ref<i32>):
+ // %arg0 is the original host variable. Same as for `alloc`.
+ // %arg1 represents the memory allocated in `alloc`.
+ ... copy from host to the privatized clone ....
+ omp.yield(%arg1 : !fir.ref<i32>)
+ }
+ ```
+
+ There are no restrictions on the body except for:
+ - The `alloc` region has a single argument.
+ - The `copy` region has 2 arguments.
+ - Both regions are terminated by `omp.yield` ops.
+ The above restrictions and other obvious restrictions (e.g. verifying the
+ type of yielded values) are verified by the custom op verifier. The actual
+ contents of the blocks inside both regions are not verified.
+
+ Instances of this op would then be used by ops that model directives that
+ accept data-sharing attribute clauses.
+
+ The $sym_name attribute provides a symbol by which the privatizer op can be
+ referenced by other dialect ops.
+
+ The $type attribute is the type of the value being privatized.
+
+ The $data_sharing_type attribute specifies whether privatizer corresponds
+ to a `private` or a `firstprivate` clause.
+ }];
+
+ let arguments = (ins SymbolNameAttr:$sym_name,
+ TypeAttrOf<AnyType>:$type,
+ DataSharingClauseTypeAttr:$data_sharing_type);
+
+ let regions = (region MinSizedRegion<1>:$alloc_region,
+ AnyRegion:$copy_region);
+
+ let assemblyFormat = [{
+ $data_sharing_type $sym_name `:` $type
+ `alloc` $alloc_region
+ (`copy` $copy_region^)?
+ attr-dict
+ }];
+
+ let hasVerifier = 1;
+}
+
+//===----------------------------------------------------------------------===//
// 2.6 parallel Construct
//===----------------------------------------------------------------------===//
@@ -434,13 +525,11 @@ def WsLoopOp : OpenMP_Op<"wsloop", [AttrSizedOperandSegments,
accumulator variables in `reduction_vars` and symbols referring to reduction
declarations in the `reductions` attribute. Each reduction is identified
by the accumulator it uses and accumulators must not be repeated in the same
- reduction. The `omp.reduction` operation accepts the accumulator and a
- partial value which is considered to be produced by the current loop
- iteration for the given reduction. If multiple values are produced for the
- same accumulator, i.e. there are multiple `omp.reduction`s, the last value
- is taken. The reduction declaration specifies how to combine the values from
- each iteration into the final value, which is available in the accumulator
- after the loop completes.
+ reduction. A private variable corresponding to the accumulator is used in
+ place of the accumulator inside the body of the worksharing-loop. The
+ reduction declaration specifies how to combine the values from each
+ iteration into the final value, which is available in the accumulator after
+ the loop completes.
The optional `schedule_val` attribute specifies the loop schedule for this
loop, determining how the loop is distributed across the parallel threads.
@@ -506,12 +595,9 @@ def WsLoopOp : OpenMP_Op<"wsloop", [AttrSizedOperandSegments,
|`nowait` $nowait
|`ordered` `(` $ordered_val `)`
|`order` `(` custom<ClauseAttr>($order_val) `)`
- |`reduction` `(`
- custom<ReductionVarList>(
- $reduction_vars, type($reduction_vars), $reductions
- ) `)`
- ) `for` custom<LoopControl>($region, $lowerBound, $upperBound, $step,
- type($step), $inclusive) attr-dict
+ ) custom<WsLoop>($region, $lowerBound, $upperBound, $step,
+ type($step), $reduction_vars, type($reduction_vars), $reductions,
+ $inclusive) attr-dict
}];
let hasVerifier = 1;
}
@@ -609,7 +695,7 @@ def SimdLoopOp : OpenMP_Op<"simdloop", [AttrSizedOperandSegments,
def YieldOp : OpenMP_Op<"yield",
[Pure, ReturnLike, Terminator,
ParentOneOf<["WsLoopOp", "ReductionDeclareOp",
- "AtomicUpdateOp", "SimdLoopOp"]>]> {
+ "AtomicUpdateOp", "SimdLoopOp", "PrivateClauseOp"]>]> {
let summary = "loop yield and termination operation";
let description = [{
"omp.yield" yields SSA values from the OpenMP dialect op region and
@@ -690,7 +776,7 @@ def ClauseTaskDependInOut : I32EnumAttrCase<"taskdependinout", 2>;
def ClauseTaskDepend : I32EnumAttr<
"ClauseTaskDepend",
- "task depend clause",
+ "depend clause in a target or task construct",
[ClauseTaskDependIn, ClauseTaskDependOut, ClauseTaskDependInOut]> {
let genSpecializedAttr = 0;
let cppNamespace = "::mlir::omp";
@@ -1356,11 +1442,17 @@ def Target_EnterDataOp: OpenMP_Op<"target_enter_data",
The $map_types specifies the types and modifiers for the map clause.
- TODO: depend clause and map_type_modifier values iterator and mapper.
+ The `depends` and `depend_vars` arguments are variadic lists of values
+ that specify the dependencies of this particular target task in relation to
+ other tasks.
+
+ TODO: map_type_modifier values iterator and mapper.
}];
let arguments = (ins Optional<I1>:$if_expr,
Optional<AnyInteger>:$device,
+ OptionalAttr<TaskDependArrayAttr>:$depends,
+ Variadic<OpenMP_PointerLikeType>:$depend_vars,
UnitAttr:$nowait,
Variadic<AnyType>:$map_operands);
@@ -1369,6 +1461,7 @@ def Target_EnterDataOp: OpenMP_Op<"target_enter_data",
| `device` `(` $device `:` type($device) `)`
| `nowait` $nowait
| `map_entries` `(` $map_operands `:` type($map_operands) `)`
+ | `depend` `(` custom<DependVarList>($depend_vars, type($depend_vars), $depends) `)`
) attr-dict
}];
@@ -1403,11 +1496,17 @@ def Target_ExitDataOp: OpenMP_Op<"target_exit_data",
The $map_types specifies the types and modifiers for the map clause.
- TODO: depend clause and map_type_modifier values iterator and mapper.
+ The `depends` and `depend_vars` arguments are variadic lists of values
+ that specify the dependencies of this particular target task in relation to
+ other tasks.
+
+ TODO: map_type_modifier values iterator and mapper.
}];
let arguments = (ins Optional<I1>:$if_expr,
Optional<AnyInteger>:$device,
+ OptionalAttr<TaskDependArrayAttr>:$depends,
+ Variadic<OpenMP_PointerLikeType>:$depend_vars,
UnitAttr:$nowait,
Variadic<AnyType>:$map_operands);
@@ -1416,6 +1515,7 @@ def Target_ExitDataOp: OpenMP_Op<"target_exit_data",
| `device` `(` $device `:` type($device) `)`
| `nowait` $nowait
| `map_entries` `(` $map_operands `:` type($map_operands) `)`
+ | `depend` `(` custom<DependVarList>($depend_vars, type($depend_vars), $depends) `)`
) attr-dict
}];
@@ -1454,11 +1554,16 @@ def Target_UpdateDataOp: OpenMP_Op<"target_update_data",
during verification to make sure the restrictions for target update are
respected.
- TODO: depend clause
+ The `depends` and `depend_vars` arguments are variadic lists of values
+ that specify the dependencies of this particular target task in relation to
+ other tasks.
+
}];
let arguments = (ins Optional<I1>:$if_expr,
Optional<AnyInteger>:$device,
+ OptionalAttr<TaskDependArrayAttr>:$depends,
+ Variadic<OpenMP_PointerLikeType>:$depend_vars,
UnitAttr:$nowait,
Variadic<OpenMP_PointerLikeType>:$map_operands);
@@ -1467,6 +1572,7 @@ def Target_UpdateDataOp: OpenMP_Op<"target_update_data",
| `device` `(` $device `:` type($device) `)`
| `nowait` $nowait
| `motion_entries` `(` $map_operands `:` type($map_operands) `)`
+ | `depend` `(` custom<DependVarList>($depend_vars, type($depend_vars), $depends) `)`
) attr-dict
}];
@@ -1496,13 +1602,19 @@ def TargetOp : OpenMP_Op<"target",[IsolatedFromAbove, MapClauseOwningOpInterface
The optional $nowait elliminates the implicit barrier so the parent task can make progress
even if the target task is not yet completed.
- TODO: is_device_ptr, depend, defaultmap, in_reduction
+ The `depends` and `depend_vars` arguments are variadic lists of values
+ that specify the dependencies of this particular target task in relation to
+ other tasks.
+
+ TODO: is_device_ptr, defaultmap, in_reduction
}];
let arguments = (ins Optional<I1>:$if_expr,
Optional<AnyInteger>:$device,
Optional<AnyInteger>:$thread_limit,
+ OptionalAttr<TaskDependArrayAttr>:$depends,
+ Variadic<OpenMP_PointerLikeType>:$depend_vars,
UnitAttr:$nowait,
Variadic<AnyType>:$map_operands);
@@ -1514,6 +1626,7 @@ def TargetOp : OpenMP_Op<"target",[IsolatedFromAbove, MapClauseOwningOpInterface
| `thread_limit` `(` $thread_limit `:` type($thread_limit) `)`
| `nowait` $nowait
| `map_entries` `(` custom<MapEntries>($map_operands, type($map_operands)) `)`
+ | `depend` `(` custom<DependVarList>($depend_vars, type($depend_vars), $depends) `)`
) $region attr-dict
}];
diff --git a/mlir/include/mlir/Dialect/SparseTensor/IR/Enums.h b/mlir/include/mlir/Dialect/SparseTensor/IR/Enums.h
index e940d20..74cc0de 100644
--- a/mlir/include/mlir/Dialect/SparseTensor/IR/Enums.h
+++ b/mlir/include/mlir/Dialect/SparseTensor/IR/Enums.h
@@ -35,6 +35,7 @@
#include <cinttypes>
#include <complex>
#include <optional>
+#include <vector>
namespace mlir {
namespace sparse_tensor {
@@ -343,17 +344,31 @@ constexpr std::optional<LevelFormat> getLevelFormat(LevelType lt) {
/// Convert a LevelFormat to its corresponding LevelType with the given
/// properties. Returns std::nullopt when the properties are not applicable
/// for the input level format.
-constexpr std::optional<LevelType> buildLevelType(LevelFormat lf, bool ordered,
- bool unique, uint64_t n = 0,
- uint64_t m = 0) {
+inline std::optional<LevelType>
+buildLevelType(LevelFormat lf,
+ const std::vector<LevelPropertyNondefault> &properties,
+ uint64_t n = 0, uint64_t m = 0) {
uint64_t newN = n << 32;
uint64_t newM = m << 40;
- auto lt =
- static_cast<LevelType>(static_cast<uint64_t>(lf) | (ordered ? 0 : 2) |
- (unique ? 0 : 1) | newN | newM);
+ uint64_t ltInt = static_cast<uint64_t>(lf) | newN | newM;
+ for (auto p : properties) {
+ ltInt |= static_cast<uint64_t>(p);
+ }
+ auto lt = static_cast<LevelType>(ltInt);
return isValidLT(lt) ? std::optional(lt) : std::nullopt;
}
+inline std::optional<LevelType> buildLevelType(LevelFormat lf, bool ordered,
+ bool unique, uint64_t n = 0,
+ uint64_t m = 0) {
+ std::vector<LevelPropertyNondefault> properties;
+ if (!ordered)
+ properties.push_back(LevelPropertyNondefault::Nonordered);
+ if (!unique)
+ properties.push_back(LevelPropertyNondefault::Nonunique);
+ return buildLevelType(lf, properties, n, m);
+}
+
//
// Ensure the above methods work as intended.
//
@@ -381,57 +396,6 @@ static_assert(
"getLevelFormat conversion is broken");
static_assert(
- (buildLevelType(LevelFormat::Dense, false, true) == std::nullopt &&
- buildLevelType(LevelFormat::Dense, true, false) == std::nullopt &&
- buildLevelType(LevelFormat::Dense, false, false) == std::nullopt &&
- *buildLevelType(LevelFormat::Dense, true, true) == LevelType::Dense &&
- *buildLevelType(LevelFormat::Compressed, true, true) ==
- LevelType::Compressed &&
- *buildLevelType(LevelFormat::Compressed, true, false) ==
- LevelType::CompressedNu &&
- *buildLevelType(LevelFormat::Compressed, false, true) ==
- LevelType::CompressedNo &&
- *buildLevelType(LevelFormat::Compressed, false, false) ==
- LevelType::CompressedNuNo &&
- *buildLevelType(LevelFormat::Singleton, true, true) ==
- LevelType::Singleton &&
- *buildLevelType(LevelFormat::Singleton, true, false) ==
- LevelType::SingletonNu &&
- *buildLevelType(LevelFormat::Singleton, false, true) ==
- LevelType::SingletonNo &&
- *buildLevelType(LevelFormat::Singleton, false, false) ==
- LevelType::SingletonNuNo &&
- *buildLevelType(LevelFormat::LooseCompressed, true, true) ==
- LevelType::LooseCompressed &&
- *buildLevelType(LevelFormat::LooseCompressed, true, false) ==
- LevelType::LooseCompressedNu &&
- *buildLevelType(LevelFormat::LooseCompressed, false, true) ==
- LevelType::LooseCompressedNo &&
- *buildLevelType(LevelFormat::LooseCompressed, false, false) ==
- LevelType::LooseCompressedNuNo &&
- buildLevelType(LevelFormat::NOutOfM, false, true) == std::nullopt &&
- buildLevelType(LevelFormat::NOutOfM, true, false) == std::nullopt &&
- buildLevelType(LevelFormat::NOutOfM, false, false) == std::nullopt &&
- *buildLevelType(LevelFormat::NOutOfM, true, true) == LevelType::NOutOfM),
- "buildLevelType conversion is broken");
-
-static_assert(
- (getN(*buildLevelType(LevelFormat::NOutOfM, true, true, 2, 4)) == 2 &&
- getM(*buildLevelType(LevelFormat::NOutOfM, true, true, 2, 4)) == 4 &&
- getN(*buildLevelType(LevelFormat::NOutOfM, true, true, 8, 10)) == 8 &&
- getM(*buildLevelType(LevelFormat::NOutOfM, true, true, 8, 10)) == 10),
- "getN/M conversion is broken");
-
-static_assert(
- (isValidNOutOfMLT(*buildLevelType(LevelFormat::NOutOfM, true, true, 2, 4),
- 2, 4) &&
- isValidNOutOfMLT(*buildLevelType(LevelFormat::NOutOfM, true, true, 8, 10),
- 8, 10) &&
- !isValidNOutOfMLT(*buildLevelType(LevelFormat::NOutOfM, true, true, 3, 4),
- 2, 4)),
- "isValidNOutOfMLT definition is broken");
-
-static_assert(
(isValidLT(LevelType::Undef) && isValidLT(LevelType::Dense) &&
isValidLT(LevelType::Compressed) && isValidLT(LevelType::CompressedNu) &&
isValidLT(LevelType::CompressedNo) &&
diff --git a/mlir/include/mlir/Dialect/Vector/Transforms/VectorRewritePatterns.h b/mlir/include/mlir/Dialect/Vector/Transforms/VectorRewritePatterns.h
index f5941d3..7c943f0 100644
--- a/mlir/include/mlir/Dialect/Vector/Transforms/VectorRewritePatterns.h
+++ b/mlir/include/mlir/Dialect/Vector/Transforms/VectorRewritePatterns.h
@@ -20,7 +20,9 @@
#include "mlir/Dialect/Vector/Transforms/VectorTransformsEnums.h.inc"
namespace mlir {
+class ConversionTarget;
class RewritePatternSet;
+class TypeConverter;
namespace arith {
class AndIOp;
@@ -375,6 +377,13 @@ void populateVectorNarrowTypeRewritePatterns(RewritePatternSet &patterns,
void populateVectorTransposeNarrowTypeRewritePatterns(
RewritePatternSet &patterns, PatternBenefit benefit = 1);
+/// Populates patterns for ND vectors (N >= 2) linearization and sets up the
+/// provided ConversionTarget with the appropriate legality configuration for
+/// the ops to get converted properly.
+void populateVectorLinearizeTypeConversionsAndLegality(
+ TypeConverter &typeConverter, RewritePatternSet &patterns,
+ ConversionTarget &target);
+
} // namespace vector
} // namespace mlir
diff --git a/mlir/include/mlir/ExecutionEngine/SparseTensor/Storage.h b/mlir/include/mlir/ExecutionEngine/SparseTensor/Storage.h
index 1418217..eff1aca 100644
--- a/mlir/include/mlir/ExecutionEngine/SparseTensor/Storage.h
+++ b/mlir/include/mlir/ExecutionEngine/SparseTensor/Storage.h
@@ -465,9 +465,6 @@ private:
/// Computes the assembled-size associated with the `l`-th level,
/// given the assembled-size associated with the `(l-1)`-th level.
- /// "Assembled-sizes" correspond to the (nominal) sizes of overhead
- /// storage, as opposed to "level-sizes" which are the cardinality
- /// of possible coordinates for that level.
uint64_t assembledSize(uint64_t parentSz, uint64_t l) const {
if (isCompressedLvl(l))
return positions[l][parentSz];
@@ -764,11 +761,6 @@ SparseTensorStorage<P, C, V>::SparseTensorStorage(
// Note that none of the buffers can be reused because ownership
// of the memory passed from clients is not necessarily transferred.
// Therefore, all data is copied over into a new SparseTensorStorage.
- //
- // TODO: this needs to be generalized to all formats AND
- // we need a proper audit of e.g. double compressed
- // levels where some are not filled
- //
uint64_t trailCOOLen = 0, parentSz = 1, bufIdx = 0;
for (uint64_t l = 0; l < lvlRank; l++) {
if (!isUniqueLvl(l) && (isCompressedLvl(l) || isLooseCompressedLvl(l))) {
diff --git a/mlir/include/mlir/IR/PatternMatch.h b/mlir/include/mlir/IR/PatternMatch.h
index 78dcfe7..b8aeea0 100644
--- a/mlir/include/mlir/IR/PatternMatch.h
+++ b/mlir/include/mlir/IR/PatternMatch.h
@@ -588,8 +588,7 @@ public:
/// Unlink this operation from its current block and insert it right before
/// `iterator` in the specified block.
- virtual void moveOpBefore(Operation *op, Block *block,
- Block::iterator iterator);
+ void moveOpBefore(Operation *op, Block *block, Block::iterator iterator);
/// Unlink this operation from its current block and insert it right after
/// `existingOp` which may be in the same or another block in the same
@@ -598,8 +597,7 @@ public:
/// Unlink this operation from its current block and insert it right after
/// `iterator` in the specified block.
- virtual void moveOpAfter(Operation *op, Block *block,
- Block::iterator iterator);
+ void moveOpAfter(Operation *op, Block *block, Block::iterator iterator);
/// Unlink this block and insert it right before `existingBlock`.
void moveBlockBefore(Block *block, Block *anotherBlock);
diff --git a/mlir/include/mlir/InitAllExtensions.h b/mlir/include/mlir/InitAllExtensions.h
index 7708ca5..b31fb26f0 100644
--- a/mlir/include/mlir/InitAllExtensions.h
+++ b/mlir/include/mlir/InitAllExtensions.h
@@ -23,6 +23,7 @@
#include "mlir/Conversion/MemRefToLLVM/MemRefToLLVM.h"
#include "mlir/Conversion/NVVMToLLVM/NVVMToLLVM.h"
#include "mlir/Conversion/UBToLLVM/UBToLLVM.h"
+#include "mlir/Dialect/AMDGPU/TransformOps/AMDGPUTransformOps.h"
#include "mlir/Dialect/Affine/TransformOps/AffineTransformOps.h"
#include "mlir/Dialect/Bufferization/TransformOps/BufferizationTransformOps.h"
#include "mlir/Dialect/Func/Extensions/AllExtensions.h"
@@ -66,6 +67,7 @@ inline void registerAllExtensions(DialectRegistry &registry) {
ub::registerConvertUBToLLVMInterface(registry);
// Register all transform dialect extensions.
+ amdgpu::registerTransformDialectExtension(registry);
affine::registerTransformDialectExtension(registry);
bufferization::registerTransformDialectExtension(registry);
func::registerTransformDialectExtension(registry);
diff --git a/mlir/include/mlir/Transforms/DialectConversion.h b/mlir/include/mlir/Transforms/DialectConversion.h
index b1ec1fe..15fa39b 100644
--- a/mlir/include/mlir/Transforms/DialectConversion.h
+++ b/mlir/include/mlir/Transforms/DialectConversion.h
@@ -604,6 +604,29 @@ private:
using ConversionPattern::matchAndRewrite;
};
+/// OpTraitConversionPattern is a wrapper around ConversionPattern that allows
+/// for matching and rewriting against instances of an operation that possess a
+/// given trait.
+template <template <typename> class TraitType>
+class OpTraitConversionPattern : public ConversionPattern {
+public:
+ OpTraitConversionPattern(MLIRContext *context, PatternBenefit benefit = 1)
+ : ConversionPattern(Pattern::MatchTraitOpTypeTag(),
+ TypeID::get<TraitType>(), benefit, context) {}
+ OpTraitConversionPattern(const TypeConverter &typeConverter,
+ MLIRContext *context, PatternBenefit benefit = 1)
+ : ConversionPattern(typeConverter, Pattern::MatchTraitOpTypeTag(),
+ TypeID::get<TraitType>(), benefit, context) {}
+};
+
+/// Generic utility to convert op result types according to type converter
+/// without knowing exact op type.
+/// Clones existing op with new result types and returns it.
+FailureOr<Operation *>
+convertOpResultTypes(Operation *op, ValueRange operands,
+ const TypeConverter &converter,
+ ConversionPatternRewriter &rewriter);
+
/// Add a pattern to the given pattern list to convert the signature of a
/// FunctionOpInterface op with the given type converter. This only supports
/// ops which use FunctionType to represent their type.
@@ -632,8 +655,7 @@ struct ConversionPatternRewriterImpl;
/// This class implements a pattern rewriter for use with ConversionPatterns. It
/// extends the base PatternRewriter and provides special conversion specific
/// hooks.
-class ConversionPatternRewriter final : public PatternRewriter,
- public RewriterBase::Listener {
+class ConversionPatternRewriter final : public PatternRewriter {
public:
explicit ConversionPatternRewriter(MLIRContext *ctx);
~ConversionPatternRewriter() override;
@@ -712,10 +734,6 @@ public:
/// implemented for dialect conversion.
void eraseBlock(Block *block) override;
- /// PatternRewriter hook creating a new block.
- void notifyBlockInserted(Block *block, Region *previous,
- Region::iterator previousIt) override;
-
/// PatternRewriter hook for splitting a block into two parts.
Block *splitBlock(Block *block, Block::iterator before) override;
@@ -724,13 +742,10 @@ public:
ValueRange argValues = std::nullopt) override;
using PatternRewriter::inlineBlockBefore;
- /// PatternRewriter hook for inserting a new operation.
- void notifyOperationInserted(Operation *op, InsertPoint previous) override;
-
/// PatternRewriter hook for updating the given operation in-place.
/// Note: These methods only track updates to the given operation itself,
- /// and not nested regions. Updates to regions will still require notification
- /// through other more specific hooks above.
+ /// and not nested regions. Updates to regions will still require
+ /// notification through other more specific hooks above.
void startOpModification(Operation *op) override;
/// PatternRewriter hook for updating the given operation in-place.
@@ -739,25 +754,13 @@ public:
/// PatternRewriter hook for updating the given operation in-place.
void cancelOpModification(Operation *op) override;
- /// PatternRewriter hook for notifying match failure reasons.
- void
- notifyMatchFailure(Location loc,
- function_ref<void(Diagnostic &)> reasonCallback) override;
- using PatternRewriter::notifyMatchFailure;
-
/// Return a reference to the internal implementation.
detail::ConversionPatternRewriterImpl &getImpl();
private:
// Hide unsupported pattern rewriter API.
- using OpBuilder::getListener;
using OpBuilder::setListener;
- void moveOpBefore(Operation *op, Block *block,
- Block::iterator iterator) override;
- void moveOpAfter(Operation *op, Block *block,
- Block::iterator iterator) override;
-
std::unique_ptr<detail::ConversionPatternRewriterImpl> impl;
};
diff --git a/mlir/lib/Analysis/Presburger/Barvinok.cpp b/mlir/lib/Analysis/Presburger/Barvinok.cpp
index d2752de..b6d1f99 100644
--- a/mlir/lib/Analysis/Presburger/Barvinok.cpp
+++ b/mlir/lib/Analysis/Presburger/Barvinok.cpp
@@ -10,6 +10,7 @@
#include "mlir/Analysis/Presburger/Utils.h"
#include "llvm/ADT/Sequence.h"
#include <algorithm>
+#include <bitset>
using namespace mlir;
using namespace presburger;
@@ -76,7 +77,8 @@ MPInt mlir::presburger::detail::getIndex(ConeV cone) {
/// num is computed by expressing the vertex as a weighted
/// sum of the generators, and then taking the floor of the
/// coefficients.
-GeneratingFunction mlir::presburger::detail::unimodularConeGeneratingFunction(
+GeneratingFunction
+mlir::presburger::detail::computeUnimodularConeGeneratingFunction(
ParamPoint vertex, int sign, ConeH cone) {
// Consider a cone with H-representation [0 -1].
// [-1 -2]
@@ -84,7 +86,7 @@ GeneratingFunction mlir::presburger::detail::unimodularConeGeneratingFunction(
// [-1 -1/2 1]
// `cone` must be unimodular.
- assert(getIndex(getDual(cone)) == 1 && "input cone is not unimodular!");
+ assert(abs(getIndex(getDual(cone))) == 1 && "input cone is not unimodular!");
unsigned numVar = cone.getNumVars();
unsigned numIneq = cone.getNumInequalities();
@@ -147,6 +149,304 @@ GeneratingFunction mlir::presburger::detail::unimodularConeGeneratingFunction(
std::vector({denominator}));
}
+/// We use Gaussian elimination to find the solution to a set of d equations
+/// of the form
+/// a_1 x_1 + ... + a_d x_d + b_1 m_1 + ... + b_p m_p + c = 0
+/// where x_i are variables,
+/// m_i are parameters and
+/// a_i, b_i, c are rational coefficients.
+///
+/// The solution expresses each x_i as an affine function of the m_i, and is
+/// therefore represented as a matrix of size d x (p+1).
+/// If there is no solution, we return null.
+std::optional<ParamPoint>
+mlir::presburger::detail::solveParametricEquations(FracMatrix equations) {
+ // equations is a d x (d + p + 1) matrix.
+ // Each row represents an equation.
+ unsigned d = equations.getNumRows();
+ unsigned numCols = equations.getNumColumns();
+
+ // If the determinant is zero, there is no unique solution.
+ // Thus we return null.
+ if (FracMatrix(equations.getSubMatrix(/*fromRow=*/0, /*toRow=*/d - 1,
+ /*fromColumn=*/0,
+ /*toColumn=*/d - 1))
+ .determinant() == 0)
+ return std::nullopt;
+
+ // Perform row operations to make each column all zeros except for the
+ // diagonal element, which is made to be one.
+ for (unsigned i = 0; i < d; ++i) {
+ // First ensure that the diagonal element is nonzero, by swapping
+ // it with a row that is non-zero at column i.
+ if (equations(i, i) != 0)
+ continue;
+ for (unsigned j = i + 1; j < d; ++j) {
+ if (equations(j, i) == 0)
+ continue;
+ equations.swapRows(j, i);
+ break;
+ }
+
+ Fraction diagElement = equations(i, i);
+
+ // Apply row operations to make all elements except the diagonal to zero.
+ for (unsigned j = 0; j < d; ++j) {
+ if (i == j)
+ continue;
+ if (equations(j, i) == 0)
+ continue;
+ // Apply row operations to make element (j, i) zero by subtracting the
+ // ith row, appropriately scaled.
+ Fraction currentElement = equations(j, i);
+ equations.addToRow(/*sourceRow=*/i, /*targetRow=*/j,
+ /*scale=*/-currentElement / diagElement);
+ }
+ }
+
+ // Rescale diagonal elements to 1.
+ for (unsigned i = 0; i < d; ++i)
+ equations.scaleRow(i, 1 / equations(i, i));
+
+ // Now we have reduced the equations to the form
+ // x_i + b_1' m_1 + ... + b_p' m_p + c' = 0
+ // i.e. each variable appears exactly once in the system, and has coefficient
+ // one.
+ //
+ // Thus we have
+ // x_i = - b_1' m_1 - ... - b_p' m_p - c
+ // and so we return the negation of the last p + 1 columns of the matrix.
+ //
+ // We copy these columns and return them.
+ ParamPoint vertex =
+ equations.getSubMatrix(/*fromRow=*/0, /*toRow=*/d - 1,
+ /*fromColumn=*/d, /*toColumn=*/numCols - 1);
+ vertex.negateMatrix();
+ return vertex;
+}
+
+/// This is an implementation of the Clauss-Loechner algorithm for chamber
+/// decomposition.
+///
+/// We maintain a list of pairwise disjoint chambers and the generating
+/// functions corresponding to each one. We iterate over the list of regions,
+/// each time adding the current region's generating function to the chambers
+/// where it is active and separating the chambers where it is not.
+///
+/// Given the region each generating function is active in, for each subset of
+/// generating functions the region that (the sum of) precisely this subset is
+/// in, is the intersection of the regions that these are active in,
+/// intersected with the complements of the remaining regions.
+std::vector<std::pair<PresburgerSet, GeneratingFunction>>
+mlir::presburger::detail::computeChamberDecomposition(
+ unsigned numSymbols, ArrayRef<std::pair<PresburgerSet, GeneratingFunction>>
+ regionsAndGeneratingFunctions) {
+ assert(!regionsAndGeneratingFunctions.empty() &&
+ "there must be at least one chamber!");
+ // We maintain a list of regions and their associated generating function
+ // initialized with the universe and the empty generating function.
+ std::vector<std::pair<PresburgerSet, GeneratingFunction>> chambers = {
+ {PresburgerSet::getUniverse(PresburgerSpace::getSetSpace(numSymbols)),
+ GeneratingFunction(numSymbols, {}, {}, {})}};
+
+ // We iterate over the region list.
+ //
+ // For each activity region R_j (corresponding to the generating function
+ // gf_j), we examine all the current chambers R_i.
+ //
+ // If R_j has a full-dimensional intersection with an existing chamber R_i,
+ // then that chamber is replaced by two new ones:
+ // 1. the intersection R_i \cap R_j, where the generating function is
+ // gf_i + gf_j.
+ // 2. the difference R_i - R_j, where the generating function is gf_i.
+ //
+ // At each step, we define a new chamber list after considering gf_j,
+ // replacing and appending chambers as discussed above.
+ //
+ // The loop has the invariant that the union over all the chambers gives the
+ // universe at every step.
+ for (const auto &[region, generatingFunction] :
+ regionsAndGeneratingFunctions) {
+ std::vector<std::pair<PresburgerSet, GeneratingFunction>> newChambers;
+
+ for (const auto &[currentRegion, currentGeneratingFunction] : chambers) {
+ PresburgerSet intersection = currentRegion.intersect(region);
+
+ // If the intersection is not full-dimensional, we do not modify
+ // the chamber list.
+ if (!intersection.isFullDim()) {
+ newChambers.emplace_back(currentRegion, currentGeneratingFunction);
+ continue;
+ }
+
+ // If it is, we add the intersection and the difference as chambers.
+ newChambers.emplace_back(intersection,
+ currentGeneratingFunction + generatingFunction);
+ newChambers.emplace_back(currentRegion.subtract(region),
+ currentGeneratingFunction);
+ }
+ chambers = std::move(newChambers);
+ }
+
+ return chambers;
+}
+
+/// For a polytope expressed as a set of n inequalities, compute the generating
+/// function corresponding to the lattice points included in the polytope. This
+/// algorithm has three main steps:
+/// 1. Enumerate the vertices, by iterating over subsets of inequalities and
+/// checking for satisfiability. For each d-subset of inequalities (where d
+/// is the number of variables), we solve to obtain the vertex in terms of
+/// the parameters, and then check for the region in parameter space where
+/// this vertex satisfies the remaining (n - d) inequalities.
+/// 2. For each vertex, identify the tangent cone and compute the generating
+/// function corresponding to it. The generating function depends on the
+/// parametric expression of the vertex and the (non-parametric) generators
+/// of the tangent cone.
+/// 3. [Clauss-Loechner decomposition] Identify the regions in parameter space
+/// (chambers) where each vertex is active, and accordingly compute the
+/// GF of the polytope in each chamber.
+///
+/// Verdoolaege, Sven, et al. "Counting integer points in parametric
+/// polytopes using Barvinok's rational functions." Algorithmica 48 (2007):
+/// 37-66.
+std::vector<std::pair<PresburgerSet, GeneratingFunction>>
+mlir::presburger::detail::computePolytopeGeneratingFunction(
+ const PolyhedronH &poly) {
+ unsigned numVars = poly.getNumRangeVars();
+ unsigned numSymbols = poly.getNumSymbolVars();
+ unsigned numIneqs = poly.getNumInequalities();
+
+ // We store a list of the computed vertices.
+ std::vector<ParamPoint> vertices;
+ // For each vertex, we store the corresponding active region and the
+ // generating functions of the tangent cone, in order.
+ std::vector<std::pair<PresburgerSet, GeneratingFunction>>
+ regionsAndGeneratingFunctions;
+
+ // We iterate over all subsets of inequalities with cardinality numVars,
+ // using permutations of numVars 1's and (numIneqs - numVars) 0's.
+ //
+ // For a given permutation, we consider a subset which contains
+ // the i'th inequality if the i'th bit in the bitset is 1.
+ //
+ // We start with the permutation that takes the last numVars inequalities.
+ SmallVector<int> indicator(numIneqs);
+ for (unsigned i = numIneqs - numVars; i < numIneqs; ++i)
+ indicator[i] = 1;
+
+ do {
+ // Collect the inequalities corresponding to the bits which are set
+ // and the remaining ones.
+ auto [subset, remainder] = poly.getInequalities().splitByBitset(indicator);
+ // All other inequalities are stored in a2 and b2c2.
+ //
+ // These are column-wise splits of the inequalities;
+ // a2 stores the coefficients of the variables, and
+ // b2c2 stores the coefficients of the parameters and the constant term.
+ FracMatrix a2(numIneqs - numVars, numVars);
+ FracMatrix b2c2(numIneqs - numVars, numSymbols + 1);
+ a2 = FracMatrix(
+ remainder.getSubMatrix(0, numIneqs - numVars - 1, 0, numVars - 1));
+ b2c2 = FracMatrix(remainder.getSubMatrix(0, numIneqs - numVars - 1, numVars,
+ numVars + numSymbols));
+
+ // Find the vertex, if any, corresponding to the current subset of
+ // inequalities.
+ std::optional<ParamPoint> vertex =
+ solveParametricEquations(FracMatrix(subset)); // d x (p+1)
+
+ if (!vertex)
+ continue;
+ if (std::find(vertices.begin(), vertices.end(), vertex) != vertices.end())
+ continue;
+ // If this subset corresponds to a vertex that has not been considered,
+ // store it.
+ vertices.push_back(*vertex);
+
+ // If a vertex is formed by the intersection of more than d facets, we
+ // assume that any d-subset of these facets can be solved to obtain its
+ // expression. This assumption is valid because, if the vertex has two
+ // distinct parametric expressions, then a nontrivial equality among the
+ // parameters holds, which is a contradiction as we know the parameter
+ // space to be full-dimensional.
+
+ // Let the current vertex be [X | y], where
+ // X represents the coefficients of the parameters and
+ // y represents the constant term.
+ //
+ // The region (in parameter space) where this vertex is active is given
+ // by substituting the vertex into the *remaining* inequalities of the
+ // polytope (those which were not collected into `subset`), i.e., into the
+ // inequalities [A2 | B2 | c2].
+ //
+ // Thus, the coefficients of the parameters after substitution become
+ // (A2 • X + B2)
+ // and the constant terms become
+ // (A2 • y + c2).
+ //
+ // The region is therefore given by
+ // (A2 • X + B2) p + (A2 • y + c2) ≥ 0
+ //
+ // This is equivalent to A2 • [X | y] + [B2 | c2].
+ //
+ // Thus we premultiply [X | y] with each row of A2
+ // and add each row of [B2 | c2].
+ FracMatrix activeRegion(numIneqs - numVars, numSymbols + 1);
+ for (unsigned i = 0; i < numIneqs - numVars; i++) {
+ activeRegion.setRow(i, vertex->preMultiplyWithRow(a2.getRow(i)));
+ activeRegion.addToRow(i, b2c2.getRow(i), 1);
+ }
+
+ // We convert the representation of the active region to an integers-only
+ // form so as to store it as a PresburgerSet.
+ IntegerPolyhedron activeRegionRel(
+ PresburgerSpace::getRelationSpace(0, numSymbols, 0, 0), activeRegion);
+
+ // Now, we compute the generating function at this vertex.
+ // We collect the inequalities corresponding to each vertex to compute
+ // the tangent cone at that vertex.
+ //
+ // We only need the coefficients of the variables (NOT the parameters)
+ // as the generating function only depends on these.
+ // We translate the cones to be pointed at the origin by making the
+ // constant terms zero.
+ ConeH tangentCone = defineHRep(numVars);
+ for (unsigned j = 0, e = subset.getNumRows(); j < e; ++j) {
+ SmallVector<MPInt> ineq(numVars + 1);
+ for (unsigned k = 0; k < numVars; ++k)
+ ineq[k] = subset(j, k);
+ tangentCone.addInequality(ineq);
+ }
+ // We assume that the tangent cone is unimodular, so there is no need
+ // to decompose it.
+ //
+ // In the general case, the unimodular decomposition may have several
+ // cones.
+ GeneratingFunction vertexGf(numSymbols, {}, {}, {});
+ SmallVector<std::pair<int, ConeH>, 4> unimodCones = {{1, tangentCone}};
+ for (std::pair<int, ConeH> signedCone : unimodCones) {
+ auto [sign, cone] = signedCone;
+ vertexGf = vertexGf +
+ computeUnimodularConeGeneratingFunction(*vertex, sign, cone);
+ }
+ // We store the vertex we computed with the generating function of its
+ // tangent cone.
+ regionsAndGeneratingFunctions.emplace_back(PresburgerSet(activeRegionRel),
+ vertexGf);
+ } while (std::next_permutation(indicator.begin(), indicator.end()));
+
+ // Now, we use Clauss-Loechner decomposition to identify regions in parameter
+ // space where each vertex is active. These regions (chambers) have the
+ // property that no two of them have a full-dimensional intersection, i.e.,
+ // they may share "facets" or "edges", but their intersection can only have
+ // up to numVars - 1 dimensions.
+ //
+ // In each chamber, we sum up the generating functions of the active vertices
+ // to find the generating function of the polytope.
+ return computeChamberDecomposition(numSymbols, regionsAndGeneratingFunctions);
+}
+
/// We use an iterative procedure to find a vector not orthogonal
/// to a given set, ignoring the null vectors.
/// Let the inputs be {x_1, ..., x_k}, all vectors of length n.
diff --git a/mlir/lib/Analysis/Presburger/IntegerRelation.cpp b/mlir/lib/Analysis/Presburger/IntegerRelation.cpp
index 7d2a63d..2ac271e 100644
--- a/mlir/lib/Analysis/Presburger/IntegerRelation.cpp
+++ b/mlir/lib/Analysis/Presburger/IntegerRelation.cpp
@@ -26,6 +26,7 @@
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/Sequence.h"
#include "llvm/ADT/SmallBitVector.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
@@ -2498,6 +2499,31 @@ void IntegerRelation::printSpace(raw_ostream &os) const {
os << getNumConstraints() << " constraints\n";
}
+void IntegerRelation::removeTrivialEqualities() {
+ for (int i = getNumEqualities() - 1; i >= 0; --i)
+ if (rangeIsZero(getEquality(i)))
+ removeEquality(i);
+}
+
+bool IntegerRelation::isFullDim() {
+ if (getNumVars() == 0)
+ return true;
+ if (isEmpty())
+ return false;
+
+ // If there is a non-trivial equality, the space cannot be full-dimensional.
+ removeTrivialEqualities();
+ if (getNumEqualities() > 0)
+ return false;
+
+ // The polytope is full-dimensional iff it is not flat along any of the
+ // inequality directions.
+ Simplex simplex(*this);
+ return llvm::none_of(llvm::seq<int>(getNumInequalities()), [&](int i) {
+ return simplex.isFlatAlong(getInequality(i));
+ });
+}
+
void IntegerRelation::print(raw_ostream &os) const {
assert(hasConsistentState());
printSpace(os);
diff --git a/mlir/lib/Analysis/Presburger/Matrix.cpp b/mlir/lib/Analysis/Presburger/Matrix.cpp
index bd7f7f5..4cb6e6b 100644
--- a/mlir/lib/Analysis/Presburger/Matrix.cpp
+++ b/mlir/lib/Analysis/Presburger/Matrix.cpp
@@ -29,6 +29,22 @@ Matrix<T>::Matrix(unsigned rows, unsigned columns, unsigned reservedRows,
data.reserve(std::max(nRows, reservedRows) * nReservedColumns);
}
+/// We cannot use the default implementation of operator== as it compares
+/// fields like `reservedColumns` etc., which are not part of the data.
+template <typename T>
+bool Matrix<T>::operator==(const Matrix<T> &m) const {
+ if (nRows != m.getNumRows())
+ return false;
+ if (nColumns != m.getNumColumns())
+ return false;
+
+ for (unsigned i = 0; i < nRows; i++)
+ if (getRow(i) != m.getRow(i))
+ return false;
+
+ return true;
+}
+
template <typename T>
Matrix<T> Matrix<T>::identity(unsigned dimension) {
Matrix matrix(dimension, dimension);
@@ -296,6 +312,12 @@ void Matrix<T>::addToRow(unsigned row, ArrayRef<T> rowVec, const T &scale) {
}
template <typename T>
+void Matrix<T>::scaleRow(unsigned row, const T &scale) {
+ for (unsigned col = 0; col < nColumns; ++col)
+ at(row, col) *= scale;
+}
+
+template <typename T>
void Matrix<T>::addToColumn(unsigned sourceColumn, unsigned targetColumn,
const T &scale) {
if (scale == 0)
@@ -317,6 +339,12 @@ void Matrix<T>::negateRow(unsigned row) {
}
template <typename T>
+void Matrix<T>::negateMatrix() {
+ for (unsigned row = 0; row < nRows; ++row)
+ negateRow(row);
+}
+
+template <typename T>
SmallVector<T, 8> Matrix<T>::preMultiplyWithRow(ArrayRef<T> rowVec) const {
assert(rowVec.size() == getNumRows() && "Invalid row vector dimension!");
@@ -355,6 +383,22 @@ static void modEntryColumnOperation(Matrix<MPInt> &m, unsigned row,
}
template <typename T>
+Matrix<T> Matrix<T>::getSubMatrix(unsigned fromRow, unsigned toRow,
+ unsigned fromColumn,
+ unsigned toColumn) const {
+ assert(fromRow <= toRow && "end of row range must be after beginning!");
+ assert(toRow < nRows && "end of row range out of bounds!");
+ assert(fromColumn <= toColumn &&
+ "end of column range must be after beginning!");
+ assert(toColumn < nColumns && "end of column range out of bounds!");
+ Matrix<T> subMatrix(toRow - fromRow + 1, toColumn - fromColumn + 1);
+ for (unsigned i = fromRow; i <= toRow; ++i)
+ for (unsigned j = fromColumn; j <= toColumn; ++j)
+ subMatrix(i - fromRow, j - fromColumn) = at(i, j);
+ return subMatrix;
+}
+
+template <typename T>
void Matrix<T>::print(raw_ostream &os) const {
for (unsigned row = 0; row < nRows; ++row) {
for (unsigned column = 0; column < nColumns; ++column)
@@ -363,6 +407,21 @@ void Matrix<T>::print(raw_ostream &os) const {
}
}
+/// We iterate over the `indicator` bitset, checking each bit. If a bit is 1,
+/// we append it to one matrix, and if it is zero, we append it to the other.
+template <typename T>
+std::pair<Matrix<T>, Matrix<T>>
+Matrix<T>::splitByBitset(ArrayRef<int> indicator) {
+ Matrix<T> rowsForOne(0, nColumns), rowsForZero(0, nColumns);
+ for (unsigned i = 0; i < nRows; i++) {
+ if (indicator[i] == 1)
+ rowsForOne.appendExtraRow(getRow(i));
+ else
+ rowsForZero.appendExtraRow(getRow(i));
+ }
+ return {rowsForOne, rowsForZero};
+}
+
template <typename T>
void Matrix<T>::dump() const {
print(llvm::errs());
@@ -697,3 +756,20 @@ void FracMatrix::LLL(Fraction delta) {
}
}
}
+
+IntMatrix FracMatrix::normalizeRows() const {
+ unsigned numRows = getNumRows();
+ unsigned numColumns = getNumColumns();
+ IntMatrix normalized(numRows, numColumns);
+
+ MPInt lcmDenoms = MPInt(1);
+ for (unsigned i = 0; i < numRows; i++) {
+ // For a row, first compute the LCM of the denominators.
+ for (unsigned j = 0; j < numColumns; j++)
+ lcmDenoms = lcm(lcmDenoms, at(i, j).den);
+ // Then, multiply by it throughout and convert to integers.
+ for (unsigned j = 0; j < numColumns; j++)
+ normalized(i, j) = (at(i, j) * lcmDenoms).getAsInteger();
+ }
+ return normalized;
+}
diff --git a/mlir/lib/Analysis/Presburger/PresburgerRelation.cpp b/mlir/lib/Analysis/Presburger/PresburgerRelation.cpp
index 787fc1c..3af6baa 100644
--- a/mlir/lib/Analysis/Presburger/PresburgerRelation.cpp
+++ b/mlir/lib/Analysis/Presburger/PresburgerRelation.cpp
@@ -1041,6 +1041,12 @@ PresburgerRelation PresburgerRelation::simplify() const {
return result;
}
+bool PresburgerRelation::isFullDim() const {
+ return llvm::any_of(getAllDisjuncts(), [&](IntegerRelation disjunct) {
+ return disjunct.isFullDim();
+ });
+}
+
void PresburgerRelation::print(raw_ostream &os) const {
os << "Number of Disjuncts: " << getNumDisjuncts() << "\n";
for (const IntegerRelation &disjunct : disjuncts) {
diff --git a/mlir/lib/Analysis/Presburger/Simplex.cpp b/mlir/lib/Analysis/Presburger/Simplex.cpp
index 42bbc33..1969cce 100644
--- a/mlir/lib/Analysis/Presburger/Simplex.cpp
+++ b/mlir/lib/Analysis/Presburger/Simplex.cpp
@@ -2104,6 +2104,19 @@ Simplex::computeIntegerBounds(ArrayRef<MPInt> coeffs) {
return {minRoundedUp, maxRoundedDown};
}
+bool Simplex::isFlatAlong(ArrayRef<MPInt> coeffs) {
+ assert(!isEmpty() && "cannot check for flatness of empty simplex!");
+ auto upOpt = computeOptimum(Simplex::Direction::Up, coeffs);
+ auto downOpt = computeOptimum(Simplex::Direction::Down, coeffs);
+
+ if (!upOpt.isBounded())
+ return false;
+ if (!downOpt.isBounded())
+ return false;
+
+ return *upOpt == *downOpt;
+}
+
void SimplexBase::print(raw_ostream &os) const {
os << "rows = " << getNumRows() << ", columns = " << getNumColumns() << "\n";
if (empty)
diff --git a/mlir/lib/Analysis/Presburger/Utils.cpp b/mlir/lib/Analysis/Presburger/Utils.cpp
index a8d8608..f717a4d 100644
--- a/mlir/lib/Analysis/Presburger/Utils.cpp
+++ b/mlir/lib/Analysis/Presburger/Utils.cpp
@@ -564,4 +564,8 @@ std::vector<Fraction> presburger::multiplyPolynomials(ArrayRef<Fraction> a,
convolution.push_back(sum);
}
return convolution;
-} \ No newline at end of file
+}
+
+bool presburger::isRangeZero(ArrayRef<Fraction> arr) {
+ return llvm::all_of(arr, [&](Fraction f) { return f == 0; });
+}
diff --git a/mlir/lib/Bindings/Python/DialectLLVM.cpp b/mlir/lib/Bindings/Python/DialectLLVM.cpp
new file mode 100644
index 0000000..780f5ea
--- /dev/null
+++ b/mlir/lib/Bindings/Python/DialectLLVM.cpp
@@ -0,0 +1,145 @@
+//===- DialectLLVM.cpp - Pybind module for LLVM dialect API support -------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "mlir-c/Diagnostics.h"
+#include "mlir-c/Dialect/LLVM.h"
+#include "mlir-c/IR.h"
+#include "mlir-c/Support.h"
+#include "mlir/Bindings/Python/PybindAdaptors.h"
+#include <string>
+
+namespace py = pybind11;
+using namespace llvm;
+using namespace mlir;
+using namespace mlir::python;
+using namespace mlir::python::adaptors;
+
+/// RAII scope intercepting all diagnostics into a string. The message must be
+/// checked before this goes out of scope.
+class CollectDiagnosticsToStringScope {
+public:
+ explicit CollectDiagnosticsToStringScope(MlirContext ctx) : context(ctx) {
+ handlerID = mlirContextAttachDiagnosticHandler(ctx, &handler, &errorMessage,
+ /*deleteUserData=*/nullptr);
+ }
+ ~CollectDiagnosticsToStringScope() {
+ assert(errorMessage.empty() && "unchecked error message");
+ mlirContextDetachDiagnosticHandler(context, handlerID);
+ }
+
+ [[nodiscard]] std::string takeMessage() { return std::move(errorMessage); }
+
+private:
+ static MlirLogicalResult handler(MlirDiagnostic diag, void *data) {
+ auto printer = +[](MlirStringRef message, void *data) {
+ *static_cast<std::string *>(data) +=
+ StringRef(message.data, message.length);
+ };
+ mlirDiagnosticPrint(diag, printer, data);
+ return mlirLogicalResultSuccess();
+ }
+
+ MlirContext context;
+ MlirDiagnosticHandlerID handlerID;
+ std::string errorMessage = "";
+};
+
+void populateDialectLLVMSubmodule(const pybind11::module &m) {
+ auto llvmStructType =
+ mlir_type_subclass(m, "StructType", mlirTypeIsALLVMStructType);
+
+ llvmStructType.def_classmethod(
+ "get_literal",
+ [](py::object cls, const std::vector<MlirType> &elements, bool packed,
+ MlirLocation loc) {
+ CollectDiagnosticsToStringScope scope(mlirLocationGetContext(loc));
+
+ MlirType type = mlirLLVMStructTypeLiteralGetChecked(
+ loc, elements.size(), elements.data(), packed);
+ if (mlirTypeIsNull(type)) {
+ throw py::value_error(scope.takeMessage());
+ }
+ return cls(type);
+ },
+ py::arg("cls"), py::arg("elements"), py::kw_only(),
+ py::arg("packed") = false, py::arg("loc") = py::none());
+
+ llvmStructType.def_classmethod(
+ "get_identified",
+ [](py::object cls, const std::string &name, MlirContext context) {
+ return cls(mlirLLVMStructTypeIdentifiedGet(
+ context, mlirStringRefCreate(name.data(), name.size())));
+ },
+ py::arg("cls"), py::arg("name"), py::kw_only(),
+ py::arg("context") = py::none());
+
+ llvmStructType.def_classmethod(
+ "get_opaque",
+ [](py::object cls, const std::string &name, MlirContext context) {
+ return cls(mlirLLVMStructTypeOpaqueGet(
+ context, mlirStringRefCreate(name.data(), name.size())));
+ },
+ py::arg("cls"), py::arg("name"), py::arg("context") = py::none());
+
+ llvmStructType.def(
+ "set_body",
+ [](MlirType self, const std::vector<MlirType> &elements, bool packed) {
+ MlirLogicalResult result = mlirLLVMStructTypeSetBody(
+ self, elements.size(), elements.data(), packed);
+ if (!mlirLogicalResultIsSuccess(result)) {
+ throw py::value_error(
+ "Struct body already set to different content.");
+ }
+ },
+ py::arg("elements"), py::kw_only(), py::arg("packed") = false);
+
+ llvmStructType.def_classmethod(
+ "new_identified",
+ [](py::object cls, const std::string &name,
+ const std::vector<MlirType> &elements, bool packed, MlirContext ctx) {
+ return cls(mlirLLVMStructTypeIdentifiedNewGet(
+ ctx, mlirStringRefCreate(name.data(), name.length()),
+ elements.size(), elements.data(), packed));
+ },
+ py::arg("cls"), py::arg("name"), py::arg("elements"), py::kw_only(),
+ py::arg("packed") = false, py::arg("context") = py::none());
+
+ llvmStructType.def_property_readonly(
+ "name", [](MlirType type) -> std::optional<std::string> {
+ if (mlirLLVMStructTypeIsLiteral(type))
+ return std::nullopt;
+
+ MlirStringRef stringRef = mlirLLVMStructTypeGetIdentifier(type);
+ return StringRef(stringRef.data, stringRef.length).str();
+ });
+
+ llvmStructType.def_property_readonly("body", [](MlirType type) -> py::object {
+ // Don't crash in absence of a body.
+ if (mlirLLVMStructTypeIsOpaque(type))
+ return py::none();
+
+ py::list body;
+ for (intptr_t i = 0, e = mlirLLVMStructTypeGetNumElementTypes(type); i < e;
+ ++i) {
+ body.append(mlirLLVMStructTypeGetElementType(type, i));
+ }
+ return body;
+ });
+
+ llvmStructType.def_property_readonly(
+ "packed", [](MlirType type) { return mlirLLVMStructTypeIsPacked(type); });
+
+ llvmStructType.def_property_readonly(
+ "opaque", [](MlirType type) { return mlirLLVMStructTypeIsOpaque(type); });
+}
+
+PYBIND11_MODULE(_mlirDialectsLLVM, m) {
+ m.doc() = "MLIR LLVM Dialect";
+
+ populateDialectLLVMSubmodule(m);
+}
diff --git a/mlir/lib/Bindings/Python/DialectSparseTensor.cpp b/mlir/lib/Bindings/Python/DialectSparseTensor.cpp
index 74f4d24..171faf9 100644
--- a/mlir/lib/Bindings/Python/DialectSparseTensor.cpp
+++ b/mlir/lib/Bindings/Python/DialectSparseTensor.cpp
@@ -23,24 +23,17 @@ using namespace mlir;
using namespace mlir::python::adaptors;
static void populateDialectSparseTensorSubmodule(const py::module &m) {
- py::enum_<MlirBaseSparseTensorLevelType>(m, "LevelType", py::module_local())
+ py::enum_<MlirSparseTensorLevelFormat>(m, "LevelFormat", py::module_local())
.value("dense", MLIR_SPARSE_TENSOR_LEVEL_DENSE)
.value("n_out_of_m", MLIR_SPARSE_TENSOR_LEVEL_N_OUT_OF_M)
.value("compressed", MLIR_SPARSE_TENSOR_LEVEL_COMPRESSED)
- .value("compressed_nu", MLIR_SPARSE_TENSOR_LEVEL_COMPRESSED_NU)
- .value("compressed_no", MLIR_SPARSE_TENSOR_LEVEL_COMPRESSED_NO)
- .value("compressed_nu_no", MLIR_SPARSE_TENSOR_LEVEL_COMPRESSED_NU_NO)
.value("singleton", MLIR_SPARSE_TENSOR_LEVEL_SINGLETON)
- .value("singleton_nu", MLIR_SPARSE_TENSOR_LEVEL_SINGLETON_NU)
- .value("singleton_no", MLIR_SPARSE_TENSOR_LEVEL_SINGLETON_NO)
- .value("singleton_nu_no", MLIR_SPARSE_TENSOR_LEVEL_SINGLETON_NU_NO)
- .value("loose_compressed", MLIR_SPARSE_TENSOR_LEVEL_LOOSE_COMPRESSED)
- .value("loose_compressed_nu",
- MLIR_SPARSE_TENSOR_LEVEL_LOOSE_COMPRESSED_NU)
- .value("loose_compressed_no",
- MLIR_SPARSE_TENSOR_LEVEL_LOOSE_COMPRESSED_NO)
- .value("loose_compressed_nu_no",
- MLIR_SPARSE_TENSOR_LEVEL_LOOSE_COMPRESSED_NU_NO);
+ .value("loose_compressed", MLIR_SPARSE_TENSOR_LEVEL_LOOSE_COMPRESSED);
+
+ py::enum_<MlirSparseTensorLevelPropertyNondefault>(m, "LevelProperty",
+ py::module_local())
+ .value("non_ordered", MLIR_SPARSE_PROPERTY_NON_ORDERED)
+ .value("non_unique", MLIR_SPARSE_PROPERTY_NON_UNIQUE);
mlir_attribute_subclass(m, "EncodingAttr",
mlirAttributeIsASparseTensorEncodingAttr)
@@ -62,12 +55,17 @@ static void populateDialectSparseTensorSubmodule(const py::module &m) {
"Gets a sparse_tensor.encoding from parameters.")
.def_classmethod(
"build_level_type",
- [](py::object cls, MlirBaseSparseTensorLevelType lvlType, unsigned n,
- unsigned m) {
- return mlirSparseTensorEncodingAttrBuildLvlType(lvlType, n, m);
+ [](py::object cls, MlirSparseTensorLevelFormat lvlFmt,
+ const std::vector<MlirSparseTensorLevelPropertyNondefault>
+ &properties,
+ unsigned n, unsigned m) {
+ return mlirSparseTensorEncodingAttrBuildLvlType(
+ lvlFmt, properties.data(), properties.size(), n, m);
},
- py::arg("cls"), py::arg("lvl_type"), py::arg("n") = 0,
- py::arg("m") = 0,
+ py::arg("cls"), py::arg("lvl_fmt"),
+ py::arg("properties") =
+ std::vector<MlirSparseTensorLevelPropertyNondefault>(),
+ py::arg("n") = 0, py::arg("m") = 0,
"Builds a sparse_tensor.encoding.level_type from parameters.")
.def_property_readonly(
"lvl_types",
@@ -113,17 +111,12 @@ static void populateDialectSparseTensorSubmodule(const py::module &m) {
return mlirSparseTensorEncodingAttrGetStructuredM(
mlirSparseTensorEncodingAttrGetLvlType(self, lvlRank - 1));
})
- .def_property_readonly("lvl_types_enum", [](MlirAttribute self) {
+ .def_property_readonly("lvl_formats_enum", [](MlirAttribute self) {
const int lvlRank = mlirSparseTensorEncodingGetLvlRank(self);
- std::vector<MlirBaseSparseTensorLevelType> ret;
+ std::vector<MlirSparseTensorLevelFormat> ret;
ret.reserve(lvlRank);
- for (int l = 0; l < lvlRank; l++) {
- // Convert level type to 32 bits to ignore n and m for n_out_of_m
- // format.
- ret.push_back(
- static_cast<MlirBaseSparseTensorLevelType>(static_cast<uint32_t>(
- mlirSparseTensorEncodingAttrGetLvlType(self, l))));
- }
+ for (int l = 0; l < lvlRank; l++)
+ ret.push_back(mlirSparseTensorEncodingAttrGetLvlFmt(self, l));
return ret;
});
}
diff --git a/mlir/lib/Bindings/Python/IRTypes.cpp b/mlir/lib/Bindings/Python/IRTypes.cpp
index 820992d..e1e4eb9 100644
--- a/mlir/lib/Bindings/Python/IRTypes.cpp
+++ b/mlir/lib/Bindings/Python/IRTypes.cpp
@@ -109,8 +109,22 @@ public:
}
};
+class PyFloatType : public PyConcreteType<PyFloatType> {
+public:
+ static constexpr IsAFunctionTy isaFunction = mlirTypeIsAFloat;
+ static constexpr const char *pyClassName = "FloatType";
+ using PyConcreteType::PyConcreteType;
+
+ static void bindDerived(ClassTy &c) {
+ c.def_property_readonly(
+ "width", [](PyFloatType &self) { return mlirFloatTypeGetWidth(self); },
+ "Returns the width of the floating-point type");
+ }
+};
+
/// Floating Point Type subclass - Float8E4M3FNType.
-class PyFloat8E4M3FNType : public PyConcreteType<PyFloat8E4M3FNType> {
+class PyFloat8E4M3FNType
+ : public PyConcreteType<PyFloat8E4M3FNType, PyFloatType> {
public:
static constexpr IsAFunctionTy isaFunction = mlirTypeIsAFloat8E4M3FN;
static constexpr GetTypeIDFunctionTy getTypeIdFunction =
@@ -130,7 +144,7 @@ public:
};
/// Floating Point Type subclass - Float8M5E2Type.
-class PyFloat8E5M2Type : public PyConcreteType<PyFloat8E5M2Type> {
+class PyFloat8E5M2Type : public PyConcreteType<PyFloat8E5M2Type, PyFloatType> {
public:
static constexpr IsAFunctionTy isaFunction = mlirTypeIsAFloat8E5M2;
static constexpr GetTypeIDFunctionTy getTypeIdFunction =
@@ -150,7 +164,8 @@ public:
};
/// Floating Point Type subclass - Float8E4M3FNUZ.
-class PyFloat8E4M3FNUZType : public PyConcreteType<PyFloat8E4M3FNUZType> {
+class PyFloat8E4M3FNUZType
+ : public PyConcreteType<PyFloat8E4M3FNUZType, PyFloatType> {
public:
static constexpr IsAFunctionTy isaFunction = mlirTypeIsAFloat8E4M3FNUZ;
static constexpr GetTypeIDFunctionTy getTypeIdFunction =
@@ -170,7 +185,8 @@ public:
};
/// Floating Point Type subclass - Float8E4M3B11FNUZ.
-class PyFloat8E4M3B11FNUZType : public PyConcreteType<PyFloat8E4M3B11FNUZType> {
+class PyFloat8E4M3B11FNUZType
+ : public PyConcreteType<PyFloat8E4M3B11FNUZType, PyFloatType> {
public:
static constexpr IsAFunctionTy isaFunction = mlirTypeIsAFloat8E4M3B11FNUZ;
static constexpr GetTypeIDFunctionTy getTypeIdFunction =
@@ -190,7 +206,8 @@ public:
};
/// Floating Point Type subclass - Float8E5M2FNUZ.
-class PyFloat8E5M2FNUZType : public PyConcreteType<PyFloat8E5M2FNUZType> {
+class PyFloat8E5M2FNUZType
+ : public PyConcreteType<PyFloat8E5M2FNUZType, PyFloatType> {
public:
static constexpr IsAFunctionTy isaFunction = mlirTypeIsAFloat8E5M2FNUZ;
static constexpr GetTypeIDFunctionTy getTypeIdFunction =
@@ -210,7 +227,7 @@ public:
};
/// Floating Point Type subclass - BF16Type.
-class PyBF16Type : public PyConcreteType<PyBF16Type> {
+class PyBF16Type : public PyConcreteType<PyBF16Type, PyFloatType> {
public:
static constexpr IsAFunctionTy isaFunction = mlirTypeIsABF16;
static constexpr GetTypeIDFunctionTy getTypeIdFunction =
@@ -230,7 +247,7 @@ public:
};
/// Floating Point Type subclass - F16Type.
-class PyF16Type : public PyConcreteType<PyF16Type> {
+class PyF16Type : public PyConcreteType<PyF16Type, PyFloatType> {
public:
static constexpr IsAFunctionTy isaFunction = mlirTypeIsAF16;
static constexpr GetTypeIDFunctionTy getTypeIdFunction =
@@ -250,7 +267,7 @@ public:
};
/// Floating Point Type subclass - TF32Type.
-class PyTF32Type : public PyConcreteType<PyTF32Type> {
+class PyTF32Type : public PyConcreteType<PyTF32Type, PyFloatType> {
public:
static constexpr IsAFunctionTy isaFunction = mlirTypeIsATF32;
static constexpr GetTypeIDFunctionTy getTypeIdFunction =
@@ -270,7 +287,7 @@ public:
};
/// Floating Point Type subclass - F32Type.
-class PyF32Type : public PyConcreteType<PyF32Type> {
+class PyF32Type : public PyConcreteType<PyF32Type, PyFloatType> {
public:
static constexpr IsAFunctionTy isaFunction = mlirTypeIsAF32;
static constexpr GetTypeIDFunctionTy getTypeIdFunction =
@@ -290,7 +307,7 @@ public:
};
/// Floating Point Type subclass - F64Type.
-class PyF64Type : public PyConcreteType<PyF64Type> {
+class PyF64Type : public PyConcreteType<PyF64Type, PyFloatType> {
public:
static constexpr IsAFunctionTy isaFunction = mlirTypeIsAF64;
static constexpr GetTypeIDFunctionTy getTypeIdFunction =
@@ -819,6 +836,7 @@ public:
void mlir::python::populateIRTypes(py::module &m) {
PyIntegerType::bind(m);
+ PyFloatType::bind(m);
PyIndexType::bind(m);
PyFloat8E4M3FNType::bind(m);
PyFloat8E5M2Type::bind(m);
diff --git a/mlir/lib/CAPI/Dialect/LLVM.cpp b/mlir/lib/CAPI/Dialect/LLVM.cpp
index b4405f7..642018a 100644
--- a/mlir/lib/CAPI/Dialect/LLVM.cpp
+++ b/mlir/lib/CAPI/Dialect/LLVM.cpp
@@ -36,11 +36,77 @@ MlirType mlirLLVMFunctionTypeGet(MlirType resultType, intptr_t nArgumentTypes,
unwrapList(nArgumentTypes, argumentTypes, argumentStorage), isVarArg));
}
+bool mlirTypeIsALLVMStructType(MlirType type) {
+ return isa<LLVM::LLVMStructType>(unwrap(type));
+}
+
+bool mlirLLVMStructTypeIsLiteral(MlirType type) {
+ return !cast<LLVM::LLVMStructType>(unwrap(type)).isIdentified();
+}
+
+intptr_t mlirLLVMStructTypeGetNumElementTypes(MlirType type) {
+ return cast<LLVM::LLVMStructType>(unwrap(type)).getBody().size();
+}
+
+MlirType mlirLLVMStructTypeGetElementType(MlirType type, intptr_t position) {
+ return wrap(cast<LLVM::LLVMStructType>(unwrap(type)).getBody()[position]);
+}
+
+bool mlirLLVMStructTypeIsPacked(MlirType type) {
+ return cast<LLVM::LLVMStructType>(unwrap(type)).isPacked();
+}
+
+MlirStringRef mlirLLVMStructTypeGetIdentifier(MlirType type) {
+ return wrap(cast<LLVM::LLVMStructType>(unwrap(type)).getName());
+}
+
+bool mlirLLVMStructTypeIsOpaque(MlirType type) {
+ return cast<LLVM::LLVMStructType>(unwrap(type)).isOpaque();
+}
+
MlirType mlirLLVMStructTypeLiteralGet(MlirContext ctx, intptr_t nFieldTypes,
MlirType const *fieldTypes,
bool isPacked) {
- SmallVector<Type, 2> fieldStorage;
+ SmallVector<Type> fieldStorage;
return wrap(LLVMStructType::getLiteral(
unwrap(ctx), unwrapList(nFieldTypes, fieldTypes, fieldStorage),
isPacked));
}
+
+MlirType mlirLLVMStructTypeLiteralGetChecked(MlirLocation loc,
+ intptr_t nFieldTypes,
+ MlirType const *fieldTypes,
+ bool isPacked) {
+ SmallVector<Type> fieldStorage;
+ return wrap(LLVMStructType::getLiteralChecked(
+ [loc]() { return emitError(unwrap(loc)); }, unwrap(loc)->getContext(),
+ unwrapList(nFieldTypes, fieldTypes, fieldStorage), isPacked));
+}
+
+MlirType mlirLLVMStructTypeOpaqueGet(MlirContext ctx, MlirStringRef name) {
+ return wrap(LLVMStructType::getOpaque(unwrap(name), unwrap(ctx)));
+}
+
+MlirType mlirLLVMStructTypeIdentifiedGet(MlirContext ctx, MlirStringRef name) {
+ return wrap(LLVMStructType::getIdentified(unwrap(ctx), unwrap(name)));
+}
+
+MlirType mlirLLVMStructTypeIdentifiedNewGet(MlirContext ctx, MlirStringRef name,
+ intptr_t nFieldTypes,
+ MlirType const *fieldTypes,
+ bool isPacked) {
+ SmallVector<Type> fields;
+ return wrap(LLVMStructType::getNewIdentified(
+ unwrap(ctx), unwrap(name), unwrapList(nFieldTypes, fieldTypes, fields),
+ isPacked));
+}
+
+MlirLogicalResult mlirLLVMStructTypeSetBody(MlirType structType,
+ intptr_t nFieldTypes,
+ MlirType const *fieldTypes,
+ bool isPacked) {
+ SmallVector<Type> fields;
+ return wrap(
+ cast<LLVM::LLVMStructType>(unwrap(structType))
+ .setBody(unwrapList(nFieldTypes, fieldTypes, fields), isPacked));
+}
diff --git a/mlir/lib/CAPI/Dialect/SparseTensor.cpp b/mlir/lib/CAPI/Dialect/SparseTensor.cpp
index 4e1bd45..55af8be 100644
--- a/mlir/lib/CAPI/Dialect/SparseTensor.cpp
+++ b/mlir/lib/CAPI/Dialect/SparseTensor.cpp
@@ -22,34 +22,23 @@ MLIR_DEFINE_CAPI_DIALECT_REGISTRATION(SparseTensor, sparse_tensor,
// Ensure the C-API enums are int-castable to C++ equivalents.
static_assert(
static_cast<int>(MLIR_SPARSE_TENSOR_LEVEL_DENSE) ==
- static_cast<int>(LevelType::Dense) &&
+ static_cast<int>(LevelFormat::Dense) &&
static_cast<int>(MLIR_SPARSE_TENSOR_LEVEL_COMPRESSED) ==
- static_cast<int>(LevelType::Compressed) &&
- static_cast<int>(MLIR_SPARSE_TENSOR_LEVEL_COMPRESSED_NU) ==
- static_cast<int>(LevelType::CompressedNu) &&
- static_cast<int>(MLIR_SPARSE_TENSOR_LEVEL_COMPRESSED_NO) ==
- static_cast<int>(LevelType::CompressedNo) &&
- static_cast<int>(MLIR_SPARSE_TENSOR_LEVEL_COMPRESSED_NU_NO) ==
- static_cast<int>(LevelType::CompressedNuNo) &&
+ static_cast<int>(LevelFormat::Compressed) &&
static_cast<int>(MLIR_SPARSE_TENSOR_LEVEL_SINGLETON) ==
- static_cast<int>(LevelType::Singleton) &&
- static_cast<int>(MLIR_SPARSE_TENSOR_LEVEL_SINGLETON_NU) ==
- static_cast<int>(LevelType::SingletonNu) &&
- static_cast<int>(MLIR_SPARSE_TENSOR_LEVEL_SINGLETON_NO) ==
- static_cast<int>(LevelType::SingletonNo) &&
- static_cast<int>(MLIR_SPARSE_TENSOR_LEVEL_SINGLETON_NU_NO) ==
- static_cast<int>(LevelType::SingletonNuNo) &&
+ static_cast<int>(LevelFormat::Singleton) &&
static_cast<int>(MLIR_SPARSE_TENSOR_LEVEL_LOOSE_COMPRESSED) ==
- static_cast<int>(LevelType::LooseCompressed) &&
- static_cast<int>(MLIR_SPARSE_TENSOR_LEVEL_LOOSE_COMPRESSED_NU) ==
- static_cast<int>(LevelType::LooseCompressedNu) &&
- static_cast<int>(MLIR_SPARSE_TENSOR_LEVEL_LOOSE_COMPRESSED_NO) ==
- static_cast<int>(LevelType::LooseCompressedNo) &&
- static_cast<int>(MLIR_SPARSE_TENSOR_LEVEL_LOOSE_COMPRESSED_NU_NO) ==
- static_cast<int>(LevelType::LooseCompressedNuNo) &&
+ static_cast<int>(LevelFormat::LooseCompressed) &&
static_cast<int>(MLIR_SPARSE_TENSOR_LEVEL_N_OUT_OF_M) ==
- static_cast<int>(LevelType::NOutOfM),
- "MlirSparseTensorLevelType (C-API) and LevelType (C++) mismatch");
+ static_cast<int>(LevelFormat::NOutOfM),
+ "MlirSparseTensorLevelFormat (C-API) and LevelFormat (C++) mismatch");
+
+static_assert(static_cast<int>(MLIR_SPARSE_PROPERTY_NON_ORDERED) ==
+ static_cast<int>(LevelPropertyNondefault::Nonordered) &&
+ static_cast<int>(MLIR_SPARSE_PROPERTY_NON_UNIQUE) ==
+ static_cast<int>(LevelPropertyNondefault::Nonunique),
+ "MlirSparseTensorLevelProperty (C-API) and "
+ "LevelPropertyNondefault (C++) mismatch");
bool mlirAttributeIsASparseTensorEncodingAttr(MlirAttribute attr) {
return isa<SparseTensorEncodingAttr>(unwrap(attr));
@@ -87,6 +76,13 @@ mlirSparseTensorEncodingAttrGetLvlType(MlirAttribute attr, intptr_t lvl) {
cast<SparseTensorEncodingAttr>(unwrap(attr)).getLvlType(lvl));
}
+enum MlirSparseTensorLevelFormat
+mlirSparseTensorEncodingAttrGetLvlFmt(MlirAttribute attr, intptr_t lvl) {
+ LevelType lt =
+ static_cast<LevelType>(mlirSparseTensorEncodingAttrGetLvlType(attr, lvl));
+ return static_cast<MlirSparseTensorLevelFormat>(*getLevelFormat(lt));
+}
+
int mlirSparseTensorEncodingAttrGetPosWidth(MlirAttribute attr) {
return cast<SparseTensorEncodingAttr>(unwrap(attr)).getPosWidth();
}
@@ -95,12 +91,17 @@ int mlirSparseTensorEncodingAttrGetCrdWidth(MlirAttribute attr) {
return cast<SparseTensorEncodingAttr>(unwrap(attr)).getCrdWidth();
}
-MlirSparseTensorLevelType
-mlirSparseTensorEncodingAttrBuildLvlType(MlirBaseSparseTensorLevelType lvlType,
- unsigned n, unsigned m) {
- LevelType lt = static_cast<LevelType>(lvlType);
- return static_cast<MlirSparseTensorLevelType>(*buildLevelType(
- *getLevelFormat(lt), isOrderedLT(lt), isUniqueLT(lt), n, m));
+MlirSparseTensorLevelType mlirSparseTensorEncodingAttrBuildLvlType(
+ enum MlirSparseTensorLevelFormat lvlFmt,
+ const enum MlirSparseTensorLevelPropertyNondefault *properties,
+ unsigned size, unsigned n, unsigned m) {
+
+ std::vector<LevelPropertyNondefault> props;
+ for (unsigned i = 0; i < size; i++)
+ props.push_back(static_cast<LevelPropertyNondefault>(properties[i]));
+
+ return static_cast<MlirSparseTensorLevelType>(
+ *buildLevelType(static_cast<LevelFormat>(lvlFmt), props, n, m));
}
unsigned
diff --git a/mlir/lib/CAPI/IR/BuiltinTypes.cpp b/mlir/lib/CAPI/IR/BuiltinTypes.cpp
index 18c9414..e1a5d82 100644
--- a/mlir/lib/CAPI/IR/BuiltinTypes.cpp
+++ b/mlir/lib/CAPI/IR/BuiltinTypes.cpp
@@ -78,6 +78,14 @@ MlirType mlirIndexTypeGet(MlirContext ctx) {
// Floating-point types.
//===----------------------------------------------------------------------===//
+bool mlirTypeIsAFloat(MlirType type) {
+ return llvm::isa<FloatType>(unwrap(type));
+}
+
+unsigned mlirFloatTypeGetWidth(MlirType type) {
+ return llvm::cast<FloatType>(unwrap(type)).getWidth();
+}
+
MlirTypeID mlirFloat8E5M2TypeGetTypeID() {
return wrap(Float8E5M2Type::getTypeID());
}
diff --git a/mlir/lib/Conversion/NVGPUToNVVM/NVGPUToNVVM.cpp b/mlir/lib/Conversion/NVGPUToNVVM/NVGPUToNVVM.cpp
index 5080956..9b5d19e 100644
--- a/mlir/lib/Conversion/NVGPUToNVVM/NVGPUToNVVM.cpp
+++ b/mlir/lib/Conversion/NVGPUToNVVM/NVGPUToNVVM.cpp
@@ -956,7 +956,8 @@ struct NVGPUMBarrierTryWaitParityLowering
getMbarrierPtr(b, op.getBarriers().getType(), adaptor.getBarriers(),
adaptor.getMbarId(), rewriter);
Value ticks = truncToI32(b, adaptor.getTicks());
- Value phase = truncToI32(b, adaptor.getPhase());
+ Value phase =
+ b.create<LLVM::ZExtOp>(b.getI32Type(), adaptor.getPhaseParity());
if (isMbarrierShared(op.getBarriers().getType())) {
rewriter.replaceOpWithNewOp<NVVM::MBarrierTryWaitParitySharedOp>(
diff --git a/mlir/lib/Conversion/SCFToOpenMP/SCFToOpenMP.cpp b/mlir/lib/Conversion/SCFToOpenMP/SCFToOpenMP.cpp
index 2f8b3f7..ea5f31e 100644
--- a/mlir/lib/Conversion/SCFToOpenMP/SCFToOpenMP.cpp
+++ b/mlir/lib/Conversion/SCFToOpenMP/SCFToOpenMP.cpp
@@ -367,9 +367,11 @@ struct ParallelOpLowering : public OpRewritePattern<scf::ParallelOp> {
// TODO: consider checking it here is already a compatible reduction
// declaration and use it instead of redeclaring.
SmallVector<Attribute> reductionDeclSymbols;
+ SmallVector<omp::ReductionDeclareOp> ompReductionDecls;
auto reduce = cast<scf::ReduceOp>(parallelOp.getBody()->getTerminator());
for (int64_t i = 0, e = parallelOp.getNumReductions(); i < e; ++i) {
omp::ReductionDeclareOp decl = declareReduction(rewriter, reduce, i);
+ ompReductionDecls.push_back(decl);
if (!decl)
return failure();
reductionDeclSymbols.push_back(
@@ -398,11 +400,39 @@ struct ParallelOpLowering : public OpRewritePattern<scf::ParallelOp> {
// Replace the reduction operations contained in this loop. Must be done
// here rather than in a separate pattern to have access to the list of
// reduction variables.
- for (auto [x, y] :
- llvm::zip_equal(reductionVariables, reduce.getOperands())) {
+ for (auto [x, y, rD] : llvm::zip_equal(
+ reductionVariables, reduce.getOperands(), ompReductionDecls)) {
OpBuilder::InsertionGuard guard(rewriter);
rewriter.setInsertionPoint(reduce);
- rewriter.create<omp::ReductionOp>(reduce.getLoc(), y, x);
+ Region &redRegion = rD.getReductionRegion();
+ // The SCF dialect by definition contains only structured operations
+ // and hence the SCF reduction region will contain a single block.
+ // The ompReductionDecls region is a copy of the SCF reduction region
+ // and hence has the same property.
+ assert(redRegion.hasOneBlock() &&
+ "expect reduction region to have one block");
+ Value pvtRedVar = parallelOp.getRegion().addArgument(x.getType(), loc);
+ Value pvtRedVal = rewriter.create<LLVM::LoadOp>(reduce.getLoc(),
+ rD.getType(), pvtRedVar);
+ // Make a copy of the reduction combiner region in the body
+ mlir::OpBuilder builder(rewriter.getContext());
+ builder.setInsertionPoint(reduce);
+ mlir::IRMapping mapper;
+ assert(redRegion.getNumArguments() == 2 &&
+ "expect reduction region to have two arguments");
+ mapper.map(redRegion.getArgument(0), pvtRedVal);
+ mapper.map(redRegion.getArgument(1), y);
+ for (auto &op : redRegion.getOps()) {
+ Operation *cloneOp = builder.clone(op, mapper);
+ if (auto yieldOp = dyn_cast<omp::YieldOp>(*cloneOp)) {
+ assert(yieldOp && yieldOp.getResults().size() == 1 &&
+ "expect YieldOp in reduction region to return one result");
+ Value redVal = yieldOp.getResults()[0];
+ rewriter.create<LLVM::StoreOp>(loc, redVal, pvtRedVar);
+ rewriter.eraseOp(yieldOp);
+ break;
+ }
+ }
}
rewriter.eraseOp(reduce);
diff --git a/mlir/lib/Conversion/VectorToLLVM/ConvertVectorToLLVM.cpp b/mlir/lib/Conversion/VectorToLLVM/ConvertVectorToLLVM.cpp
index b66b55a..19cc914 100644
--- a/mlir/lib/Conversion/VectorToLLVM/ConvertVectorToLLVM.cpp
+++ b/mlir/lib/Conversion/VectorToLLVM/ConvertVectorToLLVM.cpp
@@ -1734,6 +1734,45 @@ struct VectorSplatNdOpLowering : public ConvertOpToLLVMPattern<SplatOp> {
}
};
+/// Conversion pattern for a `vector.interleave`.
+/// This supports fixed-sized vectors and scalable vectors.
+struct VectorInterleaveOpLowering
+ : public ConvertOpToLLVMPattern<vector::InterleaveOp> {
+ using ConvertOpToLLVMPattern::ConvertOpToLLVMPattern;
+
+ LogicalResult
+ matchAndRewrite(vector::InterleaveOp interleaveOp, OpAdaptor adaptor,
+ ConversionPatternRewriter &rewriter) const override {
+ VectorType resultType = interleaveOp.getResultVectorType();
+ // n-D interleaves should have been lowered already.
+ if (resultType.getRank() != 1)
+ return rewriter.notifyMatchFailure(interleaveOp,
+ "InterleaveOp not rank 1");
+ // If the result is rank 1, then this directly maps to LLVM.
+ if (resultType.isScalable()) {
+ rewriter.replaceOpWithNewOp<LLVM::experimental_vector_interleave2>(
+ interleaveOp, typeConverter->convertType(resultType),
+ adaptor.getLhs(), adaptor.getRhs());
+ return success();
+ }
+ // Lower fixed-size interleaves to a shufflevector. While the
+ // vector.interleave2 intrinsic supports fixed and scalable vectors, the
+ // langref still recommends fixed-vectors use shufflevector, see:
+ // https://llvm.org/docs/LangRef.html#id876.
+ int64_t resultVectorSize = resultType.getNumElements();
+ SmallVector<int32_t> interleaveShuffleMask;
+ interleaveShuffleMask.reserve(resultVectorSize);
+ for (int i = 0, end = resultVectorSize / 2; i < end; ++i) {
+ interleaveShuffleMask.push_back(i);
+ interleaveShuffleMask.push_back((resultVectorSize / 2) + i);
+ }
+ rewriter.replaceOpWithNewOp<LLVM::ShuffleVectorOp>(
+ interleaveOp, adaptor.getLhs(), adaptor.getRhs(),
+ interleaveShuffleMask);
+ return success();
+ }
+};
+
} // namespace
/// Populate the given list with patterns that convert from Vector to LLVM.
@@ -1758,7 +1797,8 @@ void mlir::populateVectorToLLVMConversionPatterns(
VectorExpandLoadOpConversion, VectorCompressStoreOpConversion,
VectorSplatOpLowering, VectorSplatNdOpLowering,
VectorScalableInsertOpLowering, VectorScalableExtractOpLowering,
- MaskedReductionOpConversion>(converter);
+ MaskedReductionOpConversion, VectorInterleaveOpLowering>(
+ converter);
// Transfer ops with rank > 1 are handled by VectorToSCF.
populateVectorTransferLoweringPatterns(patterns, /*maxTransferRank=*/1);
}
diff --git a/mlir/lib/Dialect/AMDGPU/CMakeLists.txt b/mlir/lib/Dialect/AMDGPU/CMakeLists.txt
index 31167e6..63b4d8b9 100644
--- a/mlir/lib/Dialect/AMDGPU/CMakeLists.txt
+++ b/mlir/lib/Dialect/AMDGPU/CMakeLists.txt
@@ -1,3 +1,4 @@
add_subdirectory(IR)
-add_subdirectory(Transforms)
add_subdirectory(Utils)
+add_subdirectory(TransformOps)
+add_subdirectory(Transforms)
diff --git a/mlir/lib/Dialect/AMDGPU/TransformOps/AMDGPUTransformOps.cpp b/mlir/lib/Dialect/AMDGPU/TransformOps/AMDGPUTransformOps.cpp
new file mode 100644
index 0000000..ff29f9f
--- /dev/null
+++ b/mlir/lib/Dialect/AMDGPU/TransformOps/AMDGPUTransformOps.cpp
@@ -0,0 +1,66 @@
+//===- AMDGPUTransformOps.cpp - Implementation of AMDGPU transform ops-----===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "mlir/Dialect/AMDGPU/TransformOps/AMDGPUTransformOps.h"
+
+#include "mlir/Dialect/AMDGPU/IR/AMDGPUDialect.h"
+#include "mlir/Dialect/AMDGPU/Transforms/Transforms.h"
+#include "mlir/Dialect/Affine/IR/AffineOps.h"
+#include "mlir/Dialect/Vector/IR/VectorOps.h"
+
+using namespace mlir;
+using namespace mlir::amdgpu;
+using namespace mlir::transform;
+using namespace mlir::func;
+
+#define DEBUG_TYPE "amdgpu-transforms"
+#define DBGS() (llvm::dbgs() << "[" DEBUG_TYPE "]: ")
+#define DBGSNL() (llvm::dbgs() << "\n")
+#define LDBG(X) LLVM_DEBUG(DBGS() << (X) << "\n")
+
+DiagnosedSilenceableFailure
+ApplyOptimizeSharedMemoryReadsAndWritesOp::applyToOne(
+ TransformRewriter &rewriter, FuncOp funcOp, ApplyToEachResultList &results,
+ TransformState &state) {
+ optimizeSharedMemoryReadsAndWritesOp(funcOp);
+ return DiagnosedSilenceableFailure::success();
+}
+
+void ApplyOptimizeSharedMemoryReadsAndWritesOp::getEffects(
+ SmallVectorImpl<MemoryEffects::EffectInstance> &effects) {
+ onlyReadsHandle(getTarget(), effects);
+ modifiesPayload(effects);
+}
+
+//===----------------------------------------------------------------------===//
+// Transform op registration
+//===----------------------------------------------------------------------===//
+
+namespace {
+class AMDGPUTransformDialectExtension
+ : public TransformDialectExtension<AMDGPUTransformDialectExtension> {
+public:
+ AMDGPUTransformDialectExtension() {
+ declareGeneratedDialect<arith::ArithDialect>();
+ declareGeneratedDialect<affine::AffineDialect>();
+ declareGeneratedDialect<amdgpu::AMDGPUDialect>();
+ declareGeneratedDialect<vector::VectorDialect>();
+ registerTransformOps<
+#define GET_OP_LIST
+#include "mlir/Dialect/AMDGPU/TransformOps/AMDGPUTransformOps.cpp.inc"
+ >();
+ }
+};
+} // namespace
+
+#define GET_OP_CLASSES
+#include "mlir/Dialect/AMDGPU/TransformOps/AMDGPUTransformOps.cpp.inc"
+
+void amdgpu::registerTransformDialectExtension(DialectRegistry &registry) {
+ registry.addExtensions<AMDGPUTransformDialectExtension>();
+}
diff --git a/mlir/lib/Dialect/AMDGPU/TransformOps/CMakeLists.txt b/mlir/lib/Dialect/AMDGPU/TransformOps/CMakeLists.txt
new file mode 100644
index 0000000..c39a3b5
--- /dev/null
+++ b/mlir/lib/Dialect/AMDGPU/TransformOps/CMakeLists.txt
@@ -0,0 +1,25 @@
+add_mlir_dialect_library(MLIRAMDGPUTransformOps
+ AMDGPUTransformOps.cpp
+
+ ADDITIONAL_HEADER_DIRS
+ ${MLIR_MAIN_INCLUDE_DIR}/mlir/Dialect/AMDGPU/TransformOps
+
+ DEPENDS
+ MLIRAMDGPUTransformOpsIncGen
+
+ LINK_LIBS PUBLIC
+ MLIRAffineDialect
+ MLIRArithDialect
+ MLIRIR
+ MLIRLinalgDialect
+ MLIRAMDGPUDialect
+ MLIRAMDGPUTransforms
+ MLIRParser
+ MLIRSideEffectInterfaces
+ MLIRSCFDialect
+ MLIRSCFTransforms
+ MLIRTransformDialect
+ MLIRTransformDialectUtils
+ MLIRVectorTransforms
+
+ )
diff --git a/mlir/lib/Dialect/AMDGPU/Transforms/OptimizeSharedMemory.cpp b/mlir/lib/Dialect/AMDGPU/Transforms/OptimizeSharedMemory.cpp
index c7001fc..7c50a87 100644
--- a/mlir/lib/Dialect/AMDGPU/Transforms/OptimizeSharedMemory.cpp
+++ b/mlir/lib/Dialect/AMDGPU/Transforms/OptimizeSharedMemory.cpp
@@ -24,8 +24,6 @@
#include "mlir/Dialect/Vector/IR/VectorOps.h"
#include "mlir/Interfaces/SideEffectInterfaces.h"
#include "mlir/Support/LogicalResult.h"
-#include "llvm/ADT/STLExtras.h"
-#include "llvm/Support/MathExtras.h"
namespace mlir {
namespace amdgpu {
@@ -52,12 +50,12 @@ constexpr int64_t kDefaultVectorSizeBits = 64;
static Value permuteVectorOffset(OpBuilder &b, Location loc,
ArrayRef<Value> indices, MemRefType memrefTy,
int64_t srcDim, int64_t tgtDim) {
- // Adjust the src index to change how often the permutation changes
- // if necessary.
+ /// Adjust the src index to change how often the permutation changes
+ /// if necessary.
Value src = indices[srcDim];
- // We only want to permute every N iterations of the target dim where N is
- // ceil(sharedMemoryLineSizeBytes / dimSizeBytes(tgtDim)).
+ /// We only want to permute every N iterations of the target dim where N is
+ /// ceil(sharedMemoryLineSizeBytes / dimSizeBytes(tgtDim)).
const int64_t permuteEveryN = std::max<int64_t>(
1, kSharedMemoryLineSizeBytes / ((memrefTy.getDimSize(tgtDim) *
memrefTy.getElementTypeBitWidth()) /
@@ -83,8 +81,8 @@ static Value permuteVectorOffset(OpBuilder &b, Location loc,
Value srcBits = b.create<arith::ConstantIndexOp>(loc, mask);
srcBits = b.create<arith::AndIOp>(loc, src, srcBits);
- // Use the src bits to permute the target bits b[N:M] containing the
- // vector offset.
+ /// Use the src bits to permute the target bits b[N:M] containing the
+ /// vector offset.
if (permuteEveryN > 1) {
int64_t shlBits = n - llvm::Log2_64(permuteEveryN);
if (shlBits > 0) {
@@ -133,8 +131,8 @@ getShmReadAndWriteOps(Operation *parentOp, Value shmMemRef,
writeOps.push_back(op);
});
- // Restrict to a supported set of ops. We also require at least 2D access,
- // although this could be relaxed.
+ /// Restrict to a supported set of ops. We also require at least 2D access,
+ /// although this could be relaxed.
if (llvm::any_of(readOps, [](Operation *op) {
return !isa<memref::LoadOp, vector::LoadOp, vector::TransferReadOp>(
op) ||
@@ -159,15 +157,15 @@ mlir::amdgpu::optimizeSharedMemoryReadsAndWrites(Operation *parentOp,
!amdgpu::AMDGPUDialect::hasSharedMemoryAddressSpace(memRefType))
return failure();
- // Abort if the given value has any sub-views; we do not do any alias
- // analysis.
+ /// Abort if the given value has any sub-views; we do not do any alias
+ /// analysis.
bool hasSubView = false;
parentOp->walk([&](memref::SubViewOp subView) { hasSubView = true; });
if (hasSubView)
return failure();
- // Check if this is necessary given the assumption of 128b accesses:
- // If dim[rank-1] is small enough to fit 8 rows in a 128B line.
+ /// Check if this is necessary given the assumption of 128b accesses:
+ /// If dim[rank-1] is small enough to fit 8 rows in a 128B line.
const int64_t rowSize = memRefType.getDimSize(memRefType.getRank() - 1);
const int64_t rowsPerLine =
(8 * kSharedMemoryLineSizeBytes / memRefType.getElementTypeBitWidth()) /
@@ -177,8 +175,8 @@ mlir::amdgpu::optimizeSharedMemoryReadsAndWrites(Operation *parentOp,
if (rowsPerLine >= threadGroupSize)
return failure();
- // Get sets of operations within the function that read/write to shared
- // memory.
+ /// Get sets of operations within the function that read/write to shared
+ /// memory.
SmallVector<Operation *, 16> shmReadOps;
SmallVector<Operation *, 16> shmWriteOps;
if (failed(getShmReadAndWriteOps(parentOp, memrefValue, shmReadOps,
@@ -193,7 +191,7 @@ mlir::amdgpu::optimizeSharedMemoryReadsAndWrites(Operation *parentOp,
int64_t tgtDim = memRefType.getRank() - 1;
int64_t srcDim = memRefType.getRank() - 2;
- // Transform indices for the ops writing to shared memory.
+ /// Transform indices for the ops writing to shared memory.
while (!shmWriteOps.empty()) {
Operation *shmWriteOp = shmWriteOps.pop_back_val();
builder.setInsertionPoint(shmWriteOp);
@@ -205,7 +203,7 @@ mlir::amdgpu::optimizeSharedMemoryReadsAndWrites(Operation *parentOp,
amdgpu::setIndices(shmWriteOp, transformedIndices);
}
- // Transform indices for the ops reading from shared memory.
+ /// Transform indices for the ops reading from shared memory.
while (!shmReadOps.empty()) {
Operation *shmReadOp = shmReadOps.pop_back_val();
builder.setInsertionPoint(shmReadOp);
@@ -220,6 +218,20 @@ mlir::amdgpu::optimizeSharedMemoryReadsAndWrites(Operation *parentOp,
return success();
}
+void amdgpu::optimizeSharedMemoryReadsAndWritesOp(func::FuncOp funcOp) {
+ SmallVector<memref::AllocOp> shmAllocOps;
+ funcOp.walk([&](memref::AllocOp allocOp) {
+ if (!amdgpu::AMDGPUDialect::hasSharedMemoryAddressSpace(allocOp.getType()))
+ return;
+ shmAllocOps.push_back(allocOp);
+ });
+ for (auto allocOp : shmAllocOps) {
+ if (failed(amdgpu::optimizeSharedMemoryReadsAndWrites(funcOp,
+ allocOp.getMemref())))
+ return;
+ }
+}
+
struct OptimizeSharedMemoryPass
: public amdgpu::impl::OptimizeSharedMemoryBase<OptimizeSharedMemoryPass> {
public:
diff --git a/mlir/lib/Dialect/Async/Transforms/AsyncToAsyncRuntime.cpp b/mlir/lib/Dialect/Async/Transforms/AsyncToAsyncRuntime.cpp
index 828f53c..31e81107 100644
--- a/mlir/lib/Dialect/Async/Transforms/AsyncToAsyncRuntime.cpp
+++ b/mlir/lib/Dialect/Async/Transforms/AsyncToAsyncRuntime.cpp
@@ -582,7 +582,7 @@ public:
// Inside regular functions we use the blocking wait operation to wait for
// the async object (token, value or group) to become available.
if (!isInCoroutine) {
- ImplicitLocOpBuilder builder(loc, op, &rewriter);
+ ImplicitLocOpBuilder builder(loc, rewriter);
builder.create<RuntimeAwaitOp>(loc, operand);
// Assert that the awaited operands is not in the error state.
@@ -601,7 +601,7 @@ public:
CoroMachinery &coro = funcCoro->getSecond();
Block *suspended = op->getBlock();
- ImplicitLocOpBuilder builder(loc, op, &rewriter);
+ ImplicitLocOpBuilder builder(loc, rewriter);
MLIRContext *ctx = op->getContext();
// Save the coroutine state and resume on a runtime managed thread when
diff --git a/mlir/lib/Dialect/GPU/Pipelines/GPUToNVVMPipeline.cpp b/mlir/lib/Dialect/GPU/Pipelines/GPUToNVVMPipeline.cpp
index 0b47392..935f0de 100644
--- a/mlir/lib/Dialect/GPU/Pipelines/GPUToNVVMPipeline.cpp
+++ b/mlir/lib/Dialect/GPU/Pipelines/GPUToNVVMPipeline.cpp
@@ -51,7 +51,6 @@ void buildCommonPassPipeline(
pm.addPass(createConvertVectorToSCFPass());
pm.addPass(createConvertSCFToCFPass());
pm.addPass(createConvertNVVMToLLVMPass());
- pm.addPass(createConvertMathToLLVMPass());
pm.addPass(createConvertFuncToLLVMPass());
pm.addPass(memref::createExpandStridedMetadataPass());
@@ -98,6 +97,7 @@ void buildHostPostPipeline(OpPassManager &pm,
GpuModuleToBinaryPassOptions gpuModuleToBinaryPassOptions;
gpuModuleToBinaryPassOptions.compilationTarget = options.cubinFormat;
pm.addPass(createGpuModuleToBinaryPass(gpuModuleToBinaryPassOptions));
+ pm.addPass(createConvertMathToLLVMPass());
pm.addPass(createCanonicalizerPass());
pm.addPass(createCSEPass());
pm.addPass(createReconcileUnrealizedCastsPass());
diff --git a/mlir/lib/Dialect/LLVMIR/IR/NVVMDialect.cpp b/mlir/lib/Dialect/LLVMIR/IR/NVVMDialect.cpp
index 9b1ef08..4780ec09 100644
--- a/mlir/lib/Dialect/LLVMIR/IR/NVVMDialect.cpp
+++ b/mlir/lib/Dialect/LLVMIR/IR/NVVMDialect.cpp
@@ -1022,6 +1022,13 @@ LogicalResult NVVM::SetMaxRegisterOp::verify() {
return success();
}
+LogicalResult NVVM::BarrierOp::verify() {
+ if (getNumberOfThreads() && !getBarrierId())
+ return emitOpError(
+ "barrier id is missing, it should be set between 0 to 15");
+ return success();
+}
+
//===----------------------------------------------------------------------===//
// NVVMDialect initialization, type parsing, and registration.
//===----------------------------------------------------------------------===//
diff --git a/mlir/lib/Dialect/Linalg/Transforms/Padding.cpp b/mlir/lib/Dialect/Linalg/Transforms/Padding.cpp
index 278f349..8c4b70d 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/Padding.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/Padding.cpp
@@ -23,7 +23,7 @@ using namespace mlir::linalg;
#define DBGSNL() (llvm::dbgs() << "\n")
/// Compute the padded shape of the given operand. The operand is padded to a
-/// static bounding box according to the specified options.
+/// static bounding box according to the specified padding options.
static LogicalResult computePaddedShape(linalg::LinalgOp opToPad,
OpOperand *opOperand,
const LinalgPaddingOptions &options,
@@ -75,7 +75,7 @@ static LogicalResult computePaddedShape(linalg::LinalgOp opToPad,
presburger::BoundType::UB, opOperand->get(),
/*dim=*/i, /*stopCondition=*/nullptr, /*closedUB=*/true);
if (failed(upperBound)) {
- LLVM_DEBUG(DBGS() << "----count not compute a bounding box for padding");
+ LLVM_DEBUG(DBGS() << "----could not compute a bounding box for padding");
return failure();
}
paddedShape[i] = ceil(*upperBound, shapeDimToMultiple[i]);
@@ -89,7 +89,7 @@ static LogicalResult computePaddedShape(linalg::LinalgOp opToPad,
/// the nofold flag found in "paddingValues" and "packPaddings", respectively.
///
/// Exit early and return the `opOperand` value if it already has the requested
-/// shape. I.e.:
+/// shape. i.e.:
/// - static shape
/// - nofold is not set
/// - dim sizes are multiples of "padToMultipleOf"
@@ -232,7 +232,7 @@ linalg::rewriteAsPaddedOp(RewriterBase &rewriter, LinalgOp opToPad,
// Copy back unpadded results to the original destination (i.e., inits of the
// linalg op), so that the destination buffer of the computation does not
- // change. If the padding folds away, this will materizalize as a memcpy
+ // change. If the padding folds away, this will materialize as a memcpy
// between two identical buffers, which will then also fold away.
assert(static_cast<int64_t>(paddedSubtensorResults.size()) ==
opToPad.getNumDpsInits() &&
diff --git a/mlir/lib/Dialect/Math/Transforms/LegalizeToF32.cpp b/mlir/lib/Dialect/Math/Transforms/LegalizeToF32.cpp
index d281790..5998133 100644
--- a/mlir/lib/Dialect/Math/Transforms/LegalizeToF32.cpp
+++ b/mlir/lib/Dialect/Math/Transforms/LegalizeToF32.cpp
@@ -76,20 +76,14 @@ LogicalResult LegalizeToF32RewritePattern::matchAndRewrite(
ConversionPatternRewriter &rewriter) const {
Location loc = op->getLoc();
const TypeConverter *converter = getTypeConverter();
- if (converter->isLegal(op))
- return rewriter.notifyMatchFailure(loc, "op already legal");
- OperationState newOp(loc, op->getName());
- newOp.addOperands(operands);
+ FailureOr<Operation *> legalized =
+ convertOpResultTypes(op, operands, *converter, rewriter);
+ if (failed(legalized))
+ return failure();
- SmallVector<Type> newResultTypes;
- if (failed(converter->convertTypes(op->getResultTypes(), newResultTypes)))
- return rewriter.notifyMatchFailure(loc, "couldn't convert return types");
- newOp.addTypes(newResultTypes);
- newOp.addAttributes(op->getAttrs());
- Operation *legalized = rewriter.create(newOp);
- SmallVector<Value> results = legalized->getResults();
- for (auto [result, newType, origType] :
- llvm::zip_equal(results, newResultTypes, op->getResultTypes())) {
+ SmallVector<Value> results = (*legalized)->getResults();
+ for (auto [result, newType, origType] : llvm::zip_equal(
+ results, (*legalized)->getResultTypes(), op->getResultTypes())) {
if (newType != origType)
result = rewriter.create<arith::TruncFOp>(loc, origType, result);
}
diff --git a/mlir/lib/Dialect/NVGPU/IR/NVGPUDialect.cpp b/mlir/lib/Dialect/NVGPU/IR/NVGPUDialect.cpp
index 4b632747..26f831f 100644
--- a/mlir/lib/Dialect/NVGPU/IR/NVGPUDialect.cpp
+++ b/mlir/lib/Dialect/NVGPU/IR/NVGPUDialect.cpp
@@ -362,7 +362,8 @@ std::optional<InFlightDiagnostic> verifyTmaDescriptorWithMemref(
<< kMaxTMADimension << " but it is " << dim;
}
}
- if (descMemref.getRank() > 1) {
+ if (descMemref.getRank() > 1 &&
+ descType.getSwizzle() != TensorMapSwizzleKind::SWIZZLE_NONE) {
unsigned lastDimensionByte =
descMemref.getElementTypeBitWidth() * descMemref.getShape().back() / 8;
if (lastDimensionByte != kMaxTMALastdimByte)
diff --git a/mlir/lib/Dialect/NVGPU/TransformOps/NVGPUTransformOps.cpp b/mlir/lib/Dialect/NVGPU/TransformOps/NVGPUTransformOps.cpp
index c817422..1635297 100644
--- a/mlir/lib/Dialect/NVGPU/TransformOps/NVGPUTransformOps.cpp
+++ b/mlir/lib/Dialect/NVGPU/TransformOps/NVGPUTransformOps.cpp
@@ -1010,7 +1010,8 @@ void HopperBuilder::buildBarrierArriveTx(
void HopperBuilder::buildTryWaitParity(
TypedValue<nvgpu::MBarrierGroupType> barrier) {
- Value parity = rewriter.create<arith::ConstantIndexOp>(loc, 0);
+ Type i1 = rewriter.getI1Type();
+ Value parity = rewriter.create<LLVM::ConstantOp>(loc, i1, 0);
// 10M is an arbitrary, not too small or too big number to specify the number
// of ticks before retry.
// TODO: hoist this in a default dialect constant.
diff --git a/mlir/lib/Dialect/OpenMP/IR/OpenMPDialect.cpp b/mlir/lib/Dialect/OpenMP/IR/OpenMPDialect.cpp
index 394f062..13fc01d 100644
--- a/mlir/lib/Dialect/OpenMP/IR/OpenMPDialect.cpp
+++ b/mlir/lib/Dialect/OpenMP/IR/OpenMPDialect.cpp
@@ -29,6 +29,7 @@
#include "llvm/ADT/TypeSwitch.h"
#include "llvm/Frontend/OpenMP/OMPConstants.h"
#include <cstddef>
+#include <iterator>
#include <optional>
#include "mlir/Dialect/OpenMP/OpenMPOpsDialect.cpp.inc"
@@ -459,17 +460,16 @@ parseReductionClause(OpAsmParser &parser, Region &region,
return success();
}
-static void printReductionClause(OpAsmPrinter &p, Operation *op, Region &region,
- ValueRange operands, TypeRange types,
- ArrayAttr reductionSymbols) {
+static void printReductionClause(OpAsmPrinter &p, Operation *op,
+ ValueRange reductionArgs, ValueRange operands,
+ TypeRange types, ArrayAttr reductionSymbols) {
p << "reduction(";
- llvm::interleaveComma(llvm::zip_equal(reductionSymbols, operands,
- region.front().getArguments(), types),
- p, [&p](auto t) {
- auto [sym, op, arg, type] = t;
- p << sym << " " << op << " -> " << arg << " : "
- << type;
- });
+ llvm::interleaveComma(
+ llvm::zip_equal(reductionSymbols, operands, reductionArgs, types), p,
+ [&p](auto t) {
+ auto [sym, op, arg, type] = t;
+ p << sym << " " << op << " -> " << arg << " : " << type;
+ });
p << ") ";
}
@@ -490,7 +490,8 @@ static void printParallelRegion(OpAsmPrinter &p, Operation *op, Region &region,
ValueRange operands, TypeRange types,
ArrayAttr reductionSymbols) {
if (reductionSymbols)
- printReductionClause(p, op, region, operands, types, reductionSymbols);
+ printReductionClause(p, op, region.front().getArguments(), operands, types,
+ reductionSymbols);
p.printRegion(region, /*printEntryBlockArgs=*/false);
}
@@ -628,7 +629,7 @@ static LogicalResult verifyDependVarList(Operation *op,
return op->emitOpError() << "expected as many depend values"
" as depend variables";
} else {
- if (depends)
+ if (depends && !depends->empty())
return op->emitOpError() << "unexpected depend values";
return success();
}
@@ -1032,19 +1033,31 @@ LogicalResult DataOp::verify() {
}
LogicalResult EnterDataOp::verify() {
- return verifyMapClause(*this, getMapOperands());
+ LogicalResult verifyDependVars =
+ verifyDependVarList(*this, getDepends(), getDependVars());
+ return failed(verifyDependVars) ? verifyDependVars
+ : verifyMapClause(*this, getMapOperands());
}
LogicalResult ExitDataOp::verify() {
- return verifyMapClause(*this, getMapOperands());
+ LogicalResult verifyDependVars =
+ verifyDependVarList(*this, getDepends(), getDependVars());
+ return failed(verifyDependVars) ? verifyDependVars
+ : verifyMapClause(*this, getMapOperands());
}
LogicalResult UpdateDataOp::verify() {
- return verifyMapClause(*this, getMapOperands());
+ LogicalResult verifyDependVars =
+ verifyDependVarList(*this, getDepends(), getDependVars());
+ return failed(verifyDependVars) ? verifyDependVars
+ : verifyMapClause(*this, getMapOperands());
}
LogicalResult TargetOp::verify() {
- return verifyMapClause(*this, getMapOperands());
+ LogicalResult verifyDependVars =
+ verifyDependVarList(*this, getDepends(), getDependVars());
+ return failed(verifyDependVars) ? verifyDependVars
+ : verifyMapClause(*this, getMapOperands());
}
//===----------------------------------------------------------------------===//
@@ -1150,6 +1163,84 @@ LogicalResult SingleOp::verify() {
/// loop-bounds := `(` ssa-id-list `)` to `(` ssa-id-list `)` inclusive? steps
/// steps := `step` `(`ssa-id-list`)`
ParseResult
+parseWsLoop(OpAsmParser &parser, Region &region,
+ SmallVectorImpl<OpAsmParser::UnresolvedOperand> &lowerBound,
+ SmallVectorImpl<OpAsmParser::UnresolvedOperand> &upperBound,
+ SmallVectorImpl<OpAsmParser::UnresolvedOperand> &steps,
+ SmallVectorImpl<Type> &loopVarTypes,
+ SmallVectorImpl<OpAsmParser::UnresolvedOperand> &reductionOperands,
+ SmallVectorImpl<Type> &reductionTypes, ArrayAttr &reductionSymbols,
+ UnitAttr &inclusive) {
+
+ // Parse an optional reduction clause
+ llvm::SmallVector<OpAsmParser::Argument> privates;
+ bool hasReduction = succeeded(
+ parseReductionClause(parser, region, reductionOperands, reductionTypes,
+ reductionSymbols, privates));
+
+ if (parser.parseKeyword("for"))
+ return failure();
+
+ // Parse an opening `(` followed by induction variables followed by `)`
+ SmallVector<OpAsmParser::Argument> ivs;
+ Type loopVarType;
+ if (parser.parseArgumentList(ivs, OpAsmParser::Delimiter::Paren) ||
+ parser.parseColonType(loopVarType) ||
+ // Parse loop bounds.
+ parser.parseEqual() ||
+ parser.parseOperandList(lowerBound, ivs.size(),
+ OpAsmParser::Delimiter::Paren) ||
+ parser.parseKeyword("to") ||
+ parser.parseOperandList(upperBound, ivs.size(),
+ OpAsmParser::Delimiter::Paren))
+ return failure();
+
+ if (succeeded(parser.parseOptionalKeyword("inclusive")))
+ inclusive = UnitAttr::get(parser.getBuilder().getContext());
+
+ // Parse step values.
+ if (parser.parseKeyword("step") ||
+ parser.parseOperandList(steps, ivs.size(), OpAsmParser::Delimiter::Paren))
+ return failure();
+
+ // Now parse the body.
+ loopVarTypes = SmallVector<Type>(ivs.size(), loopVarType);
+ for (auto &iv : ivs)
+ iv.type = loopVarType;
+
+ SmallVector<OpAsmParser::Argument> regionArgs{ivs};
+ if (hasReduction)
+ llvm::copy(privates, std::back_inserter(regionArgs));
+
+ return parser.parseRegion(region, regionArgs);
+}
+
+void printWsLoop(OpAsmPrinter &p, Operation *op, Region &region,
+ ValueRange lowerBound, ValueRange upperBound, ValueRange steps,
+ TypeRange loopVarTypes, ValueRange reductionOperands,
+ TypeRange reductionTypes, ArrayAttr reductionSymbols,
+ UnitAttr inclusive) {
+ if (reductionSymbols) {
+ auto reductionArgs =
+ region.front().getArguments().drop_front(loopVarTypes.size());
+ printReductionClause(p, op, reductionArgs, reductionOperands,
+ reductionTypes, reductionSymbols);
+ }
+
+ p << " for ";
+ auto args = region.front().getArguments().drop_back(reductionOperands.size());
+ p << " (" << args << ") : " << args[0].getType() << " = (" << lowerBound
+ << ") to (" << upperBound << ") ";
+ if (inclusive)
+ p << "inclusive ";
+ p << "step (" << steps << ") ";
+ p.printRegion(region, /*printEntryBlockArgs=*/false);
+}
+
+/// loop-control ::= `(` ssa-id-list `)` `:` type `=` loop-bounds
+/// loop-bounds := `(` ssa-id-list `)` to `(` ssa-id-list `)` inclusive? steps
+/// steps := `step` `(`ssa-id-list`)`
+ParseResult
parseLoopControl(OpAsmParser &parser, Region &region,
SmallVectorImpl<OpAsmParser::UnresolvedOperand> &lowerBound,
SmallVectorImpl<OpAsmParser::UnresolvedOperand> &upperBound,
@@ -1662,6 +1753,73 @@ LogicalResult DataBoundsOp::verify() {
return success();
}
+LogicalResult PrivateClauseOp::verify() {
+ Type symType = getType();
+
+ auto verifyTerminator = [&](Operation *terminator) -> LogicalResult {
+ if (!terminator->hasSuccessors() && !llvm::isa<YieldOp>(terminator))
+ return mlir::emitError(terminator->getLoc())
+ << "expected exit block terminator to be an `omp.yield` op.";
+
+ YieldOp yieldOp = llvm::cast<YieldOp>(terminator);
+ TypeRange yieldedTypes = yieldOp.getResults().getTypes();
+
+ if (yieldedTypes.size() == 1 && yieldedTypes.front() == symType)
+ return success();
+
+ auto error = mlir::emitError(yieldOp.getLoc())
+ << "Invalid yielded value. Expected type: " << symType
+ << ", got: ";
+
+ if (yieldedTypes.empty())
+ error << "None";
+ else
+ error << yieldedTypes;
+
+ return error;
+ };
+
+ auto verifyRegion = [&](Region &region, unsigned expectedNumArgs,
+ StringRef regionName) -> LogicalResult {
+ assert(!region.empty());
+
+ if (region.getNumArguments() != expectedNumArgs)
+ return mlir::emitError(region.getLoc())
+ << "`" << regionName << "`: "
+ << "expected " << expectedNumArgs
+ << " region arguments, got: " << region.getNumArguments();
+
+ for (Block &block : region) {
+ // MLIR will verify the absence of the terminator for us.
+ if (!block.mightHaveTerminator())
+ continue;
+
+ if (failed(verifyTerminator(block.getTerminator())))
+ return failure();
+ }
+
+ return success();
+ };
+
+ if (failed(verifyRegion(getAllocRegion(), /*expectedNumArgs=*/1, "alloc")))
+ return failure();
+
+ DataSharingClauseType dsType = getDataSharingType();
+
+ if (dsType == DataSharingClauseType::Private && !getCopyRegion().empty())
+ return emitError("`private` clauses require only an `alloc` region.");
+
+ if (dsType == DataSharingClauseType::FirstPrivate && getCopyRegion().empty())
+ return emitError(
+ "`firstprivate` clauses require both `alloc` and `copy` regions.");
+
+ if (dsType == DataSharingClauseType::FirstPrivate &&
+ failed(verifyRegion(getCopyRegion(), /*expectedNumArgs=*/2, "copy")))
+ return failure();
+
+ return success();
+}
+
#define GET_ATTRDEF_CLASSES
#include "mlir/Dialect/OpenMP/OpenMPOpsAttributes.cpp.inc"
diff --git a/mlir/lib/Dialect/Tensor/IR/TensorOps.cpp b/mlir/lib/Dialect/Tensor/IR/TensorOps.cpp
index 8298cf1..303b38c 100644
--- a/mlir/lib/Dialect/Tensor/IR/TensorOps.cpp
+++ b/mlir/lib/Dialect/Tensor/IR/TensorOps.cpp
@@ -817,7 +817,7 @@ struct DimOfDestStyleOp : public OpRewritePattern<DimOp> {
return failure();
auto resultIndex = source.cast<OpResult>().getResultNumber();
- auto initOperand = destOp.getDpsInitOperand(resultIndex);
+ auto *initOperand = destOp.getDpsInitOperand(resultIndex);
rewriter.modifyOpInPlace(
dimOp, [&]() { dimOp.getSourceMutable().assign(initOperand->get()); });
@@ -2797,7 +2797,7 @@ RankedTensorType PadOp::inferResultType(RankedTensorType sourceType,
return RankedTensorType();
if (staticHigh.size() != rank)
return RankedTensorType();
- if (!(resultShape.empty() || resultShape.size() == rank))
+ if (!resultShape.empty() && resultShape.size() != rank)
return RankedTensorType();
SmallVector<int64_t, 4> inferredShape;
@@ -3983,6 +3983,41 @@ static bool paddingIsNotNeeded(PackOp op) {
op.getMixedTiles());
}
+/// Returns true if the `srcShape` or `destShape` is different from the one in
+/// `packOp` and populates each with the inferred static shape.
+static bool inferStaticShape(PackOp packOp, SmallVectorImpl<int64_t> &srcShape,
+ SmallVectorImpl<int64_t> &destShape) {
+ bool changeNeeded = false;
+ srcShape.assign(packOp.getSourceType().getShape().begin(),
+ packOp.getSourceType().getShape().end());
+ destShape.assign(packOp.getDestType().getShape().begin(),
+ packOp.getDestType().getShape().end());
+ llvm::SmallSetVector<int64_t, 4> innerDims;
+ innerDims.insert(packOp.getInnerDimsPos().begin(),
+ packOp.getInnerDimsPos().end());
+ auto outerDimsPerm = packOp.getOuterDimsPerm();
+ int srcRank = packOp.getSourceRank();
+ for (auto i : llvm::seq<int64_t>(0, srcRank)) {
+ if (innerDims.contains(i))
+ continue;
+ int64_t srcPos = i;
+ int64_t destPos = i;
+ if (!outerDimsPerm.empty())
+ destPos = outerDimsPerm[srcPos];
+ if (ShapedType::isDynamic(srcShape[srcPos]) ==
+ ShapedType::isDynamic(destShape[destPos])) {
+ continue;
+ }
+ int64_t size = srcShape[srcPos];
+ if (ShapedType::isDynamic(size))
+ size = destShape[destPos];
+ srcShape[srcPos] = size;
+ destShape[destPos] = size;
+ changeNeeded = true;
+ }
+ return changeNeeded;
+}
+
LogicalResult PackOp::canonicalize(PackOp packOp, PatternRewriter &rewriter) {
// Fold an unpack(pack(x)) to x.
if (auto unPackOp = packOp.getSource().getDefiningOp<UnPackOp>()) {
@@ -4003,6 +4038,31 @@ LogicalResult PackOp::canonicalize(PackOp packOp, PatternRewriter &rewriter) {
rewriter.finalizeOpModification(packOp);
return success();
}
+
+ // Insert tensor.cast ops if static shape inference is available..
+ SmallVector<int64_t> srcShape, destShape;
+ if (inferStaticShape(packOp, srcShape, destShape)) {
+ Location loc = packOp.getLoc();
+ Value source = packOp.getSource();
+ if (srcShape != packOp.getSourceType().getShape()) {
+ auto newSrcType = packOp.getSourceType().clone(srcShape);
+ source =
+ rewriter.create<tensor::CastOp>(loc, newSrcType, packOp.getSource());
+ }
+ Value dest = packOp.getDest();
+ if (destShape != packOp.getDestType().getShape()) {
+ auto newDestType = packOp.getDestType().clone(destShape);
+ dest =
+ rewriter.create<tensor::CastOp>(loc, newDestType, packOp.getDest());
+ }
+ Value newOp = rewriter.create<tensor::PackOp>(
+ loc, source, dest, packOp.getInnerDimsPos(), packOp.getMixedTiles(),
+ packOp.getPaddingValue(), packOp.getOuterDimsPerm());
+ rewriter.replaceOpWithNewOp<tensor::CastOp>(
+ packOp, packOp.getResult().getType(), newOp);
+ return success();
+ }
+
return failure();
}
diff --git a/mlir/lib/Dialect/Tosa/IR/TosaOps.cpp b/mlir/lib/Dialect/Tosa/IR/TosaOps.cpp
index 729116d..950ee59 100644
--- a/mlir/lib/Dialect/Tosa/IR/TosaOps.cpp
+++ b/mlir/lib/Dialect/Tosa/IR/TosaOps.cpp
@@ -108,7 +108,7 @@ struct TosaDialectBytecodeInterface : public BytecodeDialectInterface {
}
LogicalResult upgradeFromVersion(Operation *topLevelOp,
- const DialectVersion &version_) const final {
+ const DialectVersion &version) const final {
return success();
}
};
@@ -454,12 +454,12 @@ static void
buildAvgPool2dOpWithQuantInfo(OpBuilder &builder, OperationState &result,
Type outputType, Value input,
DenseArrayAttr kernel, DenseArrayAttr stride,
- DenseArrayAttr pad, TypeAttr acc_type) {
+ DenseArrayAttr pad, TypeAttr accType) {
result.addOperands(input);
result.addAttribute("kernel", kernel);
result.addAttribute("stride", stride);
result.addAttribute("pad", pad);
- result.addAttribute("acc_type", acc_type);
+ result.addAttribute("acc_type", accType);
auto quantAttr = buildUnaryOpQuantizationAttr(builder, input, outputType);
if (quantAttr)
result.addAttribute("quantization_info", quantAttr);
diff --git a/mlir/lib/Dialect/Transform/IR/TransformOps.cpp b/mlir/lib/Dialect/Transform/IR/TransformOps.cpp
index f2a5738..180d11c 100644
--- a/mlir/lib/Dialect/Transform/IR/TransformOps.cpp
+++ b/mlir/lib/Dialect/Transform/IR/TransformOps.cpp
@@ -260,7 +260,7 @@ transform::AnnotateOp::apply(transform::TransformRewriter &rewriter,
}
attr = params[0];
}
- for (auto target : targets)
+ for (auto *target : targets)
target->setAttr(getName(), attr);
return DiagnosedSilenceableFailure::success();
}
@@ -330,7 +330,7 @@ DiagnosedSilenceableFailure transform::ApplyDeadCodeEliminationOp::applyToOne(
auto eraseOp = [&](Operation *op) {
// Remove op and nested ops from the worklist.
op->walk([&](Operation *op) {
- auto it = llvm::find(worklist, op);
+ const auto *it = llvm::find(worklist, op);
if (it != worklist.end())
worklist.erase(it);
});
@@ -666,7 +666,7 @@ void transform::ApplyToLLVMConversionPatternsOp::populatePatterns(
TypeConverter &typeConverter, RewritePatternSet &patterns) {
Dialect *dialect = getContext()->getLoadedDialect(getDialectName());
assert(dialect && "expected that dialect is loaded");
- auto iface = cast<ConvertToLLVMPatternInterface>(dialect);
+ auto *iface = cast<ConvertToLLVMPatternInterface>(dialect);
// ConversionTarget is currently ignored because the enclosing
// apply_conversion_patterns op sets up its own ConversionTarget.
ConversionTarget target(*getContext());
@@ -686,7 +686,7 @@ LogicalResult transform::ApplyToLLVMConversionPatternsOp::verify() {
if (!dialect)
return emitOpError("unknown dialect or dialect not loaded: ")
<< getDialectName();
- auto iface = dyn_cast<ConvertToLLVMPatternInterface>(dialect);
+ auto *iface = dyn_cast<ConvertToLLVMPatternInterface>(dialect);
if (!iface)
return emitOpError(
"dialect does not implement ConvertToLLVMPatternInterface or "
@@ -1754,7 +1754,7 @@ DiagnosedSilenceableFailure
transform::MatchParamCmpIOp::apply(transform::TransformRewriter &rewriter,
transform::TransformResults &results,
transform::TransformState &state) {
- auto signedAPIntAsString = [&](APInt value) {
+ auto signedAPIntAsString = [&](const APInt &value) {
std::string str;
llvm::raw_string_ostream os(str);
value.print(os, /*isSigned=*/true);
@@ -2223,7 +2223,7 @@ transform::SplitHandleOp::apply(transform::TransformRewriter &rewriter,
// - "fail_on_payload_too_small" is set to "false", or
// - "pass_through_empty_handle" is set to "true" and there are 0 payload ops.
if (numPayloadOps < getNumResults() && getFailOnPayloadTooSmall() &&
- !(numPayloadOps == 0 && getPassThroughEmptyHandle()))
+ (numPayloadOps != 0 || !getPassThroughEmptyHandle()))
return produceNumOpsError();
// Distribute payload ops.
diff --git a/mlir/lib/Dialect/Utils/IndexingUtils.cpp b/mlir/lib/Dialect/Utils/IndexingUtils.cpp
index 2765d1e..baaa581 100644
--- a/mlir/lib/Dialect/Utils/IndexingUtils.cpp
+++ b/mlir/lib/Dialect/Utils/IndexingUtils.cpp
@@ -271,9 +271,8 @@ static MLIRContext *getContext(OpFoldResult val) {
assert(val && "Invalid value");
if (auto attr = dyn_cast<Attribute>(val)) {
return attr.getContext();
- } else {
- return cast<Value>(val).getContext();
}
+ return cast<Value>(val).getContext();
}
std::pair<AffineExpr, SmallVector<OpFoldResult>>
diff --git a/mlir/lib/Dialect/Vector/Transforms/CMakeLists.txt b/mlir/lib/Dialect/Vector/Transforms/CMakeLists.txt
index daf2888..adf961f 100644
--- a/mlir/lib/Dialect/Vector/Transforms/CMakeLists.txt
+++ b/mlir/lib/Dialect/Vector/Transforms/CMakeLists.txt
@@ -16,6 +16,7 @@ add_mlir_dialect_library(MLIRVectorTransforms
VectorEmulateMaskedLoadStore.cpp
VectorEmulateNarrowType.cpp
VectorInsertExtractStridedSliceRewritePatterns.cpp
+ VectorLinearize.cpp
VectorTransferOpTransforms.cpp
VectorTransferSplitRewritePatterns.cpp
VectorTransforms.cpp
diff --git a/mlir/lib/Dialect/Vector/Transforms/VectorLinearize.cpp b/mlir/lib/Dialect/Vector/Transforms/VectorLinearize.cpp
new file mode 100644
index 0000000..c535204
--- /dev/null
+++ b/mlir/lib/Dialect/Vector/Transforms/VectorLinearize.cpp
@@ -0,0 +1,97 @@
+//===- VectorLinearize.cpp - vector linearization transforms --------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements patterns and pass for linearizing ND vectors into 1D.
+//
+//===----------------------------------------------------------------------===//
+
+#include "mlir/Dialect/Arith/IR/Arith.h"
+#include "mlir/Dialect/Vector/IR/VectorOps.h"
+#include "mlir/Dialect/Vector/Transforms/VectorRewritePatterns.h"
+#include "mlir/IR/PatternMatch.h"
+#include "mlir/IR/TypeUtilities.h"
+#include "mlir/Transforms/DialectConversion.h"
+
+using namespace mlir;
+
+namespace {
+struct LinearizeConstant final : OpConversionPattern<arith::ConstantOp> {
+ using OpConversionPattern::OpConversionPattern;
+
+ LogicalResult
+ matchAndRewrite(arith::ConstantOp constOp, OpAdaptor adaptor,
+ ConversionPatternRewriter &rewriter) const override {
+ Location loc = constOp.getLoc();
+ auto resType =
+ getTypeConverter()->convertType<VectorType>(constOp.getType());
+ if (!resType)
+ return rewriter.notifyMatchFailure(loc, "can't convert return type");
+
+ auto dstElementsAttr = dyn_cast<DenseElementsAttr>(constOp.getValue());
+ if (!dstElementsAttr)
+ return rewriter.notifyMatchFailure(loc, "unsupported attr type");
+
+ dstElementsAttr = dstElementsAttr.reshape(resType);
+ rewriter.replaceOpWithNewOp<arith::ConstantOp>(constOp, resType,
+ dstElementsAttr);
+ return success();
+ }
+};
+
+struct LinearizeVectorizable final
+ : OpTraitConversionPattern<OpTrait::Vectorizable> {
+ using OpTraitConversionPattern::OpTraitConversionPattern;
+
+ LogicalResult
+ matchAndRewrite(Operation *op, ArrayRef<Value> operands,
+ ConversionPatternRewriter &rewriter) const override {
+ FailureOr<Operation *> newOp =
+ convertOpResultTypes(op, operands, *getTypeConverter(), rewriter);
+ if (failed(newOp))
+ return failure();
+
+ rewriter.replaceOp(op, (*newOp)->getResults());
+ return success();
+ }
+};
+} // namespace
+
+void mlir::vector::populateVectorLinearizeTypeConversionsAndLegality(
+ TypeConverter &typeConverter, RewritePatternSet &patterns,
+ ConversionTarget &target) {
+ typeConverter.addConversion([](VectorType type) -> std::optional<Type> {
+ // Ignore scalable vectors for now.
+ if (type.getRank() <= 1 || type.isScalable())
+ return type;
+
+ return VectorType::get(type.getNumElements(), type.getElementType());
+ });
+
+ auto materializeCast = [](OpBuilder &builder, Type type, ValueRange inputs,
+ Location loc) -> Value {
+ if (inputs.size() != 1 || !isa<VectorType>(inputs.front().getType()) ||
+ !isa<VectorType>(type))
+ return nullptr;
+
+ return builder.create<vector::ShapeCastOp>(loc, type, inputs.front());
+ };
+ typeConverter.addArgumentMaterialization(materializeCast);
+ typeConverter.addSourceMaterialization(materializeCast);
+ typeConverter.addTargetMaterialization(materializeCast);
+
+ target.markUnknownOpDynamicallyLegal(
+ [&](Operation *op) -> std::optional<bool> {
+ if (isa<arith::ConstantOp>(op) || op->hasTrait<OpTrait::Vectorizable>())
+ return typeConverter.isLegal(op);
+
+ return std::nullopt;
+ });
+
+ patterns.add<LinearizeConstant, LinearizeVectorizable>(typeConverter,
+ patterns.getContext());
+}
diff --git a/mlir/lib/Dialect/Vector/Transforms/VectorTransforms.cpp b/mlir/lib/Dialect/Vector/Transforms/VectorTransforms.cpp
index 53ae138..74dd1df 100644
--- a/mlir/lib/Dialect/Vector/Transforms/VectorTransforms.cpp
+++ b/mlir/lib/Dialect/Vector/Transforms/VectorTransforms.cpp
@@ -1237,7 +1237,7 @@ class DropInnerMostUnitDimsTransferRead
return failure();
auto srcType = dyn_cast<MemRefType>(readOp.getSource().getType());
- if (!srcType || !srcType.hasStaticShape())
+ if (!srcType)
return failure();
if (!readOp.getPermutationMap().isMinorIdentity())
@@ -1261,19 +1261,21 @@ class DropInnerMostUnitDimsTransferRead
targetType.getElementType());
auto loc = readOp.getLoc();
+ SmallVector<OpFoldResult> sizes =
+ memref::getMixedSizes(rewriter, loc, readOp.getSource());
+ SmallVector<OpFoldResult> offsets(srcType.getRank(),
+ rewriter.getIndexAttr(0));
+ SmallVector<OpFoldResult> strides(srcType.getRank(),
+ rewriter.getIndexAttr(1));
MemRefType resultMemrefType =
getMemRefTypeWithDroppingInnerDims(rewriter, srcType, dimsToDrop);
- SmallVector<int64_t> offsets(srcType.getRank(), 0);
- SmallVector<int64_t> strides(srcType.getRank(), 1);
-
ArrayAttr inBoundsAttr =
readOp.getInBounds()
? rewriter.getArrayAttr(
readOp.getInBoundsAttr().getValue().drop_back(dimsToDrop))
: ArrayAttr();
Value rankedReducedView = rewriter.create<memref::SubViewOp>(
- loc, resultMemrefType, readOp.getSource(), offsets, srcType.getShape(),
- strides);
+ loc, resultMemrefType, readOp.getSource(), offsets, sizes, strides);
auto permMap = getTransferMinorIdentityMap(
cast<ShapedType>(rankedReducedView.getType()), resultTargetVecType);
Value result = rewriter.create<vector::TransferReadOp>(
diff --git a/mlir/lib/Dialect/Vector/Transforms/VectorUnroll.cpp b/mlir/lib/Dialect/Vector/Transforms/VectorUnroll.cpp
index 78b0412..c837764 100644
--- a/mlir/lib/Dialect/Vector/Transforms/VectorUnroll.cpp
+++ b/mlir/lib/Dialect/Vector/Transforms/VectorUnroll.cpp
@@ -311,7 +311,7 @@ struct UnrollContractionPattern
applyPermutationMap(accPermutationMap, ArrayRef<int64_t>(offsets));
// If a version of the accumulator has already been computed, use it
// otherwise extract the first version from the original operand.
- auto accIt = accCache.find(accOffets);
+ auto *accIt = accCache.find(accOffets);
if (accIt != accCache.end())
slicesOperands[2] = accIt->second;
else
@@ -387,7 +387,7 @@ struct UnrollMultiReductionPattern
SmallVector<int64_t> accStrides(destOffset.size(), 1);
// If a version of the accumulator has already been computed, use it
// otherwise extract the first version from the original operand.
- auto accIt = accCache.find(destOffset);
+ auto *accIt = accCache.find(destOffset);
if (accIt != accCache.end())
acc = accIt->second;
else
diff --git a/mlir/lib/ExecutionEngine/SparseTensorRuntime.cpp b/mlir/lib/ExecutionEngine/SparseTensorRuntime.cpp
index e7ac8f1..a5e75a7 100644
--- a/mlir/lib/ExecutionEngine/SparseTensorRuntime.cpp
+++ b/mlir/lib/ExecutionEngine/SparseTensorRuntime.cpp
@@ -508,9 +508,9 @@ MLIR_SPARSETENSOR_FOREVERY_V(IMPL_DELCOO)
#undef IMPL_DELCOO
char *getTensorFilename(index_type id) {
- constexpr size_t BUF_SIZE = 80;
- char var[BUF_SIZE];
- snprintf(var, BUF_SIZE, "TENSOR%" PRIu64, id);
+ constexpr size_t bufSize = 80;
+ char var[bufSize];
+ snprintf(var, bufSize, "TENSOR%" PRIu64, id);
char *env = getenv(var);
if (!env)
MLIR_SPARSETENSOR_FATAL("Environment variable %s is not set\n", var);
diff --git a/mlir/lib/Interfaces/InferIntRangeInterface.cpp b/mlir/lib/Interfaces/InferIntRangeInterface.cpp
index cc31104..b3f6c0e 100644
--- a/mlir/lib/Interfaces/InferIntRangeInterface.cpp
+++ b/mlir/lib/Interfaces/InferIntRangeInterface.cpp
@@ -8,8 +8,8 @@
#include "mlir/Interfaces/InferIntRangeInterface.h"
#include "mlir/IR/BuiltinTypes.h"
-#include <optional>
#include "mlir/Interfaces/InferIntRangeInterface.cpp.inc"
+#include <optional>
using namespace mlir;
diff --git a/mlir/lib/Target/LLVMIR/AttrKindDetail.h b/mlir/lib/Target/LLVMIR/AttrKindDetail.h
index b01858e..ddc6d46 100644
--- a/mlir/lib/Target/LLVMIR/AttrKindDetail.h
+++ b/mlir/lib/Target/LLVMIR/AttrKindDetail.h
@@ -61,7 +61,8 @@ getAttrKindToNameMapping() {
/// Returns a dense map from LLVM attribute name to their kind in LLVM IR
/// dialect.
-static llvm::DenseMap<llvm::StringRef, llvm::Attribute::AttrKind>
+[[maybe_unused]] static llvm::DenseMap<llvm::StringRef,
+ llvm::Attribute::AttrKind>
getAttrNameToKindMapping() {
static auto attrNameToKindMapping = []() {
llvm::DenseMap<llvm::StringRef, llvm::Attribute::AttrKind> nameKindMap;
diff --git a/mlir/lib/Target/LLVMIR/Dialect/OpenMP/OpenMPToLLVMIRTranslation.cpp b/mlir/lib/Target/LLVMIR/Dialect/OpenMP/OpenMPToLLVMIRTranslation.cpp
index c87e895..78a2ad7 100644
--- a/mlir/lib/Target/LLVMIR/Dialect/OpenMP/OpenMPToLLVMIRTranslation.cpp
+++ b/mlir/lib/Target/LLVMIR/Dialect/OpenMP/OpenMPToLLVMIRTranslation.cpp
@@ -786,17 +786,17 @@ allocReductionVars(T loop, llvm::IRBuilderBase &builder,
SmallVector<omp::ReductionDeclareOp> &reductionDecls,
SmallVector<llvm::Value *> &privateReductionVariables,
DenseMap<Value, llvm::Value *> &reductionVariableMap) {
- unsigned numReductions = loop.getNumReductionVars();
- privateReductionVariables.reserve(numReductions);
- if (numReductions != 0) {
- llvm::IRBuilderBase::InsertPointGuard guard(builder);
- builder.restoreIP(allocaIP);
- for (unsigned i = 0; i < numReductions; ++i) {
- llvm::Value *var = builder.CreateAlloca(
- moduleTranslation.convertType(reductionDecls[i].getType()));
- privateReductionVariables.push_back(var);
- reductionVariableMap.try_emplace(loop.getReductionVars()[i], var);
- }
+ llvm::IRBuilderBase::InsertPointGuard guard(builder);
+ builder.restoreIP(allocaIP);
+ auto args =
+ loop.getRegion().getArguments().take_back(loop.getNumReductionVars());
+
+ for (std::size_t i = 0; i < loop.getNumReductionVars(); ++i) {
+ llvm::Value *var = builder.CreateAlloca(
+ moduleTranslation.convertType(reductionDecls[i].getType()));
+ moduleTranslation.mapValue(args[i], var);
+ privateReductionVariables.push_back(var);
+ reductionVariableMap.try_emplace(loop.getReductionVars()[i], var);
}
}
@@ -1018,19 +1018,9 @@ convertOmpParallel(omp::ParallelOp opInst, llvm::IRBuilderBase &builder,
// Allocate reduction vars
SmallVector<llvm::Value *> privateReductionVariables;
DenseMap<Value, llvm::Value *> reductionVariableMap;
- {
- llvm::IRBuilderBase::InsertPointGuard guard(builder);
- builder.restoreIP(allocaIP);
- auto args = opInst.getRegion().getArguments();
-
- for (std::size_t i = 0; i < opInst.getNumReductionVars(); ++i) {
- llvm::Value *var = builder.CreateAlloca(
- moduleTranslation.convertType(reductionDecls[i].getType()));
- moduleTranslation.mapValue(args[i], var);
- privateReductionVariables.push_back(var);
- reductionVariableMap.try_emplace(opInst.getReductionVars()[i], var);
- }
- }
+ allocReductionVars(opInst, builder, moduleTranslation, allocaIP,
+ reductionDecls, privateReductionVariables,
+ reductionVariableMap);
// Store the mapping between reduction variables and their private copies on
// ModuleTranslation stack. It can be then recovered when translating
diff --git a/mlir/lib/Transforms/Utils/DialectConversion.cpp b/mlir/lib/Transforms/Utils/DialectConversion.cpp
index e904470..84597fb 100644
--- a/mlir/lib/Transforms/Utils/DialectConversion.cpp
+++ b/mlir/lib/Transforms/Utils/DialectConversion.cpp
@@ -154,13 +154,12 @@ namespace {
struct RewriterState {
RewriterState(unsigned numCreatedOps, unsigned numUnresolvedMaterializations,
unsigned numReplacements, unsigned numArgReplacements,
- unsigned numBlockActions, unsigned numIgnoredOperations,
+ unsigned numRewrites, unsigned numIgnoredOperations,
unsigned numRootUpdates)
: numCreatedOps(numCreatedOps),
numUnresolvedMaterializations(numUnresolvedMaterializations),
numReplacements(numReplacements),
- numArgReplacements(numArgReplacements),
- numBlockActions(numBlockActions),
+ numArgReplacements(numArgReplacements), numRewrites(numRewrites),
numIgnoredOperations(numIgnoredOperations),
numRootUpdates(numRootUpdates) {}
@@ -176,8 +175,8 @@ struct RewriterState {
/// The current number of argument replacements queued.
unsigned numArgReplacements;
- /// The current number of block actions performed.
- unsigned numBlockActions;
+ /// The current number of rewrites performed.
+ unsigned numRewrites;
/// The current number of ignored operations.
unsigned numIgnoredOperations;
@@ -236,86 +235,6 @@ struct OpReplacement {
};
//===----------------------------------------------------------------------===//
-// BlockAction
-
-/// The kind of the block action performed during the rewrite. Actions can be
-/// undone if the conversion fails.
-enum class BlockActionKind {
- Create,
- Erase,
- Inline,
- Move,
- Split,
- TypeConversion
-};
-
-/// Original position of the given block in its parent region. During undo
-/// actions, the block needs to be placed before `insertBeforeBlock`.
-struct BlockPosition {
- Region *region;
- Block *insertBeforeBlock;
-};
-
-/// Information needed to undo inlining actions.
-/// - the source block
-/// - the first inlined operation (could be null if the source block was empty)
-/// - the last inlined operation (could be null if the source block was empty)
-struct InlineInfo {
- Block *sourceBlock;
- Operation *firstInlinedInst;
- Operation *lastInlinedInst;
-};
-
-/// The storage class for an undoable block action (one of BlockActionKind),
-/// contains the information necessary to undo this action.
-struct BlockAction {
- static BlockAction getCreate(Block *block) {
- return {BlockActionKind::Create, block, {}};
- }
- static BlockAction getErase(Block *block, BlockPosition originalPosition) {
- return {BlockActionKind::Erase, block, {originalPosition}};
- }
- static BlockAction getInline(Block *block, Block *srcBlock,
- Block::iterator before) {
- BlockAction action{BlockActionKind::Inline, block, {}};
- action.inlineInfo = {srcBlock,
- srcBlock->empty() ? nullptr : &srcBlock->front(),
- srcBlock->empty() ? nullptr : &srcBlock->back()};
- return action;
- }
- static BlockAction getMove(Block *block, BlockPosition originalPosition) {
- return {BlockActionKind::Move, block, {originalPosition}};
- }
- static BlockAction getSplit(Block *block, Block *originalBlock) {
- BlockAction action{BlockActionKind::Split, block, {}};
- action.originalBlock = originalBlock;
- return action;
- }
- static BlockAction getTypeConversion(Block *block) {
- return BlockAction{BlockActionKind::TypeConversion, block, {}};
- }
-
- // The action kind.
- BlockActionKind kind;
-
- // A pointer to the block that was created by the action.
- Block *block;
-
- union {
- // In use if kind == BlockActionKind::Inline or BlockActionKind::Erase, and
- // contains a pointer to the region that originally contained the block as
- // well as the position of the block in that region.
- BlockPosition originalPosition;
- // In use if kind == BlockActionKind::Split and contains a pointer to the
- // block that was split into two parts.
- Block *originalBlock;
- // In use if kind == BlockActionKind::Inline, and contains the information
- // needed to undo the inlining.
- InlineInfo inlineInfo;
- };
-};
-
-//===----------------------------------------------------------------------===//
// UnresolvedMaterialization
/// This class represents an unresolved materialization, i.e. a materialization
@@ -821,11 +740,305 @@ void ArgConverter::insertConversion(Block *newBlock,
}
//===----------------------------------------------------------------------===//
+// IR rewrites
+//===----------------------------------------------------------------------===//
+
+namespace {
+/// An IR rewrite that can be committed (upon success) or rolled back (upon
+/// failure).
+///
+/// The dialect conversion keeps track of IR modifications (requested by the
+/// user through the rewriter API) in `IRRewrite` objects. Some kind of rewrites
+/// are directly applied to the IR as the rewriter API is used, some are applied
+/// partially, and some are delayed until the `IRRewrite` objects are committed.
+class IRRewrite {
+public:
+ /// The kind of the rewrite. Rewrites can be undone if the conversion fails.
+ enum class Kind {
+ CreateBlock,
+ EraseBlock,
+ InlineBlock,
+ MoveBlock,
+ SplitBlock,
+ BlockTypeConversion,
+ MoveOperation
+ };
+
+ virtual ~IRRewrite() = default;
+
+ /// Roll back the rewrite.
+ virtual void rollback() = 0;
+
+ /// Commit the rewrite.
+ virtual void commit() {}
+
+ Kind getKind() const { return kind; }
+
+ static bool classof(const IRRewrite *rewrite) { return true; }
+
+protected:
+ IRRewrite(Kind kind, ConversionPatternRewriterImpl &rewriterImpl)
+ : kind(kind), rewriterImpl(rewriterImpl) {}
+
+ const Kind kind;
+ ConversionPatternRewriterImpl &rewriterImpl;
+};
+
+/// A block rewrite.
+class BlockRewrite : public IRRewrite {
+public:
+ /// Return the block that this rewrite operates on.
+ Block *getBlock() const { return block; }
+
+ static bool classof(const IRRewrite *rewrite) {
+ return rewrite->getKind() >= Kind::CreateBlock &&
+ rewrite->getKind() <= Kind::BlockTypeConversion;
+ }
+
+protected:
+ BlockRewrite(Kind kind, ConversionPatternRewriterImpl &rewriterImpl,
+ Block *block)
+ : IRRewrite(kind, rewriterImpl), block(block) {}
+
+ // The block that this rewrite operates on.
+ Block *block;
+};
+
+/// Creation of a block. Block creations are immediately reflected in the IR.
+/// There is no extra work to commit the rewrite. During rollback, the newly
+/// created block is erased.
+class CreateBlockRewrite : public BlockRewrite {
+public:
+ CreateBlockRewrite(ConversionPatternRewriterImpl &rewriterImpl, Block *block)
+ : BlockRewrite(Kind::CreateBlock, rewriterImpl, block) {}
+
+ static bool classof(const IRRewrite *rewrite) {
+ return rewrite->getKind() == Kind::CreateBlock;
+ }
+
+ void rollback() override {
+ // Unlink all of the operations within this block, they will be deleted
+ // separately.
+ auto &blockOps = block->getOperations();
+ while (!blockOps.empty())
+ blockOps.remove(blockOps.begin());
+ block->dropAllDefinedValueUses();
+ block->erase();
+ }
+};
+
+/// Erasure of a block. Block erasures are partially reflected in the IR. Erased
+/// blocks are immediately unlinked, but only erased when the rewrite is
+/// committed. This makes it easier to rollback a block erasure: the block is
+/// simply inserted into its original location.
+class EraseBlockRewrite : public BlockRewrite {
+public:
+ EraseBlockRewrite(ConversionPatternRewriterImpl &rewriterImpl, Block *block,
+ Region *region, Block *insertBeforeBlock)
+ : BlockRewrite(Kind::EraseBlock, rewriterImpl, block), region(region),
+ insertBeforeBlock(insertBeforeBlock) {}
+
+ static bool classof(const IRRewrite *rewrite) {
+ return rewrite->getKind() == Kind::EraseBlock;
+ }
+
+ ~EraseBlockRewrite() override {
+ assert(!block && "rewrite was neither rolled back nor committed");
+ }
+
+ void rollback() override {
+ // The block (owned by this rewrite) was not actually erased yet. It was
+ // just unlinked. Put it back into its original position.
+ assert(block && "expected block");
+ auto &blockList = region->getBlocks();
+ Region::iterator before = insertBeforeBlock
+ ? Region::iterator(insertBeforeBlock)
+ : blockList.end();
+ blockList.insert(before, block);
+ block = nullptr;
+ }
+
+ void commit() override {
+ // Erase the block.
+ assert(block && "expected block");
+ delete block;
+ block = nullptr;
+ }
+
+private:
+ // The region in which this block was previously contained.
+ Region *region;
+
+ // The original successor of this block before it was unlinked. "nullptr" if
+ // this block was the only block in the region.
+ Block *insertBeforeBlock;
+};
+
+/// Inlining of a block. This rewrite is immediately reflected in the IR.
+/// Note: This rewrite represents only the inlining of the operations. The
+/// erasure of the inlined block is a separate rewrite.
+class InlineBlockRewrite : public BlockRewrite {
+public:
+ InlineBlockRewrite(ConversionPatternRewriterImpl &rewriterImpl, Block *block,
+ Block *sourceBlock, Block::iterator before)
+ : BlockRewrite(Kind::InlineBlock, rewriterImpl, block),
+ sourceBlock(sourceBlock),
+ firstInlinedInst(sourceBlock->empty() ? nullptr
+ : &sourceBlock->front()),
+ lastInlinedInst(sourceBlock->empty() ? nullptr : &sourceBlock->back()) {
+ }
+
+ static bool classof(const IRRewrite *rewrite) {
+ return rewrite->getKind() == Kind::InlineBlock;
+ }
+
+ void rollback() override {
+ // Put the operations from the destination block (owned by the rewrite)
+ // back into the source block.
+ if (firstInlinedInst) {
+ assert(lastInlinedInst && "expected operation");
+ sourceBlock->getOperations().splice(sourceBlock->begin(),
+ block->getOperations(),
+ Block::iterator(firstInlinedInst),
+ ++Block::iterator(lastInlinedInst));
+ }
+ }
+
+private:
+ // The block that originally contained the operations.
+ Block *sourceBlock;
+
+ // The first inlined operation.
+ Operation *firstInlinedInst;
+
+ // The last inlined operation.
+ Operation *lastInlinedInst;
+};
+
+/// Moving of a block. This rewrite is immediately reflected in the IR.
+class MoveBlockRewrite : public BlockRewrite {
+public:
+ MoveBlockRewrite(ConversionPatternRewriterImpl &rewriterImpl, Block *block,
+ Region *region, Block *insertBeforeBlock)
+ : BlockRewrite(Kind::MoveBlock, rewriterImpl, block), region(region),
+ insertBeforeBlock(insertBeforeBlock) {}
+
+ static bool classof(const IRRewrite *rewrite) {
+ return rewrite->getKind() == Kind::MoveBlock;
+ }
+
+ void rollback() override {
+ // Move the block back to its original position.
+ Region::iterator before =
+ insertBeforeBlock ? Region::iterator(insertBeforeBlock) : region->end();
+ region->getBlocks().splice(before, block->getParent()->getBlocks(), block);
+ }
+
+private:
+ // The region in which this block was previously contained.
+ Region *region;
+
+ // The original successor of this block before it was moved. "nullptr" if
+ // this block was the only block in the region.
+ Block *insertBeforeBlock;
+};
+
+/// Splitting of a block. This rewrite is immediately reflected in the IR.
+class SplitBlockRewrite : public BlockRewrite {
+public:
+ SplitBlockRewrite(ConversionPatternRewriterImpl &rewriterImpl, Block *block,
+ Block *originalBlock)
+ : BlockRewrite(Kind::SplitBlock, rewriterImpl, block),
+ originalBlock(originalBlock) {}
+
+ static bool classof(const IRRewrite *rewrite) {
+ return rewrite->getKind() == Kind::SplitBlock;
+ }
+
+ void rollback() override {
+ // Merge back the block that was split out.
+ originalBlock->getOperations().splice(originalBlock->end(),
+ block->getOperations());
+ block->dropAllDefinedValueUses();
+ block->erase();
+ }
+
+private:
+ // The original block from which this block was split.
+ Block *originalBlock;
+};
+
+/// Block type conversion. This rewrite is partially reflected in the IR.
+class BlockTypeConversionRewrite : public BlockRewrite {
+public:
+ BlockTypeConversionRewrite(ConversionPatternRewriterImpl &rewriterImpl,
+ Block *block)
+ : BlockRewrite(Kind::BlockTypeConversion, rewriterImpl, block) {}
+
+ static bool classof(const IRRewrite *rewrite) {
+ return rewrite->getKind() == Kind::BlockTypeConversion;
+ }
+
+ // TODO: Block type conversions are currently committed in
+ // `ArgConverter::applyRewrites`. This should be done in the "commit" method.
+ void rollback() override;
+};
+
+/// An operation rewrite.
+class OperationRewrite : public IRRewrite {
+public:
+ /// Return the operation that this rewrite operates on.
+ Operation *getOperation() const { return op; }
+
+ static bool classof(const IRRewrite *rewrite) {
+ return rewrite->getKind() >= Kind::MoveOperation &&
+ rewrite->getKind() <= Kind::MoveOperation;
+ }
+
+protected:
+ OperationRewrite(Kind kind, ConversionPatternRewriterImpl &rewriterImpl,
+ Operation *op)
+ : IRRewrite(kind, rewriterImpl), op(op) {}
+
+ // The operation that this rewrite operates on.
+ Operation *op;
+};
+
+/// Moving of an operation. This rewrite is immediately reflected in the IR.
+class MoveOperationRewrite : public OperationRewrite {
+public:
+ MoveOperationRewrite(ConversionPatternRewriterImpl &rewriterImpl,
+ Operation *op, Block *block, Operation *insertBeforeOp)
+ : OperationRewrite(Kind::MoveOperation, rewriterImpl, op), block(block),
+ insertBeforeOp(insertBeforeOp) {}
+
+ static bool classof(const IRRewrite *rewrite) {
+ return rewrite->getKind() == Kind::MoveOperation;
+ }
+
+ void rollback() override {
+ // Move the operation back to its original position.
+ Block::iterator before =
+ insertBeforeOp ? Block::iterator(insertBeforeOp) : block->end();
+ block->getOperations().splice(before, op->getBlock()->getOperations(), op);
+ }
+
+private:
+ // The block in which this operation was previously contained.
+ Block *block;
+
+ // The original successor of this operation before it was moved. "nullptr" if
+ // this operation was the only operation in the region.
+ Operation *insertBeforeOp;
+};
+} // namespace
+
+//===----------------------------------------------------------------------===//
// ConversionPatternRewriterImpl
//===----------------------------------------------------------------------===//
namespace mlir {
namespace detail {
-struct ConversionPatternRewriterImpl {
+struct ConversionPatternRewriterImpl : public RewriterBase::Listener {
explicit ConversionPatternRewriterImpl(PatternRewriter &rewriter)
: argConverter(rewriter, unresolvedMaterializations),
notifyCallback(nullptr) {}
@@ -848,13 +1061,17 @@ struct ConversionPatternRewriterImpl {
/// Reset the state of the rewriter to a previously saved point.
void resetState(RewriterState state);
- /// Erase any blocks that were unlinked from their regions and stored in block
- /// actions.
- void eraseDanglingBlocks();
+ /// Append a rewrite. Rewrites are committed upon success and rolled back upon
+ /// failure.
+ template <typename RewriteTy, typename... Args>
+ void appendRewrite(Args &&...args) {
+ rewrites.push_back(
+ std::make_unique<RewriteTy>(*this, std::forward<Args>(args)...));
+ }
- /// Undo the block actions (motions, splits) one by one in reverse order until
- /// "numActionsToKeep" actions remains.
- void undoBlockActions(unsigned numActionsToKeep = 0);
+ /// Undo the rewrites (motions, splits) one by one in reverse order until
+ /// "numRewritesToKeep" rewrites remains.
+ void undoRewrites(unsigned numRewritesToKeep = 0);
/// Remap the given values to those with potentially different types. Returns
/// success if the values could be remapped, failure otherwise. `valueDiagTag`
@@ -903,15 +1120,19 @@ struct ConversionPatternRewriterImpl {
// Rewriter Notification Hooks
//===--------------------------------------------------------------------===//
- /// PatternRewriter hook for replacing the results of an operation.
+ //// Notifies that an op was inserted.
+ void notifyOperationInserted(Operation *op,
+ OpBuilder::InsertPoint previous) override;
+
+ /// Notifies that an op is about to be replaced with the given values.
void notifyOpReplaced(Operation *op, ValueRange newValues);
/// Notifies that a block is about to be erased.
void notifyBlockIsBeingErased(Block *block);
- /// Notifies that a block was created.
- void notifyInsertedBlock(Block *block, Region *previous,
- Region::iterator previousIt);
+ /// Notifies that a block was inserted.
+ void notifyBlockInserted(Block *block, Region *previous,
+ Region::iterator previousIt) override;
/// Notifies that a block was split.
void notifySplitBlock(Block *block, Block *continuation);
@@ -921,8 +1142,9 @@ struct ConversionPatternRewriterImpl {
Block::iterator before);
/// Notifies that a pattern match failed for the given reason.
- void notifyMatchFailure(Location loc,
- function_ref<void(Diagnostic &)> reasonCallback);
+ void
+ notifyMatchFailure(Location loc,
+ function_ref<void(Diagnostic &)> reasonCallback) override;
//===--------------------------------------------------------------------===//
// State
@@ -949,7 +1171,7 @@ struct ConversionPatternRewriterImpl {
SmallVector<BlockArgument, 4> argReplacements;
/// Ordered list of block operations (creations, splits, motions).
- SmallVector<BlockAction, 4> blockActions;
+ SmallVector<std::unique_ptr<IRRewrite>> rewrites;
/// A set of operations that should no longer be considered for legalization,
/// but were not directly replace/erased/etc. by a pattern. These are
@@ -990,6 +1212,11 @@ struct ConversionPatternRewriterImpl {
} // namespace detail
} // namespace mlir
+void BlockTypeConversionRewrite::rollback() {
+ // Undo the type conversion.
+ rewriterImpl.argConverter.discardRewrites(block);
+}
+
/// Detach any operations nested in the given operation from their parent
/// blocks, and erase the given operation. This can be used when the nested
/// operations are scheduled for erasure themselves, so deleting the regions of
@@ -1015,7 +1242,7 @@ void ConversionPatternRewriterImpl::discardRewrites() {
for (auto &state : rootUpdates)
state.resetOperation();
- undoBlockActions();
+ undoRewrites();
// Remove any newly created ops.
for (UnresolvedMaterialization &materialization : unresolvedMaterializations)
@@ -1078,8 +1305,9 @@ void ConversionPatternRewriterImpl::applyRewrites() {
argConverter.applyRewrites(mapping);
- // Now that the ops have been erased, also erase dangling blocks.
- eraseDanglingBlocks();
+ // Commit all rewrites.
+ for (auto &rewrite : rewrites)
+ rewrite->commit();
}
//===----------------------------------------------------------------------===//
@@ -1088,8 +1316,7 @@ void ConversionPatternRewriterImpl::applyRewrites() {
RewriterState ConversionPatternRewriterImpl::getCurrentState() {
return RewriterState(createdOps.size(), unresolvedMaterializations.size(),
replacements.size(), argReplacements.size(),
- blockActions.size(), ignoredOps.size(),
- rootUpdates.size());
+ rewrites.size(), ignoredOps.size(), rootUpdates.size());
}
void ConversionPatternRewriterImpl::resetState(RewriterState state) {
@@ -1104,8 +1331,8 @@ void ConversionPatternRewriterImpl::resetState(RewriterState state) {
mapping.erase(replacedArg);
argReplacements.resize(state.numArgReplacements);
- // Undo any block actions.
- undoBlockActions(state.numBlockActions);
+ // Undo any rewrites.
+ undoRewrites(state.numRewrites);
// Reset any replaced operations and undo any saved mappings.
for (auto &repl : llvm::drop_begin(replacements, state.numReplacements))
@@ -1144,76 +1371,11 @@ void ConversionPatternRewriterImpl::resetState(RewriterState state) {
operationsWithChangedResults.pop_back();
}
-void ConversionPatternRewriterImpl::eraseDanglingBlocks() {
- for (auto &action : blockActions)
- if (action.kind == BlockActionKind::Erase)
- delete action.block;
-}
-
-void ConversionPatternRewriterImpl::undoBlockActions(
- unsigned numActionsToKeep) {
- for (auto &action :
- llvm::reverse(llvm::drop_begin(blockActions, numActionsToKeep))) {
- switch (action.kind) {
- // Delete the created block.
- case BlockActionKind::Create: {
- // Unlink all of the operations within this block, they will be deleted
- // separately.
- auto &blockOps = action.block->getOperations();
- while (!blockOps.empty())
- blockOps.remove(blockOps.begin());
- action.block->dropAllDefinedValueUses();
- action.block->erase();
- break;
- }
- // Put the block (owned by action) back into its original position.
- case BlockActionKind::Erase: {
- auto &blockList = action.originalPosition.region->getBlocks();
- Block *insertBeforeBlock = action.originalPosition.insertBeforeBlock;
- blockList.insert((insertBeforeBlock ? Region::iterator(insertBeforeBlock)
- : blockList.end()),
- action.block);
- break;
- }
- // Put the instructions from the destination block (owned by the action)
- // back into the source block.
- case BlockActionKind::Inline: {
- Block *sourceBlock = action.inlineInfo.sourceBlock;
- if (action.inlineInfo.firstInlinedInst) {
- assert(action.inlineInfo.lastInlinedInst && "expected operation");
- sourceBlock->getOperations().splice(
- sourceBlock->begin(), action.block->getOperations(),
- Block::iterator(action.inlineInfo.firstInlinedInst),
- ++Block::iterator(action.inlineInfo.lastInlinedInst));
- }
- break;
- }
- // Move the block back to its original position.
- case BlockActionKind::Move: {
- Region *originalRegion = action.originalPosition.region;
- Block *insertBeforeBlock = action.originalPosition.insertBeforeBlock;
- originalRegion->getBlocks().splice(
- (insertBeforeBlock ? Region::iterator(insertBeforeBlock)
- : originalRegion->end()),
- action.block->getParent()->getBlocks(), action.block);
- break;
- }
- // Merge back the block that was split out.
- case BlockActionKind::Split: {
- action.originalBlock->getOperations().splice(
- action.originalBlock->end(), action.block->getOperations());
- action.block->dropAllDefinedValueUses();
- action.block->erase();
- break;
- }
- // Undo the type conversion.
- case BlockActionKind::TypeConversion: {
- argConverter.discardRewrites(action.block);
- break;
- }
- }
- }
- blockActions.resize(numActionsToKeep);
+void ConversionPatternRewriterImpl::undoRewrites(unsigned numRewritesToKeep) {
+ for (auto &rewrite :
+ llvm::reverse(llvm::drop_begin(rewrites, numRewritesToKeep)))
+ rewrite->rollback();
+ rewrites.resize(numRewritesToKeep);
}
LogicalResult ConversionPatternRewriterImpl::remapValues(
@@ -1304,7 +1466,7 @@ FailureOr<Block *> ConversionPatternRewriterImpl::convertBlockSignature(
return failure();
if (Block *newBlock = *result) {
if (newBlock != block)
- blockActions.push_back(BlockAction::getTypeConversion(newBlock));
+ appendRewrite<BlockTypeConversionRewrite>(newBlock);
}
return result;
}
@@ -1363,6 +1525,23 @@ LogicalResult ConversionPatternRewriterImpl::convertNonEntryRegionTypes(
//===----------------------------------------------------------------------===//
// Rewriter Notification Hooks
+void ConversionPatternRewriterImpl::notifyOperationInserted(
+ Operation *op, OpBuilder::InsertPoint previous) {
+ LLVM_DEBUG({
+ logger.startLine() << "** Insert : '" << op->getName() << "'(" << op
+ << ")\n";
+ });
+ if (!previous.isSet()) {
+ // This is a newly created op.
+ createdOps.push_back(op);
+ return;
+ }
+ Operation *prevOp = previous.getPoint() == previous.getBlock()->end()
+ ? nullptr
+ : &*previous.getPoint();
+ appendRewrite<MoveOperationRewrite>(op, previous.getBlock(), prevOp);
+}
+
void ConversionPatternRewriterImpl::notifyOpReplaced(Operation *op,
ValueRange newValues) {
assert(newValues.size() == op->getNumResults());
@@ -1395,28 +1574,28 @@ void ConversionPatternRewriterImpl::notifyOpReplaced(Operation *op,
void ConversionPatternRewriterImpl::notifyBlockIsBeingErased(Block *block) {
Region *region = block->getParent();
Block *origNextBlock = block->getNextNode();
- blockActions.push_back(BlockAction::getErase(block, {region, origNextBlock}));
+ appendRewrite<EraseBlockRewrite>(block, region, origNextBlock);
}
-void ConversionPatternRewriterImpl::notifyInsertedBlock(
+void ConversionPatternRewriterImpl::notifyBlockInserted(
Block *block, Region *previous, Region::iterator previousIt) {
if (!previous) {
// This is a newly created block.
- blockActions.push_back(BlockAction::getCreate(block));
+ appendRewrite<CreateBlockRewrite>(block);
return;
}
Block *prevBlock = previousIt == previous->end() ? nullptr : &*previousIt;
- blockActions.push_back(BlockAction::getMove(block, {previous, prevBlock}));
+ appendRewrite<MoveBlockRewrite>(block, previous, prevBlock);
}
void ConversionPatternRewriterImpl::notifySplitBlock(Block *block,
Block *continuation) {
- blockActions.push_back(BlockAction::getSplit(continuation, block));
+ appendRewrite<SplitBlockRewrite>(continuation, block);
}
void ConversionPatternRewriterImpl::notifyBlockBeingInlined(
Block *block, Block *srcBlock, Block::iterator before) {
- blockActions.push_back(BlockAction::getInline(block, srcBlock, before));
+ appendRewrite<InlineBlockRewrite>(block, srcBlock, before);
}
void ConversionPatternRewriterImpl::notifyMatchFailure(
@@ -1437,7 +1616,7 @@ void ConversionPatternRewriterImpl::notifyMatchFailure(
ConversionPatternRewriter::ConversionPatternRewriter(MLIRContext *ctx)
: PatternRewriter(ctx),
impl(new detail::ConversionPatternRewriterImpl(*this)) {
- setListener(this);
+ setListener(impl.get());
}
ConversionPatternRewriter::~ConversionPatternRewriter() = default;
@@ -1486,8 +1665,8 @@ void ConversionPatternRewriter::eraseBlock(Block *block) {
for (Operation &op : *block)
eraseOp(&op);
- // Unlink the block from its parent region. The block is kept in the block
- // action and will be actually destroyed when rewrites are applied. This
+ // Unlink the block from its parent region. The block is kept in the rewrite
+ // object and will be actually destroyed when rewrites are applied. This
// allows us to keep the operations in the block live and undo the removal by
// re-inserting the block.
block->getParent()->getBlocks().remove(block);
@@ -1540,11 +1719,6 @@ ConversionPatternRewriter::getRemappedValues(ValueRange keys,
results);
}
-void ConversionPatternRewriter::notifyBlockInserted(
- Block *block, Region *previous, Region::iterator previousIt) {
- impl->notifyInsertedBlock(block, previous, previousIt);
-}
-
Block *ConversionPatternRewriter::splitBlock(Block *block,
Block::iterator before) {
auto *continuation = block->splitBlock(before);
@@ -1572,16 +1746,6 @@ void ConversionPatternRewriter::inlineBlockBefore(Block *source, Block *dest,
eraseBlock(source);
}
-void ConversionPatternRewriter::notifyOperationInserted(Operation *op,
- InsertPoint previous) {
- assert(!previous.isSet() && "expected newly created op");
- LLVM_DEBUG({
- impl->logger.startLine()
- << "** Insert : '" << op->getName() << "'(" << op << ")\n";
- });
- impl->createdOps.push_back(op);
-}
-
void ConversionPatternRewriter::startOpModification(Operation *op) {
#ifndef NDEBUG
impl->pendingRootUpdates.insert(op);
@@ -1614,23 +1778,6 @@ void ConversionPatternRewriter::cancelOpModification(Operation *op) {
rootUpdates.erase(rootUpdates.begin() + updateIdx);
}
-void ConversionPatternRewriter::notifyMatchFailure(
- Location loc, function_ref<void(Diagnostic &)> reasonCallback) {
- impl->notifyMatchFailure(loc, reasonCallback);
-}
-
-void ConversionPatternRewriter::moveOpBefore(Operation *op, Block *block,
- Block::iterator iterator) {
- llvm_unreachable(
- "moving single ops is not supported in a dialect conversion");
-}
-
-void ConversionPatternRewriter::moveOpAfter(Operation *op, Block *block,
- Block::iterator iterator) {
- llvm_unreachable(
- "moving single ops is not supported in a dialect conversion");
-}
-
detail::ConversionPatternRewriterImpl &ConversionPatternRewriter::getImpl() {
return *impl;
}
@@ -1705,11 +1852,11 @@ private:
RewriterState &curState);
/// Legalizes the actions registered during the execution of a pattern.
- LogicalResult legalizePatternBlockActions(Operation *op,
- ConversionPatternRewriter &rewriter,
- ConversionPatternRewriterImpl &impl,
- RewriterState &state,
- RewriterState &newState);
+ LogicalResult
+ legalizePatternBlockRewrites(Operation *op,
+ ConversionPatternRewriter &rewriter,
+ ConversionPatternRewriterImpl &impl,
+ RewriterState &state, RewriterState &newState);
LogicalResult legalizePatternCreatedOperations(
ConversionPatternRewriter &rewriter, ConversionPatternRewriterImpl &impl,
RewriterState &state, RewriterState &newState);
@@ -1991,8 +2138,8 @@ OperationLegalizer::legalizePatternResult(Operation *op, const Pattern &pattern,
// Legalize each of the actions registered during application.
RewriterState newState = impl.getCurrentState();
- if (failed(legalizePatternBlockActions(op, rewriter, impl, curState,
- newState)) ||
+ if (failed(legalizePatternBlockRewrites(op, rewriter, impl, curState,
+ newState)) ||
failed(legalizePatternRootUpdates(rewriter, impl, curState, newState)) ||
failed(legalizePatternCreatedOperations(rewriter, impl, curState,
newState))) {
@@ -2003,7 +2150,7 @@ OperationLegalizer::legalizePatternResult(Operation *op, const Pattern &pattern,
return success();
}
-LogicalResult OperationLegalizer::legalizePatternBlockActions(
+LogicalResult OperationLegalizer::legalizePatternBlockRewrites(
Operation *op, ConversionPatternRewriter &rewriter,
ConversionPatternRewriterImpl &impl, RewriterState &state,
RewriterState &newState) {
@@ -2011,22 +2158,22 @@ LogicalResult OperationLegalizer::legalizePatternBlockActions(
// If the pattern moved or created any blocks, make sure the types of block
// arguments get legalized.
- for (int i = state.numBlockActions, e = newState.numBlockActions; i != e;
- ++i) {
- auto &action = impl.blockActions[i];
- if (action.kind == BlockActionKind::TypeConversion ||
- action.kind == BlockActionKind::Erase)
+ for (int i = state.numRewrites, e = newState.numRewrites; i != e; ++i) {
+ BlockRewrite *rewrite = dyn_cast<BlockRewrite>(impl.rewrites[i].get());
+ if (!rewrite)
+ continue;
+ Block *block = rewrite->getBlock();
+ if (isa<BlockTypeConversionRewrite, EraseBlockRewrite>(rewrite))
continue;
// Only check blocks outside of the current operation.
- Operation *parentOp = action.block->getParentOp();
- if (!parentOp || parentOp == op || action.block->getNumArguments() == 0)
+ Operation *parentOp = block->getParentOp();
+ if (!parentOp || parentOp == op || block->getNumArguments() == 0)
continue;
// If the region of the block has a type converter, try to convert the block
// directly.
- if (auto *converter =
- impl.argConverter.getConverter(action.block->getParent())) {
- if (failed(impl.convertBlockSignature(action.block, converter))) {
+ if (auto *converter = impl.argConverter.getConverter(block->getParent())) {
+ if (failed(impl.convertBlockSignature(block, converter))) {
LLVM_DEBUG(logFailure(impl.logger, "failed to convert types of moved "
"block"));
return failure();
@@ -2047,9 +2194,9 @@ LogicalResult OperationLegalizer::legalizePatternBlockActions(
// If this operation should be considered for re-legalization, try it.
if (operationsToIgnore.insert(parentOp).second &&
failed(legalize(parentOp, rewriter))) {
- LLVM_DEBUG(logFailure(
- impl.logger, "operation '{0}'({1}) became illegal after block action",
- parentOp->getName(), parentOp));
+ LLVM_DEBUG(logFailure(impl.logger,
+ "operation '{0}'({1}) became illegal after rewrite",
+ parentOp->getName(), parentOp));
return failure();
}
}
@@ -3130,6 +3277,27 @@ struct AnyFunctionOpInterfaceSignatureConversion
};
} // namespace
+FailureOr<Operation *>
+mlir::convertOpResultTypes(Operation *op, ValueRange operands,
+ const TypeConverter &converter,
+ ConversionPatternRewriter &rewriter) {
+ assert(op && "Invalid op");
+ Location loc = op->getLoc();
+ if (converter.isLegal(op))
+ return rewriter.notifyMatchFailure(loc, "op already legal");
+
+ OperationState newOp(loc, op->getName());
+ newOp.addOperands(operands);
+
+ SmallVector<Type> newResultTypes;
+ if (failed(converter.convertTypes(op->getResultTypes(), newResultTypes)))
+ return rewriter.notifyMatchFailure(loc, "couldn't convert return types");
+
+ newOp.addTypes(newResultTypes);
+ newOp.addAttributes(op->getAttrs());
+ return rewriter.create(newOp);
+}
+
void mlir::populateFunctionOpInterfaceTypeConversionPattern(
StringRef functionLikeOpName, RewritePatternSet &patterns,
const TypeConverter &converter) {
diff --git a/mlir/python/CMakeLists.txt b/mlir/python/CMakeLists.txt
index 266b860..ed167af 100644
--- a/mlir/python/CMakeLists.txt
+++ b/mlir/python/CMakeLists.txt
@@ -482,6 +482,19 @@ declare_mlir_python_extension(MLIRPythonExtension.Dialects.Linalg.Pybind
MLIRCAPILinalg
)
+declare_mlir_python_extension(MLIRPythonExtension.Dialects.LLVM.Pybind
+ MODULE_NAME _mlirDialectsLLVM
+ ADD_TO_PARENT MLIRPythonSources.Dialects.llvm
+ ROOT_DIR "${PYTHON_SOURCE_DIR}"
+ SOURCES
+ DialectLLVM.cpp
+ PRIVATE_LINK_LIBS
+ LLVMSupport
+ EMBED_CAPI_LINK_LIBS
+ MLIRCAPIIR
+ MLIRCAPILLVM
+)
+
declare_mlir_python_extension(MLIRPythonExtension.Dialects.Quant.Pybind
MODULE_NAME _mlirDialectsQuant
ADD_TO_PARENT MLIRPythonSources.Dialects.quant
diff --git a/mlir/python/mlir/_mlir_libs/_mlir/ir.pyi b/mlir/python/mlir/_mlir_libs/_mlir/ir.pyi
index 344abb6..586bf7f 100644
--- a/mlir/python/mlir/_mlir_libs/_mlir/ir.pyi
+++ b/mlir/python/mlir/_mlir_libs/_mlir/ir.pyi
@@ -1442,7 +1442,17 @@ class DictAttr(Attribute):
@property
def typeid(self) -> TypeID: ...
-class F16Type(Type):
+class FloatType(Type):
+ @staticmethod
+ def isinstance(other: Type) -> bool: ...
+ def __init__(self, cast_from_type: Type) -> None: ...
+ @property
+ def width(self) -> int:
+ """
+ Returns the width of the floating-point type.
+ """
+
+class F16Type(FloatType):
static_typeid: ClassVar[TypeID] # value = <mlir._mlir_libs._TypeID object>
@staticmethod
def get(context: Optional[Context] = None) -> F16Type:
@@ -1455,7 +1465,7 @@ class F16Type(Type):
@property
def typeid(self) -> TypeID: ...
-class F32Type(Type):
+class F32Type(FloatType):
static_typeid: ClassVar[TypeID] # value = <mlir._mlir_libs._TypeID object>
@staticmethod
def get(context: Optional[Context] = None) -> F32Type:
@@ -1468,7 +1478,7 @@ class F32Type(Type):
@property
def typeid(self) -> TypeID: ...
-class F64Type(Type):
+class F64Type(FloatType):
static_typeid: ClassVar[TypeID] # value = <mlir._mlir_libs._TypeID object>
@staticmethod
def get(context: Optional[Context] = None) -> F64Type:
@@ -1502,7 +1512,7 @@ class FlatSymbolRefAttr(Attribute):
Returns the value of the FlatSymbolRef attribute as a string
"""
-class Float8E4M3B11FNUZType(Type):
+class Float8E4M3B11FNUZType(FloatType):
static_typeid: ClassVar[TypeID] # value = <mlir._mlir_libs._TypeID object>
@staticmethod
def get(context: Optional[Context] = None) -> Float8E4M3B11FNUZType:
@@ -1515,7 +1525,7 @@ class Float8E4M3B11FNUZType(Type):
@property
def typeid(self) -> TypeID: ...
-class Float8E4M3FNType(Type):
+class Float8E4M3FNType(FloatType):
static_typeid: ClassVar[TypeID] # value = <mlir._mlir_libs._TypeID object>
@staticmethod
def get(context: Optional[Context] = None) -> Float8E4M3FNType:
@@ -1528,7 +1538,7 @@ class Float8E4M3FNType(Type):
@property
def typeid(self) -> TypeID: ...
-class Float8E4M3FNUZType(Type):
+class Float8E4M3FNUZType(FloatType):
static_typeid: ClassVar[TypeID] # value = <mlir._mlir_libs._TypeID object>
@staticmethod
def get(context: Optional[Context] = None) -> Float8E4M3FNUZType:
@@ -1541,7 +1551,7 @@ class Float8E4M3FNUZType(Type):
@property
def typeid(self) -> TypeID: ...
-class Float8E5M2FNUZType(Type):
+class Float8E5M2FNUZType(FloatType):
static_typeid: ClassVar[TypeID] # value = <mlir._mlir_libs._TypeID object>
@staticmethod
def get(context: Optional[Context] = None) -> Float8E5M2FNUZType:
@@ -1554,7 +1564,7 @@ class Float8E5M2FNUZType(Type):
@property
def typeid(self) -> TypeID: ...
-class Float8E5M2Type(Type):
+class Float8E5M2Type(FloatType):
static_typeid: ClassVar[TypeID] # value = <mlir._mlir_libs._TypeID object>
@staticmethod
def get(context: Optional[Context] = None) -> Float8E5M2Type:
@@ -1601,7 +1611,7 @@ class FloatAttr(Attribute):
Returns the value of the float attribute
"""
-class FloatTF32Type(Type):
+class FloatTF32Type(FloatType):
static_typeid: ClassVar[TypeID] # value = <mlir._mlir_libs._TypeID object>
@staticmethod
def get(context: Optional[Context] = None) -> FloatTF32Type:
diff --git a/mlir/python/mlir/dialects/llvm.py b/mlir/python/mlir/dialects/llvm.py
index 7702543..8aa16e4 100644
--- a/mlir/python/mlir/dialects/llvm.py
+++ b/mlir/python/mlir/dialects/llvm.py
@@ -4,3 +4,4 @@
from ._llvm_ops_gen import *
from ._llvm_enum_gen import *
+from .._mlir_libs._mlirDialectsLLVM import *
diff --git a/mlir/test/CAPI/llvm.c b/mlir/test/CAPI/llvm.c
index aaec7b1..5a78fac 100644
--- a/mlir/test/CAPI/llvm.c
+++ b/mlir/test/CAPI/llvm.c
@@ -12,6 +12,7 @@
#include "mlir-c/Dialect/LLVM.h"
#include "mlir-c/BuiltinTypes.h"
#include "mlir-c/IR.h"
+#include "mlir-c/Support.h"
#include <assert.h>
#include <math.h>
@@ -73,11 +74,164 @@ static void testTypeCreation(MlirContext ctx) {
mlirTypeEqual(i32_i64_s, i32_i64_s_ref));
}
+// CHECK-LABEL: testStructTypeCreation
+static int testStructTypeCreation(MlirContext ctx) {
+ fprintf(stderr, "testStructTypeCreation");
+
+ // CHECK: !llvm.struct<()>
+ mlirTypeDump(mlirLLVMStructTypeLiteralGet(ctx, /*nFieldTypes=*/0,
+ /*fieldTypes=*/NULL,
+ /*isPacked=*/false));
+
+ MlirType i8 = mlirIntegerTypeGet(ctx, 8);
+ MlirType i32 = mlirIntegerTypeGet(ctx, 32);
+ MlirType i64 = mlirIntegerTypeGet(ctx, 64);
+ MlirType i8_i32_i64[] = {i8, i32, i64};
+ // CHECK: !llvm.struct<(i8, i32, i64)>
+ mlirTypeDump(
+ mlirLLVMStructTypeLiteralGet(ctx, sizeof(i8_i32_i64) / sizeof(MlirType),
+ i8_i32_i64, /*isPacked=*/false));
+ // CHECK: !llvm.struct<(i32)>
+ mlirTypeDump(mlirLLVMStructTypeLiteralGet(ctx, 1, &i32, /*isPacked=*/false));
+ MlirType i32_i32[] = {i32, i32};
+ // CHECK: !llvm.struct<packed (i32, i32)>
+ mlirTypeDump(mlirLLVMStructTypeLiteralGet(
+ ctx, sizeof(i32_i32) / sizeof(MlirType), i32_i32, /*isPacked=*/true));
+
+ MlirType literal =
+ mlirLLVMStructTypeLiteralGet(ctx, sizeof(i8_i32_i64) / sizeof(MlirType),
+ i8_i32_i64, /*isPacked=*/false);
+ // CHECK: num elements: 3
+ // CHECK: i8
+ // CHECK: i32
+ // CHECK: i64
+ fprintf(stderr, "num elements: %ld\n",
+ mlirLLVMStructTypeGetNumElementTypes(literal));
+ for (intptr_t i = 0; i < 3; ++i) {
+ mlirTypeDump(mlirLLVMStructTypeGetElementType(literal, i));
+ }
+
+ if (!mlirTypeEqual(
+ mlirLLVMStructTypeLiteralGet(ctx, 1, &i32, /*isPacked=*/false),
+ mlirLLVMStructTypeLiteralGet(ctx, 1, &i32, /*isPacked=*/false))) {
+ return 1;
+ }
+ if (mlirTypeEqual(
+ mlirLLVMStructTypeLiteralGet(ctx, 1, &i32, /*isPacked=*/false),
+ mlirLLVMStructTypeLiteralGet(ctx, 1, &i64, /*isPacked=*/false))) {
+ return 2;
+ }
+
+ // CHECK: !llvm.struct<"foo", opaque>
+ // CHECK: !llvm.struct<"bar", opaque>
+ mlirTypeDump(mlirLLVMStructTypeIdentifiedGet(
+ ctx, mlirStringRefCreateFromCString("foo")));
+ mlirTypeDump(mlirLLVMStructTypeIdentifiedGet(
+ ctx, mlirStringRefCreateFromCString("bar")));
+
+ if (!mlirTypeEqual(mlirLLVMStructTypeIdentifiedGet(
+ ctx, mlirStringRefCreateFromCString("foo")),
+ mlirLLVMStructTypeIdentifiedGet(
+ ctx, mlirStringRefCreateFromCString("foo")))) {
+ return 3;
+ }
+ if (mlirTypeEqual(mlirLLVMStructTypeIdentifiedGet(
+ ctx, mlirStringRefCreateFromCString("foo")),
+ mlirLLVMStructTypeIdentifiedGet(
+ ctx, mlirStringRefCreateFromCString("bar")))) {
+ return 4;
+ }
+
+ MlirType fooStruct = mlirLLVMStructTypeIdentifiedGet(
+ ctx, mlirStringRefCreateFromCString("foo"));
+ MlirStringRef name = mlirLLVMStructTypeGetIdentifier(fooStruct);
+ if (memcmp(name.data, "foo", name.length))
+ return 5;
+ if (!mlirLLVMStructTypeIsOpaque(fooStruct))
+ return 6;
+
+ MlirType i32_i64[] = {i32, i64};
+ MlirLogicalResult result =
+ mlirLLVMStructTypeSetBody(fooStruct, sizeof(i32_i64) / sizeof(MlirType),
+ i32_i64, /*isPacked=*/false);
+ if (!mlirLogicalResultIsSuccess(result))
+ return 7;
+
+ // CHECK: !llvm.struct<"foo", (i32, i64)>
+ mlirTypeDump(fooStruct);
+ if (mlirLLVMStructTypeIsOpaque(fooStruct))
+ return 8;
+ if (mlirLLVMStructTypeIsPacked(fooStruct))
+ return 9;
+ if (!mlirTypeEqual(mlirLLVMStructTypeIdentifiedGet(
+ ctx, mlirStringRefCreateFromCString("foo")),
+ fooStruct)) {
+ return 10;
+ }
+
+ MlirType barStruct = mlirLLVMStructTypeIdentifiedGet(
+ ctx, mlirStringRefCreateFromCString("bar"));
+ result = mlirLLVMStructTypeSetBody(barStruct, 1, &i32, /*isPacked=*/true);
+ if (!mlirLogicalResultIsSuccess(result))
+ return 11;
+
+ // CHECK: !llvm.struct<"bar", packed (i32)>
+ mlirTypeDump(barStruct);
+ if (!mlirLLVMStructTypeIsPacked(barStruct))
+ return 12;
+
+ // Same body, should succeed.
+ result =
+ mlirLLVMStructTypeSetBody(fooStruct, sizeof(i32_i64) / sizeof(MlirType),
+ i32_i64, /*isPacked=*/false);
+ if (!mlirLogicalResultIsSuccess(result))
+ return 13;
+
+ // Different body, should fail.
+ result = mlirLLVMStructTypeSetBody(fooStruct, 1, &i32, /*isPacked=*/false);
+ if (mlirLogicalResultIsSuccess(result))
+ return 14;
+
+ // Packed flag differs, should fail.
+ result = mlirLLVMStructTypeSetBody(barStruct, 1, &i32, /*isPacked=*/false);
+ if (mlirLogicalResultIsSuccess(result))
+ return 15;
+
+ // Should have a different name.
+ // CHECK: !llvm.struct<"foo{{[^"]+}}
+ mlirTypeDump(mlirLLVMStructTypeIdentifiedNewGet(
+ ctx, mlirStringRefCreateFromCString("foo"), /*nFieldTypes=*/0,
+ /*fieldTypes=*/NULL, /*isPacked=*/false));
+
+ // Two freshly created "new" types must differ.
+ if (mlirTypeEqual(
+ mlirLLVMStructTypeIdentifiedNewGet(
+ ctx, mlirStringRefCreateFromCString("foo"), /*nFieldTypes=*/0,
+ /*fieldTypes=*/NULL, /*isPacked=*/false),
+ mlirLLVMStructTypeIdentifiedNewGet(
+ ctx, mlirStringRefCreateFromCString("foo"), /*nFieldTypes=*/0,
+ /*fieldTypes=*/NULL, /*isPacked=*/false))) {
+ return 16;
+ }
+
+ MlirType opaque = mlirLLVMStructTypeOpaqueGet(
+ ctx, mlirStringRefCreateFromCString("opaque"));
+ // CHECK: !llvm.struct<"opaque", opaque>
+ mlirTypeDump(opaque);
+ if (!mlirLLVMStructTypeIsOpaque(opaque))
+ return 17;
+
+ return 0;
+}
+
int main(void) {
MlirContext ctx = mlirContextCreate();
mlirDialectHandleRegisterDialect(mlirGetDialectHandle__llvm__(), ctx);
mlirContextGetOrLoadDialect(ctx, mlirStringRefCreateFromCString("llvm"));
testTypeCreation(ctx);
+ int result = testStructTypeCreation(ctx);
mlirContextDestroy(ctx);
- return 0;
+ if (result)
+ fprintf(stderr, "FAILED: code %d", result);
+ return result;
}
diff --git a/mlir/test/Conversion/NVGPUToNVVM/nvgpu-to-nvvm.mlir b/mlir/test/Conversion/NVGPUToNVVM/nvgpu-to-nvvm.mlir
index 09a873f..dbf8ead 100644
--- a/mlir/test/Conversion/NVGPUToNVVM/nvgpu-to-nvvm.mlir
+++ b/mlir/test/Conversion/NVGPUToNVVM/nvgpu-to-nvvm.mlir
@@ -590,12 +590,12 @@ func.func @mbarrier_txcount() {
}
- %phase = arith.constant 0 : index
+ %phase_c0 = arith.constant 0 : i1
%ticks = arith.constant 10000000 : index
// CHECK: %[[base3:.+]] = llvm.extractvalue %[[barStr]][1] : !llvm.struct<(ptr<3>, ptr<3>, i64, array<1 x i64>, array<1 x i64>)>
// CHECK: %[[barPtr3:.+]] = llvm.getelementptr %[[base3]][%[[mid]]] : (!llvm.ptr<3>, i64) -> !llvm.ptr<3>, i64
// CHECK: nvvm.mbarrier.try_wait.parity.shared %[[barPtr3]]
- nvgpu.mbarrier.try_wait.parity %barrier[%c0], %phase, %ticks : !barrierType
+ nvgpu.mbarrier.try_wait.parity %barrier[%c0], %phase_c0, %ticks : !barrierType
func.return
}
@@ -626,12 +626,12 @@ func.func @mbarrier_txcount_pred() {
// CHECK: nvvm.mbarrier.arrive.expect_tx.shared %[[barPtr2]], {{.*}}, predicate = %[[P]]
nvgpu.mbarrier.arrive.expect_tx %barrier[%c0], %txcount, predicate = %pred : !barrierType
- %phase = arith.constant 0 : index
+ %phase_c0 = arith.constant 0 : i1
%ticks = arith.constant 10000000 : index
// CHECK: %[[base3:.+]] = llvm.extractvalue %[[barStr]][1] : !llvm.struct<(ptr<3>, ptr<3>, i64, array<1 x i64>, array<1 x i64>)>
// CHECK: %[[barPtr3:.+]] = llvm.getelementptr %[[base3]][%[[mid]]] : (!llvm.ptr<3>, i64) -> !llvm.ptr<3>, i64
// CHECK: nvvm.mbarrier.try_wait.parity.shared %[[barPtr3]]
- nvgpu.mbarrier.try_wait.parity %barrier[%c0], %phase, %ticks : !barrierType
+ nvgpu.mbarrier.try_wait.parity %barrier[%c0], %phase_c0, %ticks : !barrierType
func.return
}
diff --git a/mlir/test/Conversion/OpenMPToLLVM/convert-to-llvmir.mlir b/mlir/test/Conversion/OpenMPToLLVM/convert-to-llvmir.mlir
index 3fbeaeb..ae3bb6c 100644
--- a/mlir/test/Conversion/OpenMPToLLVM/convert-to-llvmir.mlir
+++ b/mlir/test/Conversion/OpenMPToLLVM/convert-to-llvmir.mlir
@@ -320,8 +320,11 @@ llvm.func @_QPsb() {
// CHECK-LABEL: @_QPsimple_reduction
// CHECK: %[[RED_ACCUMULATOR:.*]] = llvm.alloca %{{.*}} x i32 {bindc_name = "x", uniq_name = "_QFsimple_reductionEx"} : (i64) -> !llvm.ptr
// CHECK: omp.parallel
-// CHECK: omp.wsloop reduction(@eqv_reduction -> %[[RED_ACCUMULATOR]] : !llvm.ptr) for
-// CHECK: omp.reduction %{{.*}}, %[[RED_ACCUMULATOR]] : i32, !llvm.ptr
+// CHECK: omp.wsloop reduction(@eqv_reduction %{{.+}} -> %[[PRV:.+]] : !llvm.ptr) for
+// CHECK: %[[LPRV:.+]] = llvm.load %[[PRV]] : !llvm.ptr -> i32
+// CHECK: %[[CMP:.+]] = llvm.icmp "eq" %{{.*}}, %[[LPRV]] : i32
+// CHECK: %[[ZEXT:.+]] = llvm.zext %[[CMP]] : i1 to i32
+// CHECK: llvm.store %[[ZEXT]], %[[PRV]] : i32, !llvm.ptr
// CHECK: omp.yield
// CHECK: omp.terminator
// CHECK: llvm.return
@@ -350,14 +353,17 @@ llvm.func @_QPsimple_reduction(%arg0: !llvm.ptr {fir.bindc_name = "y"}) {
llvm.store %5, %4 : i32, !llvm.ptr
omp.parallel {
%6 = llvm.alloca %3 x i32 {adapt.valuebyref, in_type = i32, operandSegmentSizes = array<i32: 0, 0>, pinned} : (i64) -> !llvm.ptr
- omp.wsloop reduction(@eqv_reduction -> %4 : !llvm.ptr) for (%arg1) : i32 = (%1) to (%0) inclusive step (%1) {
+ omp.wsloop reduction(@eqv_reduction %4 -> %prv : !llvm.ptr) for (%arg1) : i32 = (%1) to (%0) inclusive step (%1) {
llvm.store %arg1, %6 : i32, !llvm.ptr
%7 = llvm.load %6 : !llvm.ptr -> i32
%8 = llvm.sext %7 : i32 to i64
%9 = llvm.sub %8, %3 : i64
%10 = llvm.getelementptr %arg0[0, %9] : (!llvm.ptr, i64) -> !llvm.ptr, !llvm.array<100 x i32>
%11 = llvm.load %10 : !llvm.ptr -> i32
- omp.reduction %11, %4 : i32, !llvm.ptr
+ %12 = llvm.load %prv : !llvm.ptr -> i32
+ %13 = llvm.icmp "eq" %11, %12 : i32
+ %14 = llvm.zext %13 : i1 to i32
+ llvm.store %14, %prv : i32, !llvm.ptr
omp.yield
}
omp.terminator
diff --git a/mlir/test/Conversion/SCFToOpenMP/reductions.mlir b/mlir/test/Conversion/SCFToOpenMP/reductions.mlir
index faf5ec4..a670464 100644
--- a/mlir/test/Conversion/SCFToOpenMP/reductions.mlir
+++ b/mlir/test/Conversion/SCFToOpenMP/reductions.mlir
@@ -27,13 +27,15 @@ func.func @reduction1(%arg0 : index, %arg1 : index, %arg2 : index,
%zero = arith.constant 0.0 : f32
// CHECK: omp.parallel
// CHECK: omp.wsloop
- // CHECK-SAME: reduction(@[[$REDF]] -> %[[BUF]]
+ // CHECK-SAME: reduction(@[[$REDF]] %[[BUF]] -> %[[PVT_BUF:[a-z0-9]+]]
// CHECK: memref.alloca_scope
scf.parallel (%i0, %i1) = (%arg0, %arg1) to (%arg2, %arg3)
step (%arg4, %step) init (%zero) -> (f32) {
// CHECK: %[[CST_INNER:.*]] = arith.constant 1.0
%one = arith.constant 1.0 : f32
- // CHECK: omp.reduction %[[CST_INNER]], %[[BUF]]
+ // CHECK: %[[PVT_VAL:.*]] = llvm.load %[[PVT_BUF]] : !llvm.ptr -> f32
+ // CHECK: %[[ADD_RESULT:.*]] = arith.addf %[[PVT_VAL]], %[[CST_INNER]] : f32
+ // CHECK: llvm.store %[[ADD_RESULT]], %[[PVT_BUF]] : f32, !llvm.ptr
scf.reduce(%one : f32) {
^bb0(%lhs : f32, %rhs: f32):
%res = arith.addf %lhs, %rhs : f32
@@ -103,10 +105,15 @@ func.func @reduction_muli(%arg0 : index, %arg1 : index, %arg2 : index,
%arg3 : index, %arg4 : index) {
%step = arith.constant 1 : index
%one = arith.constant 1 : i32
+ // CHECK: %[[RED_VAR:.*]] = llvm.alloca %{{.*}} x i32 : (i64) -> !llvm.ptr
+ // CHECK: omp.wsloop reduction(@[[$REDI]] %[[RED_VAR]] -> %[[RED_PVT_VAR:.*]] : !llvm.ptr)
scf.parallel (%i0, %i1) = (%arg0, %arg1) to (%arg2, %arg3)
step (%arg4, %step) init (%one) -> (i32) {
- // CHECK: omp.reduction
+ // CHECK: %[[C2:.*]] = arith.constant 2 : i32
%pow2 = arith.constant 2 : i32
+ // CHECK: %[[RED_PVT_VAL:.*]] = llvm.load %[[RED_PVT_VAR]] : !llvm.ptr -> i32
+ // CHECK: %[[MUL_RESULT:.*]] = arith.muli %[[RED_PVT_VAL]], %[[C2]] : i32
+ // CHECK: llvm.store %[[MUL_RESULT]], %[[RED_PVT_VAR]] : i32, !llvm.ptr
scf.reduce(%pow2 : i32) {
^bb0(%lhs : i32, %rhs: i32):
%res = arith.muli %lhs, %rhs : i32
@@ -199,16 +206,23 @@ func.func @reduction4(%arg0 : index, %arg1 : index, %arg2 : index,
// CHECK: omp.parallel
// CHECK: omp.wsloop
- // CHECK-SAME: reduction(@[[$REDF1]] -> %[[BUF1]]
- // CHECK-SAME: @[[$REDF2]] -> %[[BUF2]]
+ // CHECK-SAME: reduction(@[[$REDF1]] %[[BUF1]] -> %[[PVT_BUF1:[a-z0-9]+]]
+ // CHECK-SAME: @[[$REDF2]] %[[BUF2]] -> %[[PVT_BUF2:[a-z0-9]+]]
// CHECK: memref.alloca_scope
%res:2 = scf.parallel (%i0, %i1) = (%arg0, %arg1) to (%arg2, %arg3)
step (%arg4, %step) init (%zero, %ione) -> (f32, i64) {
+ // CHECK: %[[CST_ONE:.*]] = arith.constant 1.0{{.*}} : f32
%one = arith.constant 1.0 : f32
- // CHECK: arith.fptosi
+ // CHECK: %[[CST_INT_ONE:.*]] = arith.fptosi
%1 = arith.fptosi %one : f32 to i64
- // CHECK: omp.reduction %{{.*}}, %[[BUF1]]
- // CHECK: omp.reduction %{{.*}}, %[[BUF2]]
+ // CHECK: %[[PVT_VAL1:.*]] = llvm.load %[[PVT_BUF1]] : !llvm.ptr -> f32
+ // CHECK: %[[TEMP1:.*]] = arith.cmpf oge, %[[PVT_VAL1]], %[[CST_ONE]] : f32
+ // CHECK: %[[CMP_VAL1:.*]] = arith.select %[[TEMP1]], %[[PVT_VAL1]], %[[CST_ONE]] : f32
+ // CHECK: llvm.store %[[CMP_VAL1]], %[[PVT_BUF1]] : f32, !llvm.ptr
+ // CHECK: %[[PVT_VAL2:.*]] = llvm.load %[[PVT_BUF2]] : !llvm.ptr -> i64
+ // CHECK: %[[TEMP2:.*]] = arith.cmpi slt, %[[PVT_VAL2]], %[[CST_INT_ONE]] : i64
+ // CHECK: %[[CMP_VAL2:.*]] = arith.select %[[TEMP2]], %[[CST_INT_ONE]], %[[PVT_VAL2]] : i64
+ // CHECK: llvm.store %[[CMP_VAL2]], %[[PVT_BUF2]] : i64, !llvm.ptr
scf.reduce(%one, %1 : f32, i64) {
^bb0(%lhs : f32, %rhs: f32):
%cmp = arith.cmpf oge, %lhs, %rhs : f32
diff --git a/mlir/test/Conversion/VectorToLLVM/vector-to-llvm.mlir b/mlir/test/Conversion/VectorToLLVM/vector-to-llvm.mlir
index 1c13b16..a46f2e1 100644
--- a/mlir/test/Conversion/VectorToLLVM/vector-to-llvm.mlir
+++ b/mlir/test/Conversion/VectorToLLVM/vector-to-llvm.mlir
@@ -2460,3 +2460,40 @@ func.func @make_fixed_vector_of_scalable_vector(%f : f64) -> vector<3x[2]xf64>
%res = vector.broadcast %f : f64 to vector<3x[2]xf64>
return %res : vector<3x[2]xf64>
}
+
+// -----
+
+// CHECK-LABEL: @vector_interleave_0d
+// CHECK-SAME: %[[LHS:.*]]: vector<i8>, %[[RHS:.*]]: vector<i8>)
+func.func @vector_interleave_0d(%a: vector<i8>, %b: vector<i8>) -> vector<2xi8> {
+ // CHECK: %[[LHS_RANK1:.*]] = builtin.unrealized_conversion_cast %[[LHS]] : vector<i8> to vector<1xi8>
+ // CHECK: %[[RHS_RANK1:.*]] = builtin.unrealized_conversion_cast %[[RHS]] : vector<i8> to vector<1xi8>
+ // CHECK: %[[ZIP:.*]] = llvm.shufflevector %[[LHS_RANK1]], %[[RHS_RANK1]] [0, 1] : vector<1xi8>
+ // CHECK: return %[[ZIP]]
+ %0 = vector.interleave %a, %b : vector<i8>
+ return %0 : vector<2xi8>
+}
+
+// -----
+
+// CHECK-LABEL: @vector_interleave_1d
+// CHECK-SAME: %[[LHS:.*]]: vector<8xf32>, %[[RHS:.*]]: vector<8xf32>)
+func.func @vector_interleave_1d(%a: vector<8xf32>, %b: vector<8xf32>) -> vector<16xf32>
+{
+ // CHECK: %[[ZIP:.*]] = llvm.shufflevector %[[LHS]], %[[RHS]] [0, 8, 1, 9, 2, 10, 3, 11, 4, 12, 5, 13, 6, 14, 7, 15] : vector<8xf32>
+ // CHECK: return %[[ZIP]]
+ %0 = vector.interleave %a, %b : vector<8xf32>
+ return %0 : vector<16xf32>
+}
+
+// -----
+
+// CHECK-LABEL: @vector_interleave_1d_scalable
+// CHECK-SAME: %[[LHS:.*]]: vector<[4]xi32>, %[[RHS:.*]]: vector<[4]xi32>)
+func.func @vector_interleave_1d_scalable(%a: vector<[4]xi32>, %b: vector<[4]xi32>) -> vector<[8]xi32>
+{
+ // CHECK: %[[ZIP:.*]] = "llvm.intr.experimental.vector.interleave2"(%[[LHS]], %[[RHS]]) : (vector<[4]xi32>, vector<[4]xi32>) -> vector<[8]xi32>
+ // CHECK: return %[[ZIP]]
+ %0 = vector.interleave %a, %b : vector<[4]xi32>
+ return %0 : vector<[8]xi32>
+}
diff --git a/mlir/test/Dialect/AMDGPU/transform_optimize_shmem_reads_writes.mlir b/mlir/test/Dialect/AMDGPU/transform_optimize_shmem_reads_writes.mlir
new file mode 100644
index 0000000..dfdd1b1
--- /dev/null
+++ b/mlir/test/Dialect/AMDGPU/transform_optimize_shmem_reads_writes.mlir
@@ -0,0 +1,64 @@
+// RUN: mlir-opt %s -transform-interpreter | FileCheck %s
+
+ // CHECK: @optimize_shmem([[arg0:%.+]]: memref<{{.*}}>, [[readRow:%.+]]: index, [[readCol:%.+]]: index, [[writeRow:%.+]]: index, [[writeCol:%.+]]: index, [[fragRow:%.+]]: index, [[fragCol:%.+]]: index, [[fragColPerm:%.+]]: index, [[stRow:%.+]]: index, [[stCol:%.+]]: index)
+ func.func @optimize_shmem(%arg0: memref<4096x4096xf16>,
+ %readRow: index, %readCol: index,
+ %writeRow: index, %writeCol: index,
+ %fragRow: index, %fragCol: index,
+ %fragColPerm: index,
+ %stRow: index, %stCol: index) {
+ // CHECK: %[[cst:.+]] = arith.constant 0.000000e+00 : f16
+ %cst = arith.constant 0.000000e+00 : f16
+
+ // CHECK: [[shmA:%.+]] = memref.alloc
+ // CHECK: [[shmB:%.+]] = memref.alloc
+ %shmA = memref.alloc() {alignment = 64 : i64} : memref<128x32xf16, 3>
+ %shmB = memref.alloc() {alignment = 64 : i64} : memref<256x32xf16, 3>
+
+ // CHECK: %[[D0:.+]] = vector.transfer_read [[arg0:%.+]][[[readRow:%.+]], [[readCol:%.+]]], [[cst:.+]] {in_bounds = [true, true]} : memref<4096x4096xf16>, vector<1x8xf16>
+ %0 = vector.transfer_read %arg0[%readRow, %readCol], %cst {in_bounds = [true, true]} : memref<4096x4096xf16>, vector<1x8xf16>
+ // CHECK: [[c7:%.+]] = arith.constant 7 : index
+ // CHECK: [[srcBits:%.+]] = arith.andi [[stRow:%.+]], [[c7]]
+ // CHECK: [[c2:%.+]] = arith.constant 2 : index
+ // CHECK: [[xorBits:%.+]] = arith.shli [[srcBits]], [[c2]]
+ // CHECK: [[stColPerm:%.+]] = arith.xori [[stCol:%.+]], [[xorBits]]
+ // CHECK: vector.transfer_write %[[D0:.+]], [[shmB]][[[writeRow:%.+]], [[writeCol:%.+]]] {in_bounds = [true, true]} : vector<1x8xf16>, memref<256x32xf16, 3>
+ vector.transfer_write %0, %shmB[%writeRow, %writeCol] {in_bounds = [true, true]} : vector<1x8xf16>, memref<256x32xf16, 3>
+ gpu.barrier
+ gpu.barrier
+ // CHECK: [[c7:%.+]] = arith.constant 7 : index
+ // CHECK: [[srcBits:%.+]] = arith.andi [[fragRow]], [[c7]]
+ // CHECK: [[c2:%.+]] = arith.constant 2 : index
+ // CHECK: [[xorBits:%.+]] = arith.shli [[srcBits]], [[c2]]
+ // CHECK: [[fragColPerm:%.+]] = arith.xori [[fragCol:%.+]], [[xorBits]]
+ // CHECK: vector.load [[shmB:%.+]][[[fragRow:%.+]], [[fragColPerm]]] : memref<256x32xf16, 3>, vector<8xf16>
+ %1 = vector.load %shmB[%fragRow, %fragColPerm] : memref<256x32xf16, 3>, vector<8xf16>
+
+ // CHECK: %[[D2:.+]] = vector.transfer_read [[arg0:%.+]][[[readRow:%.+]], [[readCol:%.+]]], [[cst:.+]] {in_bounds = [true, true]} : memref<4096x4096xf16>, vector<1x8xf16>
+ %2 = vector.transfer_read %arg0[%readRow, %readCol], %cst {in_bounds = [true, true]} : memref<4096x4096xf16>, vector<1x8xf16>
+ // CHECK: [[c7:%.+]] = arith.constant 7 : index
+ // CHECK: [[srcBits:%.+]] = arith.andi [[stRow:%.+]], [[c7]]
+ // CHECK: [[c2:%.+]] = arith.constant 2 : index
+ // CHECK: [[xorBits:%.+]] = arith.shli [[srcBits]], [[c2]]
+ // CHECK: [[stColPerm:%.+]] = arith.xori [[stCol:%.+]], [[xorBits]]
+ // CHECK: vector.transfer_write %[[D2:.+]], [[shmA:%.+]][[[writeRow:%.+]], [[writeCol:%.+]]] {in_bounds = [true, true]} : vector<1x8xf16>, memref<128x32xf16, 3>
+ vector.transfer_write %2, %shmA[%writeRow, %writeCol] {in_bounds = [true, true]} : vector<1x8xf16>, memref<128x32xf16, 3>
+ gpu.barrier
+ gpu.barrier
+ // CHECK: [[c7:%.+]] = arith.constant 7 : index
+ // CHECK: [[srcBits:%.+]] = arith.andi [[fragRow]], [[c7]]
+ // CHECK: [[c2:%.+]] = arith.constant 2 : index
+ // CHECK: [[xorBits:%.+]] = arith.shli [[srcBits]], [[c2]]
+ // CHECK: [[fragColPerm:%.+]] = arith.xori [[fragCol:%.+]], [[xorBits]]
+ // CHECK: vector.load [[shmA:%.+]][[[fragRow:%.+]], [[fragColPerm]]] : memref<128x32xf16, 3>, vector<8xf16>
+ %3 = vector.load %shmA[%fragRow, %fragColPerm] : memref<128x32xf16, 3>, vector<8xf16>
+ return
+ }
+
+module attributes { transform.with_named_sequence } {
+ transform.named_sequence @__transform_main(%root: !transform.any_op {transform.readonly}) {
+ %0 = transform.structured.match ops{["func.func"]} in %root : (!transform.any_op) -> !transform.any_op
+ transform.amdgpu.optimize_shared_memory_reads_and_writes %0 : (!transform.any_op) -> ()
+ transform.yield
+ } // @__transform_main
+} // module
diff --git a/mlir/test/Dialect/GPU/test-nvvm-pipeline.mlir b/mlir/test/Dialect/GPU/test-nvvm-pipeline.mlir
new file mode 100644
index 0000000..07e7197
--- /dev/null
+++ b/mlir/test/Dialect/GPU/test-nvvm-pipeline.mlir
@@ -0,0 +1,30 @@
+// REQUIRES: host-supports-nvptx
+// RUN: mlir-opt %s \
+// RUN: | mlir-opt -gpu-lower-to-nvvm-pipeline="cubin-format=isa" \
+// RUN: | FileCheck %s
+
+// RUN: mlir-opt %s \
+// RUN: | mlir-opt -gpu-lower-to-nvvm-pipeline="cubin-format=isa" \
+// RUN: --mlir-print-ir-after=convert-gpu-to-nvvm 2>&1 \
+// RUN: | FileCheck %s --check-prefixes=CHECK-NVVM
+
+// This test checks whether the GPU region is compiled correctly to PTX by
+// pipeline. It doesn't test IR for GPU side, but it can test Host IR and
+// generated PTX.
+
+// CHECK-LABEL: llvm.func @test_math(%arg0: f32) {
+func.func @test_math(%arg0 : f32) {
+ %c2 = arith.constant 2 : index
+ %c1 = arith.constant 1 : index
+ // CHECK: gpu.launch_func @test_math_kernel::@test_math_kernel
+ // CHECK: gpu.binary @test_math_kernel [#gpu.object<#nvvm.target
+ gpu.launch
+ blocks(%0, %1, %2) in (%3 = %c1, %4 = %c1, %5 = %c1)
+ threads(%6, %7, %8) in (%9 = %c2, %10 = %c1, %11 = %c1) {
+ // CHECK-NVVM: __nv_expf
+ %s1 = math.exp %arg0 : f32
+ gpu.printf "%f" %s1 : f32
+ gpu.terminator
+ }
+ return
+} \ No newline at end of file
diff --git a/mlir/test/Dialect/LLVMIR/nvvm.mlir b/mlir/test/Dialect/LLVMIR/nvvm.mlir
index 0369f45..f35393c 100644
--- a/mlir/test/Dialect/LLVMIR/nvvm.mlir
+++ b/mlir/test/Dialect/LLVMIR/nvvm.mlir
@@ -43,6 +43,18 @@ func.func @llvm_nvvm_barrier0() {
llvm.return
}
+// CHECK-LABEL: @llvm_nvvm_barrier
+// CHECK-SAME: (%[[barId:.*]]: i32, %[[numberOfThreads:.*]]: i32)
+llvm.func @llvm_nvvm_barrier(%barId : i32, %numberOfThreads : i32) {
+ // CHECK: nvvm.barrier
+ nvvm.barrier
+ // CHECK: nvvm.barrier id = %[[barId]]
+ nvvm.barrier id = %barId
+ // CHECK: nvvm.barrier id = %[[barId]] number_of_threads = %[[numberOfThreads]]
+ nvvm.barrier id = %barId number_of_threads = %numberOfThreads
+ llvm.return
+}
+
// CHECK-LABEL: @llvm_nvvm_cluster_arrive
func.func @llvm_nvvm_cluster_arrive() {
// CHECK: nvvm.cluster.arrive
diff --git a/mlir/test/Dialect/LLVMIR/rocdl.mlir b/mlir/test/Dialect/LLVMIR/rocdl.mlir
index 5a14df9..89e8e78 100644
--- a/mlir/test/Dialect/LLVMIR/rocdl.mlir
+++ b/mlir/test/Dialect/LLVMIR/rocdl.mlir
@@ -35,6 +35,18 @@ func.func @rocdl.barrier() {
llvm.return
}
+func.func @rocdl.sched_barrier() {
+ // CHECK: rocdl.sched.barrier
+ rocdl.sched.barrier 0
+ llvm.return
+}
+
+func.func @rocdl.setprio() {
+ // CHECK: rocdl.s.setprio
+ rocdl.s.setprio 0
+ llvm.return
+}
+
func.func @rocdl.xdlops(%arg0 : f32, %arg1 : f32,
%arg2 : vector<32xf32>, %arg3 : i32,
%arg4 : vector<16xf32>, %arg5 : vector<4xf32>,
diff --git a/mlir/test/Dialect/Linalg/invalid.mlir b/mlir/test/Dialect/Linalg/invalid.mlir
index 56890df..916c04f 100644
--- a/mlir/test/Dialect/Linalg/invalid.mlir
+++ b/mlir/test/Dialect/Linalg/invalid.mlir
@@ -744,3 +744,29 @@ func.func @illegal_softmax_output_shape(%arg0: tensor<2x16x32xf32>) -> tensor<2x
-> tensor<2x16xf32>
return %1 : tensor<2x16xf32>
}
+
+// -----
+
+func.func @mmt4d_dims_mismatch(%A: tensor<16x16x8x1xf32>,
+ %B: tensor<16x16x8x1xf32>,
+ %C_in: tensor<16x16x8x1xf32>) -> tensor<16x16x8x1xf32> {
+ // expected-error @+1 {{inferred input/output operand #2 has shape's dimension #3 to be 8, but found 1}}
+ %res = linalg.mmt4d
+ ins(%A, %B: tensor<16x16x8x1xf32>, tensor<16x16x8x1xf32>)
+ outs(%C_in: tensor<16x16x8x1xf32>)
+ -> tensor<16x16x8x1xf32>
+ return %res : tensor<16x16x8x1xf32>
+}
+
+// -----
+
+func.func @mmt4d_rank_mismatch(%A: tensor<16x16x8x1xf32>,
+ %B: tensor<16x16x8x1xf32>,
+ %C_in: tensor<8x8xf32>) -> tensor<8x8xf32> {
+ // expected-error @+1 {{expected operand rank (2) to match the result rank of indexing_map #2 (4)}}
+ %res = linalg.mmt4d
+ ins(%A, %B: tensor<16x16x8x1xf32>, tensor<16x16x8x1xf32>)
+ outs(%C_in: tensor<8x8xf32>)
+ -> tensor<8x8xf32>
+ return %res : tensor<8x8xf32>
+}
diff --git a/mlir/test/Dialect/Linalg/named-ops.mlir b/mlir/test/Dialect/Linalg/named-ops.mlir
index 29977a7..7064e1b 100644
--- a/mlir/test/Dialect/Linalg/named-ops.mlir
+++ b/mlir/test/Dialect/Linalg/named-ops.mlir
@@ -1219,6 +1219,17 @@ func.func @batchmatmul_transpose_b(%arg0: memref<2x3x5xf32>, %arg1: memref<2x7x5
// -----
+// CHECK-LABEL: func @mmt4d
+func.func @mmt4d(%A: tensor<10x32x8x1xf32>, %B: tensor<80x32x4x1xf32>, %C: tensor<10x80x8x4xf32>) -> tensor<10x80x8x4xf32> {
+ // CHECK: %{{.+}} = linalg.mmt4d
+ // CHECK-SAME: ins(%{{.+}}, %{{.+}} : tensor<10x32x8x1xf32>, tensor<80x32x4x1xf32>)
+ // CHECK-SAME: outs(%{{.+}} : tensor<10x80x8x4xf32>) -> tensor<10x80x8x4xf32>
+ %0 = linalg.mmt4d ins(%A, %B : tensor<10x32x8x1xf32>, tensor<80x32x4x1xf32>) outs(%C: tensor<10x80x8x4xf32>) -> tensor<10x80x8x4xf32>
+ return %0: tensor<10x80x8x4xf32>
+}
+
+// -----
+
// CHECK-LABEL: func @batch_mmt4d
func.func @batch_mmt4d(%arg0: tensor<128x10x32x8x1xf32>, %arg1: tensor<128x80x32x4x1xf32>, %arg2: tensor<128x10x80x8x4xf32>) -> tensor<128x10x80x8x4xf32> {
// CHECK: %{{.+}} = linalg.batch_mmt4d
diff --git a/mlir/test/Dialect/Linalg/transform-op-mmt4d-to-fma.mlir b/mlir/test/Dialect/Linalg/transform-op-mmt4d-to-fma.mlir
new file mode 100644
index 0000000..6aba2b3
--- /dev/null
+++ b/mlir/test/Dialect/Linalg/transform-op-mmt4d-to-fma.mlir
@@ -0,0 +1,69 @@
+// RUN: mlir-opt %s -transform-interpreter | FileCheck %s
+
+func.func @mmt4d_to_fma(%A: tensor<16x16x8x1xf32>, %B: tensor<16x16x8x1xf32>, %C_in: tensor<16x16x8x8xf32>) -> tensor<16x16x8x8xf32> {
+ %res = linalg.mmt4d
+ ins(%A, %B: tensor<16x16x8x1xf32>, tensor<16x16x8x1xf32>)
+ outs(%C_in: tensor<16x16x8x8xf32>)
+ -> tensor<16x16x8x8xf32>
+ return %res : tensor<16x16x8x8xf32>
+}
+
+
+// CHECK-LABEL: @mmt4d_to_fma
+// CHECK-COUNT-8: vector.fma
+
+module attributes {transform.with_named_sequence} {
+ transform.named_sequence @__transform_main(%module: !transform.any_op {transform.readonly}) {
+ %func = transform.structured.match ops{["func.func"]} in %module : (!transform.any_op) -> !transform.op<"func.func">
+
+ %mmt4d = transform.structured.match ops{["linalg.mmt4d"]} in %func : (!transform.op<"func.func">) -> !transform.any_op
+
+ // Step 1: Tile
+ // Tile parallel dims
+ %tiled_linalg_op_p, %loops:4 = transform.structured.tile_using_for %mmt4d[1, 1, 0, 8, 8, 0]
+ : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op, !transform.any_op, !transform.any_op)
+ // Tile reduction dims
+ %tiled_linalg_op_r, %loops2:2 = transform.structured.tile_using_for %tiled_linalg_op_p[0, 0, 1, 0, 0, 1]
+ : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op)
+
+ // Step 2: Vectorize
+ transform.structured.vectorize %tiled_linalg_op_r : !transform.any_op
+
+ // Step 3: Simplify
+ // vector.multi_reduction --> vector.contract
+ // Generates a 6-dim vector.contract with the dim matching the original MMT4D Op
+ // and with the following split into parallel and reduction dims:
+ // * parallel, parallel, reduction, parallel, parallel, reduction
+ transform.apply_patterns to %func {
+ transform.apply_patterns.vector.reduction_to_contract
+ // Reduce the rank of xfer ops. This transforms vector.contract to be
+ // more matmul-like and to enable the lowering to outer product Ops.
+ transform.apply_patterns.vector.transfer_permutation_patterns
+ } : !transform.op<"func.func">
+
+ // Hoisting and LICM - not strictly required
+ %func_h = transform.structured.hoist_redundant_vector_transfers %func
+ : (!transform.op<"func.func">) -> !transform.op<"func.func">
+ %all_loops = transform.structured.match interface{LoopLikeInterface} in %func_h
+ : (!transform.op<"func.func">) -> !transform.any_op
+ transform.apply_licm to %all_loops : !transform.any_op
+ transform.loop.hoist_loop_invariant_subsets %all_loops : !transform.any_op
+
+ // Simplify the 6-dim vector.contract into a 3-dim matmul-like
+ // vector.contract with the following split into parallel and reduction
+ // dims:
+ // * parallel, parallel, reduction
+ transform.apply_patterns to %func_h {
+ transform.apply_patterns.vector.reduction_to_contract
+ transform.apply_patterns.vector.cast_away_vector_leading_one_dim
+ transform.apply_patterns.canonicalization
+ } : !transform.op<"func.func">
+
+ // Step 4: Lower vector.contract to vector.fma via vector.outerproduct
+ transform.apply_patterns to %func_h {
+ transform.apply_patterns.vector.lower_contraction lowering_strategy = "outerproduct"
+ transform.apply_patterns.vector.lower_outerproduct
+ } : !transform.op<"func.func">
+ transform.yield
+ }
+}
diff --git a/mlir/test/Dialect/Linalg/vectorization-unsupported.mlir b/mlir/test/Dialect/Linalg/vectorization-unsupported.mlir
new file mode 100644
index 0000000..a1a5239
--- /dev/null
+++ b/mlir/test/Dialect/Linalg/vectorization-unsupported.mlir
@@ -0,0 +1,73 @@
+// RUN: mlir-opt %s -transform-interpreter -split-input-file -verify-diagnostics
+
+func.func @conv1d_nwc_wcf_dyn_ch_dim(%input: memref<4x6x?xf32>, %filter: memref<1x?x8xf32>, %output: memref<4x2x8xf32>) {
+ // expected-error @+1 {{Attempted to vectorize, but failed}}
+ linalg.conv_1d_nwc_wcf
+ {dilations = dense<1> : tensor<1xi64>, strides = dense<3> : tensor<1xi64>}
+ ins(%input, %filter : memref<4x6x?xf32>, memref<1x?x8xf32>)
+ outs(%output : memref<4x2x8xf32>)
+ return
+}
+
+module attributes {transform.with_named_sequence} {
+ transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+ %0 = transform.structured.match ops{["linalg.conv_1d_nwc_wcf"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+ transform.structured.vectorize %0 : !transform.any_op
+ transform.yield
+ }
+}
+
+// -----
+
+func.func @depthwise_conv1d_nwc_wc_dyn_ch_dim(%input: memref<3x5x?xf32>, %filter: memref<2x?xf32>, %output: memref<3x2x?xf32>) {
+ // expected-error @+1 {{Attempted to vectorize, but failed}}
+ linalg.depthwise_conv_1d_nwc_wc
+ {dilations = dense<2> : tensor<1xi64>, strides = dense<1> : tensor<1xi64>}
+ ins(%input, %filter : memref<3x5x?xf32>, memref<2x?xf32>)
+ outs(%output : memref<3x2x?xf32>)
+ return
+}
+
+module attributes {transform.with_named_sequence} {
+ transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+ %0 = transform.structured.match ops{["linalg.depthwise_conv_1d_nwc_wc"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+ transform.structured.vectorize %0 : !transform.any_op
+ transform.yield
+ }
+}
+
+// -----
+
+func.func @depthwise_conv1d_nwc_wc_dyn_w_dim(%input: memref<3x?x3xf32>, %filter: memref<2x3xf32>, %output: memref<3x?x3xf32>) {
+ // expected-error @+1 {{Attempted to vectorize, but failed}}
+ linalg.depthwise_conv_1d_nwc_wc
+ {dilations = dense<2> : tensor<1xi64>, strides = dense<1> : tensor<1xi64>}
+ ins(%input, %filter : memref<3x?x3xf32>, memref<2x3xf32>)
+ outs(%output : memref<3x?x3xf32>)
+ return
+}
+
+module attributes {transform.with_named_sequence} {
+ transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+ %0 = transform.structured.match ops{["linalg.depthwise_conv_1d_nwc_wc"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+ transform.structured.vectorize %0 : !transform.any_op
+ transform.yield
+ }
+}
+
+// -----
+
+func.func @conv1d_dyn_w_dim(%input: tensor<?xf32>, %filter: tensor<4xf32>, %output: tensor<?xf32>) -> tensor<?xf32> {
+ // expected-error @+1 {{Attempted to vectorize, but failed}}
+ %0 = linalg.conv_1d ins(%input, %filter : tensor<?xf32>, tensor<4xf32>)
+ outs(%output : tensor<?xf32>) -> tensor<?xf32>
+ return %0 : tensor<?xf32>
+}
+
+module attributes {transform.with_named_sequence} {
+ transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+ %0 = transform.structured.match ops{["linalg.conv_1d"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+ transform.structured.vectorize %0 : !transform.any_op
+ transform.yield
+ }
+}
diff --git a/mlir/test/Dialect/Linalg/vectorization.mlir b/mlir/test/Dialect/Linalg/vectorization.mlir
index 5d1bef4..0272ac5 100644
--- a/mlir/test/Dialect/Linalg/vectorization.mlir
+++ b/mlir/test/Dialect/Linalg/vectorization.mlir
@@ -639,6 +639,31 @@ module attributes {transform.with_named_sequence} {
// -----
+func.func @mmt4d(%A: memref<16x16x8x1xf32>, %B: memref<16x16x8x1xf32>, %C_in: memref<16x16x8x8xf32>) {
+ linalg.mmt4d ins(%A, %B: memref<16x16x8x1xf32>, memref<16x16x8x1xf32>)
+ outs(%C_in: memref<16x16x8x8xf32>)
+ return
+}
+
+// CHECK-LABEL: func.func @mmt4d(
+// CHECK-SAME: %[[A:.*]]: memref<16x16x8x1xf32>, %[[B:.*]]: memref<16x16x8x1xf32>, %[[C:.*]]: memref<16x16x8x8xf32>) {
+// CHECK: %[[VEC_A:.*]] = vector.transfer_read %[[A]]{{.*}} : memref<16x16x8x1xf32>, vector<16x16x16x8x8x1xf32>
+// CHECK: %[[VEC_B:.*]] = vector.transfer_read %[[B]]{{.*}} : memref<16x16x8x1xf32>, vector<16x16x16x8x8x1xf32>
+// CHECK: %[[VEC_C:.*]] = vector.transfer_read %[[C]]{{.*}} : memref<16x16x8x8xf32>, vector<16x16x8x8xf32>
+// CHECK: %[[MUL:.*]] = arith.mulf %[[VEC_A]], %[[VEC_B]] : vector<16x16x16x8x8x1xf32>
+// CHECK: %[[RED:.*]] = vector.multi_reduction <add>, %[[MUL]], %[[VEC_C]] [2, 5] : vector<16x16x16x8x8x1xf32> to vector<16x16x8x8xf32>
+// CHECK: vector.transfer_write %[[RED]], %[[C]]{{.*}} : vector<16x16x8x8xf32>, memref<16x16x8x8xf32>
+
+module attributes {transform.with_named_sequence} {
+ transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+ %mmt4d = transform.structured.match ops{["linalg.mmt4d"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+ transform.structured.vectorize %mmt4d : !transform.any_op
+ transform.yield
+ }
+}
+
+// -----
+
func.func @matmul_scalable(%A: memref<?x?xf32>, %B: memref<?x?xf32>, %C: memref<?x?xf32>) {
linalg.matmul ins(%A, %B: memref<?x?xf32>, memref<?x?xf32>)
outs(%C: memref<?x?xf32>)
diff --git a/mlir/test/Dialect/NVGPU/tmaload-transform.mlir b/mlir/test/Dialect/NVGPU/tmaload-transform.mlir
index 29e300a..40acd82 100644
--- a/mlir/test/Dialect/NVGPU/tmaload-transform.mlir
+++ b/mlir/test/Dialect/NVGPU/tmaload-transform.mlir
@@ -62,7 +62,7 @@ func.func @main() {
// CHECK: nvgpu.mbarrier.arrive.expect_tx %[[B]][%{{.*}}], %[[c0_7]] : <memorySpace = #gpu.address_space<workgroup>
// CHECK: }
//
- // CHECK: %[[c0_6:.*]] = arith.constant 0 : index
+ // CHECK: %[[c0_6:.*]] = llvm.mlir.constant(false) : i1
// CHECK: %[[c10000000:.*]] = arith.constant 10000000 : index
// CHECK: nvgpu.mbarrier.try_wait.parity %[[B]][%{{.*}}], %[[c0_6]], %[[c10000000]] : <memorySpace = #gpu.address_space<workgroup>
diff --git a/mlir/test/Dialect/OpenMP/invalid.mlir b/mlir/test/Dialect/OpenMP/invalid.mlir
index 812b79e..523a403 100644
--- a/mlir/test/Dialect/OpenMP/invalid.mlir
+++ b/mlir/test/Dialect/OpenMP/invalid.mlir
@@ -436,42 +436,13 @@ atomic {
// -----
-omp.reduction.declare @add_f32 : f32
-init {
-^bb0(%arg: f32):
- %0 = arith.constant 0.0 : f32
- omp.yield (%0 : f32)
-}
-combiner {
-^bb1(%arg0: f32, %arg1: f32):
- %1 = arith.addf %arg0, %arg1 : f32
- omp.yield (%1 : f32)
-}
-
-func.func @foo(%lb : index, %ub : index, %step : index) {
- %c1 = arith.constant 1 : i32
- %0 = llvm.alloca %c1 x i32 : (i32) -> !llvm.ptr
- %1 = llvm.alloca %c1 x i32 : (i32) -> !llvm.ptr
-
- omp.wsloop reduction(@add_f32 -> %0 : !llvm.ptr)
- for (%iv) : index = (%lb) to (%ub) step (%step) {
- %2 = arith.constant 2.0 : f32
- // expected-error @below {{accumulator is not used by the parent}}
- omp.reduction %2, %1 : f32, !llvm.ptr
- omp.yield
- }
- return
-}
-
-// -----
-
func.func @foo(%lb : index, %ub : index, %step : index) {
%c1 = arith.constant 1 : i32
%0 = llvm.alloca %c1 x i32 : (i32) -> !llvm.ptr
%1 = llvm.alloca %c1 x i32 : (i32) -> !llvm.ptr
// expected-error @below {{expected symbol reference @foo to point to a reduction declaration}}
- omp.wsloop reduction(@foo -> %0 : !llvm.ptr)
+ omp.wsloop reduction(@foo %0 -> %prv : !llvm.ptr)
for (%iv) : index = (%lb) to (%ub) step (%step) {
%2 = arith.constant 2.0 : f32
omp.reduction %2, %1 : f32, !llvm.ptr
@@ -499,7 +470,7 @@ func.func @foo(%lb : index, %ub : index, %step : index) {
%0 = llvm.alloca %c1 x i32 : (i32) -> !llvm.ptr
// expected-error @below {{accumulator variable used more than once}}
- omp.wsloop reduction(@add_f32 -> %0 : !llvm.ptr, @add_f32 -> %0 : !llvm.ptr)
+ omp.wsloop reduction(@add_f32 %0 -> %prv : !llvm.ptr, @add_f32 %0 -> %prv1 : !llvm.ptr)
for (%iv) : index = (%lb) to (%ub) step (%step) {
%2 = arith.constant 2.0 : f32
omp.reduction %2, %0 : f32, !llvm.ptr
@@ -532,7 +503,7 @@ func.func @foo(%lb : index, %ub : index, %step : index, %mem : memref<1xf32>) {
%c1 = arith.constant 1 : i32
// expected-error @below {{expected accumulator ('memref<1xf32>') to be the same type as reduction declaration ('!llvm.ptr')}}
- omp.wsloop reduction(@add_f32 -> %mem : memref<1xf32>)
+ omp.wsloop reduction(@add_f32 %mem -> %prv : memref<1xf32>)
for (%iv) : index = (%lb) to (%ub) step (%step) {
%2 = arith.constant 2.0 : f32
omp.reduction %2, %mem : f32, memref<1xf32>
@@ -1651,6 +1622,15 @@ func.func @omp_target_enter_data(%map1: memref<?xi32>) {
// -----
+func.func @omp_target_enter_data_depend(%a: memref<?xi32>) {
+ %0 = omp.map_info var_ptr(%a: memref<?xi32>, tensor<?xi32>) map_clauses(to) capture(ByRef) -> memref<?xi32>
+ // expected-error @below {{op expected as many depend values as depend variables}}
+ omp.target_enter_data map_entries(%0: memref<?xi32> ) {operandSegmentSizes = array<i32: 0, 0, 1, 0>}
+ return
+}
+
+// -----
+
func.func @omp_target_exit_data(%map1: memref<?xi32>) {
%mapv = omp.map_info var_ptr(%map1 : memref<?xi32>, tensor<?xi32>) map_clauses(to) capture(ByRef) -> memref<?xi32> {name = ""}
// expected-error @below {{from, release and delete map types are permitted}}
@@ -1660,6 +1640,15 @@ func.func @omp_target_exit_data(%map1: memref<?xi32>) {
// -----
+func.func @omp_target_exit_data_depend(%a: memref<?xi32>) {
+ %0 = omp.map_info var_ptr(%a: memref<?xi32>, tensor<?xi32>) map_clauses(from) capture(ByRef) -> memref<?xi32>
+ // expected-error @below {{op expected as many depend values as depend variables}}
+ omp.target_exit_data map_entries(%0: memref<?xi32> ) {operandSegmentSizes = array<i32: 0, 0, 1, 0>}
+ return
+}
+
+// -----
+
func.func @omp_target_update_invalid_motion_type(%map1 : memref<?xi32>) {
%mapv = omp.map_info var_ptr(%map1 : memref<?xi32>, tensor<?xi32>) map_clauses(exit_release_or_enter_alloc) capture(ByRef) -> memref<?xi32> {name = ""}
@@ -1732,9 +1721,91 @@ llvm.mlir.global internal @_QFsubEx() : i32
// -----
+func.func @omp_target_update_data_depend(%a: memref<?xi32>) {
+ %0 = omp.map_info var_ptr(%a: memref<?xi32>, tensor<?xi32>) map_clauses(to) capture(ByRef) -> memref<?xi32>
+ // expected-error @below {{op expected as many depend values as depend variables}}
+ omp.target_update_data motion_entries(%0: memref<?xi32> ) {operandSegmentSizes = array<i32: 0, 0, 1, 0>}
+ return
+}
+
+// -----
+
+func.func @omp_target_depend(%data_var: memref<i32>) {
+ // expected-error @below {{op expected as many depend values as depend variables}}
+ "omp.target"(%data_var) ({
+ "omp.terminator"() : () -> ()
+ }) {depends = [], operandSegmentSizes = array<i32: 0, 0, 0, 1, 0>} : (memref<i32>) -> ()
+ "func.return"() : () -> ()
+}
+
+// -----
+
func.func @omp_distribute(%data_var : memref<i32>) -> () {
// expected-error @below {{expected equal sizes for allocate and allocator variables}}
"omp.distribute"(%data_var) <{operandSegmentSizes = array<i32: 0, 1, 0>}> ({
"omp.terminator"() : () -> ()
}) : (memref<i32>) -> ()
}
+
+// -----
+
+omp.private {type = private} @x.privatizer : i32 alloc {
+^bb0(%arg0: i32):
+ %0 = arith.constant 0.0 : f32
+ // expected-error @below {{Invalid yielded value. Expected type: 'i32', got: 'f32'}}
+ omp.yield(%0 : f32)
+}
+
+// -----
+
+omp.private {type = private} @x.privatizer : i32 alloc {
+^bb0(%arg0: i32):
+ // expected-error @below {{Invalid yielded value. Expected type: 'i32', got: None}}
+ omp.yield
+}
+
+// -----
+
+omp.private {type = private} @x.privatizer : i32 alloc {
+^bb0(%arg0: i32):
+ // expected-error @below {{expected exit block terminator to be an `omp.yield` op.}}
+ omp.terminator
+}
+
+// -----
+
+// expected-error @below {{`alloc`: expected 1 region arguments, got: 2}}
+omp.private {type = private} @x.privatizer : f32 alloc {
+^bb0(%arg0: f32, %arg1: f32):
+ omp.yield(%arg0 : f32)
+}
+
+// -----
+
+// expected-error @below {{`copy`: expected 2 region arguments, got: 1}}
+omp.private {type = firstprivate} @x.privatizer : f32 alloc {
+^bb0(%arg0: f32):
+ omp.yield(%arg0 : f32)
+} copy {
+^bb0(%arg0: f32):
+ omp.yield(%arg0 : f32)
+}
+
+// -----
+
+// expected-error @below {{`private` clauses require only an `alloc` region.}}
+omp.private {type = private} @x.privatizer : f32 alloc {
+^bb0(%arg0: f32):
+ omp.yield(%arg0 : f32)
+} copy {
+^bb0(%arg0: f32, %arg1 : f32):
+ omp.yield(%arg0 : f32)
+}
+
+// -----
+
+// expected-error @below {{`firstprivate` clauses require both `alloc` and `copy` regions.}}
+omp.private {type = firstprivate} @x.privatizer : f32 alloc {
+^bb0(%arg0: f32):
+ omp.yield(%arg0 : f32)
+}
diff --git a/mlir/test/Dialect/OpenMP/ops.mlir b/mlir/test/Dialect/OpenMP/ops.mlir
index 6514059..99ca802 100644
--- a/mlir/test/Dialect/OpenMP/ops.mlir
+++ b/mlir/test/Dialect/OpenMP/ops.mlir
@@ -517,7 +517,7 @@ func.func @omp_target(%if_cond : i1, %device : si32, %num_threads : i32, %map1:
"omp.target"(%if_cond, %device, %num_threads) ({
// CHECK: omp.terminator
omp.terminator
- }) {nowait, operandSegmentSizes = array<i32: 1,1,1,0>} : ( i1, si32, i32 ) -> ()
+ }) {nowait, operandSegmentSizes = array<i32: 1,1,1,0,0>} : ( i1, si32, i32 ) -> ()
// Test with optional map clause.
// CHECK: %[[MAP_A:.*]] = omp.map_info var_ptr(%[[VAL_1:.*]] : memref<?xi32>, tensor<?xi32>) map_clauses(tofrom) capture(ByRef) -> memref<?xi32> {name = ""}
@@ -625,12 +625,17 @@ atomic {
func.func @wsloop_reduction(%lb : index, %ub : index, %step : index) {
%c1 = arith.constant 1 : i32
%0 = llvm.alloca %c1 x i32 : (i32) -> !llvm.ptr
- // CHECK: reduction(@add_f32 -> %{{.+}} : !llvm.ptr)
- omp.wsloop reduction(@add_f32 -> %0 : !llvm.ptr)
+ // CHECK: reduction(@add_f32 %{{.+}} -> %[[PRV:.+]] : !llvm.ptr)
+ omp.wsloop reduction(@add_f32 %0 -> %prv : !llvm.ptr)
for (%iv) : index = (%lb) to (%ub) step (%step) {
- %1 = arith.constant 2.0 : f32
- // CHECK: omp.reduction %{{.+}}, %{{.+}}
- omp.reduction %1, %0 : f32, !llvm.ptr
+ // CHECK: %[[CST:.+]] = arith.constant 2.0{{.*}} : f32
+ %cst = arith.constant 2.0 : f32
+ // CHECK: %[[LPRV:.+]] = llvm.load %[[PRV]] : !llvm.ptr -> f32
+ %lprv = llvm.load %prv : !llvm.ptr -> f32
+ // CHECK: %[[RES:.+]] = llvm.fadd %[[LPRV]], %[[CST]] : f32
+ %res = llvm.fadd %lprv, %cst: f32
+ // CHECK: llvm.store %[[RES]], %[[PRV]] : f32, !llvm.ptr
+ llvm.store %res, %prv : f32, !llvm.ptr
omp.yield
}
return
@@ -788,12 +793,15 @@ combiner {
// CHECK-LABEL: func @wsloop_reduction2
func.func @wsloop_reduction2(%lb : index, %ub : index, %step : index) {
%0 = memref.alloca() : memref<1xf32>
- // CHECK: omp.wsloop reduction(@add2_f32 -> %{{.+}} : memref<1xf32>)
- omp.wsloop reduction(@add2_f32 -> %0 : memref<1xf32>)
+ // CHECK: omp.wsloop reduction(@add2_f32 %{{.+}} -> %{{.+}} : memref<1xf32>)
+ omp.wsloop reduction(@add2_f32 %0 -> %prv : memref<1xf32>)
for (%iv) : index = (%lb) to (%ub) step (%step) {
%1 = arith.constant 2.0 : f32
- // CHECK: omp.reduction
- omp.reduction %1, %0 : f32, memref<1xf32>
+ %2 = arith.constant 0 : index
+ %3 = memref.load %prv[%2] : memref<1xf32>
+ // CHECK: llvm.fadd
+ %4 = llvm.fadd %1, %3 : f32
+ memref.store %4, %prv[%2] : memref<1xf32>
omp.yield
}
return
@@ -1717,6 +1725,18 @@ func.func @omp_task_depend(%arg0: memref<i32>, %arg1: memref<i32>) {
return
}
+
+// CHECK-LABEL: @omp_target_depend
+// CHECK-SAME: (%arg0: memref<i32>, %arg1: memref<i32>) {
+func.func @omp_target_depend(%arg0: memref<i32>, %arg1: memref<i32>) {
+ // CHECK: omp.target depend(taskdependin -> %arg0 : memref<i32>, taskdependin -> %arg1 : memref<i32>, taskdependinout -> %arg0 : memref<i32>) {
+ omp.target depend(taskdependin -> %arg0 : memref<i32>, taskdependin -> %arg1 : memref<i32>, taskdependinout -> %arg0 : memref<i32>) {
+ // CHECK: omp.terminator
+ omp.terminator
+ } {operandSegmentSizes = array<i32: 0,0,0,3,0>}
+ return
+}
+
func.func @omp_threadprivate() {
%0 = arith.constant 1 : i32
%1 = arith.constant 2 : i32
@@ -2145,3 +2165,52 @@ func.func @omp_targets_is_allocatable(%arg0: !llvm.ptr, %arg1: !llvm.ptr) -> ()
}
return
}
+
+// CHECK-LABEL: func @omp_target_enter_update_exit_data_depend
+// CHECK-SAME:([[ARG0:%.*]]: memref<?xi32>, [[ARG1:%.*]]: memref<?xi32>, [[ARG2:%.*]]: memref<?xi32>) {
+func.func @omp_target_enter_update_exit_data_depend(%a: memref<?xi32>, %b: memref<?xi32>, %c: memref<?xi32>) {
+// CHECK-NEXT: [[MAP0:%.*]] = omp.map_info
+// CHECK-NEXT: [[MAP1:%.*]] = omp.map_info
+// CHECK-NEXT: [[MAP2:%.*]] = omp.map_info
+ %map_a = omp.map_info var_ptr(%a: memref<?xi32>, tensor<?xi32>) map_clauses(to) capture(ByRef) -> memref<?xi32>
+ %map_b = omp.map_info var_ptr(%b: memref<?xi32>, tensor<?xi32>) map_clauses(from) capture(ByRef) -> memref<?xi32>
+ %map_c = omp.map_info var_ptr(%c: memref<?xi32>, tensor<?xi32>) map_clauses(exit_release_or_enter_alloc) capture(ByRef) -> memref<?xi32>
+
+ // Do some work on the host that writes to 'a'
+ omp.task depend(taskdependout -> %a : memref<?xi32>) {
+ "test.foo"(%a) : (memref<?xi32>) -> ()
+ omp.terminator
+ }
+
+ // Then map that over to the target
+ // CHECK: omp.target_enter_data nowait map_entries([[MAP0]], [[MAP2]] : memref<?xi32>, memref<?xi32>) depend(taskdependin -> [[ARG0]] : memref<?xi32>)
+ omp.target_enter_data nowait map_entries(%map_a, %map_c: memref<?xi32>, memref<?xi32>) depend(taskdependin -> %a: memref<?xi32>)
+
+ // Compute 'b' on the target and copy it back
+ // CHECK: omp.target map_entries([[MAP1]] -> {{%.*}} : memref<?xi32>) {
+ omp.target map_entries(%map_b -> %arg0 : memref<?xi32>) {
+ ^bb0(%arg0: memref<?xi32>) :
+ "test.foo"(%arg0) : (memref<?xi32>) -> ()
+ omp.terminator
+ }
+
+ // Update 'a' on the host using 'b'
+ omp.task depend(taskdependout -> %a: memref<?xi32>){
+ "test.bar"(%a, %b) : (memref<?xi32>, memref<?xi32>) -> ()
+ }
+
+ // Copy the updated 'a' onto the target
+ // CHECK: omp.target_update_data nowait motion_entries([[MAP0]] : memref<?xi32>) depend(taskdependin -> [[ARG0]] : memref<?xi32>)
+ omp.target_update_data motion_entries(%map_a : memref<?xi32>) depend(taskdependin -> %a : memref<?xi32>) nowait
+
+ // Compute 'c' on the target and copy it back
+ %map_c_from = omp.map_info var_ptr(%c: memref<?xi32>, tensor<?xi32>) map_clauses(from) capture(ByRef) -> memref<?xi32>
+ omp.target map_entries(%map_a -> %arg0, %map_c_from -> %arg1 : memref<?xi32>, memref<?xi32>) depend(taskdependout -> %c : memref<?xi32>) {
+ ^bb0(%arg0 : memref<?xi32>, %arg1 : memref<?xi32>) :
+ "test.foobar"() : ()->()
+ omp.terminator
+ }
+ // CHECK: omp.target_exit_data map_entries([[MAP2]] : memref<?xi32>) depend(taskdependin -> [[ARG2]] : memref<?xi32>)
+ omp.target_exit_data map_entries(%map_c : memref<?xi32>) depend(taskdependin -> %c : memref<?xi32>)
+ return
+}
diff --git a/mlir/test/Dialect/OpenMP/roundtrip.mlir b/mlir/test/Dialect/OpenMP/roundtrip.mlir
new file mode 100644
index 0000000..2553442
--- /dev/null
+++ b/mlir/test/Dialect/OpenMP/roundtrip.mlir
@@ -0,0 +1,21 @@
+// RUN: mlir-opt -verify-diagnostics %s | mlir-opt | FileCheck %s
+
+// CHECK: omp.private {type = private} @x.privatizer : !llvm.ptr alloc {
+omp.private {type = private} @x.privatizer : !llvm.ptr alloc {
+// CHECK: ^bb0(%arg0: {{.*}}):
+^bb0(%arg0: !llvm.ptr):
+ omp.yield(%arg0 : !llvm.ptr)
+}
+
+// CHECK: omp.private {type = firstprivate} @y.privatizer : !llvm.ptr alloc {
+omp.private {type = firstprivate} @y.privatizer : !llvm.ptr alloc {
+// CHECK: ^bb0(%arg0: {{.*}}):
+^bb0(%arg0: !llvm.ptr):
+ omp.yield(%arg0 : !llvm.ptr)
+// CHECK: } copy {
+} copy {
+// CHECK: ^bb0(%arg0: {{.*}}, %arg1: {{.*}}):
+^bb0(%arg0: !llvm.ptr, %arg1: !llvm.ptr):
+ omp.yield(%arg0 : !llvm.ptr)
+}
+
diff --git a/mlir/test/Dialect/Tensor/canonicalize.mlir b/mlir/test/Dialect/Tensor/canonicalize.mlir
index 90c715b..3b6cd79 100644
--- a/mlir/test/Dialect/Tensor/canonicalize.mlir
+++ b/mlir/test/Dialect/Tensor/canonicalize.mlir
@@ -809,6 +809,45 @@ func.func @fold_padding_value_pack(%arg0: tensor<1200x500000xf32>) -> tensor<312
// -----
+func.func @infer_src_shape_pack(%src: tensor<?x?x?x?xf32>, %dest: tensor<10x20x30x40x16xf32>) -> tensor<10x20x30x40x16xf32> {
+ %cst = arith.constant 0.000000e+00 : f32
+ %pack = tensor.pack %src
+ padding_value(%cst : f32)
+ outer_dims_perm = [2, 1, 3, 0]
+ inner_dims_pos = [2]
+ inner_tiles = [16]
+ into %dest : tensor<?x?x?x?xf32> -> tensor<10x20x30x40x16xf32>
+ return %pack : tensor<10x20x30x40x16xf32>
+}
+// CHECK-LABEL: func.func @infer_src_shape_pack
+// CHECK-SAME: %[[SRC:[0-9a-zA-Z]+]]
+// CHECK-SAME: %[[DEST:[0-9a-zA-Z]+]]
+// CHECK: %[[CAST_SRC:.+]] = tensor.cast %[[SRC]] : tensor<?x?x?x?xf32> to tensor<30x20x?x10xf32>
+// CHECK: %[[PACK:.+]] = tensor.pack %[[CAST_SRC]] {{.+}} into %[[DEST]]
+// CHECK: return %[[PACK]]
+
+// -----
+
+func.func @infer_dest_shape_pack(%src: tensor<30x20x?x10xf32>, %dest: tensor<?x?x?x?x16xf32>) -> tensor<?x?x?x?x16xf32> {
+ %cst = arith.constant 0.000000e+00 : f32
+ %pack = tensor.pack %src
+ padding_value(%cst : f32)
+ outer_dims_perm = [2, 1, 3, 0]
+ inner_dims_pos = [2]
+ inner_tiles = [16]
+ into %dest : tensor<30x20x?x10xf32> -> tensor<?x?x?x?x16xf32>
+ return %pack : tensor<?x?x?x?x16xf32>
+}
+// CHECK-LABEL: func.func @infer_dest_shape_pack
+// CHECK-SAME: %[[SRC:[0-9a-zA-Z]+]]
+// CHECK-SAME: %[[DEST:[0-9a-zA-Z]+]]
+// CHECK: %[[CAST_DEST:.+]] = tensor.cast %[[DEST]] : tensor<?x?x?x?x16xf32> to tensor<10x20x30x?x16xf32>
+// CHECK: %[[PACK:.+]] = tensor.pack %[[SRC]] {{.+}} into %[[CAST_DEST]]
+// CHECK: %[[CAST_PACK:.+]] = tensor.cast %[[PACK]] : tensor<10x20x30x?x16xf32> to tensor<?x?x?x?x16xf32>
+// CHECK: return %[[CAST_PACK]]
+
+// -----
+
func.func @fold_padding_value_pack_negative1(%arg0: tensor<1200x499999xf32>) -> tensor<31250x1200x16x1xf32> {
%cst = arith.constant 0.000000e+00 : f32
%0 = tensor.empty() : tensor<31250x1200x16x1xf32>
diff --git a/mlir/test/Dialect/Vector/linearize.mlir b/mlir/test/Dialect/Vector/linearize.mlir
new file mode 100644
index 0000000..85e2310
--- /dev/null
+++ b/mlir/test/Dialect/Vector/linearize.mlir
@@ -0,0 +1,19 @@
+// RUN: mlir-opt %s -split-input-file -test-vector-linearize | FileCheck %s
+
+// CHECK-LABEL: test_linearize
+// CHECK-SAME: (%[[ORIG_ARG:.*]]: vector<2x2xf32>)
+// CHECK: %[[ARG:.*]] = vector.shape_cast %[[ORIG_ARG]] : vector<2x2xf32> to vector<4xf32>
+func.func @test_linearize(%arg0: vector<2x2xf32>) -> vector<2x2xf32> {
+// CHECK: %[[C1:.*]] = arith.constant dense<[1.000000e+00, 2.000000e+00, 3.000000e+00, 4.000000e+00]> : vector<4xf32>
+ %0 = arith.constant dense<[[1.0, 2.0], [3.0, 4.0]]> : vector<2x2xf32>
+// CHECK: %[[RES:.*]] = vector.shape_cast %[[C1]] : vector<4xf32> to vector<2x2xf32>
+
+// Arith and math ops are handled in generic way, check some of them
+// CHECK: %{{.*}} = math.sin %[[ARG]] : vector<4xf32>
+ %1 = math.sin %arg0 : vector<2x2xf32>
+// CHECK: %{{.*}} = arith.addf %[[ARG]], %[[C1]] : vector<4xf32>
+ %2 = arith.addf %arg0, %0 : vector<2x2xf32>
+
+// CHECK: return %[[RES]] : vector<2x2xf32>
+ return %0 : vector<2x2xf32>
+}
diff --git a/mlir/test/Dialect/Vector/vector-transfer-collapse-inner-most-dims.mlir b/mlir/test/Dialect/Vector/vector-transfer-collapse-inner-most-dims.mlir
index 750879d..3984f17 100644
--- a/mlir/test/Dialect/Vector/vector-transfer-collapse-inner-most-dims.mlir
+++ b/mlir/test/Dialect/Vector/vector-transfer-collapse-inner-most-dims.mlir
@@ -16,6 +16,25 @@ func.func @contiguous_inner_most_view(%in: memref<1x1x8x1xf32, strided<[3072, 8,
// -----
+func.func @contiguous_outer_dyn_inner_most_view(%in: memref<?x1x8x1xf32, strided<[3072, 8, 1, 1], offset: ?>>) -> vector<1x8x1xf32>{
+ %c0 = arith.constant 0 : index
+ %cst = arith.constant 0.0 : f32
+ %0 = vector.transfer_read %in[%c0, %c0, %c0, %c0], %cst {in_bounds = [true, true, true]} : memref<?x1x8x1xf32, strided<[3072, 8, 1, 1], offset: ?>>, vector<1x8x1xf32>
+ return %0 : vector<1x8x1xf32>
+}
+// CHECK: func @contiguous_outer_dyn_inner_most_view(
+// CHECK-SAME: %[[SRC:[a-zA-Z0-9]+]]
+// CHECK-DAG: %[[C0:.+]] = arith.constant 0 : index
+// CHECK-DAG: %[[D0:.+]] = memref.dim %[[SRC]], %[[C0]]
+// CHECK: %[[SRC_0:.+]] = memref.subview %[[SRC]][0, 0, 0, 0] [%[[D0]], 1, 8, 1] [1, 1, 1, 1]
+// CHECK-SAME: memref<?x1x8x1xf32, strided<[3072, 8, 1, 1], offset: ?>> to memref<?x1x8xf32, strided<[3072, 8, 1], offset: ?>>
+// CHECK: %[[VEC:.+]] = vector.transfer_read %[[SRC_0]]
+// CHECK-SAME: memref<?x1x8xf32, strided<[3072, 8, 1], offset: ?>>, vector<1x8xf32>
+// CHECK: %[[RESULT:.+]] = vector.shape_cast %[[VEC]]
+// CHECK: return %[[RESULT]]
+
+// -----
+
func.func @contiguous_inner_most_dim(%A: memref<16x1xf32>, %i:index, %j:index) -> (vector<8x1xf32>) {
%c0 = arith.constant 0 : index
%f0 = arith.constant 0.0 : f32
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_pack_d.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_pack_d.mlir
new file mode 100755
index 0000000..c818c23
--- /dev/null
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_pack_d.mlir
@@ -0,0 +1,134 @@
+//--------------------------------------------------------------------------------------------------
+// WHEN CREATING A NEW TEST, PLEASE JUST COPY & PASTE WITHOUT EDITS.
+//
+// Set-up that's shared across all tests in this directory. In principle, this
+// config could be moved to lit.local.cfg. However, there are downstream users that
+// do not use these LIT config files. Hence why this is kept inline.
+//
+// DEFINE: %{sparsifier_opts} = enable-runtime-library=true
+// DEFINE: %{sparsifier_opts_sve} = enable-arm-sve=true %{sparsifier_opts}
+// DEFINE: %{compile} = mlir-opt %s --sparsifier="%{sparsifier_opts}"
+// DEFINE: %{compile_sve} = mlir-opt %s --sparsifier="%{sparsifier_opts_sve}"
+// DEFINE: %{run_libs} = -shared-libs=%mlir_c_runner_utils,%mlir_runner_utils
+// DEFINE: %{run_opts} = -e entry -entry-point-result=void
+// DEFINE: %{run} = mlir-cpu-runner %{run_opts} %{run_libs}
+// DEFINE: %{run_sve} = %mcr_aarch64_cmd --march=aarch64 --mattr="+sve" %{run_opts} %{run_libs}
+//
+// DEFINE: %{env} =
+//--------------------------------------------------------------------------------------------------
+
+// RUN: %{compile} | %{run} | FileCheck %s
+//
+// Do the same run, but now with direct IR generation.
+// REDEFINE: %{sparsifier_opts} = enable-runtime-library=false
+// RUN: %{compile} | %{run} | FileCheck %s
+
+#CCC = #sparse_tensor.encoding<{
+ map = (d0, d1, d2) -> (d0 : compressed, d1 : compressed, d2 : compressed),
+ posWidth = 64,
+ crdWidth = 32
+}>
+
+#BatchedCSR = #sparse_tensor.encoding<{
+ map = (d0, d1, d2) -> (d0 : dense, d1 : dense, d2 : compressed),
+ posWidth = 64,
+ crdWidth = 32
+}>
+
+#CSRDense = #sparse_tensor.encoding<{
+ map = (d0, d1, d2) -> (d0 : dense, d1 : compressed, d2 : dense),
+ posWidth = 64,
+ crdWidth = 32
+}>
+
+//
+// Test assembly operation with CCC, batched-CSR and CSR-dense.
+//
+module {
+ //
+ // Main driver.
+ //
+ func.func @entry() {
+ %c0 = arith.constant 0 : index
+ %f0 = arith.constant 0.0 : f32
+
+ //
+ // Setup CCC.
+ //
+
+ %data0 = arith.constant dense<
+ [ 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0 ]> : tensor<8xf32>
+ %pos00 = arith.constant dense<
+ [ 0, 3 ]> : tensor<2xi64>
+ %crd00 = arith.constant dense<
+ [ 0, 2, 3 ]> : tensor<3xi32>
+ %pos01 = arith.constant dense<
+ [ 0, 2, 4, 5 ]> : tensor<4xi64>
+ %crd01 = arith.constant dense<
+ [ 0, 1, 1, 2, 1 ]> : tensor<5xi32>
+ %pos02 = arith.constant dense<
+ [ 0, 2, 4, 5, 7, 8 ]> : tensor<6xi64>
+ %crd02 = arith.constant dense<
+ [ 0, 1, 0, 1, 0, 0, 1, 0 ]> : tensor<8xi32>
+
+ %s0 = sparse_tensor.assemble %data0, %pos00, %crd00, %pos01, %crd01, %pos02, %crd02 :
+ tensor<8xf32>,
+ tensor<2xi64>, tensor<3xi32>,
+ tensor<4xi64>, tensor<5xi32>,
+ tensor<6xi64>, tensor<8xi32> to tensor<4x3x2xf32, #CCC>
+
+ //
+ // Setup BatchedCSR.
+ //
+
+ %data1 = arith.constant dense<
+ [ 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0,
+ 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0 ]> : tensor<16xf32>
+ %pos1 = arith.constant dense<
+ [ 0, 2, 3, 4, 6, 6, 7, 9, 11, 13, 14, 15, 16 ]> : tensor<13xi64>
+ %crd1 = arith.constant dense<
+ [ 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1]> : tensor<16xi32>
+
+ %s1 = sparse_tensor.assemble %data1, %pos1, %crd1 : tensor<16xf32>, tensor<13xi64>, tensor<16xi32> to tensor<4x3x2xf32, #BatchedCSR>
+
+ //
+ // Setup CSRDense.
+ //
+
+ %data2 = arith.constant dense<
+ [ 1.0, 2.0, 0.0, 3.0, 4.0, 0.0, 5.0, 6.0, 0.0, 7.0, 8.0,
+ 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 0.0, 0.0, 15.0, 0.0, 16.0 ]> : tensor<22xf32>
+ %pos2 = arith.constant dense<
+ [ 0, 3, 5, 8, 11 ]> : tensor<5xi64>
+ %crd2 = arith.constant dense<
+ [ 0, 1, 2, 0, 2, 0, 1, 2, 0, 1, 2 ]> : tensor<11xi32>
+
+ %s2 = sparse_tensor.assemble %data2, %pos2, %crd2 : tensor<22xf32>, tensor<5xi64>, tensor<11xi32> to tensor<4x3x2xf32, #CSRDense>
+
+ //
+ // Verify.
+ //
+ // CHECK: ( ( ( 1, 2 ), ( 3, 4 ), ( 0, 0 ) ), ( ( 0, 0 ), ( 0, 0 ), ( 0, 0 ) ), ( ( 0, 0 ), ( 5, 0 ), ( 6, 7 ) ), ( ( 0, 0 ), ( 8, 0 ), ( 0, 0 ) ) )
+ // CHECK: ( ( ( 1, 2 ), ( 0, 3 ), ( 4, 0 ) ), ( ( 5, 6 ), ( 0, 0 ), ( 0, 7 ) ), ( ( 8, 9 ), ( 10, 11 ), ( 12, 13 ) ), ( ( 14, 0 ), ( 0, 15 ), ( 0, 16 ) ) )
+ // CHECK: ( ( ( 1, 2 ), ( 0, 3 ), ( 4, 0 ) ), ( ( 5, 6 ), ( 0, 0 ), ( 0, 7 ) ), ( ( 8, 9 ), ( 10, 11 ), ( 12, 13 ) ), ( ( 14, 0 ), ( 0, 15 ), ( 0, 16 ) ) )
+ //
+
+ %d0 = sparse_tensor.convert %s0 : tensor<4x3x2xf32, #CCC> to tensor<4x3x2xf32>
+ %v0 = vector.transfer_read %d0[%c0, %c0, %c0], %f0 : tensor<4x3x2xf32>, vector<4x3x2xf32>
+ vector.print %v0 : vector<4x3x2xf32>
+
+ %d1 = sparse_tensor.convert %s1 : tensor<4x3x2xf32, #BatchedCSR> to tensor<4x3x2xf32>
+ %v1 = vector.transfer_read %d1[%c0, %c0, %c0], %f0 : tensor<4x3x2xf32>, vector<4x3x2xf32>
+ vector.print %v1 : vector<4x3x2xf32>
+
+ %d2 = sparse_tensor.convert %s2 : tensor<4x3x2xf32, #CSRDense> to tensor<4x3x2xf32>
+ %v2 = vector.transfer_read %d1[%c0, %c0, %c0], %f0 : tensor<4x3x2xf32>, vector<4x3x2xf32>
+ vector.print %v2 : vector<4x3x2xf32>
+
+ // FIXME: doing this explicitly crashes runtime
+ // bufferization.dealloc_tensor %s0 : tensor<4x3x2xf32, #CCC>
+ // bufferization.dealloc_tensor %s1 : tensor<4x3x2xf32, #BatchedCSR>
+ // bufferization.dealloc_tensor %s2 : tensor<4x3x2xf32, #CSRDense>
+ return
+ }
+}
diff --git a/mlir/test/Integration/Dialect/SparseTensor/python/test_SDDMM.py b/mlir/test/Integration/Dialect/SparseTensor/python/test_SDDMM.py
index 199777c..e2050b9 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/python/test_SDDMM.py
+++ b/mlir/test/Integration/Dialect/SparseTensor/python/test_SDDMM.py
@@ -139,12 +139,15 @@ def main():
# search the full state space to reduce runtime of the test. It is
# straightforward to adapt the code below to explore more combinations.
# For these simple orderings, dim2lvl and lvl2dim are the same.
+ builder = st.EncodingAttr.build_level_type
+ fmt = st.LevelFormat
+ prop = st.LevelProperty
levels = [
- [st.LevelType.compressed_nu, st.LevelType.singleton],
- [st.LevelType.dense, st.LevelType.dense],
- [st.LevelType.dense, st.LevelType.compressed],
- [st.LevelType.compressed, st.LevelType.dense],
- [st.LevelType.compressed, st.LevelType.compressed],
+ [builder(fmt.compressed, [prop.non_unique]), builder(fmt.singleton)],
+ [builder(fmt.dense), builder(fmt.dense)],
+ [builder(fmt.dense), builder(fmt.compressed)],
+ [builder(fmt.compressed), builder(fmt.dense)],
+ [builder(fmt.compressed), builder(fmt.compressed)],
]
orderings = [
ir.AffineMap.get_permutation([0, 1]),
diff --git a/mlir/test/Integration/Dialect/SparseTensor/python/test_SpMM.py b/mlir/test/Integration/Dialect/SparseTensor/python/test_SpMM.py
index 0aa4f92..e7354c2 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/python/test_SpMM.py
+++ b/mlir/test/Integration/Dialect/SparseTensor/python/test_SpMM.py
@@ -125,12 +125,15 @@ def main():
vl = 1
e = False
opt = f"parallelization-strategy=none"
+ builder = st.EncodingAttr.build_level_type
+ fmt = st.LevelFormat
+ prop = st.LevelProperty
levels = [
- [st.LevelType.compressed_nu, st.LevelType.singleton],
- [st.LevelType.dense, st.LevelType.dense],
- [st.LevelType.dense, st.LevelType.compressed],
- [st.LevelType.compressed, st.LevelType.dense],
- [st.LevelType.compressed, st.LevelType.compressed],
+ [builder(fmt.compressed, [prop.non_unique]), builder(fmt.singleton)],
+ [builder(fmt.dense), builder(fmt.dense)],
+ [builder(fmt.dense), builder(fmt.compressed)],
+ [builder(fmt.compressed), builder(fmt.dense)],
+ [builder(fmt.compressed), builder(fmt.compressed)],
]
orderings = [
ir.AffineMap.get_permutation([0, 1]),
diff --git a/mlir/test/Integration/Dialect/SparseTensor/python/test_output.py b/mlir/test/Integration/Dialect/SparseTensor/python/test_output.py
index d994e8d..7da0530 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/python/test_output.py
+++ b/mlir/test/Integration/Dialect/SparseTensor/python/test_output.py
@@ -124,11 +124,14 @@ def main():
# Loop over various sparse types (COO, CSR, DCSR, CSC, DCSC) with
# regular and loose compression and various metadata bitwidths.
# For these simple orderings, dim2lvl and lvl2dim are the same.
+ builder = st.EncodingAttr.build_level_type
+ fmt = st.LevelFormat
+ prop = st.LevelProperty
levels = [
- [st.LevelType.compressed_nu, st.LevelType.singleton],
- [st.LevelType.dense, st.LevelType.compressed],
- [st.LevelType.dense, st.LevelType.loose_compressed],
- [st.LevelType.compressed, st.LevelType.compressed],
+ [builder(fmt.compressed, [prop.non_unique]), builder(fmt.singleton)],
+ [builder(fmt.dense), builder(fmt.compressed)],
+ [builder(fmt.dense), builder(fmt.loose_compressed)],
+ [builder(fmt.compressed), builder(fmt.compressed)],
]
orderings = [
(ir.AffineMap.get_permutation([0, 1]), 0),
@@ -149,10 +152,10 @@ def main():
# Now do the same for BSR.
level = [
- st.LevelType.dense,
- st.LevelType.compressed,
- st.LevelType.dense,
- st.LevelType.dense,
+ builder(fmt.dense),
+ builder(fmt.compressed),
+ builder(fmt.dense),
+ builder(fmt.dense),
]
d0 = ir.AffineDimExpr.get(0)
d1 = ir.AffineDimExpr.get(1)
diff --git a/mlir/test/Integration/Dialect/SparseTensor/python/test_stress.py b/mlir/test/Integration/Dialect/SparseTensor/python/test_stress.py
index 2b79c14..ce3516e 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/python/test_stress.py
+++ b/mlir/test/Integration/Dialect/SparseTensor/python/test_stress.py
@@ -203,10 +203,10 @@ def main():
shape = range(2, 3)
rank = len(shape)
# All combinations.
+ dense_lvl = st.EncodingAttr.build_level_type(st.LevelFormat.dense)
+ sparse_lvl = st.EncodingAttr.build_level_type(st.LevelFormat.compressed)
levels = list(
- itertools.product(
- *itertools.repeat([st.LevelType.dense, st.LevelType.compressed], rank)
- )
+ itertools.product(*itertools.repeat([dense_lvl, sparse_lvl], rank))
)
# All permutations.
orderings = list(
diff --git a/mlir/test/Integration/GPU/CUDA/sm90/gemm_f32_f16_f16_128x128x128.mlir b/mlir/test/Integration/GPU/CUDA/sm90/gemm_f32_f16_f16_128x128x128.mlir
index 35ca0ee..51bcf45 100644
--- a/mlir/test/Integration/GPU/CUDA/sm90/gemm_f32_f16_f16_128x128x128.mlir
+++ b/mlir/test/Integration/GPU/CUDA/sm90/gemm_f32_f16_f16_128x128x128.mlir
@@ -197,7 +197,8 @@ func.func @main() {
{
%ticks = arith.constant 10000000 : index
// TMA wait
- nvgpu.mbarrier.try_wait.parity %barrier[%i], %c0, %ticks : !barrierType
+ %phase_c0 = arith.constant 0 : i1
+ nvgpu.mbarrier.try_wait.parity %barrier[%i], %phase_c0, %ticks : !barrierType
%lhsSlice = memref.subview %lhsShmem [%i, 0, 0][1, 128, 64][1, 1, 1] : memref<2x128x64xf16, 3> to memref<128x64xf16, strided<[64, 1], offset: ?>, 3>
%rhsSlice = memref.subview %rhsShmem [%i, 0, 0][1, 64, 128][1, 1, 1] : memref<2x64x128xf16, strided<[8192, 128, 1], offset: 16384>, 3> to memref<64x128xf16, strided<[128, 1], offset: ?>, 3>
// Descriptor WGMMA
diff --git a/mlir/test/Integration/GPU/CUDA/sm90/gemm_pred_f32_f16_f16_128x128x128.mlir b/mlir/test/Integration/GPU/CUDA/sm90/gemm_pred_f32_f16_f16_128x128x128.mlir
index 5a10bbb..85bdb38 100644
--- a/mlir/test/Integration/GPU/CUDA/sm90/gemm_pred_f32_f16_f16_128x128x128.mlir
+++ b/mlir/test/Integration/GPU/CUDA/sm90/gemm_pred_f32_f16_f16_128x128x128.mlir
@@ -206,7 +206,8 @@ func.func @main() {
{
%ticks = arith.constant 10000000 : index
// TMA wait
- nvgpu.mbarrier.try_wait.parity %barrier[%i], %c0, %ticks : !barrierType
+ %phase_c0 = arith.constant 0 : i1
+ nvgpu.mbarrier.try_wait.parity %barrier[%i], %phase_c0, %ticks : !barrierType
%lhsSlice = memref.subview %lhsShmem [%i, 0, 0][1, 128, 64][1, 1, 1] : memref<2x128x64xf16, 3> to memref<128x64xf16, strided<[64, 1], offset: ?>, 3>
%rhsSlice = memref.subview %rhsShmem [%i, 0, 0][1, 64, 128][1, 1, 1] : memref<2x64x128xf16, strided<[8192, 128, 1], offset: 16384>, 3> to memref<64x128xf16, strided<[128, 1], offset: ?>, 3>
// Descriptor WGMMA
diff --git a/mlir/test/Integration/GPU/CUDA/sm90/tma_load_128x64_swizzle128b.mlir b/mlir/test/Integration/GPU/CUDA/sm90/tma_load_128x64_swizzle128b.mlir
index 9c5aacf..b50772f 100644
--- a/mlir/test/Integration/GPU/CUDA/sm90/tma_load_128x64_swizzle128b.mlir
+++ b/mlir/test/Integration/GPU/CUDA/sm90/tma_load_128x64_swizzle128b.mlir
@@ -93,7 +93,8 @@ module @mymod {
}
// Step 8. Wait until TMA is done
- nvgpu.mbarrier.try_wait.parity %9[%c0], %c0, %c10000000 : !barrierType
+ %phase_c0 = arith.constant 0 : i1
+ nvgpu.mbarrier.try_wait.parity %9[%c0], %phase_c0, %c10000000 : !barrierType
// Step 9. Print loaded data in 128b swizzled
scf.if %10 {
diff --git a/mlir/test/Integration/GPU/CUDA/sm90/tma_load_64x64_swizzle128b.mlir b/mlir/test/Integration/GPU/CUDA/sm90/tma_load_64x64_swizzle128b.mlir
index 536e71d..65e5fc0 100644
--- a/mlir/test/Integration/GPU/CUDA/sm90/tma_load_64x64_swizzle128b.mlir
+++ b/mlir/test/Integration/GPU/CUDA/sm90/tma_load_64x64_swizzle128b.mlir
@@ -119,7 +119,8 @@ module @mymod {
}
// Step 7. Wait until TMA is done
- nvgpu.mbarrier.try_wait.parity %9[%c0], %c0, %c10000000 : !barrierType
+ %phase_c0 = arith.constant 0 : i1
+ nvgpu.mbarrier.try_wait.parity %9[%c0], %phase_c0, %c10000000 : !barrierType
// Step 8. Print loaded data in 128b swizzled
scf.if %10 {
diff --git a/mlir/test/Integration/GPU/CUDA/sm90/tma_load_64x8_8x128_noswizzle.mlir b/mlir/test/Integration/GPU/CUDA/sm90/tma_load_64x8_8x128_noswizzle.mlir
index aee265e..2e59b72 100644
--- a/mlir/test/Integration/GPU/CUDA/sm90/tma_load_64x8_8x128_noswizzle.mlir
+++ b/mlir/test/Integration/GPU/CUDA/sm90/tma_load_64x8_8x128_noswizzle.mlir
@@ -96,7 +96,8 @@ module @mymod {
} else {
nvgpu.mbarrier.arrive.expect_tx %9[%c0], %c0 : <memorySpace = #gpu.address_space<workgroup>>
}
- nvgpu.mbarrier.try_wait.parity %9[%c0], %c0, %c10000000 : <memorySpace = #gpu.address_space<workgroup>>
+ %phase_c0 = arith.constant 0 : i1
+ nvgpu.mbarrier.try_wait.parity %9[%c0], %phase_c0, %c10000000 : <memorySpace = #gpu.address_space<workgroup>>
scf.if %10 {
%11 = memref.load %7[%c45, %c7] : memref<64x8xf32, 3>
%12 = memref.load %8[%c7, %c0] : memref<8x128xf32, 3>
diff --git a/mlir/test/Target/LLVMIR/nvvmir.mlir b/mlir/test/Target/LLVMIR/nvvmir.mlir
index 3a6a454..a8ae4d9 100644
--- a/mlir/test/Target/LLVMIR/nvvmir.mlir
+++ b/mlir/test/Target/LLVMIR/nvvmir.mlir
@@ -80,6 +80,18 @@ llvm.func @llvm_nvvm_barrier0() {
llvm.return
}
+// CHECK-LABEL: @llvm_nvvm_barrier(
+// CHECK-SAME: i32 %[[barId:.*]], i32 %[[numThreads:.*]])
+llvm.func @llvm_nvvm_barrier(%barID : i32, %numberOfThreads : i32) {
+ // CHECK: call void @llvm.nvvm.barrier0()
+ nvvm.barrier
+ // CHECK: call void @llvm.nvvm.barrier.n(i32 %[[barId]])
+ nvvm.barrier id = %barID
+ // CHECK: call void @llvm.nvvm.barrier(i32 %[[barId]], i32 %[[numThreads]])
+ nvvm.barrier id = %barID number_of_threads = %numberOfThreads
+ llvm.return
+}
+
// CHECK-LABEL: @llvm_nvvm_cluster_arrive
llvm.func @llvm_nvvm_cluster_arrive() {
// CHECK: call void @llvm.nvvm.barrier.cluster.arrive()
@@ -513,6 +525,13 @@ llvm.func @kernel_func() attributes {nvvm.kernel, nvvm.maxntid = array<i32: 1, 2
// CHECK: {ptr @kernel_func, !"minctasm", i32 16}
// -----
+
+llvm.func @kernel_func(%numberOfThreads : i32) {
+ // expected-error @below {{'nvvm.barrier' op barrier id is missing, it should be set between 0 to 15}}
+ nvvm.barrier number_of_threads = %numberOfThreads
+}
+
+// -----
// expected-error @below {{'"nvvm.minctasm"' attribute must be integer constant}}
llvm.func @kernel_func() attributes {nvvm.kernel,
nvvm.minctasm = "foo"} {
diff --git a/mlir/test/Target/LLVMIR/openmp-reduction.mlir b/mlir/test/Target/LLVMIR/openmp-reduction.mlir
index dae83c0..8c3c9cd 100644
--- a/mlir/test/Target/LLVMIR/openmp-reduction.mlir
+++ b/mlir/test/Target/LLVMIR/openmp-reduction.mlir
@@ -26,10 +26,12 @@ llvm.func @simple_reduction(%lb : i64, %ub : i64, %step : i64) {
%c1 = llvm.mlir.constant(1 : i32) : i32
%0 = llvm.alloca %c1 x i32 : (i32) -> !llvm.ptr
omp.parallel {
- omp.wsloop reduction(@add_f32 -> %0 : !llvm.ptr)
+ omp.wsloop reduction(@add_f32 %0 -> %prv : !llvm.ptr)
for (%iv) : i64 = (%lb) to (%ub) step (%step) {
%1 = llvm.mlir.constant(2.0 : f32) : f32
- omp.reduction %1, %0 : f32, !llvm.ptr
+ %2 = llvm.load %prv : !llvm.ptr -> f32
+ %3 = llvm.fadd %1, %2 : f32
+ llvm.store %3, %prv : f32, !llvm.ptr
omp.yield
}
omp.terminator
@@ -67,7 +69,7 @@ llvm.func @simple_reduction(%lb : i64, %ub : i64, %step : i64) {
// Update of the private variable using the reduction region
// (the body block currently comes after all the other blocks).
// CHECK: %[[PARTIAL:.+]] = load float, ptr %[[PRIVATE]]
-// CHECK: %[[UPDATED:.+]] = fadd float %[[PARTIAL]], 2.000000e+00
+// CHECK: %[[UPDATED:.+]] = fadd float 2.000000e+00, %[[PARTIAL]]
// CHECK: store float %[[UPDATED]], ptr %[[PRIVATE]]
// Reduction function.
@@ -103,11 +105,15 @@ llvm.func @reuse_declaration(%lb : i64, %ub : i64, %step : i64) {
%0 = llvm.alloca %c1 x i32 : (i32) -> !llvm.ptr
%2 = llvm.alloca %c1 x i32 : (i32) -> !llvm.ptr
omp.parallel {
- omp.wsloop reduction(@add_f32 -> %0 : !llvm.ptr, @add_f32 -> %2 : !llvm.ptr)
+ omp.wsloop reduction(@add_f32 %0 -> %prv0 : !llvm.ptr, @add_f32 %2 -> %prv1 : !llvm.ptr)
for (%iv) : i64 = (%lb) to (%ub) step (%step) {
%1 = llvm.mlir.constant(2.0 : f32) : f32
- omp.reduction %1, %0 : f32, !llvm.ptr
- omp.reduction %1, %2 : f32, !llvm.ptr
+ %3 = llvm.load %prv0 : !llvm.ptr -> f32
+ %4 = llvm.fadd %3, %1 : f32
+ llvm.store %4, %prv0 : f32, !llvm.ptr
+ %5 = llvm.load %prv1 : !llvm.ptr -> f32
+ %6 = llvm.fadd %5, %1 : f32
+ llvm.store %6, %prv1 : f32, !llvm.ptr
omp.yield
}
omp.terminator
@@ -189,10 +195,12 @@ llvm.func @missing_omp_reduction(%lb : i64, %ub : i64, %step : i64) {
%0 = llvm.alloca %c1 x i32 : (i32) -> !llvm.ptr
%2 = llvm.alloca %c1 x i32 : (i32) -> !llvm.ptr
omp.parallel {
- omp.wsloop reduction(@add_f32 -> %0 : !llvm.ptr, @add_f32 -> %2 : !llvm.ptr)
+ omp.wsloop reduction(@add_f32 %0 -> %prv0 : !llvm.ptr, @add_f32 %2 -> %prv1 : !llvm.ptr)
for (%iv) : i64 = (%lb) to (%ub) step (%step) {
%1 = llvm.mlir.constant(2.0 : f32) : f32
- omp.reduction %1, %0 : f32, !llvm.ptr
+ %3 = llvm.load %prv0 : !llvm.ptr -> f32
+ %4 = llvm.fadd %3, %1 : f32
+ llvm.store %4, %prv0 : f32, !llvm.ptr
omp.yield
}
omp.terminator
@@ -272,11 +280,15 @@ llvm.func @double_reference(%lb : i64, %ub : i64, %step : i64) {
%c1 = llvm.mlir.constant(1 : i32) : i32
%0 = llvm.alloca %c1 x i32 : (i32) -> !llvm.ptr
omp.parallel {
- omp.wsloop reduction(@add_f32 -> %0 : !llvm.ptr)
+ omp.wsloop reduction(@add_f32 %0 -> %prv : !llvm.ptr)
for (%iv) : i64 = (%lb) to (%ub) step (%step) {
%1 = llvm.mlir.constant(2.0 : f32) : f32
- omp.reduction %1, %0 : f32, !llvm.ptr
- omp.reduction %1, %0 : f32, !llvm.ptr
+ %2 = llvm.load %prv : !llvm.ptr -> f32
+ %3 = llvm.fadd %2, %1 : f32
+ llvm.store %3, %prv : f32, !llvm.ptr
+ %4 = llvm.load %prv : !llvm.ptr -> f32
+ %5 = llvm.fadd %4, %1 : f32
+ llvm.store %5, %prv : f32, !llvm.ptr
omp.yield
}
omp.terminator
@@ -362,11 +374,15 @@ llvm.func @no_atomic(%lb : i64, %ub : i64, %step : i64) {
%0 = llvm.alloca %c1 x i32 : (i32) -> !llvm.ptr
%2 = llvm.alloca %c1 x i32 : (i32) -> !llvm.ptr
omp.parallel {
- omp.wsloop reduction(@add_f32 -> %0 : !llvm.ptr, @mul_f32 -> %2 : !llvm.ptr)
+ omp.wsloop reduction(@add_f32 %0 -> %prv0 : !llvm.ptr, @mul_f32 %2 -> %prv1 : !llvm.ptr)
for (%iv) : i64 = (%lb) to (%ub) step (%step) {
%1 = llvm.mlir.constant(2.0 : f32) : f32
- omp.reduction %1, %0 : f32, !llvm.ptr
- omp.reduction %1, %2 : f32, !llvm.ptr
+ %3 = llvm.load %prv0 : !llvm.ptr -> f32
+ %4 = llvm.fadd %3, %1 : f32
+ llvm.store %4, %prv0 : f32, !llvm.ptr
+ %5 = llvm.load %prv1 : !llvm.ptr -> f32
+ %6 = llvm.fmul %5, %1 : f32
+ llvm.store %6, %prv1 : f32, !llvm.ptr
omp.yield
}
omp.terminator
diff --git a/mlir/test/Target/LLVMIR/rocdl.mlir b/mlir/test/Target/LLVMIR/rocdl.mlir
index 2612330..06b7865 100644
--- a/mlir/test/Target/LLVMIR/rocdl.mlir
+++ b/mlir/test/Target/LLVMIR/rocdl.mlir
@@ -90,6 +90,22 @@ llvm.func @rocdl.barrier() {
llvm.return
}
+llvm.func @rocdl.setprio() {
+ // CHECK: call void @llvm.amdgcn.s.setprio(i16 0)
+ rocdl.s.setprio 0
+ // CHECK-NEXT: call void @llvm.amdgcn.s.setprio(i16 1)
+ rocdl.s.setprio 1
+ llvm.return
+}
+
+llvm.func @rocdl.schedbarrier() {
+ // CHECK: call void @llvm.amdgcn.sched.barrier(i32 0)
+ rocdl.sched.barrier 0
+ // CHECK-NEXT: call void @llvm.amdgcn.sched.barrier(i32 1)
+ rocdl.sched.barrier 1
+ llvm.return
+}
+
llvm.func @rocdl.xdlops(%arg0 : f32, %arg1 : f32,
%arg2 : vector<32 x f32>, %arg3: i32,
%arg4 : vector<16 x f32>, %arg5 : vector<4xf32>,
diff --git a/mlir/test/Transforms/test-legalizer.mlir b/mlir/test/Transforms/test-legalizer.mlir
index d8cf6e4..84fcc18 100644
--- a/mlir/test/Transforms/test-legalizer.mlir
+++ b/mlir/test/Transforms/test-legalizer.mlir
@@ -320,3 +320,17 @@ module {
return
}
}
+
+// -----
+
+// CHECK-LABEL: func @test_move_op_before_rollback()
+func.func @test_move_op_before_rollback() {
+ // CHECK: "test.one_region_op"()
+ // CHECK: "test.hoist_me"()
+ "test.one_region_op"() ({
+ // expected-remark @below{{'test.hoist_me' is not legalizable}}
+ %0 = "test.hoist_me"() : () -> (i32)
+ "test.valid"(%0) : (i32) -> ()
+ }) : () -> ()
+ "test.return"() : () -> ()
+}
diff --git a/mlir/test/lib/Dialect/Test/TestPatterns.cpp b/mlir/test/lib/Dialect/Test/TestPatterns.cpp
index d7e5d6d..1c02232 100644
--- a/mlir/test/lib/Dialect/Test/TestPatterns.cpp
+++ b/mlir/test/lib/Dialect/Test/TestPatterns.cpp
@@ -773,6 +773,22 @@ struct TestUndoBlockArgReplace : public ConversionPattern {
}
};
+/// This pattern hoists ops out of a "test.hoist_me" and then fails conversion.
+/// This is to test the rollback logic.
+struct TestUndoMoveOpBefore : public ConversionPattern {
+ TestUndoMoveOpBefore(MLIRContext *ctx)
+ : ConversionPattern("test.hoist_me", /*benefit=*/1, ctx) {}
+
+ LogicalResult
+ matchAndRewrite(Operation *op, ArrayRef<Value> operands,
+ ConversionPatternRewriter &rewriter) const override {
+ rewriter.moveOpBefore(op, op->getParentOp());
+ // Replace with an illegal op to ensure the conversion fails.
+ rewriter.replaceOpWithNewOp<ILLegalOpF>(op, rewriter.getF32Type());
+ return success();
+ }
+};
+
/// A rewrite pattern that tests the undo mechanism when erasing a block.
struct TestUndoBlockErase : public ConversionPattern {
TestUndoBlockErase(MLIRContext *ctx)
@@ -1069,7 +1085,7 @@ struct TestLegalizePatternDriver
TestChangeProducerTypeF32ToInvalid, TestUpdateConsumerType,
TestNonRootReplacement, TestBoundedRecursiveRewrite,
TestNestedOpCreationUndoRewrite, TestReplaceEraseOp,
- TestCreateUnregisteredOp>(&getContext());
+ TestCreateUnregisteredOp, TestUndoMoveOpBefore>(&getContext());
patterns.add<TestDropOpSignatureConversion>(&getContext(), converter);
mlir::populateAnyFunctionOpInterfaceTypeConversionPattern(patterns,
converter);
@@ -1079,7 +1095,7 @@ struct TestLegalizePatternDriver
ConversionTarget target(getContext());
target.addLegalOp<ModuleOp>();
target.addLegalOp<LegalOpA, LegalOpB, LegalOpC, TestCastOp, TestValidOp,
- TerminatorOp>();
+ TerminatorOp, OneRegionOp>();
target
.addIllegalOp<ILLegalOpF, TestRegionBuilderOp, TestOpWithRegionFold>();
target.addDynamicallyLegalOp<TestReturnOp>([](TestReturnOp op) {
diff --git a/mlir/test/lib/Dialect/Vector/TestVectorTransforms.cpp b/mlir/test/lib/Dialect/Vector/TestVectorTransforms.cpp
index 126d65b..acd3898 100644
--- a/mlir/test/lib/Dialect/Vector/TestVectorTransforms.cpp
+++ b/mlir/test/lib/Dialect/Vector/TestVectorTransforms.cpp
@@ -823,6 +823,33 @@ struct TestVectorEmulateMaskedLoadStore final
(void)applyPatternsAndFoldGreedily(getOperation(), std::move(patterns));
}
};
+
+struct TestVectorLinearize final
+ : public PassWrapper<TestVectorLinearize, OperationPass<>> {
+ MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(TestVectorLinearize)
+
+ StringRef getArgument() const override { return "test-vector-linearize"; }
+ StringRef getDescription() const override {
+ return "Linearizes ND vectors for N >= 2 into 1D vectors";
+ }
+ void getDependentDialects(DialectRegistry &registry) const override {
+ registry.insert<vector::VectorDialect>();
+ }
+
+ void runOnOperation() override {
+ auto *context = &getContext();
+
+ TypeConverter typeConverter;
+ RewritePatternSet patterns(context);
+ ConversionTarget target(*context);
+
+ vector::populateVectorLinearizeTypeConversionsAndLegality(typeConverter,
+ patterns, target);
+ if (failed(applyPartialConversion(getOperation(), target,
+ std::move(patterns))))
+ return signalPassFailure();
+ }
+};
} // namespace
namespace mlir {
@@ -867,6 +894,8 @@ void registerTestVectorLowerings() {
PassRegistration<TestFoldArithExtensionIntoVectorContractPatterns>();
PassRegistration<TestVectorEmulateMaskedLoadStore>();
+
+ PassRegistration<TestVectorLinearize>();
}
} // namespace test
} // namespace mlir
diff --git a/mlir/test/python/dialects/llvm.py b/mlir/test/python/dialects/llvm.py
index 2d207ae..fb4b343 100644
--- a/mlir/test/python/dialects/llvm.py
+++ b/mlir/test/python/dialects/llvm.py
@@ -15,6 +15,90 @@ def constructAndPrintInModule(f):
return f
+# CHECK-LABEL: testStructType
+@constructAndPrintInModule
+def testStructType():
+ print(llvm.StructType.get_literal([]))
+ # CHECK: !llvm.struct<()>
+
+ i8, i32, i64 = tuple(map(lambda x: IntegerType.get_signless(x), [8, 32, 64]))
+ print(llvm.StructType.get_literal([i8, i32, i64]))
+ print(llvm.StructType.get_literal([i32]))
+ print(llvm.StructType.get_literal([i32, i32], packed=True))
+ literal = llvm.StructType.get_literal([i8, i32, i64])
+ assert len(literal.body) == 3
+ print(*tuple(literal.body))
+ assert literal.name is None
+ # CHECK: !llvm.struct<(i8, i32, i64)>
+ # CHECK: !llvm.struct<(i32)>
+ # CHECK: !llvm.struct<packed (i32, i32)>
+ # CHECK: i8 i32 i64
+
+ assert llvm.StructType.get_literal([i32]) == llvm.StructType.get_literal([i32])
+ assert llvm.StructType.get_literal([i32]) != llvm.StructType.get_literal([i64])
+
+ print(llvm.StructType.get_identified("foo"))
+ print(llvm.StructType.get_identified("bar"))
+ # CHECK: !llvm.struct<"foo", opaque>
+ # CHECK: !llvm.struct<"bar", opaque>
+
+ assert llvm.StructType.get_identified("foo") == llvm.StructType.get_identified(
+ "foo"
+ )
+ assert llvm.StructType.get_identified("foo") != llvm.StructType.get_identified(
+ "bar"
+ )
+
+ foo_struct = llvm.StructType.get_identified("foo")
+ print(foo_struct.name)
+ print(foo_struct.body)
+ assert foo_struct.opaque
+ foo_struct.set_body([i32, i64])
+ print(*tuple(foo_struct.body))
+ print(foo_struct)
+ assert not foo_struct.packed
+ assert not foo_struct.opaque
+ assert llvm.StructType.get_identified("foo") == foo_struct
+ # CHECK: foo
+ # CHECK: None
+ # CHECK: i32 i64
+ # CHECK: !llvm.struct<"foo", (i32, i64)>
+
+ bar_struct = llvm.StructType.get_identified("bar")
+ bar_struct.set_body([i32], packed=True)
+ print(bar_struct)
+ assert bar_struct.packed
+ # CHECK: !llvm.struct<"bar", packed (i32)>
+
+ # Same body, should not raise.
+ foo_struct.set_body([i32, i64])
+
+ try:
+ foo_struct.set_body([])
+ except ValueError as e:
+ pass
+ else:
+ assert False, "expected exception not raised"
+
+ try:
+ bar_struct.set_body([i32])
+ except ValueError as e:
+ pass
+ else:
+ assert False, "expected exception not raised"
+
+ print(llvm.StructType.new_identified("foo", []))
+ assert llvm.StructType.new_identified("foo", []) != llvm.StructType.new_identified(
+ "foo", []
+ )
+ # CHECK: !llvm.struct<"foo{{[^"]+}}
+
+ opaque = llvm.StructType.get_opaque("opaque")
+ print(opaque)
+ assert opaque.opaque
+ # CHECK: !llvm.struct<"opaque", opaque>
+
+
# CHECK-LABEL: testSmoke
@constructAndPrintInModule
def testSmoke():
diff --git a/mlir/test/python/dialects/sparse_tensor/dialect.py b/mlir/test/python/dialects/sparse_tensor/dialect.py
index 1fa7030..2c06032 100644
--- a/mlir/test/python/dialects/sparse_tensor/dialect.py
+++ b/mlir/test/python/dialects/sparse_tensor/dialect.py
@@ -73,8 +73,8 @@ def testEncodingAttrStructure():
# CHECK: lvl_types: [65536, 65536, 4406637494272]
print(f"lvl_types: {casted.lvl_types}")
- # CHECK: lvl_types_enum: [<LevelType.dense: 65536>, <LevelType.dense: 65536>, <LevelType.n_out_of_m: 1048576>]
- print(f"lvl_types_enum: {casted.lvl_types_enum}")
+ # CHECK: lvl_formats_enum: [<LevelFormat.dense: 65536>, <LevelFormat.dense: 65536>, <LevelFormat.n_out_of_m: 1048576>]
+ print(f"lvl_formats_enum: {casted.lvl_formats_enum}")
# CHECK: structured_n: 2
print(f"structured_n: {casted.structured_n}")
# CHECK: structured_m: 4
@@ -96,7 +96,10 @@ def testEncodingAttrStructure():
# CHECK: created_equal: False
print(f"created_equal: {created == casted}")
- built_2_4 = st.EncodingAttr.build_level_type(st.LevelType.n_out_of_m, 2, 4)
+ built_2_4 = st.EncodingAttr.build_level_type(
+ st.LevelFormat.n_out_of_m, [], 2, 4
+ )
+ built_dense = st.EncodingAttr.build_level_type(st.LevelFormat.dense)
dim_to_lvl = AffineMap.get(
2,
0,
@@ -118,7 +121,7 @@ def testEncodingAttrStructure():
],
)
built = st.EncodingAttr.get(
- [st.LevelType.dense, st.LevelType.dense, built_2_4],
+ [built_dense, built_dense, built_2_4],
dim_to_lvl,
lvl_to_dim,
0,
diff --git a/mlir/test/python/ir/builtin_types.py b/mlir/test/python/ir/builtin_types.py
index 30a5054..4eea1a9 100644
--- a/mlir/test/python/ir/builtin_types.py
+++ b/mlir/test/python/ir/builtin_types.py
@@ -100,8 +100,38 @@ def testTypeIsInstance():
print(IntegerType.isinstance(t1))
# CHECK: False
print(F32Type.isinstance(t1))
+ # CHECK: False
+ print(FloatType.isinstance(t1))
# CHECK: True
print(F32Type.isinstance(t2))
+ # CHECK: True
+ print(FloatType.isinstance(t2))
+
+
+# CHECK-LABEL: TEST: testFloatTypeSubclasses
+@run
+def testFloatTypeSubclasses():
+ ctx = Context()
+ # CHECK: True
+ print(isinstance(Type.parse("f8E4M3FN", ctx), FloatType))
+ # CHECK: True
+ print(isinstance(Type.parse("f8E5M2", ctx), FloatType))
+ # CHECK: True
+ print(isinstance(Type.parse("f8E4M3FNUZ", ctx), FloatType))
+ # CHECK: True
+ print(isinstance(Type.parse("f8E4M3B11FNUZ", ctx), FloatType))
+ # CHECK: True
+ print(isinstance(Type.parse("f8E5M2FNUZ", ctx), FloatType))
+ # CHECK: True
+ print(isinstance(Type.parse("f16", ctx), FloatType))
+ # CHECK: True
+ print(isinstance(Type.parse("bf16", ctx), FloatType))
+ # CHECK: True
+ print(isinstance(Type.parse("f32", ctx), FloatType))
+ # CHECK: True
+ print(isinstance(Type.parse("tf32", ctx), FloatType))
+ # CHECK: True
+ print(isinstance(Type.parse("f64", ctx), FloatType))
# CHECK-LABEL: TEST: testTypeEqDoesNotRaise
@@ -218,7 +248,10 @@ def testFloatType():
# CHECK: float: f32
print("float:", F32Type.get())
# CHECK: float: f64
- print("float:", F64Type.get())
+ f64 = F64Type.get()
+ print("float:", f64)
+ # CHECK: f64 width: 64
+ print("f64 width:", f64.width)
# CHECK-LABEL: TEST: testNoneType
diff --git a/mlir/unittests/Analysis/Presburger/BarvinokTest.cpp b/mlir/unittests/Analysis/Presburger/BarvinokTest.cpp
index 919aaa7..5e279b5 100644
--- a/mlir/unittests/Analysis/Presburger/BarvinokTest.cpp
+++ b/mlir/unittests/Analysis/Presburger/BarvinokTest.cpp
@@ -1,5 +1,6 @@
#include "mlir/Analysis/Presburger/Barvinok.h"
#include "./Utils.h"
+#include "Parser.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
@@ -59,7 +60,8 @@ TEST(BarvinokTest, unimodularConeGeneratingFunction) {
ParamPoint vertex =
makeFracMatrix(2, 3, {{2, 2, 0}, {-1, -Fraction(1, 2), 1}});
- GeneratingFunction gf = unimodularConeGeneratingFunction(vertex, 1, cone);
+ GeneratingFunction gf =
+ computeUnimodularConeGeneratingFunction(vertex, 1, cone);
EXPECT_EQ_REPR_GENERATINGFUNCTION(
gf, GeneratingFunction(
@@ -74,7 +76,7 @@ TEST(BarvinokTest, unimodularConeGeneratingFunction) {
vertex = makeFracMatrix(3, 2, {{5, 2}, {6, 2}, {7, 1}});
- gf = unimodularConeGeneratingFunction(vertex, 1, cone);
+ gf = computeUnimodularConeGeneratingFunction(vertex, 1, cone);
EXPECT_EQ_REPR_GENERATINGFUNCTION(
gf,
@@ -125,7 +127,7 @@ TEST(BarvinokTest, getCoefficientInRationalFunction) {
EXPECT_EQ(coeff.getConstantTerm(), Fraction(55, 64));
}
-TEST(BarvinokTest, computeNumTerms) {
+TEST(BarvinokTest, computeNumTermsCone) {
// The following test is taken from
// Verdoolaege, Sven, et al. "Counting integer points in parametric
// polytopes using Barvinok's rational functions." Algorithmica 48 (2007):
@@ -233,4 +235,69 @@ TEST(BarvinokTest, computeNumTerms) {
for (unsigned j = 0; j < 2; j++)
for (unsigned k = 0; k < 2; k++)
EXPECT_EQ(count[i][j][k], 1);
-} \ No newline at end of file
+}
+
+/// We define some simple polyhedra with unimodular tangent cones and verify
+/// that the returned generating functions correspond to those calculated by
+/// hand.
+TEST(BarvinokTest, computeNumTermsPolytope) {
+ // A cube of side 1.
+ PolyhedronH poly =
+ parseRelationFromSet("(x, y, z) : (x >= 0, y >= 0, z >= 0, -x + 1 >= 0, "
+ "-y + 1 >= 0, -z + 1 >= 0)",
+ 0);
+
+ std::vector<std::pair<PresburgerSet, GeneratingFunction>> count =
+ computePolytopeGeneratingFunction(poly);
+ // There is only one chamber, as it is non-parametric.
+ EXPECT_EQ(count.size(), 9u);
+
+ GeneratingFunction gf = count[0].second;
+ EXPECT_EQ_REPR_GENERATINGFUNCTION(
+ gf,
+ GeneratingFunction(
+ 0, {1, 1, 1, 1, 1, 1, 1, 1},
+ {makeFracMatrix(1, 3, {{1, 1, 1}}), makeFracMatrix(1, 3, {{0, 1, 1}}),
+ makeFracMatrix(1, 3, {{0, 1, 1}}), makeFracMatrix(1, 3, {{0, 0, 1}}),
+ makeFracMatrix(1, 3, {{0, 1, 1}}), makeFracMatrix(1, 3, {{0, 0, 1}}),
+ makeFracMatrix(1, 3, {{0, 0, 1}}),
+ makeFracMatrix(1, 3, {{0, 0, 0}})},
+ {{{-1, 0, 0}, {0, -1, 0}, {0, 0, -1}},
+ {{0, 0, 1}, {-1, 0, 0}, {0, -1, 0}},
+ {{0, 1, 0}, {-1, 0, 0}, {0, 0, -1}},
+ {{0, 1, 0}, {0, 0, 1}, {-1, 0, 0}},
+ {{1, 0, 0}, {0, -1, 0}, {0, 0, -1}},
+ {{1, 0, 0}, {0, 0, 1}, {0, -1, 0}},
+ {{1, 0, 0}, {0, 1, 0}, {0, 0, -1}},
+ {{1, 0, 0}, {0, 1, 0}, {0, 0, 1}}}));
+
+ // A right-angled triangle with side p.
+ poly =
+ parseRelationFromSet("(x, y)[N] : (x >= 0, y >= 0, -x - y + N >= 0)", 0);
+
+ count = computePolytopeGeneratingFunction(poly);
+ // There is only one chamber: p ≥ 0
+ EXPECT_EQ(count.size(), 4u);
+
+ gf = count[0].second;
+ EXPECT_EQ_REPR_GENERATINGFUNCTION(
+ gf, GeneratingFunction(
+ 1, {1, 1, 1},
+ {makeFracMatrix(2, 2, {{0, 1}, {0, 0}}),
+ makeFracMatrix(2, 2, {{0, 1}, {0, 0}}),
+ makeFracMatrix(2, 2, {{0, 0}, {0, 0}})},
+ {{{-1, 1}, {-1, 0}}, {{1, -1}, {0, -1}}, {{1, 0}, {0, 1}}}));
+
+ // Cartesian product of a cube with side M and a right triangle with side N.
+ poly = parseRelationFromSet(
+ "(x, y, z, w, a)[M, N] : (x >= 0, y >= 0, z >= 0, -x + M >= 0, -y + M >= "
+ "0, -z + M >= 0, w >= 0, a >= 0, -w - a + N >= 0)",
+ 0);
+
+ count = computePolytopeGeneratingFunction(poly);
+
+ EXPECT_EQ(count.size(), 25u);
+
+ gf = count[0].second;
+ EXPECT_EQ(gf.getNumerators().size(), 24u);
+}
diff --git a/openmp/runtime/src/kmp_csupport.cpp b/openmp/runtime/src/kmp_csupport.cpp
index 9eeaeb8..878e78b 100644
--- a/openmp/runtime/src/kmp_csupport.cpp
+++ b/openmp/runtime/src/kmp_csupport.cpp
@@ -1533,8 +1533,9 @@ void __kmpc_critical_with_hint(ident_t *loc, kmp_int32 global_tid,
kmp_dyna_lockseq_t lockseq = __kmp_map_hint_to_lock(hint);
if (*lk == 0) {
if (KMP_IS_D_LOCK(lockseq)) {
- KMP_COMPARE_AND_STORE_ACQ32((volatile kmp_int32 *)crit, 0,
- KMP_GET_D_TAG(lockseq));
+ KMP_COMPARE_AND_STORE_ACQ32(
+ (volatile kmp_int32 *)&((kmp_base_tas_lock_t *)crit)->poll, 0,
+ KMP_GET_D_TAG(lockseq));
} else {
__kmp_init_indirect_csptr(crit, loc, global_tid, KMP_GET_I_TAG(lockseq));
}
diff --git a/openmp/runtime/src/kmp_gsupport.cpp b/openmp/runtime/src/kmp_gsupport.cpp
index 8818965..4dc8a90 100644
--- a/openmp/runtime/src/kmp_gsupport.cpp
+++ b/openmp/runtime/src/kmp_gsupport.cpp
@@ -144,7 +144,7 @@ void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_BARRIER)(void) {
// Mutual exclusion
-// The symbol that icc/ifort generates for unnamed for unnamed critical sections
+// The symbol that icc/ifort generates for unnamed critical sections
// - .gomp_critical_user_ - is defined using .comm in any objects reference it.
// We can't reference it directly here in C code, as the symbol contains a ".".
//
diff --git a/openmp/runtime/src/kmp_lock.cpp b/openmp/runtime/src/kmp_lock.cpp
index 85c54f4..0ad14f8 100644
--- a/openmp/runtime/src/kmp_lock.cpp
+++ b/openmp/runtime/src/kmp_lock.cpp
@@ -2689,7 +2689,7 @@ void __kmp_spin_backoff(kmp_backoff_t *boff) {
// lock word.
static void __kmp_init_direct_lock(kmp_dyna_lock_t *lck,
kmp_dyna_lockseq_t seq) {
- TCW_4(*lck, KMP_GET_D_TAG(seq));
+ TCW_4(((kmp_base_tas_lock_t *)lck)->poll, KMP_GET_D_TAG(seq));
KA_TRACE(
20,
("__kmp_init_direct_lock: initialized direct lock with type#%d\n", seq));
@@ -3180,8 +3180,8 @@ kmp_indirect_lock_t *__kmp_allocate_indirect_lock(void **user_lock,
lck->type = tag;
if (OMP_LOCK_T_SIZE < sizeof(void *)) {
- *((kmp_lock_index_t *)user_lock) = idx
- << 1; // indirect lock word must be even
+ *(kmp_lock_index_t *)&(((kmp_base_tas_lock_t *)user_lock)->poll) =
+ idx << 1; // indirect lock word must be even
} else {
*((kmp_indirect_lock_t **)user_lock) = lck;
}
diff --git a/openmp/runtime/src/kmp_lock.h b/openmp/runtime/src/kmp_lock.h
index f21179b..e2a0cda 100644
--- a/openmp/runtime/src/kmp_lock.h
+++ b/openmp/runtime/src/kmp_lock.h
@@ -50,7 +50,7 @@ typedef struct ident ident_t;
// recent versions), but we are bounded by the pointer-sized chunks that
// the Intel compiler allocates.
-#if KMP_OS_LINUX && defined(KMP_GOMP_COMPAT)
+#if (KMP_OS_LINUX || KMP_OS_AIX) && defined(KMP_GOMP_COMPAT)
#define OMP_LOCK_T_SIZE sizeof(int)
#define OMP_NEST_LOCK_T_SIZE sizeof(void *)
#else
@@ -120,8 +120,15 @@ extern void __kmp_validate_locks(void);
struct kmp_base_tas_lock {
// KMP_LOCK_FREE(tas) => unlocked; locked: (gtid+1) of owning thread
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ && __LP64__
+ // Flip the ordering of the high and low 32-bit member to be consistent
+ // with the memory layout of the address in 64-bit big-endian.
+ kmp_int32 depth_locked; // depth locked, for nested locks only
+ std::atomic<kmp_int32> poll;
+#else
std::atomic<kmp_int32> poll;
kmp_int32 depth_locked; // depth locked, for nested locks only
+#endif
};
typedef struct kmp_base_tas_lock kmp_base_tas_lock_t;
@@ -1138,11 +1145,13 @@ extern int (**__kmp_indirect_test)(kmp_user_lock_p, kmp_int32);
// Extracts direct lock tag from a user lock pointer
#define KMP_EXTRACT_D_TAG(l) \
- (*((kmp_dyna_lock_t *)(l)) & ((1 << KMP_LOCK_SHIFT) - 1) & \
- -(*((kmp_dyna_lock_t *)(l)) & 1))
+ ((kmp_dyna_lock_t)((kmp_base_tas_lock_t *)(l))->poll & \
+ ((1 << KMP_LOCK_SHIFT) - 1) & \
+ -((kmp_dyna_lock_t)((kmp_tas_lock_t *)(l))->lk.poll & 1))
// Extracts indirect lock index from a user lock pointer
-#define KMP_EXTRACT_I_INDEX(l) (*(kmp_lock_index_t *)(l) >> 1)
+#define KMP_EXTRACT_I_INDEX(l) \
+ ((kmp_lock_index_t)((kmp_base_tas_lock_t *)(l))->poll >> 1)
// Returns function pointer to the direct lock function with l (kmp_dyna_lock_t
// *) and op (operation type).
diff --git a/polly/lib/Transform/ScheduleOptimizer.cpp b/polly/lib/Transform/ScheduleOptimizer.cpp
index 8ee2b66..5a0ea3b 100644
--- a/polly/lib/Transform/ScheduleOptimizer.cpp
+++ b/polly/lib/Transform/ScheduleOptimizer.cpp
@@ -868,23 +868,14 @@ static void runIslScheduleOptimizer(
SC = SC.set_validity(Validity);
SC = SC.set_coincidence(Validity);
- // Save error handling behavior
- long MaxOperations = isl_ctx_get_max_operations(Ctx);
- isl_ctx_set_max_operations(Ctx, ScheduleComputeOut);
- Schedule = SC.compute_schedule();
- bool ScheduleQuota = false;
- if (isl_ctx_last_error(Ctx) == isl_error_quota) {
- isl_ctx_reset_error(Ctx);
- LLVM_DEBUG(
- dbgs() << "Schedule optimizer calculation exceeds ISL quota\n");
- ScheduleQuota = true;
- }
- isl_options_set_on_error(Ctx, ISL_ON_ERROR_ABORT);
- isl_ctx_reset_operations(Ctx);
- isl_ctx_set_max_operations(Ctx, MaxOperations);
+ {
+ IslMaxOperationsGuard MaxOpGuard(Ctx, ScheduleComputeOut);
+ Schedule = SC.compute_schedule();
- if (ScheduleQuota)
- return;
+ if (MaxOpGuard.hasQuotaExceeded())
+ LLVM_DEBUG(
+ dbgs() << "Schedule optimizer calculation exceeds ISL quota\n");
+ }
isl_options_set_on_error(Ctx, OnErrorStatus);
diff --git a/polly/test/ScheduleOptimizer/schedule_computeout.ll b/polly/test/ScheduleOptimizer/schedule_computeout.ll
index eb59f0e..acc8601 100644
--- a/polly/test/ScheduleOptimizer/schedule_computeout.ll
+++ b/polly/test/ScheduleOptimizer/schedule_computeout.ll
@@ -1,8 +1,8 @@
-; RUN: opt %loadPolly -S -polly-optree -polly-delicm -polly-opt-isl -polly-schedule-computeout=100000 -debug-only="polly-opt-isl" < %s 2>&1 | FileCheck %s
+; RUN: opt %loadPolly -S -polly-optree -polly-delicm -polly-opt-isl -polly-schedule-computeout=10000 -debug-only="polly-opt-isl" < %s 2>&1 | FileCheck %s
; REQUIRES: asserts
; Bailout if the computations of schedule compute exceeds the max scheduling quota.
-; Max compute out is initialized to 300000, Here it is set to 100000 for test purpose.
+; Max compute out is initialized to 300000, Here it is set to 10000 for test purpose.
target datalayout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128"
target triple = "aarch64-unknown-linux-gnu"
diff --git a/utils/bazel/llvm-project-overlay/clang/BUILD.bazel b/utils/bazel/llvm-project-overlay/clang/BUILD.bazel
index b5de786..a6b0e1e 100644
--- a/utils/bazel/llvm-project-overlay/clang/BUILD.bazel
+++ b/utils/bazel/llvm-project-overlay/clang/BUILD.bazel
@@ -1843,6 +1843,7 @@ cc_library(
":driver",
":driver_options_inc_gen",
":edit",
+ ":install_api",
":lex",
":parse",
":sema",
@@ -1862,6 +1863,7 @@ cc_library(
"//llvm:Support",
"//llvm:Target",
"//llvm:TargetParser",
+ "//llvm:TextAPI",
"//llvm:config",
],
)
@@ -2053,6 +2055,24 @@ cc_library(
)
cc_library(
+ name = "install_api",
+ srcs = glob([
+ "lib/InstallAPI/*.cpp",
+ ]),
+ hdrs = glob([
+ "include/clang/InstallAPI/*.h",
+ ]),
+ includes = ["include"],
+ deps = [
+ ":ast",
+ ":basic",
+ ":support",
+ "//llvm:Support",
+ "//llvm:TextAPI",
+ ],
+)
+
+cc_library(
name = "serialization",
srcs = [
"include/clang/Serialization/AttrPCHRead.inc",
diff --git a/utils/bazel/llvm-project-overlay/libc/BUILD.bazel b/utils/bazel/llvm-project-overlay/libc/BUILD.bazel
index 30c180b..1518d791 100644
--- a/utils/bazel/llvm-project-overlay/libc/BUILD.bazel
+++ b/utils/bazel/llvm-project-overlay/libc/BUILD.bazel
@@ -296,6 +296,7 @@ libc_support_library(
"src/__support/CPP/type_traits/is_convertible.h",
"src/__support/CPP/type_traits/is_destructible.h",
"src/__support/CPP/type_traits/is_enum.h",
+ "src/__support/CPP/type_traits/is_fixed_point.h",
"src/__support/CPP/type_traits/is_floating_point.h",
"src/__support/CPP/type_traits/is_function.h",
"src/__support/CPP/type_traits/is_integral.h",
@@ -332,6 +333,7 @@ libc_support_library(
":__support_macros_attributes",
":__support_macros_config",
":__support_macros_properties_float",
+ ":llvm_libc_macros_stdfix_macros",
],
)
@@ -473,6 +475,15 @@ libc_support_library(
)
libc_support_library(
+ name = "__support_integer_literals",
+ hdrs = ["src/__support/integer_literals.h"],
+ deps = [
+ ":__support_cpp_limits",
+ ":__support_uint128",
+ ],
+)
+
+libc_support_library(
name = "__support_str_to_num_result",
hdrs = ["src/__support/str_to_num_result.h"],
deps = [":__support_macros_attributes"],
@@ -713,6 +724,7 @@ libc_support_library(
":__support_cpp_bit",
":__support_cpp_limits",
":__support_cpp_type_traits",
+ ":__support_fputil_dyadic_float",
":__support_fputil_fp_bits",
":__support_fputil_nearest_integer_operations",
":__support_fputil_normal_float",
@@ -970,6 +982,12 @@ libc_support_library(
)
libc_support_library(
+ name = "llvm_libc_macros_stdfix_macros",
+ hdrs = ["include/llvm-libc-macros/stdfix-macros.h"],
+ deps = [":llvm_libc_macros_float_macros"],
+)
+
+libc_support_library(
name = "llvm_libc_types_float128",
hdrs = ["include/llvm-libc-types/float128.h"],
deps = [":llvm_libc_macros_float_macros"],
diff --git a/utils/bazel/llvm-project-overlay/libc/test/src/__support/BUILD.bazel b/utils/bazel/llvm-project-overlay/libc/test/src/__support/BUILD.bazel
index 22f4d03..e691d3c 100644
--- a/utils/bazel/llvm-project-overlay/libc/test/src/__support/BUILD.bazel
+++ b/utils/bazel/llvm-project-overlay/libc/test/src/__support/BUILD.bazel
@@ -99,3 +99,11 @@ libc_test(
"//libc:__support_char_vector",
],
)
+
+libc_test(
+ name = "integer_literals_test",
+ srcs = ["integer_literals_test.cpp"],
+ deps = [
+ "//libc:__support_integer_literals",
+ ],
+)
diff --git a/utils/bazel/llvm-project-overlay/libc/test/src/__support/FPUtil/BUILD.bazel b/utils/bazel/llvm-project-overlay/libc/test/src/__support/FPUtil/BUILD.bazel
index 461d512..76443fc 100644
--- a/utils/bazel/llvm-project-overlay/libc/test/src/__support/FPUtil/BUILD.bazel
+++ b/utils/bazel/llvm-project-overlay/libc/test/src/__support/FPUtil/BUILD.bazel
@@ -16,6 +16,7 @@ libc_test(
deps = [
"//libc:__support_fputil_fp_bits",
"//libc:__support_fputil_fpbits_str",
+ "//libc:__support_integer_literals",
],
)
diff --git a/utils/bazel/llvm-project-overlay/mlir/BUILD.bazel b/utils/bazel/llvm-project-overlay/mlir/BUILD.bazel
index 821481e..54c9f19 100644
--- a/utils/bazel/llvm-project-overlay/mlir/BUILD.bazel
+++ b/utils/bazel/llvm-project-overlay/mlir/BUILD.bazel
@@ -1042,6 +1042,25 @@ cc_binary(
)
cc_binary(
+ name = "_mlirDialectsLLVM.so",
+ srcs = ["lib/Bindings/Python/DialectLLVM.cpp"],
+ copts = PYBIND11_COPTS,
+ features = PYBIND11_FEATURES,
+ linkshared = 1,
+ linkstatic = 0,
+ tags = [
+ "manual", # External dependency
+ "nobuildkite",
+ ],
+ deps = [
+ ":CAPIIR",
+ ":CAPILLVM",
+ ":MLIRBindingsPythonHeadersAndDeps",
+ "@pybind11",
+ ],
+)
+
+cc_binary(
name = "_mlirDialectsQuant.so",
srcs = ["lib/Bindings/Python/DialectQuant.cpp"],
copts = PYBIND11_COPTS,
@@ -1511,6 +1530,57 @@ cc_library(
],
)
+cc_library(
+ name = "AMDGPUTransformOps",
+ srcs = glob([
+ "lib/Dialect/AMDGPU/TransformOps/*.cpp",
+ ]),
+ hdrs = glob([
+ "include/mlir/Dialect/AMDGPU/TransformOps/*.h",
+ ]),
+ includes = ["include"],
+ deps = [
+ ":AMDGPUDialect",
+ ":AMDGPUTransformOpsIncGen",
+ ":AMDGPUTransforms",
+ ":AffineDialect",
+ ":FuncDialect",
+ ":IR",
+ ":TransformDialect",
+ ":VectorDialect",
+ ],
+)
+
+td_library(
+ name = "AMDGPUTransformOpsTdFiles",
+ srcs = glob([
+ "include/mlir/Dialect/AMDGPU/TransformOps/*.td",
+ ]),
+ includes = ["include"],
+ deps = [
+ ":TransformDialectTdFiles",
+ ],
+)
+
+gentbl_cc_library(
+ name = "AMDGPUTransformOpsIncGen",
+ tbl_outs = [
+ (
+ ["-gen-op-decls"],
+ "include/mlir/Dialect/AMDGPU/TransformOps/AMDGPUTransformOps.h.inc",
+ ),
+ (
+ ["-gen-op-defs"],
+ "include/mlir/Dialect/AMDGPU/TransformOps/AMDGPUTransformOps.cpp.inc",
+ ),
+ ],
+ tblgen = ":mlir-tblgen",
+ td_file = "include/mlir/Dialect/AMDGPU/TransformOps/AMDGPUTransformOps.td",
+ deps = [
+ ":AMDGPUTransformOpsTdFiles",
+ ],
+)
+
gentbl_cc_library(
name = "AMDGPUPassIncGen",
tbl_outs = [
@@ -4614,6 +4684,7 @@ cc_library(
name = "AllExtensions",
hdrs = ["include/mlir/InitAllExtensions.h"],
deps = [
+ ":AMDGPUTransformOps",
":AffineTransformOps",
":ArithToLLVM",
":BufferizationTransformOps",
@@ -8961,6 +9032,7 @@ cc_library(
deps = [
":AMDGPUDialect",
":AMDGPUToROCDL",
+ ":AMDGPUTransformOps",
":AMDGPUTransforms",
":AMXDialect",
":AMXTransforms",